aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/gpu.tmpl12
-rw-r--r--arch/x86/kernel/early-quirks.c1
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/dvo.h3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c37
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c246
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c170
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h175
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c73
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c40
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h53
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c16
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c117
-rw-r--r--drivers/gpu/drm/i915/i915_params.c10
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2742
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h14
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c3
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c2
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c23
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c14
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c284
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c141
-rw-r--r--drivers/gpu/drm/i915/intel_display.c849
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1087
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c323
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c18
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h159
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c45
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c27
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c196
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c50
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c127
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h8
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h72
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c105
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c83
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c37
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c148
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h19
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c11
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c61
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c637
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c77
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c156
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c506
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c61
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c20
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c261
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--include/drm/i915_component.h69
-rw-r--r--include/drm/i915_pciids.h36
-rw-r--r--include/uapi/drm/i915_drm.h11
-rw-r--r--kernel/async.c1
68 files changed, 5268 insertions, 4316 deletions
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 944e65a87033..03f01e76add7 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -4177,17 +4177,21 @@ int num_ioctls;</synopsis>
4177 </sect2> 4177 </sect2>
4178 </sect1> 4178 </sect1>
4179 <sect1> 4179 <sect1>
4180 <title>GuC-based Command Submission</title> 4180 <title>GuC</title>
4181 <sect2> 4181 <sect2>
4182 <title>GuC</title> 4182 <title>GuC-specific firmware loader</title>
4183!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader 4183!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
4184!Idrivers/gpu/drm/i915/intel_guc_loader.c 4184!Idrivers/gpu/drm/i915/intel_guc_loader.c
4185 </sect2> 4185 </sect2>
4186 <sect2> 4186 <sect2>
4187 <title>GuC Client</title> 4187 <title>GuC-based command submission</title>
4188!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison 4188!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
4189!Idrivers/gpu/drm/i915/i915_guc_submission.c 4189!Idrivers/gpu/drm/i915/i915_guc_submission.c
4190 </sect2> 4190 </sect2>
4191 <sect2>
4192 <title>GuC Firmware Layout</title>
4193!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
4194 </sect2>
4191 </sect1> 4195 </sect1>
4192 4196
4193 <sect1> 4197 <sect1>
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index db9a675e751b..bca14c899137 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
547 INTEL_CHV_IDS(&chv_stolen_funcs), 547 INTEL_CHV_IDS(&chv_stolen_funcs),
548 INTEL_SKL_IDS(&gen9_stolen_funcs), 548 INTEL_SKL_IDS(&gen9_stolen_funcs),
549 INTEL_BXT_IDS(&gen9_stolen_funcs), 549 INTEL_BXT_IDS(&gen9_stolen_funcs),
550 INTEL_KBL_IDS(&gen9_stolen_funcs),
550}; 551};
551 552
552static void __init intel_graphics_stolen(int num, int slot, int func) 553static void __init intel_graphics_stolen(int num, int slot, int func)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 051eab33e4c7..fcd77b27514d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,6 +10,7 @@ config DRM_I915
10 # the shmem_readpage() which depends upon tmpfs 10 # the shmem_readpage() which depends upon tmpfs
11 select SHMEM 11 select SHMEM
12 select TMPFS 12 select TMPFS
13 select STOP_MACHINE
13 select DRM_KMS_HELPER 14 select DRM_KMS_HELPER
14 select DRM_PANEL 15 select DRM_PANEL
15 select DRM_MIPI_DSI 16 select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 44d290ae1999..0851de07bd13 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \
77 dvo_tfp410.o \ 77 dvo_tfp410.o \
78 intel_crt.o \ 78 intel_crt.o \
79 intel_ddi.o \ 79 intel_ddi.o \
80 intel_dp_link_training.o \
80 intel_dp_mst.o \ 81 intel_dp_mst.o \
81 intel_dp.o \ 82 intel_dp.o \
82 intel_dsi.o \ 83 intel_dsi.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0e2c1b9648a7..13dea4263554 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -32,7 +32,8 @@ struct intel_dvo_device {
32 const char *name; 32 const char *name;
33 int type; 33 int type;
34 /* DVOA/B/C output register */ 34 /* DVOA/B/C output register */
35 u32 dvo_reg; 35 i915_reg_t dvo_reg;
36 i915_reg_t dvo_srcdim_reg;
36 /* GPIO register used for i2c bus to control this device */ 37 /* GPIO register used for i2c bus to control this device */
37 u32 gpio; 38 u32 gpio;
38 int slave_addr; 39 int slave_addr;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index db58c8d664c2..814d894ed925 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
407 * LRI. 407 * LRI.
408 */ 408 */
409struct drm_i915_reg_descriptor { 409struct drm_i915_reg_descriptor {
410 u32 addr; 410 i915_reg_t addr;
411 u32 mask; 411 u32 mask;
412 u32 value; 412 u32 value;
413}; 413};
414 414
415/* Convenience macro for adding 32-bit registers. */ 415/* Convenience macro for adding 32-bit registers. */
416#define REG32(address, ...) \ 416#define REG32(_reg, ...) \
417 { .addr = address, __VA_ARGS__ } 417 { .addr = (_reg), __VA_ARGS__ }
418 418
419/* 419/*
420 * Convenience macro for adding 64-bit registers. 420 * Convenience macro for adding 64-bit registers.
@@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor {
423 * access commands only allow 32-bit accesses. Hence, we have to include 423 * access commands only allow 32-bit accesses. Hence, we have to include
424 * entries for both halves of the 64-bit registers. 424 * entries for both halves of the 64-bit registers.
425 */ 425 */
426#define REG64(addr) \ 426#define REG64(_reg) \
427 REG32(addr), REG32(addr + sizeof(u32)) 427 { .addr = _reg }, \
428 { .addr = _reg ## _UDW }
429
430#define REG64_IDX(_reg, idx) \
431 { .addr = _reg(idx) }, \
432 { .addr = _reg ## _UDW(idx) }
428 433
429static const struct drm_i915_reg_descriptor gen7_render_regs[] = { 434static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
430 REG64(GPGPU_THREADS_DISPATCHED), 435 REG64(GPGPU_THREADS_DISPATCHED),
@@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
451 REG32(GEN7_GPGPU_DISPATCHDIMX), 456 REG32(GEN7_GPGPU_DISPATCHDIMX),
452 REG32(GEN7_GPGPU_DISPATCHDIMY), 457 REG32(GEN7_GPGPU_DISPATCHDIMY),
453 REG32(GEN7_GPGPU_DISPATCHDIMZ), 458 REG32(GEN7_GPGPU_DISPATCHDIMZ),
454 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), 459 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
455 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), 460 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
456 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), 461 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
457 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)), 462 REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
458 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)), 463 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
459 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)), 464 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
460 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)), 465 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
461 REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)), 466 REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
462 REG32(GEN7_SO_WRITE_OFFSET(0)), 467 REG32(GEN7_SO_WRITE_OFFSET(0)),
463 REG32(GEN7_SO_WRITE_OFFSET(1)), 468 REG32(GEN7_SO_WRITE_OFFSET(1)),
464 REG32(GEN7_SO_WRITE_OFFSET(2)), 469 REG32(GEN7_SO_WRITE_OFFSET(2)),
@@ -592,7 +597,7 @@ static bool check_sorted(int ring_id,
592 bool ret = true; 597 bool ret = true;
593 598
594 for (i = 0; i < reg_count; i++) { 599 for (i = 0; i < reg_count; i++) {
595 u32 curr = reg_table[i].addr; 600 u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
596 601
597 if (curr < previous) { 602 if (curr < previous) {
598 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", 603 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table,
847 int i; 852 int i;
848 853
849 for (i = 0; i < count; i++) { 854 for (i = 0; i < count; i++) {
850 if (table[i].addr == addr) 855 if (i915_mmio_reg_offset(table[i].addr) == addr)
851 return &table[i]; 856 return &table[i];
852 } 857 }
853 } 858 }
@@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
1023 * to the register. Hence, limit OACONTROL writes to 1028 * to the register. Hence, limit OACONTROL writes to
1024 * only MI_LOAD_REGISTER_IMM commands. 1029 * only MI_LOAD_REGISTER_IMM commands.
1025 */ 1030 */
1026 if (reg_addr == OACONTROL) { 1031 if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
1027 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { 1032 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
1028 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); 1033 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
1029 return false; 1034 return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a3b22bdacd44..411a9c68b4ee 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1252 1252
1253 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : 1253 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1254 rp_state_cap >> 16) & 0xff; 1254 rp_state_cap >> 16) & 0xff;
1255 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1255 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1256 GEN9_FREQ_SCALER : 1);
1256 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1257 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1257 intel_gpu_freq(dev_priv, max_freq)); 1258 intel_gpu_freq(dev_priv, max_freq));
1258 1259
1259 max_freq = (rp_state_cap & 0xff00) >> 8; 1260 max_freq = (rp_state_cap & 0xff00) >> 8;
1260 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1261 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1262 GEN9_FREQ_SCALER : 1);
1261 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1263 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1262 intel_gpu_freq(dev_priv, max_freq)); 1264 intel_gpu_freq(dev_priv, max_freq));
1263 1265
1264 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : 1266 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1265 rp_state_cap >> 0) & 0xff; 1267 rp_state_cap >> 0) & 0xff;
1266 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1268 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1269 GEN9_FREQ_SCALER : 1);
1267 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1270 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1268 intel_gpu_freq(dev_priv, max_freq)); 1271 intel_gpu_freq(dev_priv, max_freq));
1269 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1272 seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m)
1523 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1526 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1524 } 1527 }
1525 1528
1526 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1529 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1527 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1530 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1528 1531
1529 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1532 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
@@ -1640,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1640 seq_puts(m, "FBC enabled\n"); 1643 seq_puts(m, "FBC enabled\n");
1641 else 1644 else
1642 seq_printf(m, "FBC disabled: %s\n", 1645 seq_printf(m, "FBC disabled: %s\n",
1643 intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); 1646 dev_priv->fbc.no_fbc_reason);
1644 1647
1645 if (INTEL_INFO(dev_priv)->gen >= 7) 1648 if (INTEL_INFO(dev_priv)->gen >= 7)
1646 seq_printf(m, "Compressing: %s\n", 1649 seq_printf(m, "Compressing: %s\n",
@@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1801 if (ret) 1804 if (ret)
1802 goto out; 1805 goto out;
1803 1806
1804 if (IS_SKYLAKE(dev)) { 1807 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1805 /* Convert GT frequency to 50 HZ units */ 1808 /* Convert GT frequency to 50 HZ units */
1806 min_gpu_freq = 1809 min_gpu_freq =
1807 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1810 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1821 &ia_freq); 1824 &ia_freq);
1822 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1825 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1823 intel_gpu_freq(dev_priv, (gpu_freq * 1826 intel_gpu_freq(dev_priv, (gpu_freq *
1824 (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), 1827 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1828 GEN9_FREQ_SCALER : 1))),
1825 ((ia_freq >> 0) & 0xff) * 100, 1829 ((ia_freq >> 0) & 0xff) * 100,
1826 ((ia_freq >> 8) & 0xff) * 100); 1830 ((ia_freq >> 8) & 0xff) * 100);
1827 } 1831 }
@@ -1873,17 +1877,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1873 struct drm_i915_private *dev_priv = dev->dev_private; 1877 struct drm_i915_private *dev_priv = dev->dev_private;
1874 1878
1875 ifbdev = dev_priv->fbdev; 1879 ifbdev = dev_priv->fbdev;
1876 fb = to_intel_framebuffer(ifbdev->helper.fb); 1880 if (ifbdev) {
1877 1881 fb = to_intel_framebuffer(ifbdev->helper.fb);
1878 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1882
1879 fb->base.width, 1883 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1880 fb->base.height, 1884 fb->base.width,
1881 fb->base.depth, 1885 fb->base.height,
1882 fb->base.bits_per_pixel, 1886 fb->base.depth,
1883 fb->base.modifier[0], 1887 fb->base.bits_per_pixel,
1884 atomic_read(&fb->base.refcount.refcount)); 1888 fb->base.modifier[0],
1885 describe_obj(m, fb->obj); 1889 atomic_read(&fb->base.refcount.refcount));
1886 seq_putc(m, '\n'); 1890 describe_obj(m, fb->obj);
1891 seq_putc(m, '\n');
1892 }
1887#endif 1893#endif
1888 1894
1889 mutex_lock(&dev->mode_config.fb_lock); 1895 mutex_lock(&dev->mode_config.fb_lock);
@@ -2402,6 +2408,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
2402 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2408 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2403 seq_printf(m, "\tversion found: %d.%d\n", 2409 seq_printf(m, "\tversion found: %d.%d\n",
2404 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2410 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2411 seq_printf(m, "\theader: offset is %d; size = %d\n",
2412 guc_fw->header_offset, guc_fw->header_size);
2413 seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2414 guc_fw->ucode_offset, guc_fw->ucode_size);
2415 seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2416 guc_fw->rsa_offset, guc_fw->rsa_size);
2405 2417
2406 tmp = I915_READ(GUC_STATUS); 2418 tmp = I915_READ(GUC_STATUS);
2407 2419
@@ -2550,7 +2562,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
2550 yesno(work_busy(&dev_priv->psr.work.work))); 2562 yesno(work_busy(&dev_priv->psr.work.work)));
2551 2563
2552 if (HAS_DDI(dev)) 2564 if (HAS_DDI(dev))
2553 enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 2565 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2554 else { 2566 else {
2555 for_each_pipe(dev_priv, pipe) { 2567 for_each_pipe(dev_priv, pipe) {
2556 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2568 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -2572,7 +2584,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
2572 2584
2573 /* CHV PSR has no kind of performance counter */ 2585 /* CHV PSR has no kind of performance counter */
2574 if (HAS_DDI(dev)) { 2586 if (HAS_DDI(dev)) {
2575 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 2587 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2576 EDP_PSR_PERF_CNT_MASK; 2588 EDP_PSR_PERF_CNT_MASK;
2577 2589
2578 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2590 seq_printf(m, "Performance_Counter: %u\n", psrperf);
@@ -2696,24 +2708,16 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
2696 return "TRANSCODER_C"; 2708 return "TRANSCODER_C";
2697 case POWER_DOMAIN_TRANSCODER_EDP: 2709 case POWER_DOMAIN_TRANSCODER_EDP:
2698 return "TRANSCODER_EDP"; 2710 return "TRANSCODER_EDP";
2699 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2711 case POWER_DOMAIN_PORT_DDI_A_LANES:
2700 return "PORT_DDI_A_2_LANES"; 2712 return "PORT_DDI_A_LANES";
2701 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2713 case POWER_DOMAIN_PORT_DDI_B_LANES:
2702 return "PORT_DDI_A_4_LANES"; 2714 return "PORT_DDI_B_LANES";
2703 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2715 case POWER_DOMAIN_PORT_DDI_C_LANES:
2704 return "PORT_DDI_B_2_LANES"; 2716 return "PORT_DDI_C_LANES";
2705 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2717 case POWER_DOMAIN_PORT_DDI_D_LANES:
2706 return "PORT_DDI_B_4_LANES"; 2718 return "PORT_DDI_D_LANES";
2707 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2719 case POWER_DOMAIN_PORT_DDI_E_LANES:
2708 return "PORT_DDI_C_2_LANES"; 2720 return "PORT_DDI_E_LANES";
2709 case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2710 return "PORT_DDI_C_4_LANES";
2711 case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2712 return "PORT_DDI_D_2_LANES";
2713 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2714 return "PORT_DDI_D_4_LANES";
2715 case POWER_DOMAIN_PORT_DDI_E_2_LANES:
2716 return "PORT_DDI_E_2_LANES";
2717 case POWER_DOMAIN_PORT_DSI: 2721 case POWER_DOMAIN_PORT_DSI:
2718 return "PORT_DSI"; 2722 return "PORT_DSI";
2719 case POWER_DOMAIN_PORT_CRT: 2723 case POWER_DOMAIN_PORT_CRT:
@@ -2734,6 +2738,10 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
2734 return "AUX_C"; 2738 return "AUX_C";
2735 case POWER_DOMAIN_AUX_D: 2739 case POWER_DOMAIN_AUX_D:
2736 return "AUX_D"; 2740 return "AUX_D";
2741 case POWER_DOMAIN_GMBUS:
2742 return "GMBUS";
2743 case POWER_DOMAIN_MODESET:
2744 return "MODESET";
2737 case POWER_DOMAIN_INIT: 2745 case POWER_DOMAIN_INIT:
2738 return "INIT"; 2746 return "INIT";
2739 default: 2747 default:
@@ -2777,6 +2785,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
2777 return 0; 2785 return 0;
2778} 2786}
2779 2787
2788static int i915_dmc_info(struct seq_file *m, void *unused)
2789{
2790 struct drm_info_node *node = m->private;
2791 struct drm_device *dev = node->minor->dev;
2792 struct drm_i915_private *dev_priv = dev->dev_private;
2793 struct intel_csr *csr;
2794
2795 if (!HAS_CSR(dev)) {
2796 seq_puts(m, "not supported\n");
2797 return 0;
2798 }
2799
2800 csr = &dev_priv->csr;
2801
2802 intel_runtime_pm_get(dev_priv);
2803
2804 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2805 seq_printf(m, "path: %s\n", csr->fw_path);
2806
2807 if (!csr->dmc_payload)
2808 goto out;
2809
2810 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2811 CSR_VERSION_MINOR(csr->version));
2812
2813 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2814 seq_printf(m, "DC3 -> DC5 count: %d\n",
2815 I915_READ(SKL_CSR_DC3_DC5_COUNT));
2816 seq_printf(m, "DC5 -> DC6 count: %d\n",
2817 I915_READ(SKL_CSR_DC5_DC6_COUNT));
2818 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2819 seq_printf(m, "DC3 -> DC5 count: %d\n",
2820 I915_READ(BXT_CSR_DC3_DC5_COUNT));
2821 }
2822
2823out:
2824 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2825 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2826 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2827
2828 intel_runtime_pm_put(dev_priv);
2829
2830 return 0;
2831}
2832
2780static void intel_seq_print_mode(struct seq_file *m, int tabs, 2833static void intel_seq_print_mode(struct seq_file *m, int tabs,
2781 struct drm_display_mode *mode) 2834 struct drm_display_mode *mode)
2782{ 2835{
@@ -2944,6 +2997,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2944 return cursor_active(dev, pipe); 2997 return cursor_active(dev, pipe);
2945} 2998}
2946 2999
3000static const char *plane_type(enum drm_plane_type type)
3001{
3002 switch (type) {
3003 case DRM_PLANE_TYPE_OVERLAY:
3004 return "OVL";
3005 case DRM_PLANE_TYPE_PRIMARY:
3006 return "PRI";
3007 case DRM_PLANE_TYPE_CURSOR:
3008 return "CUR";
3009 /*
3010 * Deliberately omitting default: to generate compiler warnings
3011 * when a new drm_plane_type gets added.
3012 */
3013 }
3014
3015 return "unknown";
3016}
3017
3018static const char *plane_rotation(unsigned int rotation)
3019{
3020 static char buf[48];
3021 /*
3022 * According to doc only one DRM_ROTATE_ is allowed but this
3023 * will print them all to visualize if the values are misused
3024 */
3025 snprintf(buf, sizeof(buf),
3026 "%s%s%s%s%s%s(0x%08x)",
3027 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
3028 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
3029 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
3030 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
3031 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
3032 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
3033 rotation);
3034
3035 return buf;
3036}
3037
3038static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3039{
3040 struct drm_info_node *node = m->private;
3041 struct drm_device *dev = node->minor->dev;
3042 struct intel_plane *intel_plane;
3043
3044 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3045 struct drm_plane_state *state;
3046 struct drm_plane *plane = &intel_plane->base;
3047
3048 if (!plane->state) {
3049 seq_puts(m, "plane->state is NULL!\n");
3050 continue;
3051 }
3052
3053 state = plane->state;
3054
3055 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3056 plane->base.id,
3057 plane_type(intel_plane->base.type),
3058 state->crtc_x, state->crtc_y,
3059 state->crtc_w, state->crtc_h,
3060 (state->src_x >> 16),
3061 ((state->src_x & 0xffff) * 15625) >> 10,
3062 (state->src_y >> 16),
3063 ((state->src_y & 0xffff) * 15625) >> 10,
3064 (state->src_w >> 16),
3065 ((state->src_w & 0xffff) * 15625) >> 10,
3066 (state->src_h >> 16),
3067 ((state->src_h & 0xffff) * 15625) >> 10,
3068 state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3069 plane_rotation(state->rotation));
3070 }
3071}
3072
3073static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3074{
3075 struct intel_crtc_state *pipe_config;
3076 int num_scalers = intel_crtc->num_scalers;
3077 int i;
3078
3079 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3080
3081 /* Not all platformas have a scaler */
3082 if (num_scalers) {
3083 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3084 num_scalers,
3085 pipe_config->scaler_state.scaler_users,
3086 pipe_config->scaler_state.scaler_id);
3087
3088 for (i = 0; i < SKL_NUM_SCALERS; i++) {
3089 struct intel_scaler *sc =
3090 &pipe_config->scaler_state.scalers[i];
3091
3092 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3093 i, yesno(sc->in_use), sc->mode);
3094 }
3095 seq_puts(m, "\n");
3096 } else {
3097 seq_puts(m, "\tNo scalers available on this platform\n");
3098 }
3099}
3100
2947static int i915_display_info(struct seq_file *m, void *unused) 3101static int i915_display_info(struct seq_file *m, void *unused)
2948{ 3102{
2949 struct drm_info_node *node = m->private; 3103 struct drm_info_node *node = m->private;
@@ -2963,10 +3117,12 @@ static int i915_display_info(struct seq_file *m, void *unused)
2963 3117
2964 pipe_config = to_intel_crtc_state(crtc->base.state); 3118 pipe_config = to_intel_crtc_state(crtc->base.state);
2965 3119
2966 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 3120 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2967 crtc->base.base.id, pipe_name(crtc->pipe), 3121 crtc->base.base.id, pipe_name(crtc->pipe),
2968 yesno(pipe_config->base.active), 3122 yesno(pipe_config->base.active),
2969 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3123 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3124 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3125
2970 if (pipe_config->base.active) { 3126 if (pipe_config->base.active) {
2971 intel_crtc_info(m, crtc); 3127 intel_crtc_info(m, crtc);
2972 3128
@@ -2976,6 +3132,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
2976 x, y, crtc->base.cursor->state->crtc_w, 3132 x, y, crtc->base.cursor->state->crtc_w,
2977 crtc->base.cursor->state->crtc_h, 3133 crtc->base.cursor->state->crtc_h,
2978 crtc->cursor_addr, yesno(active)); 3134 crtc->cursor_addr, yesno(active));
3135 intel_scaler_info(m, crtc);
3136 intel_plane_info(m, crtc);
2979 } 3137 }
2980 3138
2981 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3139 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -3110,7 +3268,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3110 3268
3111 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 3269 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
3112 for (i = 0; i < dev_priv->workarounds.count; ++i) { 3270 for (i = 0; i < dev_priv->workarounds.count; ++i) {
3113 u32 addr, mask, value, read; 3271 i915_reg_t addr;
3272 u32 mask, value, read;
3114 bool ok; 3273 bool ok;
3115 3274
3116 addr = dev_priv->workarounds.reg[i].addr; 3275 addr = dev_priv->workarounds.reg[i].addr;
@@ -3119,7 +3278,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3119 read = I915_READ(addr); 3278 read = I915_READ(addr);
3120 ok = (value & mask) == (read & mask); 3279 ok = (value & mask) == (read & mask);
3121 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3280 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3122 addr, value, mask, read, ok ? "OK" : "FAIL"); 3281 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3123 } 3282 }
3124 3283
3125 intel_runtime_pm_put(dev_priv); 3284 intel_runtime_pm_put(dev_priv);
@@ -5023,7 +5182,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
5023 5182
5024 stat->slice_total++; 5183 stat->slice_total++;
5025 5184
5026 if (IS_SKYLAKE(dev)) 5185 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5027 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 5186 ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5028 5187
5029 for (ss = 0; ss < ss_max; ss++) { 5188 for (ss = 0; ss < ss_max; ss++) {
@@ -5236,6 +5395,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
5236 {"i915_energy_uJ", i915_energy_uJ, 0}, 5395 {"i915_energy_uJ", i915_energy_uJ, 0},
5237 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5396 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
5238 {"i915_power_domain_info", i915_power_domain_info, 0}, 5397 {"i915_power_domain_info", i915_power_domain_info, 0},
5398 {"i915_dmc_info", i915_dmc_info, 0},
5239 {"i915_display_info", i915_display_info, 0}, 5399 {"i915_display_info", i915_display_info, 0},
5240 {"i915_semaphore_status", i915_semaphore_status, 0}, 5400 {"i915_semaphore_status", i915_semaphore_status, 0},
5241 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5401 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b4741d121a74..a81c76603544 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,7 +28,6 @@
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/async.h>
32#include <drm/drmP.h> 31#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
34#include <drm/drm_fb_helper.h> 33#include <drm/drm_fb_helper.h>
@@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
338 i915_resume_switcheroo(dev); 337 i915_resume_switcheroo(dev);
339 dev->switch_power_state = DRM_SWITCH_POWER_ON; 338 dev->switch_power_state = DRM_SWITCH_POWER_ON;
340 } else { 339 } else {
341 pr_err("switched off\n"); 340 pr_info("switched off\n");
342 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 341 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
343 i915_suspend_switcheroo(dev, pmm); 342 i915_suspend_switcheroo(dev, pmm);
344 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 343 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
396 if (ret) 395 if (ret)
397 goto cleanup_vga_switcheroo; 396 goto cleanup_vga_switcheroo;
398 397
399 intel_power_domains_init_hw(dev_priv); 398 intel_power_domains_init_hw(dev_priv, false);
399
400 intel_csr_ucode_init(dev_priv);
400 401
401 ret = intel_irq_install(dev_priv); 402 ret = intel_irq_install(dev_priv);
402 if (ret) 403 if (ret)
@@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
437 * scanning against hotplug events. Hence do this first and ignore the 438 * scanning against hotplug events. Hence do this first and ignore the
438 * tiny window where we will loose hotplug notifactions. 439 * tiny window where we will loose hotplug notifactions.
439 */ 440 */
440 async_schedule(intel_fbdev_initial_config, dev_priv); 441 intel_fbdev_initial_config_async(dev);
441 442
442 drm_kms_helper_poll_init(dev); 443 drm_kms_helper_poll_init(dev);
443 444
@@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
663 * supports EU power gating on devices with more than one EU 664 * supports EU power gating on devices with more than one EU
664 * pair per subslice. 665 * pair per subslice.
665 */ 666 */
666 info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); 667 info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
668 (info->slice_total > 1));
667 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); 669 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
668 info->has_eu_pg = (info->eu_per_subslice > 2); 670 info->has_eu_pg = (info->eu_per_subslice > 2);
669} 671}
@@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
890 spin_lock_init(&dev_priv->mmio_flip_lock); 892 spin_lock_init(&dev_priv->mmio_flip_lock);
891 mutex_init(&dev_priv->sb_lock); 893 mutex_init(&dev_priv->sb_lock);
892 mutex_init(&dev_priv->modeset_restore_lock); 894 mutex_init(&dev_priv->modeset_restore_lock);
893 mutex_init(&dev_priv->csr_lock);
894 mutex_init(&dev_priv->av_mutex); 895 mutex_init(&dev_priv->av_mutex);
895 896
896 intel_pm_setup(dev); 897 intel_pm_setup(dev);
@@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
937 938
938 intel_uncore_init(dev); 939 intel_uncore_init(dev);
939 940
940 /* Load CSR Firmware for SKL */
941 intel_csr_ucode_init(dev);
942
943 ret = i915_gem_gtt_init(dev); 941 ret = i915_gem_gtt_init(dev);
944 if (ret) 942 if (ret)
945 goto out_freecsr; 943 goto out_freecsr;
@@ -1113,7 +1111,7 @@ out_mtrrfree:
1113out_gtt: 1111out_gtt:
1114 i915_global_gtt_cleanup(dev); 1112 i915_global_gtt_cleanup(dev);
1115out_freecsr: 1113out_freecsr:
1116 intel_csr_ucode_fini(dev); 1114 intel_csr_ucode_fini(dev_priv);
1117 intel_uncore_fini(dev); 1115 intel_uncore_fini(dev);
1118 pci_iounmap(dev->pdev, dev_priv->regs); 1116 pci_iounmap(dev->pdev, dev_priv->regs);
1119put_bridge: 1117put_bridge:
@@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev)
1131 struct drm_i915_private *dev_priv = dev->dev_private; 1129 struct drm_i915_private *dev_priv = dev->dev_private;
1132 int ret; 1130 int ret;
1133 1131
1132 intel_fbdev_fini(dev);
1133
1134 i915_audio_component_cleanup(dev_priv); 1134 i915_audio_component_cleanup(dev_priv);
1135 1135
1136 ret = i915_gem_suspend(dev); 1136 ret = i915_gem_suspend(dev);
@@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev)
1153 1153
1154 acpi_video_unregister(); 1154 acpi_video_unregister();
1155 1155
1156 intel_fbdev_fini(dev);
1157
1158 drm_vblank_cleanup(dev); 1156 drm_vblank_cleanup(dev);
1159 1157
1160 intel_modeset_cleanup(dev); 1158 intel_modeset_cleanup(dev);
@@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev)
1196 intel_fbc_cleanup_cfb(dev_priv); 1194 intel_fbc_cleanup_cfb(dev_priv);
1197 i915_gem_cleanup_stolen(dev); 1195 i915_gem_cleanup_stolen(dev);
1198 1196
1199 intel_csr_ucode_fini(dev); 1197 intel_csr_ucode_fini(dev_priv);
1200 1198
1201 intel_teardown_gmbus(dev); 1199 intel_teardown_gmbus(dev);
1202 intel_teardown_mchbar(dev); 1200 intel_teardown_mchbar(dev);
@@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1264{ 1262{
1265 struct drm_i915_file_private *file_priv = file->driver_priv; 1263 struct drm_i915_file_private *file_priv = file->driver_priv;
1266 1264
1267 if (file_priv && file_priv->bsd_ring)
1268 file_priv->bsd_ring = NULL;
1269 kfree(file_priv); 1265 kfree(file_priv);
1270} 1266}
1271 1267
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 760e0ce4aa26..6344dfb72177 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -383,6 +383,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
383 383
384static const struct intel_device_info intel_broxton_info = { 384static const struct intel_device_info intel_broxton_info = {
385 .is_preliminary = 1, 385 .is_preliminary = 1,
386 .is_broxton = 1,
386 .gen = 9, 387 .gen = 9,
387 .need_gfx_hws = 1, .has_hotplug = 1, 388 .need_gfx_hws = 1, .has_hotplug = 1,
388 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 389 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -394,50 +395,81 @@ static const struct intel_device_info intel_broxton_info = {
394 IVB_CURSOR_OFFSETS, 395 IVB_CURSOR_OFFSETS,
395}; 396};
396 397
398static const struct intel_device_info intel_kabylake_info = {
399 .is_preliminary = 1,
400 .is_kabylake = 1,
401 .gen = 9,
402 .num_pipes = 3,
403 .need_gfx_hws = 1, .has_hotplug = 1,
404 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
405 .has_llc = 1,
406 .has_ddi = 1,
407 .has_fpga_dbg = 1,
408 .has_fbc = 1,
409 GEN_DEFAULT_PIPEOFFSETS,
410 IVB_CURSOR_OFFSETS,
411};
412
413static const struct intel_device_info intel_kabylake_gt3_info = {
414 .is_preliminary = 1,
415 .is_kabylake = 1,
416 .gen = 9,
417 .num_pipes = 3,
418 .need_gfx_hws = 1, .has_hotplug = 1,
419 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
420 .has_llc = 1,
421 .has_ddi = 1,
422 .has_fpga_dbg = 1,
423 .has_fbc = 1,
424 GEN_DEFAULT_PIPEOFFSETS,
425 IVB_CURSOR_OFFSETS,
426};
427
397/* 428/*
398 * Make sure any device matches here are from most specific to most 429 * Make sure any device matches here are from most specific to most
399 * general. For example, since the Quanta match is based on the subsystem 430 * general. For example, since the Quanta match is based on the subsystem
400 * and subvendor IDs, we need it to come before the more general IVB 431 * and subvendor IDs, we need it to come before the more general IVB
401 * PCI ID matches, otherwise we'll use the wrong info struct above. 432 * PCI ID matches, otherwise we'll use the wrong info struct above.
402 */ 433 */
403#define INTEL_PCI_IDS \ 434static const struct pci_device_id pciidlist[] = {
404 INTEL_I830_IDS(&intel_i830_info), \ 435 INTEL_I830_IDS(&intel_i830_info),
405 INTEL_I845G_IDS(&intel_845g_info), \ 436 INTEL_I845G_IDS(&intel_845g_info),
406 INTEL_I85X_IDS(&intel_i85x_info), \ 437 INTEL_I85X_IDS(&intel_i85x_info),
407 INTEL_I865G_IDS(&intel_i865g_info), \ 438 INTEL_I865G_IDS(&intel_i865g_info),
408 INTEL_I915G_IDS(&intel_i915g_info), \ 439 INTEL_I915G_IDS(&intel_i915g_info),
409 INTEL_I915GM_IDS(&intel_i915gm_info), \ 440 INTEL_I915GM_IDS(&intel_i915gm_info),
410 INTEL_I945G_IDS(&intel_i945g_info), \ 441 INTEL_I945G_IDS(&intel_i945g_info),
411 INTEL_I945GM_IDS(&intel_i945gm_info), \ 442 INTEL_I945GM_IDS(&intel_i945gm_info),
412 INTEL_I965G_IDS(&intel_i965g_info), \ 443 INTEL_I965G_IDS(&intel_i965g_info),
413 INTEL_G33_IDS(&intel_g33_info), \ 444 INTEL_G33_IDS(&intel_g33_info),
414 INTEL_I965GM_IDS(&intel_i965gm_info), \ 445 INTEL_I965GM_IDS(&intel_i965gm_info),
415 INTEL_GM45_IDS(&intel_gm45_info), \ 446 INTEL_GM45_IDS(&intel_gm45_info),
416 INTEL_G45_IDS(&intel_g45_info), \ 447 INTEL_G45_IDS(&intel_g45_info),
417 INTEL_PINEVIEW_IDS(&intel_pineview_info), \ 448 INTEL_PINEVIEW_IDS(&intel_pineview_info),
418 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ 449 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
419 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ 450 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
420 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ 451 INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
421 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ 452 INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
422 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ 453 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
423 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ 454 INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
424 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ 455 INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
425 INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 456 INTEL_HSW_D_IDS(&intel_haswell_d_info),
426 INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 457 INTEL_HSW_M_IDS(&intel_haswell_m_info),
427 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 458 INTEL_VLV_M_IDS(&intel_valleyview_m_info),
428 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ 459 INTEL_VLV_D_IDS(&intel_valleyview_d_info),
429 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ 460 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
430 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ 461 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
431 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ 462 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
432 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ 463 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
433 INTEL_CHV_IDS(&intel_cherryview_info), \ 464 INTEL_CHV_IDS(&intel_cherryview_info),
434 INTEL_SKL_GT1_IDS(&intel_skylake_info), \ 465 INTEL_SKL_GT1_IDS(&intel_skylake_info),
435 INTEL_SKL_GT2_IDS(&intel_skylake_info), \ 466 INTEL_SKL_GT2_IDS(&intel_skylake_info),
436 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ 467 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
437 INTEL_BXT_IDS(&intel_broxton_info) 468 INTEL_BXT_IDS(&intel_broxton_info),
438 469 INTEL_KBL_GT1_IDS(&intel_kabylake_info),
439static const struct pci_device_id pciidlist[] = { /* aka */ 470 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
440 INTEL_PCI_IDS, 471 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
472 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
441 {0, 0, 0} 473 {0, 0, 0}
442}; 474};
443 475
@@ -463,7 +495,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
463 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 495 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
464 ret = PCH_LPT; 496 ret = PCH_LPT;
465 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 497 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
466 } else if (IS_SKYLAKE(dev)) { 498 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
467 ret = PCH_SPT; 499 ret = PCH_SPT;
468 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 500 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
469 } 501 }
@@ -526,11 +558,13 @@ void intel_detect_pch(struct drm_device *dev)
526 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 558 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
527 dev_priv->pch_type = PCH_SPT; 559 dev_priv->pch_type = PCH_SPT;
528 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 560 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
529 WARN_ON(!IS_SKYLAKE(dev)); 561 WARN_ON(!IS_SKYLAKE(dev) &&
562 !IS_KABYLAKE(dev));
530 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 563 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
531 dev_priv->pch_type = PCH_SPT; 564 dev_priv->pch_type = PCH_SPT;
532 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 565 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
533 WARN_ON(!IS_SKYLAKE(dev)); 566 WARN_ON(!IS_SKYLAKE(dev) &&
567 !IS_KABYLAKE(dev));
534 } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { 568 } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
535 dev_priv->pch_type = intel_virt_detect_pch(dev); 569 dev_priv->pch_type = intel_virt_detect_pch(dev);
536 } else 570 } else
@@ -570,26 +604,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
570 return true; 604 return true;
571} 605}
572 606
573void i915_firmware_load_error_print(const char *fw_path, int err)
574{
575 DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
576
577 /*
578 * If the reason is not known assume -ENOENT since that's the most
579 * usual failure mode.
580 */
581 if (!err)
582 err = -ENOENT;
583
584 if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
585 return;
586
587 DRM_ERROR(
588 "The driver is built-in, so to load the firmware you need to\n"
589 "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
590 "in your initrd/initramfs image.\n");
591}
592
593static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 607static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
594{ 608{
595 struct drm_device *dev = dev_priv->dev; 609 struct drm_device *dev = dev_priv->dev;
@@ -608,7 +622,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
608static int intel_suspend_complete(struct drm_i915_private *dev_priv); 622static int intel_suspend_complete(struct drm_i915_private *dev_priv);
609static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 623static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
610 bool rpm_resume); 624 bool rpm_resume);
611static int skl_resume_prepare(struct drm_i915_private *dev_priv);
612static int bxt_resume_prepare(struct drm_i915_private *dev_priv); 625static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
613 626
614 627
@@ -679,6 +692,9 @@ static int i915_drm_suspend(struct drm_device *dev)
679 692
680 intel_display_set_init_power(dev_priv, false); 693 intel_display_set_init_power(dev_priv, false);
681 694
695 if (HAS_CSR(dev_priv))
696 flush_work(&dev_priv->csr.work);
697
682 return 0; 698 return 0;
683} 699}
684 700
@@ -687,10 +703,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
687 struct drm_i915_private *dev_priv = drm_dev->dev_private; 703 struct drm_i915_private *dev_priv = drm_dev->dev_private;
688 int ret; 704 int ret;
689 705
706 intel_power_domains_suspend(dev_priv);
707
690 ret = intel_suspend_complete(dev_priv); 708 ret = intel_suspend_complete(dev_priv);
691 709
692 if (ret) { 710 if (ret) {
693 DRM_ERROR("Suspend complete failed: %d\n", ret); 711 DRM_ERROR("Suspend complete failed: %d\n", ret);
712 intel_power_domains_init_hw(dev_priv, true);
694 713
695 return ret; 714 return ret;
696 } 715 }
@@ -838,13 +857,11 @@ static int i915_drm_resume_early(struct drm_device *dev)
838 857
839 if (IS_BROXTON(dev)) 858 if (IS_BROXTON(dev))
840 ret = bxt_resume_prepare(dev_priv); 859 ret = bxt_resume_prepare(dev_priv);
841 else if (IS_SKYLAKE(dev_priv))
842 ret = skl_resume_prepare(dev_priv);
843 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 860 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
844 hsw_disable_pc8(dev_priv); 861 hsw_disable_pc8(dev_priv);
845 862
846 intel_uncore_sanitize(dev); 863 intel_uncore_sanitize(dev);
847 intel_power_domains_init_hw(dev_priv); 864 intel_power_domains_init_hw(dev_priv, true);
848 865
849 return ret; 866 return ret;
850} 867}
@@ -1051,15 +1068,6 @@ static int i915_pm_resume(struct device *dev)
1051 return i915_drm_resume(drm_dev); 1068 return i915_drm_resume(drm_dev);
1052} 1069}
1053 1070
1054static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1055{
1056 /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1057
1058 skl_uninit_cdclk(dev_priv);
1059
1060 return 0;
1061}
1062
1063static int hsw_suspend_complete(struct drm_i915_private *dev_priv) 1071static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1064{ 1072{
1065 hsw_enable_pc8(dev_priv); 1073 hsw_enable_pc8(dev_priv);
@@ -1099,16 +1107,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1099 return 0; 1107 return 0;
1100} 1108}
1101 1109
1102static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1103{
1104 struct drm_device *dev = dev_priv->dev;
1105
1106 skl_init_cdclk(dev_priv);
1107 intel_csr_load_program(dev);
1108
1109 return 0;
1110}
1111
1112/* 1110/*
1113 * Save all Gunit registers that may be lost after a D3 and a subsequent 1111 * Save all Gunit registers that may be lost after a D3 and a subsequent
1114 * S0i[R123] transition. The list of registers needing a save/restore is 1112 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1572,8 +1570,6 @@ static int intel_runtime_resume(struct device *device)
1572 1570
1573 if (IS_BROXTON(dev)) 1571 if (IS_BROXTON(dev))
1574 ret = bxt_resume_prepare(dev_priv); 1572 ret = bxt_resume_prepare(dev_priv);
1575 else if (IS_SKYLAKE(dev))
1576 ret = skl_resume_prepare(dev_priv);
1577 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1573 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1578 hsw_disable_pc8(dev_priv); 1574 hsw_disable_pc8(dev_priv);
1579 else if (IS_VALLEYVIEW(dev_priv)) 1575 else if (IS_VALLEYVIEW(dev_priv))
@@ -1616,8 +1612,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1616 1612
1617 if (IS_BROXTON(dev_priv)) 1613 if (IS_BROXTON(dev_priv))
1618 ret = bxt_suspend_complete(dev_priv); 1614 ret = bxt_suspend_complete(dev_priv);
1619 else if (IS_SKYLAKE(dev_priv))
1620 ret = skl_suspend_complete(dev_priv);
1621 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1615 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1622 ret = hsw_suspend_complete(dev_priv); 1616 ret = hsw_suspend_complete(dev_priv);
1623 else if (IS_VALLEYVIEW(dev_priv)) 1617 else if (IS_VALLEYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fd88060b2596..15c6dc0b4f37 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -57,7 +57,7 @@
57 57
58#define DRIVER_NAME "i915" 58#define DRIVER_NAME "i915"
59#define DRIVER_DESC "Intel Graphics" 59#define DRIVER_DESC "Intel Graphics"
60#define DRIVER_DATE "20151010" 60#define DRIVER_DATE "20151120"
61 61
62#undef WARN_ON 62#undef WARN_ON
63/* Many gcc seem to no see through this and fall over :( */ 63/* Many gcc seem to no see through this and fall over :( */
@@ -180,15 +180,11 @@ enum intel_display_power_domain {
180 POWER_DOMAIN_TRANSCODER_B, 180 POWER_DOMAIN_TRANSCODER_B,
181 POWER_DOMAIN_TRANSCODER_C, 181 POWER_DOMAIN_TRANSCODER_C,
182 POWER_DOMAIN_TRANSCODER_EDP, 182 POWER_DOMAIN_TRANSCODER_EDP,
183 POWER_DOMAIN_PORT_DDI_A_2_LANES, 183 POWER_DOMAIN_PORT_DDI_A_LANES,
184 POWER_DOMAIN_PORT_DDI_A_4_LANES, 184 POWER_DOMAIN_PORT_DDI_B_LANES,
185 POWER_DOMAIN_PORT_DDI_B_2_LANES, 185 POWER_DOMAIN_PORT_DDI_C_LANES,
186 POWER_DOMAIN_PORT_DDI_B_4_LANES, 186 POWER_DOMAIN_PORT_DDI_D_LANES,
187 POWER_DOMAIN_PORT_DDI_C_2_LANES, 187 POWER_DOMAIN_PORT_DDI_E_LANES,
188 POWER_DOMAIN_PORT_DDI_C_4_LANES,
189 POWER_DOMAIN_PORT_DDI_D_2_LANES,
190 POWER_DOMAIN_PORT_DDI_D_4_LANES,
191 POWER_DOMAIN_PORT_DDI_E_2_LANES,
192 POWER_DOMAIN_PORT_DSI, 188 POWER_DOMAIN_PORT_DSI,
193 POWER_DOMAIN_PORT_CRT, 189 POWER_DOMAIN_PORT_CRT,
194 POWER_DOMAIN_PORT_OTHER, 190 POWER_DOMAIN_PORT_OTHER,
@@ -199,6 +195,8 @@ enum intel_display_power_domain {
199 POWER_DOMAIN_AUX_B, 195 POWER_DOMAIN_AUX_B,
200 POWER_DOMAIN_AUX_C, 196 POWER_DOMAIN_AUX_C,
201 POWER_DOMAIN_AUX_D, 197 POWER_DOMAIN_AUX_D,
198 POWER_DOMAIN_GMBUS,
199 POWER_DOMAIN_MODESET,
202 POWER_DOMAIN_INIT, 200 POWER_DOMAIN_INIT,
203 201
204 POWER_DOMAIN_NUM, 202 POWER_DOMAIN_NUM,
@@ -630,11 +628,9 @@ struct drm_i915_display_funcs {
630 int target, int refclk, 628 int target, int refclk,
631 struct dpll *match_clock, 629 struct dpll *match_clock,
632 struct dpll *best_clock); 630 struct dpll *best_clock);
631 int (*compute_pipe_wm)(struct intel_crtc *crtc,
632 struct drm_atomic_state *state);
633 void (*update_wm)(struct drm_crtc *crtc); 633 void (*update_wm)(struct drm_crtc *crtc);
634 void (*update_sprite_wm)(struct drm_plane *plane,
635 struct drm_crtc *crtc,
636 uint32_t sprite_width, uint32_t sprite_height,
637 int pixel_size, bool enable, bool scaled);
638 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 634 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
639 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 635 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
640 /* Returns the active state of the crtc, and if the crtc is active, 636 /* Returns the active state of the crtc, and if the crtc is active,
@@ -692,18 +688,18 @@ struct intel_uncore_funcs {
692 void (*force_wake_put)(struct drm_i915_private *dev_priv, 688 void (*force_wake_put)(struct drm_i915_private *dev_priv,
693 enum forcewake_domains domains); 689 enum forcewake_domains domains);
694 690
695 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); 691 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
696 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); 692 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
697 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); 693 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
698 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); 694 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
699 695
700 void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, 696 void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
701 uint8_t val, bool trace); 697 uint8_t val, bool trace);
702 void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, 698 void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
703 uint16_t val, bool trace); 699 uint16_t val, bool trace);
704 void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, 700 void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
705 uint32_t val, bool trace); 701 uint32_t val, bool trace);
706 void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, 702 void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
707 uint64_t val, bool trace); 703 uint64_t val, bool trace);
708}; 704};
709 705
@@ -720,11 +716,11 @@ struct intel_uncore {
720 enum forcewake_domain_id id; 716 enum forcewake_domain_id id;
721 unsigned wake_count; 717 unsigned wake_count;
722 struct timer_list timer; 718 struct timer_list timer;
723 u32 reg_set; 719 i915_reg_t reg_set;
724 u32 val_set; 720 u32 val_set;
725 u32 val_clear; 721 u32 val_clear;
726 u32 reg_ack; 722 i915_reg_t reg_ack;
727 u32 reg_post; 723 i915_reg_t reg_post;
728 u32 val_reset; 724 u32 val_reset;
729 } fw_domain[FW_DOMAIN_ID_COUNT]; 725 } fw_domain[FW_DOMAIN_ID_COUNT];
730}; 726};
@@ -739,20 +735,19 @@ struct intel_uncore {
739#define for_each_fw_domain(domain__, dev_priv__, i__) \ 735#define for_each_fw_domain(domain__, dev_priv__, i__) \
740 for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) 736 for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
741 737
742enum csr_state { 738#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
743 FW_UNINITIALIZED = 0, 739#define CSR_VERSION_MAJOR(version) ((version) >> 16)
744 FW_LOADED, 740#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
745 FW_FAILED
746};
747 741
748struct intel_csr { 742struct intel_csr {
743 struct work_struct work;
749 const char *fw_path; 744 const char *fw_path;
750 uint32_t *dmc_payload; 745 uint32_t *dmc_payload;
751 uint32_t dmc_fw_size; 746 uint32_t dmc_fw_size;
747 uint32_t version;
752 uint32_t mmio_count; 748 uint32_t mmio_count;
753 uint32_t mmioaddr[8]; 749 i915_reg_t mmioaddr[8];
754 uint32_t mmiodata[8]; 750 uint32_t mmiodata[8];
755 enum csr_state state;
756}; 751};
757 752
758#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 753#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -770,6 +765,8 @@ struct intel_csr {
770 func(is_valleyview) sep \ 765 func(is_valleyview) sep \
771 func(is_haswell) sep \ 766 func(is_haswell) sep \
772 func(is_skylake) sep \ 767 func(is_skylake) sep \
768 func(is_broxton) sep \
769 func(is_kabylake) sep \
773 func(is_preliminary) sep \ 770 func(is_preliminary) sep \
774 func(has_fbc) sep \ 771 func(has_fbc) sep \
775 func(has_pipe_cxsr) sep \ 772 func(has_pipe_cxsr) sep \
@@ -928,24 +925,7 @@ struct i915_fbc {
928 struct drm_framebuffer *fb; 925 struct drm_framebuffer *fb;
929 } *fbc_work; 926 } *fbc_work;
930 927
931 enum no_fbc_reason { 928 const char *no_fbc_reason;
932 FBC_OK, /* FBC is enabled */
933 FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
934 FBC_NO_OUTPUT, /* no outputs enabled to compress */
935 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
936 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
937 FBC_MODE_TOO_LARGE, /* mode too large for compression */
938 FBC_BAD_PLANE, /* fbc not supported on plane */
939 FBC_NOT_TILED, /* buffer not tiled */
940 FBC_MULTIPLE_PIPES, /* more than one pipe active */
941 FBC_MODULE_PARAM,
942 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
943 FBC_ROTATION, /* rotation is not supported */
944 FBC_IN_DBG_MASTER, /* kernel debugger is active */
945 FBC_BAD_STRIDE, /* stride is not supported */
946 FBC_PIXEL_RATE, /* pixel rate is too big */
947 FBC_PIXEL_FORMAT /* pixel format is invalid */
948 } no_fbc_reason;
949 929
950 bool (*fbc_enabled)(struct drm_i915_private *dev_priv); 930 bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
951 void (*enable_fbc)(struct intel_crtc *crtc); 931 void (*enable_fbc)(struct intel_crtc *crtc);
@@ -1019,7 +999,7 @@ struct intel_gmbus {
1019 struct i2c_adapter adapter; 999 struct i2c_adapter adapter;
1020 u32 force_bit; 1000 u32 force_bit;
1021 u32 reg0; 1001 u32 reg0;
1022 u32 gpio_reg; 1002 i915_reg_t gpio_reg;
1023 struct i2c_algo_bit_data bit_algo; 1003 struct i2c_algo_bit_data bit_algo;
1024 struct drm_i915_private *dev_priv; 1004 struct drm_i915_private *dev_priv;
1025}; 1005};
@@ -1668,7 +1648,7 @@ struct i915_frontbuffer_tracking {
1668}; 1648};
1669 1649
1670struct i915_wa_reg { 1650struct i915_wa_reg {
1671 u32 addr; 1651 i915_reg_t addr;
1672 u32 value; 1652 u32 value;
1673 /* bitmask representing WA bits */ 1653 /* bitmask representing WA bits */
1674 u32 mask; 1654 u32 mask;
@@ -1697,6 +1677,13 @@ struct i915_execbuffer_params {
1697 struct drm_i915_gem_request *request; 1677 struct drm_i915_gem_request *request;
1698}; 1678};
1699 1679
1680/* used in computing the new watermarks state */
1681struct intel_wm_config {
1682 unsigned int num_pipes_active;
1683 bool sprites_enabled;
1684 bool sprites_scaled;
1685};
1686
1700struct drm_i915_private { 1687struct drm_i915_private {
1701 struct drm_device *dev; 1688 struct drm_device *dev;
1702 struct kmem_cache *objects; 1689 struct kmem_cache *objects;
@@ -1717,9 +1704,6 @@ struct drm_i915_private {
1717 1704
1718 struct intel_csr csr; 1705 struct intel_csr csr;
1719 1706
1720 /* Display CSR-related protection */
1721 struct mutex csr_lock;
1722
1723 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 1707 struct intel_gmbus gmbus[GMBUS_NUM_PINS];
1724 1708
1725 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 1709 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
@@ -1734,6 +1718,8 @@ struct drm_i915_private {
1734 /* MMIO base address for MIPI regs */ 1718 /* MMIO base address for MIPI regs */
1735 uint32_t mipi_mmio_base; 1719 uint32_t mipi_mmio_base;
1736 1720
1721 uint32_t psr_mmio_base;
1722
1737 wait_queue_head_t gmbus_wait_queue; 1723 wait_queue_head_t gmbus_wait_queue;
1738 1724
1739 struct pci_dev *bridge_dev; 1725 struct pci_dev *bridge_dev;
@@ -1921,6 +1907,9 @@ struct drm_i915_private {
1921 */ 1907 */
1922 uint16_t skl_latency[8]; 1908 uint16_t skl_latency[8];
1923 1909
1910 /* Committed wm config */
1911 struct intel_wm_config config;
1912
1924 /* 1913 /*
1925 * The skl_wm_values structure is a bit too big for stack 1914 * The skl_wm_values structure is a bit too big for stack
1926 * allocation, so we keep the staging struct where we store 1915 * allocation, so we keep the staging struct where we store
@@ -2435,6 +2424,15 @@ struct drm_i915_cmd_table {
2435#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2424#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2436#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2425#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2437 2426
2427#define REVID_FOREVER 0xff
2428/*
2429 * Return true if revision is in range [since,until] inclusive.
2430 *
2431 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2432 */
2433#define IS_REVID(p, since, until) \
2434 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2435
2438#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2436#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
2439#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2437#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
2440#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2438#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
@@ -2461,7 +2459,8 @@ struct drm_i915_cmd_table {
2461#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2459#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2462#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2460#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2463#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2461#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2464#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) 2462#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
2463#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
2465#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2464#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
2466#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2465#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
2467 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2466 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2496,16 +2495,21 @@ struct drm_i915_cmd_table {
2496 2495
2497#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2496#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2498 2497
2499#define SKL_REVID_A0 (0x0) 2498#define SKL_REVID_A0 0x0
2500#define SKL_REVID_B0 (0x1) 2499#define SKL_REVID_B0 0x1
2501#define SKL_REVID_C0 (0x2) 2500#define SKL_REVID_C0 0x2
2502#define SKL_REVID_D0 (0x3) 2501#define SKL_REVID_D0 0x3
2503#define SKL_REVID_E0 (0x4) 2502#define SKL_REVID_E0 0x4
2504#define SKL_REVID_F0 (0x5) 2503#define SKL_REVID_F0 0x5
2504
2505#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2506
2507#define BXT_REVID_A0 0x0
2508#define BXT_REVID_A1 0x1
2509#define BXT_REVID_B0 0x3
2510#define BXT_REVID_C0 0x9
2505 2511
2506#define BXT_REVID_A0 (0x0) 2512#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
2507#define BXT_REVID_B0 (0x3)
2508#define BXT_REVID_C0 (0x9)
2509 2513
2510/* 2514/*
2511 * The genX designation typically refers to the render engine, so render 2515 * The genX designation typically refers to the render engine, so render
@@ -2577,10 +2581,10 @@ struct drm_i915_cmd_table {
2577#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2581#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
2578#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2582#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
2579 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2583 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
2580 IS_SKYLAKE(dev)) 2584 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
2581#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2585#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
2582 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2586 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
2583 IS_SKYLAKE(dev)) 2587 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
2584#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2588#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2585#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2589#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
2586 2590
@@ -2640,6 +2644,7 @@ struct i915_params {
2640 int panel_use_ssc; 2644 int panel_use_ssc;
2641 int vbt_sdvo_panel_type; 2645 int vbt_sdvo_panel_type;
2642 int enable_rc6; 2646 int enable_rc6;
2647 int enable_dc;
2643 int enable_fbc; 2648 int enable_fbc;
2644 int enable_ppgtt; 2649 int enable_ppgtt;
2645 int enable_execlists; 2650 int enable_execlists;
@@ -2688,7 +2693,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2688extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2693extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2689extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2694extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2690int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2695int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2691void i915_firmware_load_error_print(const char *fw_path, int err);
2692 2696
2693/* intel_hotplug.c */ 2697/* intel_hotplug.c */
2694void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2698void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
@@ -2995,8 +2999,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2995int __must_check 2999int __must_check
2996i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3000i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2997 u32 alignment, 3001 u32 alignment,
2998 struct intel_engine_cs *pipelined,
2999 struct drm_i915_gem_request **pipelined_request,
3000 const struct i915_ggtt_view *view); 3002 const struct i915_ggtt_view *view);
3001void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3003void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3002 const struct i915_ggtt_view *view); 3004 const struct i915_ggtt_view *view);
@@ -3351,7 +3353,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val);
3351extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3353extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3352 bool enable); 3354 bool enable);
3353extern void intel_detect_pch(struct drm_device *dev); 3355extern void intel_detect_pch(struct drm_device *dev);
3354extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
3355extern int intel_enable_rc6(const struct drm_device *dev); 3356extern int intel_enable_rc6(const struct drm_device *dev);
3356 3357
3357extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3358extern bool i915_semaphore_is_enabled(struct drm_device *dev);
@@ -3434,6 +3435,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3434#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3435#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3435#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3436#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
3436 3437
3438#define __raw_read(x, s) \
3439static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
3440 i915_reg_t reg) \
3441{ \
3442 return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
3443}
3444
3445#define __raw_write(x, s) \
3446static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
3447 i915_reg_t reg, uint##x##_t val) \
3448{ \
3449 write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
3450}
3451__raw_read(8, b)
3452__raw_read(16, w)
3453__raw_read(32, l)
3454__raw_read(64, q)
3455
3456__raw_write(8, b)
3457__raw_write(16, w)
3458__raw_write(32, l)
3459__raw_write(64, q)
3460
3461#undef __raw_read
3462#undef __raw_write
3463
3437/* These are untraced mmio-accessors that are only valid to be used inside 3464/* These are untraced mmio-accessors that are only valid to be used inside
3438 * criticial sections inside IRQ handlers where forcewake is explicitly 3465 * criticial sections inside IRQ handlers where forcewake is explicitly
3439 * controlled. 3466 * controlled.
@@ -3441,8 +3468,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3441 * Note: Should only be used between intel_uncore_forcewake_irqlock() and 3468 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
3442 * intel_uncore_forcewake_irqunlock(). 3469 * intel_uncore_forcewake_irqunlock().
3443 */ 3470 */
3444#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) 3471#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3445#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) 3472#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
3446#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3473#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
3447 3474
3448/* "Broadcast RGB" property */ 3475/* "Broadcast RGB" property */
@@ -3450,7 +3477,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3450#define INTEL_BROADCAST_RGB_FULL 1 3477#define INTEL_BROADCAST_RGB_FULL 1
3451#define INTEL_BROADCAST_RGB_LIMITED 2 3478#define INTEL_BROADCAST_RGB_LIMITED 2
3452 3479
3453static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) 3480static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
3454{ 3481{
3455 if (IS_VALLEYVIEW(dev)) 3482 if (IS_VALLEYVIEW(dev))
3456 return VLV_VGACNTRL; 3483 return VLV_VGACNTRL;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 91bb1fc27420..33adc8f8ab20 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2737,6 +2737,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2737static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2737static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2738 struct intel_engine_cs *ring) 2738 struct intel_engine_cs *ring)
2739{ 2739{
2740 struct intel_ringbuffer *buffer;
2741
2740 while (!list_empty(&ring->active_list)) { 2742 while (!list_empty(&ring->active_list)) {
2741 struct drm_i915_gem_object *obj; 2743 struct drm_i915_gem_object *obj;
2742 2744
@@ -2752,18 +2754,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2752 * are the ones that keep the context and ringbuffer backing objects 2754 * are the ones that keep the context and ringbuffer backing objects
2753 * pinned in place. 2755 * pinned in place.
2754 */ 2756 */
2755 while (!list_empty(&ring->execlist_queue)) {
2756 struct drm_i915_gem_request *submit_req;
2757 2757
2758 submit_req = list_first_entry(&ring->execlist_queue, 2758 if (i915.enable_execlists) {
2759 struct drm_i915_gem_request, 2759 spin_lock_irq(&ring->execlist_lock);
2760 execlist_link); 2760 while (!list_empty(&ring->execlist_queue)) {
2761 list_del(&submit_req->execlist_link); 2761 struct drm_i915_gem_request *submit_req;
2762
2763 submit_req = list_first_entry(&ring->execlist_queue,
2764 struct drm_i915_gem_request,
2765 execlist_link);
2766 list_del(&submit_req->execlist_link);
2762 2767
2763 if (submit_req->ctx != ring->default_context) 2768 if (submit_req->ctx != ring->default_context)
2764 intel_lr_context_unpin(submit_req); 2769 intel_lr_context_unpin(submit_req);
2765 2770
2766 i915_gem_request_unreference(submit_req); 2771 i915_gem_request_unreference(submit_req);
2772 }
2773 spin_unlock_irq(&ring->execlist_lock);
2767 } 2774 }
2768 2775
2769 /* 2776 /*
@@ -2782,6 +2789,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2782 2789
2783 i915_gem_request_retire(request); 2790 i915_gem_request_retire(request);
2784 } 2791 }
2792
2793 /* Having flushed all requests from all queues, we know that all
2794 * ringbuffers must now be empty. However, since we do not reclaim
2795 * all space when retiring the request (to prevent HEADs colliding
2796 * with rapid ringbuffer wraparound) the amount of available space
2797 * upon reset is less than when we start. Do one more pass over
2798 * all the ringbuffers to reset last_retired_head.
2799 */
2800 list_for_each_entry(buffer, &ring->buffers, link) {
2801 buffer->last_retired_head = buffer->tail;
2802 intel_ring_update_space(buffer);
2803 }
2785} 2804}
2786 2805
2787void i915_gem_reset(struct drm_device *dev) 2806void i915_gem_reset(struct drm_device *dev)
@@ -3826,7 +3845,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3826 * cacheline, whereas normally such cachelines would get 3845 * cacheline, whereas normally such cachelines would get
3827 * invalidated. 3846 * invalidated.
3828 */ 3847 */
3829 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) 3848 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
3830 return -ENODEV; 3849 return -ENODEV;
3831 3850
3832 level = I915_CACHE_LLC; 3851 level = I915_CACHE_LLC;
@@ -3869,17 +3888,11 @@ rpm_put:
3869int 3888int
3870i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3889i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3871 u32 alignment, 3890 u32 alignment,
3872 struct intel_engine_cs *pipelined,
3873 struct drm_i915_gem_request **pipelined_request,
3874 const struct i915_ggtt_view *view) 3891 const struct i915_ggtt_view *view)
3875{ 3892{
3876 u32 old_read_domains, old_write_domain; 3893 u32 old_read_domains, old_write_domain;
3877 int ret; 3894 int ret;
3878 3895
3879 ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
3880 if (ret)
3881 return ret;
3882
3883 /* Mark the pin_display early so that we account for the 3896 /* Mark the pin_display early so that we account for the
3884 * display coherency whilst setting up the cache domains. 3897 * display coherency whilst setting up the cache domains.
3885 */ 3898 */
@@ -4476,10 +4489,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4476{ 4489{
4477 struct i915_vma *vma; 4490 struct i915_vma *vma;
4478 list_for_each_entry(vma, &obj->vma_list, vma_link) { 4491 list_for_each_entry(vma, &obj->vma_list, vma_link) {
4479 if (i915_is_ggtt(vma->vm) && 4492 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4480 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 4493 vma->vm == vm)
4481 continue;
4482 if (vma->vm == vm)
4483 return vma; 4494 return vma;
4484 } 4495 }
4485 return NULL; 4496 return NULL;
@@ -4568,7 +4579,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4568 struct intel_engine_cs *ring = req->ring; 4579 struct intel_engine_cs *ring = req->ring;
4569 struct drm_device *dev = ring->dev; 4580 struct drm_device *dev = ring->dev;
4570 struct drm_i915_private *dev_priv = dev->dev_private; 4581 struct drm_i915_private *dev_priv = dev->dev_private;
4571 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4572 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; 4582 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4573 int i, ret; 4583 int i, ret;
4574 4584
@@ -4584,10 +4594,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4584 * here because no other code should access these registers other than 4594 * here because no other code should access these registers other than
4585 * at initialization time. 4595 * at initialization time.
4586 */ 4596 */
4587 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { 4597 for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
4588 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 4598 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4589 intel_ring_emit(ring, reg_base + i); 4599 intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
4590 intel_ring_emit(ring, remap_info[i/4]); 4600 intel_ring_emit(ring, remap_info[i]);
4591 } 4601 }
4592 4602
4593 intel_ring_advance(ring); 4603 intel_ring_advance(ring);
@@ -4755,18 +4765,9 @@ i915_gem_init_hw(struct drm_device *dev)
4755 if (HAS_GUC_UCODE(dev)) { 4765 if (HAS_GUC_UCODE(dev)) {
4756 ret = intel_guc_ucode_load(dev); 4766 ret = intel_guc_ucode_load(dev);
4757 if (ret) { 4767 if (ret) {
4758 /* 4768 DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
4759 * If we got an error and GuC submission is enabled, map 4769 ret = -EIO;
4760 * the error to -EIO so the GPU will be declared wedged. 4770 goto out;
4761 * OTOH, if we didn't intend to use the GuC anyway, just
4762 * discard the error and carry on.
4763 */
4764 DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
4765 i915.enable_guc_submission ? "" :
4766 " (ignored)");
4767 ret = i915.enable_guc_submission ? -EIO : 0;
4768 if (ret)
4769 goto out;
4770 } 4771 }
4771 } 4772 }
4772 4773
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..4b9400402aa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -556,7 +556,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
556 if (signaller == ring) 556 if (signaller == ring)
557 continue; 557 continue;
558 558
559 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); 559 intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
560 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); 560 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
561 } 561 }
562 } 562 }
@@ -581,7 +581,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
581 if (signaller == ring) 581 if (signaller == ring)
582 continue; 582 continue;
583 583
584 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); 584 intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
585 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); 585 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
586 } 586 }
587 } 587 }
@@ -925,6 +925,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
925 case I915_CONTEXT_PARAM_NO_ZEROMAP: 925 case I915_CONTEXT_PARAM_NO_ZEROMAP:
926 args->value = ctx->flags & CONTEXT_NO_ZEROMAP; 926 args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
927 break; 927 break;
928 case I915_CONTEXT_PARAM_GTT_SIZE:
929 if (ctx->ppgtt)
930 args->value = ctx->ppgtt->base.total;
931 else if (to_i915(dev)->mm.aliasing_ppgtt)
932 args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
933 else
934 args->value = to_i915(dev)->gtt.base.total;
935 break;
928 default: 936 default:
929 ret = -EINVAL; 937 ret = -EINVAL;
930 break; 938 break;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6ed7d63a0688..a4c243cec4aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1114 1114
1115 for (i = 0; i < 4; i++) { 1115 for (i = 0; i < 4; i++) {
1116 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 1116 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1117 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); 1117 intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1118 intel_ring_emit(ring, 0); 1118 intel_ring_emit(ring, 0);
1119 } 1119 }
1120 1120
@@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1241 1241
1242 intel_ring_emit(ring, MI_NOOP); 1242 intel_ring_emit(ring, MI_NOOP);
1243 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 1243 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1244 intel_ring_emit(ring, INSTPM); 1244 intel_ring_emit_reg(ring, INSTPM);
1245 intel_ring_emit(ring, instp_mask << 16 | instp_mode); 1245 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1246 intel_ring_advance(ring); 1246 intel_ring_advance(ring);
1247 1247
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 40a10b25956c..b80d0456fe03 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
59 struct drm_i915_gem_object *obj) 59 struct drm_i915_gem_object *obj)
60{ 60{
61 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = dev->dev_private;
62 int fence_reg_lo, fence_reg_hi; 62 i915_reg_t fence_reg_lo, fence_reg_hi;
63 int fence_pitch_shift; 63 int fence_pitch_shift;
64 64
65 if (INTEL_INFO(dev)->gen >= 6) { 65 if (INTEL_INFO(dev)->gen >= 6) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43f35d12b677..1f7e6b9df45d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/stop_machine.h>
27#include <drm/drmP.h> 28#include <drm/drmP.h>
28#include <drm/i915_drm.h> 29#include <drm/i915_drm.h>
29#include "i915_drv.h" 30#include "i915_drv.h"
@@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
104{ 105{
105 bool has_aliasing_ppgtt; 106 bool has_aliasing_ppgtt;
106 bool has_full_ppgtt; 107 bool has_full_ppgtt;
108 bool has_full_48bit_ppgtt;
107 109
108 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 110 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
109 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 111 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
112 has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
110 113
111 if (intel_vgpu_active(dev)) 114 if (intel_vgpu_active(dev))
112 has_full_ppgtt = false; /* emulation is too hard */ 115 has_full_ppgtt = false; /* emulation is too hard */
@@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
125 if (enable_ppgtt == 2 && has_full_ppgtt) 128 if (enable_ppgtt == 2 && has_full_ppgtt)
126 return 2; 129 return 2;
127 130
131 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
132 return 3;
133
128#ifdef CONFIG_INTEL_IOMMU 134#ifdef CONFIG_INTEL_IOMMU
129 /* Disable ppgtt on SNB if VT-d is on. */ 135 /* Disable ppgtt on SNB if VT-d is on. */
130 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 136 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
@@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
141 } 147 }
142 148
143 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) 149 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
144 return 2; 150 return has_full_48bit_ppgtt ? 3 : 2;
145 else 151 else
146 return has_aliasing_ppgtt ? 1 : 0; 152 return has_aliasing_ppgtt ? 1 : 0;
147} 153}
@@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
661 return ret; 667 return ret;
662 668
663 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 669 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
664 intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); 670 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
665 intel_ring_emit(ring, upper_32_bits(addr)); 671 intel_ring_emit(ring, upper_32_bits(addr));
666 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 672 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
667 intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); 673 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
668 intel_ring_emit(ring, lower_32_bits(addr)); 674 intel_ring_emit(ring, lower_32_bits(addr));
669 intel_ring_advance(ring); 675 intel_ring_advance(ring);
670 676
@@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
904 enum vgt_g2v_type msg; 910 enum vgt_g2v_type msg;
905 struct drm_device *dev = ppgtt->base.dev; 911 struct drm_device *dev = ppgtt->base.dev;
906 struct drm_i915_private *dev_priv = dev->dev_private; 912 struct drm_i915_private *dev_priv = dev->dev_private;
907 unsigned int offset = vgtif_reg(pdp0_lo);
908 int i; 913 int i;
909 914
910 if (USES_FULL_48BIT_PPGTT(dev)) { 915 if (USES_FULL_48BIT_PPGTT(dev)) {
911 u64 daddr = px_dma(&ppgtt->pml4); 916 u64 daddr = px_dma(&ppgtt->pml4);
912 917
913 I915_WRITE(offset, lower_32_bits(daddr)); 918 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
914 I915_WRITE(offset + 4, upper_32_bits(daddr)); 919 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
915 920
916 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : 921 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
917 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); 922 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
@@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
919 for (i = 0; i < GEN8_LEGACY_PDPES; i++) { 924 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
920 u64 daddr = i915_page_dir_dma_addr(ppgtt, i); 925 u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
921 926
922 I915_WRITE(offset, lower_32_bits(daddr)); 927 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
923 I915_WRITE(offset + 4, upper_32_bits(daddr)); 928 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
924
925 offset += 8;
926 } 929 }
927 930
928 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : 931 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
@@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1662 return ret; 1665 return ret;
1663 1666
1664 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); 1667 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1665 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); 1668 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1666 intel_ring_emit(ring, PP_DIR_DCLV_2G); 1669 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1667 intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); 1670 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1668 intel_ring_emit(ring, get_pd_offset(ppgtt)); 1671 intel_ring_emit(ring, get_pd_offset(ppgtt));
1669 intel_ring_emit(ring, MI_NOOP); 1672 intel_ring_emit(ring, MI_NOOP);
1670 intel_ring_advance(ring); 1673 intel_ring_advance(ring);
@@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1699 return ret; 1702 return ret;
1700 1703
1701 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); 1704 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1702 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); 1705 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1703 intel_ring_emit(ring, PP_DIR_DCLV_2G); 1706 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1704 intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); 1707 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1705 intel_ring_emit(ring, get_pd_offset(ppgtt)); 1708 intel_ring_emit(ring, get_pd_offset(ppgtt));
1706 intel_ring_emit(ring, MI_NOOP); 1709 intel_ring_emit(ring, MI_NOOP);
1707 intel_ring_advance(ring); 1710 intel_ring_advance(ring);
@@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma,
2528 return 0; 2531 return 0;
2529} 2532}
2530 2533
2534struct ggtt_bind_vma__cb {
2535 struct i915_vma *vma;
2536 enum i915_cache_level cache_level;
2537 u32 flags;
2538};
2539
2540static int ggtt_bind_vma__cb(void *_arg)
2541{
2542 struct ggtt_bind_vma__cb *arg = _arg;
2543 return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
2544}
2545
2546static int ggtt_bind_vma__BKL(struct i915_vma *vma,
2547 enum i915_cache_level cache_level,
2548 u32 flags)
2549{
2550 struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
2551 return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
2552}
2553
2531static int aliasing_gtt_bind_vma(struct i915_vma *vma, 2554static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2532 enum i915_cache_level cache_level, 2555 enum i915_cache_level cache_level,
2533 u32 flags) 2556 u32 flags)
@@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev,
2995 dev_priv->gtt.base.bind_vma = ggtt_bind_vma; 3018 dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
2996 dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; 3019 dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
2997 3020
3021 if (IS_CHERRYVIEW(dev))
3022 dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
3023
2998 return ret; 3024 return ret;
2999} 3025}
3000 3026
@@ -3302,7 +3328,7 @@ static struct sg_table *
3302intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, 3328intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
3303 struct drm_i915_gem_object *obj) 3329 struct drm_i915_gem_object *obj)
3304{ 3330{
3305 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; 3331 struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
3306 unsigned int size_pages = rot_info->size >> PAGE_SHIFT; 3332 unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3307 unsigned int size_pages_uv; 3333 unsigned int size_pages_uv;
3308 struct sg_page_iter sg_iter; 3334 struct sg_page_iter sg_iter;
@@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3534 if (view->type == I915_GGTT_VIEW_NORMAL) { 3560 if (view->type == I915_GGTT_VIEW_NORMAL) {
3535 return obj->base.size; 3561 return obj->base.size;
3536 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 3562 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
3537 return view->rotation_info.size; 3563 return view->params.rotation_info.size;
3538 } else if (view->type == I915_GGTT_VIEW_PARTIAL) { 3564 } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
3539 return view->params.partial.size << PAGE_SHIFT; 3565 return view->params.partial.size << PAGE_SHIFT;
3540 } else { 3566 } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a216397ead52..877c32c78a6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -156,13 +156,10 @@ struct i915_ggtt_view {
156 u64 offset; 156 u64 offset;
157 unsigned int size; 157 unsigned int size;
158 } partial; 158 } partial;
159 struct intel_rotation_info rotation_info;
159 } params; 160 } params;
160 161
161 struct sg_table *pages; 162 struct sg_table *pages;
162
163 union {
164 struct intel_rotation_info rotation_info;
165 };
166}; 163};
167 164
168extern const struct i915_ggtt_view i915_ggtt_view_normal; 165extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
556 553
557 if (a->type != b->type) 554 if (a->type != b->type)
558 return false; 555 return false;
559 if (a->type == I915_GGTT_VIEW_PARTIAL) 556 if (a->type != I915_GGTT_VIEW_NORMAL)
560 return !memcmp(&a->params, &b->params, sizeof(a->params)); 557 return !memcmp(&a->params, &b->params, sizeof(a->params));
561 return true; 558 return true;
562} 559}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cdacf3f5b77a..598ed2facf85 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
433 &reserved_size); 433 &reserved_size);
434 break; 434 break;
435 default: 435 default:
436 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) 436 if (IS_BROADWELL(dev_priv) ||
437 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
437 bdw_get_stolen_reserved(dev_priv, &reserved_base, 438 bdw_get_stolen_reserved(dev_priv, &reserved_base,
438 &reserved_size); 439 &reserved_size);
439 else 440 else
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8a6717cc265c..7410f6c962e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
176 return -EINVAL; 176 return -EINVAL;
177 } 177 }
178 178
179 intel_runtime_pm_get(dev_priv);
180
179 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
180 if (obj->pin_display || obj->framebuffer_references) { 182 if (obj->pin_display || obj->framebuffer_references) {
181 ret = -EBUSY; 183 ret = -EBUSY;
@@ -269,6 +271,8 @@ err:
269 drm_gem_object_unreference(&obj->base); 271 drm_gem_object_unreference(&obj->base);
270 mutex_unlock(&dev->struct_mutex); 272 mutex_unlock(&dev->struct_mutex);
271 273
274 intel_runtime_pm_put(dev_priv);
275
272 return ret; 276 return ret;
273} 277}
274 278
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2f04e4f2ff35..06ca4082735b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
366 err_printf(m, "Suspend count: %u\n", error->suspend_count); 366 err_printf(m, "Suspend count: %u\n", error->suspend_count);
367 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); 367 err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
368 err_printf(m, "IOMMU enabled?: %d\n", error->iommu); 368 err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
369
370 if (HAS_CSR(dev)) {
371 struct intel_csr *csr = &dev_priv->csr;
372
373 err_printf(m, "DMC loaded: %s\n",
374 yesno(csr->dmc_payload != NULL));
375 err_printf(m, "DMC fw version: %d.%d\n",
376 CSR_VERSION_MAJOR(csr->version),
377 CSR_VERSION_MINOR(csr->version));
378 }
379
369 err_printf(m, "EIR: 0x%08x\n", error->eir); 380 err_printf(m, "EIR: 0x%08x\n", error->eir);
370 err_printf(m, "IER: 0x%08x\n", error->ier); 381 err_printf(m, "IER: 0x%08x\n", error->ier);
371 if (INTEL_INFO(dev)->gen >= 8) { 382 if (INTEL_INFO(dev)->gen >= 8) {
@@ -862,7 +873,7 @@ static void i915_record_ring_state(struct drm_device *dev,
862 struct drm_i915_private *dev_priv = dev->dev_private; 873 struct drm_i915_private *dev_priv = dev->dev_private;
863 874
864 if (INTEL_INFO(dev)->gen >= 6) { 875 if (INTEL_INFO(dev)->gen >= 6) {
865 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); 876 ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
866 ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); 877 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
867 if (INTEL_INFO(dev)->gen >= 8) 878 if (INTEL_INFO(dev)->gen >= 8)
868 gen8_record_semaphore_state(dev_priv, error, ring, ering); 879 gen8_record_semaphore_state(dev_priv, error, ring, ering);
@@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev,
899 ering->ctl = I915_READ_CTL(ring); 910 ering->ctl = I915_READ_CTL(ring);
900 911
901 if (I915_NEED_GFX_HWS(dev)) { 912 if (I915_NEED_GFX_HWS(dev)) {
902 int mmio; 913 i915_reg_t mmio;
903 914
904 if (IS_GEN7(dev)) { 915 if (IS_GEN7(dev)) {
905 switch (ring->id) { 916 switch (ring->id) {
@@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev,
1071 list_for_each_entry(request, &ring->request_list, list) { 1082 list_for_each_entry(request, &ring->request_list, list) {
1072 struct drm_i915_error_request *erq; 1083 struct drm_i915_error_request *erq;
1073 1084
1085 if (count >= error->ring[i].num_requests) {
1086 /*
1087 * If the ring request list was changed in
1088 * between the point where the error request
1089 * list was created and dimensioned and this
1090 * point then just exit early to avoid crashes.
1091 *
1092 * We don't need to communicate that the
1093 * request list changed state during error
1094 * state capture and that the error state is
1095 * slightly incorrect as a consequence since we
1096 * are typically only interested in the request
1097 * list state at the point of error state
1098 * capture, not in any changes happening during
1099 * the capture.
1100 */
1101 break;
1102 }
1103
1074 erq = &error->ring[i].requests[count++]; 1104 erq = &error->ring[i].requests[count++];
1075 erq->seqno = request->seqno; 1105 erq->seqno = request->seqno;
1076 erq->jiffies = request->emitted_jiffies; 1106 erq->jiffies = request->emitted_jiffies;
@@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1181 if (IS_VALLEYVIEW(dev)) { 1211 if (IS_VALLEYVIEW(dev)) {
1182 error->gtier[0] = I915_READ(GTIER); 1212 error->gtier[0] = I915_READ(GTIER);
1183 error->ier = I915_READ(VLV_IER); 1213 error->ier = I915_READ(VLV_IER);
1184 error->forcewake = I915_READ(FORCEWAKE_VLV); 1214 error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
1185 } 1215 }
1186 1216
1187 if (IS_GEN7(dev)) 1217 if (IS_GEN7(dev))
@@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1193 } 1223 }
1194 1224
1195 if (IS_GEN6(dev)) { 1225 if (IS_GEN6(dev)) {
1196 error->forcewake = I915_READ(FORCEWAKE); 1226 error->forcewake = I915_READ_FW(FORCEWAKE);
1197 error->gab_ctl = I915_READ(GAB_CTL); 1227 error->gab_ctl = I915_READ(GAB_CTL);
1198 error->gfx_mode = I915_READ(GFX_MODE); 1228 error->gfx_mode = I915_READ(GFX_MODE);
1199 } 1229 }
1200 1230
1201 /* 2: Registers which belong to multiple generations */ 1231 /* 2: Registers which belong to multiple generations */
1202 if (INTEL_INFO(dev)->gen >= 7) 1232 if (INTEL_INFO(dev)->gen >= 7)
1203 error->forcewake = I915_READ(FORCEWAKE_MT); 1233 error->forcewake = I915_READ_FW(FORCEWAKE_MT);
1204 1234
1205 if (INTEL_INFO(dev)->gen >= 6) { 1235 if (INTEL_INFO(dev)->gen >= 6) {
1206 error->derrmr = I915_READ(DERRMR); 1236 error->derrmr = I915_READ(DERRMR);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index c4cb1c0c4d0d..685c7991e24f 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -26,7 +26,7 @@
26 26
27/* Definitions of GuC H/W registers, bits, etc */ 27/* Definitions of GuC H/W registers, bits, etc */
28 28
29#define GUC_STATUS 0xc000 29#define GUC_STATUS _MMIO(0xc000)
30#define GS_BOOTROM_SHIFT 1 30#define GS_BOOTROM_SHIFT 1
31#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) 31#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
32#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) 32#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
@@ -39,40 +39,41 @@
39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) 39#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
40#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) 40#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
41 41
42#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) 42#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
43 43
44#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) 44#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
45#define DMA_ADDR_0_LOW 0xc300 45#define UOS_RSA_SCRATCH_MAX_COUNT 64
46#define DMA_ADDR_0_HIGH 0xc304 46#define DMA_ADDR_0_LOW _MMIO(0xc300)
47#define DMA_ADDR_1_LOW 0xc308 47#define DMA_ADDR_0_HIGH _MMIO(0xc304)
48#define DMA_ADDR_1_HIGH 0xc30c 48#define DMA_ADDR_1_LOW _MMIO(0xc308)
49#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
49#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) 50#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
50#define DMA_ADDRESS_SPACE_GTT (8 << 16) 51#define DMA_ADDRESS_SPACE_GTT (8 << 16)
51#define DMA_COPY_SIZE 0xc310 52#define DMA_COPY_SIZE _MMIO(0xc310)
52#define DMA_CTRL 0xc314 53#define DMA_CTRL _MMIO(0xc314)
53#define UOS_MOVE (1<<4) 54#define UOS_MOVE (1<<4)
54#define START_DMA (1<<0) 55#define START_DMA (1<<0)
55#define DMA_GUC_WOPCM_OFFSET 0xc340 56#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
56#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ 57#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
57#define GUC_MAX_IDLE_COUNT 0xC3E4 58#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
58 59
59#define GUC_WOPCM_SIZE 0xc050 60#define GUC_WOPCM_SIZE _MMIO(0xc050)
60#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ 61#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
61 62
62/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ 63/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
63#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) 64#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
64 65
65#define GEN8_GT_PM_CONFIG 0x138140 66#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
66#define GEN9LP_GT_PM_CONFIG 0x138140 67#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
67#define GEN9_GT_PM_CONFIG 0x13816c 68#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
68#define GT_DOORBELL_ENABLE (1<<0) 69#define GT_DOORBELL_ENABLE (1<<0)
69 70
70#define GEN8_GTCR 0x4274 71#define GEN8_GTCR _MMIO(0x4274)
71#define GEN8_GTCR_INVALIDATE (1<<0) 72#define GEN8_GTCR_INVALIDATE (1<<0)
72 73
73#define GUC_ARAT_C6DIS 0xA178 74#define GUC_ARAT_C6DIS _MMIO(0xA178)
74 75
75#define GUC_SHIM_CONTROL 0xc064 76#define GUC_SHIM_CONTROL _MMIO(0xc064)
76#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) 77#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
77#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) 78#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
78#define GUC_ENABLE_MIA_CACHING (1<<2) 79#define GUC_ENABLE_MIA_CACHING (1<<2)
@@ -89,21 +90,21 @@
89 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ 90 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
90 GUC_ENABLE_MIA_CLOCK_GATING) 91 GUC_ENABLE_MIA_CLOCK_GATING)
91 92
92#define HOST2GUC_INTERRUPT 0xc4c8 93#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
93#define HOST2GUC_TRIGGER (1<<0) 94#define HOST2GUC_TRIGGER (1<<0)
94 95
95#define DRBMISC1 0x1984 96#define DRBMISC1 0x1984
96#define DOORBELL_ENABLE (1<<0) 97#define DOORBELL_ENABLE (1<<0)
97 98
98#define GEN8_DRBREGL(x) (0x1000 + (x) * 8) 99#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
99#define GEN8_DRB_VALID (1<<0) 100#define GEN8_DRB_VALID (1<<0)
100#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4) 101#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
101 102
102#define DE_GUCRMR 0x44054 103#define DE_GUCRMR _MMIO(0x44054)
103 104
104#define GUC_BCS_RCS_IER 0xC550 105#define GUC_BCS_RCS_IER _MMIO(0xC550)
105#define GUC_VCS2_VCS1_IER 0xC554 106#define GUC_VCS2_VCS1_IER _MMIO(0xC554)
106#define GUC_WD_VECS_IER 0xC558 107#define GUC_WD_VECS_IER _MMIO(0xC558)
107#define GUC_PM_P24C_IER 0xC55C 108#define GUC_PM_P24C_IER _MMIO(0xC55C)
108 109
109#endif 110#endif
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 036b42bae827..ed9f1002ab36 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -27,7 +27,7 @@
27#include "intel_guc.h" 27#include "intel_guc.h"
28 28
29/** 29/**
30 * DOC: GuC Client 30 * DOC: GuC-based command submission
31 * 31 *
32 * i915_guc_client: 32 * i915_guc_client:
33 * We use the term client to avoid confusion with contexts. A i915_guc_client is 33 * We use the term client to avoid confusion with contexts. A i915_guc_client is
@@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
161 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 161 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
162 /* WaRsDisableCoarsePowerGating:skl,bxt */ 162 /* WaRsDisableCoarsePowerGating:skl,bxt */
163 if (!intel_enable_rc6(dev_priv->dev) || 163 if (!intel_enable_rc6(dev_priv->dev) ||
164 (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 164 IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
165 (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || 165 (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
166 (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) 166 (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
167 data[1] = 0; 167 data[1] = 0;
168 else 168 else
169 /* bit 0 and 1 are for Render and Media domain separately */ 169 /* bit 0 and 1 are for Render and Media domain separately */
@@ -258,7 +258,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
258 struct drm_i915_private *dev_priv = guc_to_i915(guc); 258 struct drm_i915_private *dev_priv = guc_to_i915(guc);
259 struct guc_doorbell_info *doorbell; 259 struct guc_doorbell_info *doorbell;
260 void *base; 260 void *base;
261 int drbreg = GEN8_DRBREGL(client->doorbell_id); 261 i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
262 int value; 262 int value;
263 263
264 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); 264 base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
@@ -588,8 +588,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
588/** 588/**
589 * i915_guc_submit() - Submit commands through GuC 589 * i915_guc_submit() - Submit commands through GuC
590 * @client: the guc client where commands will go through 590 * @client: the guc client where commands will go through
591 * @ctx: LRC where commands come from 591 * @rq: request associated with the commands
592 * @ring: HW engine that will excute the commands
593 * 592 *
594 * Return: 0 if succeed 593 * Return: 0 if succeed
595 */ 594 */
@@ -731,7 +730,8 @@ static void guc_client_free(struct drm_device *dev,
731 * The kernel client to replace ExecList submission is created with 730 * The kernel client to replace ExecList submission is created with
732 * NORMAL priority. Priority of a client for scheduler can be HIGH, 731 * NORMAL priority. Priority of a client for scheduler can be HIGH,
733 * while a preemption context can use CRITICAL. 732 * while a preemption context can use CRITICAL.
734 * @ctx the context to own the client (we use the default render context) 733 * @ctx: the context that owns the client (we use the default render
734 * context)
735 * 735 *
736 * Return: An i915_guc_client object if success. 736 * Return: An i915_guc_client object if success.
737 */ 737 */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0d228f909dcb..c8ba94968aaf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
139/* 139/*
140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141 */ 141 */
142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) 142static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143 i915_reg_t reg)
143{ 144{
144 u32 val = I915_READ(reg); 145 u32 val = I915_READ(reg);
145 146
@@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
147 return; 148 return;
148 149
149 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 150 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
150 reg, val); 151 i915_mmio_reg_offset(reg), val);
151 I915_WRITE(reg, 0xffffffff); 152 I915_WRITE(reg, 0xffffffff);
152 POSTING_READ(reg); 153 POSTING_READ(reg);
153 I915_WRITE(reg, 0xffffffff); 154 I915_WRITE(reg, 0xffffffff);
@@ -283,17 +284,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
283 ilk_update_gt_irq(dev_priv, mask, 0); 284 ilk_update_gt_irq(dev_priv, mask, 0);
284} 285}
285 286
286static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) 287static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
287{ 288{
288 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 289 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
289} 290}
290 291
291static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) 292static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
292{ 293{
293 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 294 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
294} 295}
295 296
296static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) 297static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
297{ 298{
298 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; 299 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
299} 300}
@@ -350,7 +351,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
350void gen6_reset_rps_interrupts(struct drm_device *dev) 351void gen6_reset_rps_interrupts(struct drm_device *dev)
351{ 352{
352 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
353 uint32_t reg = gen6_pm_iir(dev_priv); 354 i915_reg_t reg = gen6_pm_iir(dev_priv);
354 355
355 spin_lock_irq(&dev_priv->irq_lock); 356 spin_lock_irq(&dev_priv->irq_lock);
356 I915_WRITE(reg, dev_priv->pm_rps_events); 357 I915_WRITE(reg, dev_priv->pm_rps_events);
@@ -477,7 +478,7 @@ static void
477__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 478__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
478 u32 enable_mask, u32 status_mask) 479 u32 enable_mask, u32 status_mask)
479{ 480{
480 u32 reg = PIPESTAT(pipe); 481 i915_reg_t reg = PIPESTAT(pipe);
481 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 482 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
482 483
483 assert_spin_locked(&dev_priv->irq_lock); 484 assert_spin_locked(&dev_priv->irq_lock);
@@ -504,7 +505,7 @@ static void
504__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 505__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
505 u32 enable_mask, u32 status_mask) 506 u32 enable_mask, u32 status_mask)
506{ 507{
507 u32 reg = PIPESTAT(pipe); 508 i915_reg_t reg = PIPESTAT(pipe);
508 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 509 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
509 510
510 assert_spin_locked(&dev_priv->irq_lock); 511 assert_spin_locked(&dev_priv->irq_lock);
@@ -665,8 +666,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
665static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 666static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
666{ 667{
667 struct drm_i915_private *dev_priv = dev->dev_private; 668 struct drm_i915_private *dev_priv = dev->dev_private;
668 unsigned long high_frame; 669 i915_reg_t high_frame, low_frame;
669 unsigned long low_frame;
670 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 670 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
671 struct intel_crtc *intel_crtc = 671 struct intel_crtc *intel_crtc =
672 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 672 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -717,9 +717,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
717 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 717 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
718} 718}
719 719
720/* raw reads, only for fast reads of display block, no need for forcewake etc. */ 720/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
721#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
722
723static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 721static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
724{ 722{
725 struct drm_device *dev = crtc->base.dev; 723 struct drm_device *dev = crtc->base.dev;
@@ -733,9 +731,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
733 vtotal /= 2; 731 vtotal /= 2;
734 732
735 if (IS_GEN2(dev)) 733 if (IS_GEN2(dev))
736 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 734 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
737 else 735 else
738 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 736 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
739 737
740 /* 738 /*
741 * On HSW, the DSL reg (0x70000) appears to return 0 if we 739 * On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -827,7 +825,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
827 * We can split this into vertical and horizontal 825 * We can split this into vertical and horizontal
828 * scanout position. 826 * scanout position.
829 */ 827 */
830 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 828 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
831 829
832 /* convert to pixel counts */ 830 /* convert to pixel counts */
833 vbl_start *= htotal; 831 vbl_start *= htotal;
@@ -1188,7 +1186,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1188 POSTING_READ(GEN7_MISCCPCTL); 1186 POSTING_READ(GEN7_MISCCPCTL);
1189 1187
1190 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1188 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1191 u32 reg; 1189 i915_reg_t reg;
1192 1190
1193 slice--; 1191 slice--;
1194 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1192 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
@@ -1196,7 +1194,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1196 1194
1197 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1195 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1198 1196
1199 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1197 reg = GEN7_L3CDERRST1(slice);
1200 1198
1201 error_status = I915_READ(reg); 1199 error_status = I915_READ(reg);
1202 row = GEN7_PARITY_ERROR_ROW(error_status); 1200 row = GEN7_PARITY_ERROR_ROW(error_status);
@@ -1290,70 +1288,69 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1290 ivybridge_parity_error_irq_handler(dev, gt_iir); 1288 ivybridge_parity_error_irq_handler(dev, gt_iir);
1291} 1289}
1292 1290
1291static __always_inline void
1292gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
1293{
1294 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1295 notify_ring(ring);
1296 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1297 intel_lrc_irq_handler(ring);
1298}
1299
1293static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1300static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1294 u32 master_ctl) 1301 u32 master_ctl)
1295{ 1302{
1296 irqreturn_t ret = IRQ_NONE; 1303 irqreturn_t ret = IRQ_NONE;
1297 1304
1298 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1305 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1299 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); 1306 u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
1300 if (tmp) { 1307 if (iir) {
1301 I915_WRITE_FW(GEN8_GT_IIR(0), tmp); 1308 I915_WRITE_FW(GEN8_GT_IIR(0), iir);
1302 ret = IRQ_HANDLED; 1309 ret = IRQ_HANDLED;
1303 1310
1304 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) 1311 gen8_cs_irq_handler(&dev_priv->ring[RCS],
1305 intel_lrc_irq_handler(&dev_priv->ring[RCS]); 1312 iir, GEN8_RCS_IRQ_SHIFT);
1306 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1307 notify_ring(&dev_priv->ring[RCS]);
1308 1313
1309 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) 1314 gen8_cs_irq_handler(&dev_priv->ring[BCS],
1310 intel_lrc_irq_handler(&dev_priv->ring[BCS]); 1315 iir, GEN8_BCS_IRQ_SHIFT);
1311 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1312 notify_ring(&dev_priv->ring[BCS]);
1313 } else 1316 } else
1314 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1317 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1315 } 1318 }
1316 1319
1317 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1320 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1318 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); 1321 u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
1319 if (tmp) { 1322 if (iir) {
1320 I915_WRITE_FW(GEN8_GT_IIR(1), tmp); 1323 I915_WRITE_FW(GEN8_GT_IIR(1), iir);
1321 ret = IRQ_HANDLED; 1324 ret = IRQ_HANDLED;
1322 1325
1323 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) 1326 gen8_cs_irq_handler(&dev_priv->ring[VCS],
1324 intel_lrc_irq_handler(&dev_priv->ring[VCS]); 1327 iir, GEN8_VCS1_IRQ_SHIFT);
1325 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1326 notify_ring(&dev_priv->ring[VCS]);
1327 1328
1328 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) 1329 gen8_cs_irq_handler(&dev_priv->ring[VCS2],
1329 intel_lrc_irq_handler(&dev_priv->ring[VCS2]); 1330 iir, GEN8_VCS2_IRQ_SHIFT);
1330 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1331 notify_ring(&dev_priv->ring[VCS2]);
1332 } else 1331 } else
1333 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1332 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1334 } 1333 }
1335 1334
1336 if (master_ctl & GEN8_GT_VECS_IRQ) { 1335 if (master_ctl & GEN8_GT_VECS_IRQ) {
1337 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); 1336 u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
1338 if (tmp) { 1337 if (iir) {
1339 I915_WRITE_FW(GEN8_GT_IIR(3), tmp); 1338 I915_WRITE_FW(GEN8_GT_IIR(3), iir);
1340 ret = IRQ_HANDLED; 1339 ret = IRQ_HANDLED;
1341 1340
1342 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) 1341 gen8_cs_irq_handler(&dev_priv->ring[VECS],
1343 intel_lrc_irq_handler(&dev_priv->ring[VECS]); 1342 iir, GEN8_VECS_IRQ_SHIFT);
1344 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1345 notify_ring(&dev_priv->ring[VECS]);
1346 } else 1343 } else
1347 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1344 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1348 } 1345 }
1349 1346
1350 if (master_ctl & GEN8_GT_PM_IRQ) { 1347 if (master_ctl & GEN8_GT_PM_IRQ) {
1351 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); 1348 u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
1352 if (tmp & dev_priv->pm_rps_events) { 1349 if (iir & dev_priv->pm_rps_events) {
1353 I915_WRITE_FW(GEN8_GT_IIR(2), 1350 I915_WRITE_FW(GEN8_GT_IIR(2),
1354 tmp & dev_priv->pm_rps_events); 1351 iir & dev_priv->pm_rps_events);
1355 ret = IRQ_HANDLED; 1352 ret = IRQ_HANDLED;
1356 gen6_rps_irq_handler(dev_priv, tmp); 1353 gen6_rps_irq_handler(dev_priv, iir);
1357 } else 1354 } else
1358 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1355 DRM_ERROR("The master control interrupt lied (PM)!\n");
1359 } 1356 }
@@ -1625,7 +1622,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1625 1622
1626 spin_lock(&dev_priv->irq_lock); 1623 spin_lock(&dev_priv->irq_lock);
1627 for_each_pipe(dev_priv, pipe) { 1624 for_each_pipe(dev_priv, pipe) {
1628 int reg; 1625 i915_reg_t reg;
1629 u32 mask, iir_bit = 0; 1626 u32 mask, iir_bit = 0;
1630 1627
1631 /* 1628 /*
@@ -2354,9 +2351,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2354 spt_irq_handler(dev, pch_iir); 2351 spt_irq_handler(dev, pch_iir);
2355 else 2352 else
2356 cpt_irq_handler(dev, pch_iir); 2353 cpt_irq_handler(dev, pch_iir);
2357 } else 2354 } else {
2358 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2355 /*
2359 2356 * Like on previous PCH there seems to be something
2357 * fishy going on with forwarding PCH interrupts.
2358 */
2359 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2360 }
2360 } 2361 }
2361 2362
2362 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2363 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3869,7 +3870,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3869 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3870 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3870 3871
3871 for_each_pipe(dev_priv, pipe) { 3872 for_each_pipe(dev_priv, pipe) {
3872 int reg = PIPESTAT(pipe); 3873 i915_reg_t reg = PIPESTAT(pipe);
3873 pipe_stats[pipe] = I915_READ(reg); 3874 pipe_stats[pipe] = I915_READ(reg);
3874 3875
3875 /* 3876 /*
@@ -4050,7 +4051,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4050 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4051 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4051 4052
4052 for_each_pipe(dev_priv, pipe) { 4053 for_each_pipe(dev_priv, pipe) {
4053 int reg = PIPESTAT(pipe); 4054 i915_reg_t reg = PIPESTAT(pipe);
4054 pipe_stats[pipe] = I915_READ(reg); 4055 pipe_stats[pipe] = I915_READ(reg);
4055 4056
4056 /* Clear the PIPE*STAT regs before the IIR */ 4057 /* Clear the PIPE*STAT regs before the IIR */
@@ -4272,7 +4273,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4272 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4273 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4273 4274
4274 for_each_pipe(dev_priv, pipe) { 4275 for_each_pipe(dev_priv, pipe) {
4275 int reg = PIPESTAT(pipe); 4276 i915_reg_t reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg); 4277 pipe_stats[pipe] = I915_READ(reg);
4277 4278
4278 /* 4279 /*
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 4be13a5eb932..835d6099c769 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = {
32 .panel_use_ssc = -1, 32 .panel_use_ssc = -1,
33 .vbt_sdvo_panel_type = -1, 33 .vbt_sdvo_panel_type = -1,
34 .enable_rc6 = -1, 34 .enable_rc6 = -1,
35 .enable_dc = -1,
35 .enable_fbc = -1, 36 .enable_fbc = -1,
36 .enable_execlists = -1, 37 .enable_execlists = -1,
37 .enable_hangcheck = true, 38 .enable_hangcheck = true,
@@ -80,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6,
80 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " 81 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
81 "default: -1 (use per-chip default)"); 82 "default: -1 (use per-chip default)");
82 83
84module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
85MODULE_PARM_DESC(enable_dc,
86 "Enable power-saving display C-states. "
87 "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
88
83module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); 89module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
84MODULE_PARM_DESC(enable_fbc, 90MODULE_PARM_DESC(enable_fbc,
85 "Enable frame buffer compression for power savings " 91 "Enable frame buffer compression for power savings "
@@ -112,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck,
112module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); 118module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
113MODULE_PARM_DESC(enable_ppgtt, 119MODULE_PARM_DESC(enable_ppgtt,
114 "Override PPGTT usage. " 120 "Override PPGTT usage. "
115 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 121 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
116 122
117module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); 123module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
118MODULE_PARM_DESC(enable_execlists, 124MODULE_PARM_DESC(enable_execlists,
@@ -126,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i
126MODULE_PARM_DESC(preliminary_hw_support, 132MODULE_PARM_DESC(preliminary_hw_support,
127 "Enable preliminary hardware support."); 133 "Enable preliminary hardware support.");
128 134
129module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600); 135module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
130MODULE_PARM_DESC(disable_power_well, 136MODULE_PARM_DESC(disable_power_well,
131 "Disable display power wells when possible " 137 "Disable display power wells when possible "
132 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 138 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bc7b8faba84d..1a12d44b9710 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,14 +25,43 @@
25#ifndef _I915_REG_H_ 25#ifndef _I915_REG_H_
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28typedef struct {
29 uint32_t reg;
30} i915_reg_t;
31
32#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
33
34#define INVALID_MMIO_REG _MMIO(0)
35
36static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
37{
38 return reg.reg;
39}
40
41static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
42{
43 return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
44}
45
46static inline bool i915_mmio_reg_valid(i915_reg_t reg)
47{
48 return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
49}
50
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 51#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
52#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
29#define _PLANE(plane, a, b) _PIPE(plane, a, b) 53#define _PLANE(plane, a, b) _PIPE(plane, a, b)
30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 54#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
55#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
56#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 57#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
58#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ 59#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c)) 60 (pipe) == PIPE_B ? (b) : (c))
61#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
34#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ 62#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
35 (port) == PORT_B ? (b) : (c)) 63 (port) == PORT_B ? (b) : (c))
64#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
36 65
37#define _MASKED_FIELD(mask, value) ({ \ 66#define _MASKED_FIELD(mask, value) ({ \
38 if (__builtin_constant_p(mask)) \ 67 if (__builtin_constant_p(mask)) \
@@ -105,14 +134,14 @@
105#define GRDOM_RESET_STATUS (1<<1) 134#define GRDOM_RESET_STATUS (1<<1)
106#define GRDOM_RESET_ENABLE (1<<0) 135#define GRDOM_RESET_ENABLE (1<<0)
107 136
108#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4) 137#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
109#define ILK_GRDOM_FULL (0<<1) 138#define ILK_GRDOM_FULL (0<<1)
110#define ILK_GRDOM_RENDER (1<<1) 139#define ILK_GRDOM_RENDER (1<<1)
111#define ILK_GRDOM_MEDIA (3<<1) 140#define ILK_GRDOM_MEDIA (3<<1)
112#define ILK_GRDOM_MASK (3<<1) 141#define ILK_GRDOM_MASK (3<<1)
113#define ILK_GRDOM_RESET_ENABLE (1<<0) 142#define ILK_GRDOM_RESET_ENABLE (1<<0)
114 143
115#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 144#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
116#define GEN6_MBC_SNPCR_SHIFT 21 145#define GEN6_MBC_SNPCR_SHIFT 21
117#define GEN6_MBC_SNPCR_MASK (3<<21) 146#define GEN6_MBC_SNPCR_MASK (3<<21)
118#define GEN6_MBC_SNPCR_MAX (0<<21) 147#define GEN6_MBC_SNPCR_MAX (0<<21)
@@ -120,31 +149,31 @@
120#define GEN6_MBC_SNPCR_LOW (2<<21) 149#define GEN6_MBC_SNPCR_LOW (2<<21)
121#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ 150#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
122 151
123#define VLV_G3DCTL 0x9024 152#define VLV_G3DCTL _MMIO(0x9024)
124#define VLV_GSCKGCTL 0x9028 153#define VLV_GSCKGCTL _MMIO(0x9028)
125 154
126#define GEN6_MBCTL 0x0907c 155#define GEN6_MBCTL _MMIO(0x0907c)
127#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) 156#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
128#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) 157#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
129#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) 158#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
130#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) 159#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
131#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) 160#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
132 161
133#define GEN6_GDRST 0x941c 162#define GEN6_GDRST _MMIO(0x941c)
134#define GEN6_GRDOM_FULL (1 << 0) 163#define GEN6_GRDOM_FULL (1 << 0)
135#define GEN6_GRDOM_RENDER (1 << 1) 164#define GEN6_GRDOM_RENDER (1 << 1)
136#define GEN6_GRDOM_MEDIA (1 << 2) 165#define GEN6_GRDOM_MEDIA (1 << 2)
137#define GEN6_GRDOM_BLT (1 << 3) 166#define GEN6_GRDOM_BLT (1 << 3)
138 167
139#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) 168#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
140#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) 169#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
141#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 170#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220)
142#define PP_DIR_DCLV_2G 0xffffffff 171#define PP_DIR_DCLV_2G 0xffffffff
143 172
144#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) 173#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
145#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) 174#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8)
146 175
147#define GEN8_R_PWR_CLK_STATE 0x20C8 176#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
148#define GEN8_RPCS_ENABLE (1 << 31) 177#define GEN8_RPCS_ENABLE (1 << 31)
149#define GEN8_RPCS_S_CNT_ENABLE (1 << 18) 178#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
150#define GEN8_RPCS_S_CNT_SHIFT 15 179#define GEN8_RPCS_S_CNT_SHIFT 15
@@ -157,7 +186,7 @@
157#define GEN8_RPCS_EU_MIN_SHIFT 0 186#define GEN8_RPCS_EU_MIN_SHIFT 0
158#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) 187#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
159 188
160#define GAM_ECOCHK 0x4090 189#define GAM_ECOCHK _MMIO(0x4090)
161#define BDW_DISABLE_HDC_INVALIDATION (1<<25) 190#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
162#define ECOCHK_SNB_BIT (1<<10) 191#define ECOCHK_SNB_BIT (1<<10)
163#define ECOCHK_DIS_TLB (1<<8) 192#define ECOCHK_DIS_TLB (1<<8)
@@ -170,15 +199,15 @@
170#define ECOCHK_PPGTT_WT_HSW (0x2<<3) 199#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
171#define ECOCHK_PPGTT_WB_HSW (0x3<<3) 200#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
172 201
173#define GAC_ECO_BITS 0x14090 202#define GAC_ECO_BITS _MMIO(0x14090)
174#define ECOBITS_SNB_BIT (1<<13) 203#define ECOBITS_SNB_BIT (1<<13)
175#define ECOBITS_PPGTT_CACHE64B (3<<8) 204#define ECOBITS_PPGTT_CACHE64B (3<<8)
176#define ECOBITS_PPGTT_CACHE4B (0<<8) 205#define ECOBITS_PPGTT_CACHE4B (0<<8)
177 206
178#define GAB_CTL 0x24000 207#define GAB_CTL _MMIO(0x24000)
179#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) 208#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
180 209
181#define GEN6_STOLEN_RESERVED 0x1082C0 210#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
182#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20) 211#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
183#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18) 212#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
184#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4) 213#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
@@ -200,6 +229,7 @@
200#define VGA_ST01_MDA 0x3ba 229#define VGA_ST01_MDA 0x3ba
201#define VGA_ST01_CGA 0x3da 230#define VGA_ST01_CGA 0x3da
202 231
232#define _VGA_MSR_WRITE _MMIO(0x3c2)
203#define VGA_MSR_WRITE 0x3c2 233#define VGA_MSR_WRITE 0x3c2
204#define VGA_MSR_READ 0x3cc 234#define VGA_MSR_READ 0x3cc
205#define VGA_MSR_MEM_EN (1<<1) 235#define VGA_MSR_MEM_EN (1<<1)
@@ -377,10 +407,12 @@
377#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) 407#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
378#define MI_BATCH_RESOURCE_STREAMER (1<<10) 408#define MI_BATCH_RESOURCE_STREAMER (1<<10)
379 409
380#define MI_PREDICATE_SRC0 (0x2400) 410#define MI_PREDICATE_SRC0 _MMIO(0x2400)
381#define MI_PREDICATE_SRC1 (0x2408) 411#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
412#define MI_PREDICATE_SRC1 _MMIO(0x2408)
413#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
382 414
383#define MI_PREDICATE_RESULT_2 (0x2214) 415#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
384#define LOWER_SLICE_ENABLED (1<<0) 416#define LOWER_SLICE_ENABLED (1<<0)
385#define LOWER_SLICE_DISABLED (0<<0) 417#define LOWER_SLICE_DISABLED (0<<0)
386 418
@@ -509,49 +541,61 @@
509/* 541/*
510 * Registers used only by the command parser 542 * Registers used only by the command parser
511 */ 543 */
512#define BCS_SWCTRL 0x22200 544#define BCS_SWCTRL _MMIO(0x22200)
513 545
514#define GPGPU_THREADS_DISPATCHED 0x2290 546#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
515#define HS_INVOCATION_COUNT 0x2300 547#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
516#define DS_INVOCATION_COUNT 0x2308 548#define HS_INVOCATION_COUNT _MMIO(0x2300)
517#define IA_VERTICES_COUNT 0x2310 549#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
518#define IA_PRIMITIVES_COUNT 0x2318 550#define DS_INVOCATION_COUNT _MMIO(0x2308)
519#define VS_INVOCATION_COUNT 0x2320 551#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
520#define GS_INVOCATION_COUNT 0x2328 552#define IA_VERTICES_COUNT _MMIO(0x2310)
521#define GS_PRIMITIVES_COUNT 0x2330 553#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
522#define CL_INVOCATION_COUNT 0x2338 554#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
523#define CL_PRIMITIVES_COUNT 0x2340 555#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
524#define PS_INVOCATION_COUNT 0x2348 556#define VS_INVOCATION_COUNT _MMIO(0x2320)
525#define PS_DEPTH_COUNT 0x2350 557#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
558#define GS_INVOCATION_COUNT _MMIO(0x2328)
559#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
560#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
561#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
562#define CL_INVOCATION_COUNT _MMIO(0x2338)
563#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
564#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
565#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
566#define PS_INVOCATION_COUNT _MMIO(0x2348)
567#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
568#define PS_DEPTH_COUNT _MMIO(0x2350)
569#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
526 570
527/* There are the 4 64-bit counter registers, one for each stream output */ 571/* There are the 4 64-bit counter registers, one for each stream output */
528#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8) 572#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
573#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
529 574
530#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8) 575#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
576#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
531 577
532#define GEN7_3DPRIM_END_OFFSET 0x2420 578#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
533#define GEN7_3DPRIM_START_VERTEX 0x2430 579#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
534#define GEN7_3DPRIM_VERTEX_COUNT 0x2434 580#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
535#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438 581#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
536#define GEN7_3DPRIM_START_INSTANCE 0x243C 582#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C)
537#define GEN7_3DPRIM_BASE_VERTEX 0x2440 583#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
538 584
539#define GEN7_GPGPU_DISPATCHDIMX 0x2500 585#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
540#define GEN7_GPGPU_DISPATCHDIMY 0x2504 586#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
541#define GEN7_GPGPU_DISPATCHDIMZ 0x2508 587#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
542 588
543#define OACONTROL 0x2360 589#define OACONTROL _MMIO(0x2360)
544 590
545#define _GEN7_PIPEA_DE_LOAD_SL 0x70068 591#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
546#define _GEN7_PIPEB_DE_LOAD_SL 0x71068 592#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
547#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \ 593#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
548 _GEN7_PIPEA_DE_LOAD_SL, \
549 _GEN7_PIPEB_DE_LOAD_SL)
550 594
551/* 595/*
552 * Reset registers 596 * Reset registers
553 */ 597 */
554#define DEBUG_RESET_I830 0x6070 598#define DEBUG_RESET_I830 _MMIO(0x6070)
555#define DEBUG_RESET_FULL (1<<7) 599#define DEBUG_RESET_FULL (1<<7)
556#define DEBUG_RESET_RENDER (1<<8) 600#define DEBUG_RESET_RENDER (1<<8)
557#define DEBUG_RESET_DISPLAY (1<<9) 601#define DEBUG_RESET_DISPLAY (1<<9)
@@ -559,7 +603,7 @@
559/* 603/*
560 * IOSF sideband 604 * IOSF sideband
561 */ 605 */
562#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100) 606#define VLV_IOSF_DOORBELL_REQ _MMIO(VLV_DISPLAY_BASE + 0x2100)
563#define IOSF_DEVFN_SHIFT 24 607#define IOSF_DEVFN_SHIFT 24
564#define IOSF_OPCODE_SHIFT 16 608#define IOSF_OPCODE_SHIFT 16
565#define IOSF_PORT_SHIFT 8 609#define IOSF_PORT_SHIFT 8
@@ -576,8 +620,8 @@
576#define IOSF_PORT_CCU 0xA9 620#define IOSF_PORT_CCU 0xA9
577#define IOSF_PORT_GPS_CORE 0x48 621#define IOSF_PORT_GPS_CORE 0x48
578#define IOSF_PORT_FLISDSI 0x1B 622#define IOSF_PORT_FLISDSI 0x1B
579#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) 623#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
580#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) 624#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
581 625
582/* See configdb bunit SB addr map */ 626/* See configdb bunit SB addr map */
583#define BUNIT_REG_BISOC 0x11 627#define BUNIT_REG_BISOC 0x11
@@ -609,6 +653,7 @@
609 653
610/* See the PUNIT HAS v0.8 for the below bits */ 654/* See the PUNIT HAS v0.8 for the below bits */
611enum punit_power_well { 655enum punit_power_well {
656 /* These numbers are fixed and must match the position of the pw bits */
612 PUNIT_POWER_WELL_RENDER = 0, 657 PUNIT_POWER_WELL_RENDER = 0,
613 PUNIT_POWER_WELL_MEDIA = 1, 658 PUNIT_POWER_WELL_MEDIA = 1,
614 PUNIT_POWER_WELL_DISP2D = 3, 659 PUNIT_POWER_WELL_DISP2D = 3,
@@ -621,10 +666,12 @@ enum punit_power_well {
621 PUNIT_POWER_WELL_DPIO_RX1 = 11, 666 PUNIT_POWER_WELL_DPIO_RX1 = 11,
622 PUNIT_POWER_WELL_DPIO_CMN_D = 12, 667 PUNIT_POWER_WELL_DPIO_CMN_D = 12,
623 668
624 PUNIT_POWER_WELL_NUM, 669 /* Not actual bit groups. Used as IDs for lookup_power_well() */
670 PUNIT_POWER_WELL_ALWAYS_ON,
625}; 671};
626 672
627enum skl_disp_power_wells { 673enum skl_disp_power_wells {
674 /* These numbers are fixed and must match the position of the pw bits */
628 SKL_DISP_PW_MISC_IO, 675 SKL_DISP_PW_MISC_IO,
629 SKL_DISP_PW_DDI_A_E, 676 SKL_DISP_PW_DDI_A_E,
630 SKL_DISP_PW_DDI_B, 677 SKL_DISP_PW_DDI_B,
@@ -632,6 +679,10 @@ enum skl_disp_power_wells {
632 SKL_DISP_PW_DDI_D, 679 SKL_DISP_PW_DDI_D,
633 SKL_DISP_PW_1 = 14, 680 SKL_DISP_PW_1 = 14,
634 SKL_DISP_PW_2, 681 SKL_DISP_PW_2,
682
683 /* Not actual bit groups. Used as IDs for lookup_power_well() */
684 SKL_DISP_PW_ALWAYS_ON,
685 SKL_DISP_PW_DC_OFF,
635}; 686};
636 687
637#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) 688#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -832,7 +883,7 @@ enum skl_disp_power_wells {
832 */ 883 */
833#define DPIO_DEVFN 0 884#define DPIO_DEVFN 0
834 885
835#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) 886#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
836#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 887#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
837#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 888#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
838#define DPIO_SFR_BYPASS (1<<1) 889#define DPIO_SFR_BYPASS (1<<1)
@@ -1185,9 +1236,9 @@ enum skl_disp_power_wells {
1185#define DPIO_UPAR_SHIFT 30 1236#define DPIO_UPAR_SHIFT 30
1186 1237
1187/* BXT PHY registers */ 1238/* BXT PHY registers */
1188#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b)) 1239#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
1189 1240
1190#define BXT_P_CR_GT_DISP_PWRON 0x138090 1241#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
1191#define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) 1242#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
1192 1243
1193#define _PHY_CTL_FAMILY_EDP 0x64C80 1244#define _PHY_CTL_FAMILY_EDP 0x64C80
@@ -1203,7 +1254,7 @@ enum skl_disp_power_wells {
1203#define PORT_PLL_ENABLE (1 << 31) 1254#define PORT_PLL_ENABLE (1 << 31)
1204#define PORT_PLL_LOCK (1 << 30) 1255#define PORT_PLL_LOCK (1 << 30)
1205#define PORT_PLL_REF_SEL (1 << 27) 1256#define PORT_PLL_REF_SEL (1 << 27)
1206#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B) 1257#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
1207 1258
1208#define _PORT_PLL_EBB_0_A 0x162034 1259#define _PORT_PLL_EBB_0_A 0x162034
1209#define _PORT_PLL_EBB_0_B 0x6C034 1260#define _PORT_PLL_EBB_0_B 0x6C034
@@ -1214,7 +1265,7 @@ enum skl_disp_power_wells {
1214#define PORT_PLL_P2_SHIFT 8 1265#define PORT_PLL_P2_SHIFT 8
1215#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT) 1266#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
1216#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT) 1267#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
1217#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \ 1268#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
1218 _PORT_PLL_EBB_0_B, \ 1269 _PORT_PLL_EBB_0_B, \
1219 _PORT_PLL_EBB_0_C) 1270 _PORT_PLL_EBB_0_C)
1220 1271
@@ -1223,7 +1274,7 @@ enum skl_disp_power_wells {
1223#define _PORT_PLL_EBB_4_C 0x6C344 1274#define _PORT_PLL_EBB_4_C 0x6C344
1224#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13) 1275#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
1225#define PORT_PLL_RECALIBRATE (1 << 14) 1276#define PORT_PLL_RECALIBRATE (1 << 14)
1226#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \ 1277#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
1227 _PORT_PLL_EBB_4_B, \ 1278 _PORT_PLL_EBB_4_B, \
1228 _PORT_PLL_EBB_4_C) 1279 _PORT_PLL_EBB_4_C)
1229 1280
@@ -1259,7 +1310,7 @@ enum skl_disp_power_wells {
1259#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ 1310#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
1260 _PORT_PLL_0_B, \ 1311 _PORT_PLL_0_B, \
1261 _PORT_PLL_0_C) 1312 _PORT_PLL_0_C)
1262#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4) 1313#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
1263 1314
1264/* BXT PHY common lane registers */ 1315/* BXT PHY common lane registers */
1265#define _PORT_CL1CM_DW0_A 0x162000 1316#define _PORT_CL1CM_DW0_A 0x162000
@@ -1297,7 +1348,7 @@ enum skl_disp_power_wells {
1297 _PORT_CL1CM_DW30_A) 1348 _PORT_CL1CM_DW30_A)
1298 1349
1299/* Defined for PHY0 only */ 1350/* Defined for PHY0 only */
1300#define BXT_PORT_CL2CM_DW6_BC 0x6C358 1351#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
1301#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) 1352#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
1302 1353
1303/* BXT PHY Ref registers */ 1354/* BXT PHY Ref registers */
@@ -1337,10 +1388,10 @@ enum skl_disp_power_wells {
1337#define _PORT_PCS_DW10_GRP_A 0x162C28 1388#define _PORT_PCS_DW10_GRP_A 0x162C28
1338#define _PORT_PCS_DW10_GRP_B 0x6CC28 1389#define _PORT_PCS_DW10_GRP_B 0x6CC28
1339#define _PORT_PCS_DW10_GRP_C 0x6CE28 1390#define _PORT_PCS_DW10_GRP_C 0x6CE28
1340#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \ 1391#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
1341 _PORT_PCS_DW10_LN01_B, \ 1392 _PORT_PCS_DW10_LN01_B, \
1342 _PORT_PCS_DW10_LN01_C) 1393 _PORT_PCS_DW10_LN01_C)
1343#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \ 1394#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
1344 _PORT_PCS_DW10_GRP_B, \ 1395 _PORT_PCS_DW10_GRP_B, \
1345 _PORT_PCS_DW10_GRP_C) 1396 _PORT_PCS_DW10_GRP_C)
1346#define TX2_SWING_CALC_INIT (1 << 31) 1397#define TX2_SWING_CALC_INIT (1 << 31)
@@ -1357,13 +1408,13 @@ enum skl_disp_power_wells {
1357#define _PORT_PCS_DW12_GRP_C 0x6CE30 1408#define _PORT_PCS_DW12_GRP_C 0x6CE30
1358#define LANESTAGGER_STRAP_OVRD (1 << 6) 1409#define LANESTAGGER_STRAP_OVRD (1 << 6)
1359#define LANE_STAGGER_MASK 0x1F 1410#define LANE_STAGGER_MASK 0x1F
1360#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \ 1411#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
1361 _PORT_PCS_DW12_LN01_B, \ 1412 _PORT_PCS_DW12_LN01_B, \
1362 _PORT_PCS_DW12_LN01_C) 1413 _PORT_PCS_DW12_LN01_C)
1363#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, _PORT_PCS_DW12_LN23_A, \ 1414#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
1364 _PORT_PCS_DW12_LN23_B, \ 1415 _PORT_PCS_DW12_LN23_B, \
1365 _PORT_PCS_DW12_LN23_C) 1416 _PORT_PCS_DW12_LN23_C)
1366#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \ 1417#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
1367 _PORT_PCS_DW12_GRP_B, \ 1418 _PORT_PCS_DW12_GRP_B, \
1368 _PORT_PCS_DW12_GRP_C) 1419 _PORT_PCS_DW12_GRP_C)
1369 1420
@@ -1377,10 +1428,10 @@ enum skl_disp_power_wells {
1377#define _PORT_TX_DW2_GRP_A 0x162D08 1428#define _PORT_TX_DW2_GRP_A 0x162D08
1378#define _PORT_TX_DW2_GRP_B 0x6CD08 1429#define _PORT_TX_DW2_GRP_B 0x6CD08
1379#define _PORT_TX_DW2_GRP_C 0x6CF08 1430#define _PORT_TX_DW2_GRP_C 0x6CF08
1380#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \ 1431#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
1381 _PORT_TX_DW2_GRP_B, \ 1432 _PORT_TX_DW2_GRP_B, \
1382 _PORT_TX_DW2_GRP_C) 1433 _PORT_TX_DW2_GRP_C)
1383#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \ 1434#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
1384 _PORT_TX_DW2_LN0_B, \ 1435 _PORT_TX_DW2_LN0_B, \
1385 _PORT_TX_DW2_LN0_C) 1436 _PORT_TX_DW2_LN0_C)
1386#define MARGIN_000_SHIFT 16 1437#define MARGIN_000_SHIFT 16
@@ -1394,10 +1445,10 @@ enum skl_disp_power_wells {
1394#define _PORT_TX_DW3_GRP_A 0x162D0C 1445#define _PORT_TX_DW3_GRP_A 0x162D0C
1395#define _PORT_TX_DW3_GRP_B 0x6CD0C 1446#define _PORT_TX_DW3_GRP_B 0x6CD0C
1396#define _PORT_TX_DW3_GRP_C 0x6CF0C 1447#define _PORT_TX_DW3_GRP_C 0x6CF0C
1397#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \ 1448#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
1398 _PORT_TX_DW3_GRP_B, \ 1449 _PORT_TX_DW3_GRP_B, \
1399 _PORT_TX_DW3_GRP_C) 1450 _PORT_TX_DW3_GRP_C)
1400#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \ 1451#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
1401 _PORT_TX_DW3_LN0_B, \ 1452 _PORT_TX_DW3_LN0_B, \
1402 _PORT_TX_DW3_LN0_C) 1453 _PORT_TX_DW3_LN0_C)
1403#define SCALE_DCOMP_METHOD (1 << 26) 1454#define SCALE_DCOMP_METHOD (1 << 26)
@@ -1409,10 +1460,10 @@ enum skl_disp_power_wells {
1409#define _PORT_TX_DW4_GRP_A 0x162D10 1460#define _PORT_TX_DW4_GRP_A 0x162D10
1410#define _PORT_TX_DW4_GRP_B 0x6CD10 1461#define _PORT_TX_DW4_GRP_B 0x6CD10
1411#define _PORT_TX_DW4_GRP_C 0x6CF10 1462#define _PORT_TX_DW4_GRP_C 0x6CF10
1412#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \ 1463#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
1413 _PORT_TX_DW4_LN0_B, \ 1464 _PORT_TX_DW4_LN0_B, \
1414 _PORT_TX_DW4_LN0_C) 1465 _PORT_TX_DW4_LN0_C)
1415#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \ 1466#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
1416 _PORT_TX_DW4_GRP_B, \ 1467 _PORT_TX_DW4_GRP_B, \
1417 _PORT_TX_DW4_GRP_C) 1468 _PORT_TX_DW4_GRP_C)
1418#define DEEMPH_SHIFT 24 1469#define DEEMPH_SHIFT 24
@@ -1423,17 +1474,17 @@ enum skl_disp_power_wells {
1423#define _PORT_TX_DW14_LN0_C 0x6C938 1474#define _PORT_TX_DW14_LN0_C 0x6C938
1424#define LATENCY_OPTIM_SHIFT 30 1475#define LATENCY_OPTIM_SHIFT 30
1425#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT) 1476#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
1426#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \ 1477#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
1427 _PORT_TX_DW14_LN0_B, \ 1478 _PORT_TX_DW14_LN0_B, \
1428 _PORT_TX_DW14_LN0_C) + \ 1479 _PORT_TX_DW14_LN0_C) + \
1429 _BXT_LANE_OFFSET(lane)) 1480 _BXT_LANE_OFFSET(lane))
1430 1481
1431/* UAIMI scratch pad register 1 */ 1482/* UAIMI scratch pad register 1 */
1432#define UAIMI_SPR1 0x4F074 1483#define UAIMI_SPR1 _MMIO(0x4F074)
1433/* SKL VccIO mask */ 1484/* SKL VccIO mask */
1434#define SKL_VCCIO_MASK 0x1 1485#define SKL_VCCIO_MASK 0x1
1435/* SKL balance leg register */ 1486/* SKL balance leg register */
1436#define DISPIO_CR_TX_BMU_CR0 0x6C00C 1487#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
1437/* I_boost values */ 1488/* I_boost values */
1438#define BALANCE_LEG_SHIFT(port) (8+3*(port)) 1489#define BALANCE_LEG_SHIFT(port) (8+3*(port))
1439#define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) 1490#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
@@ -1450,7 +1501,7 @@ enum skl_disp_power_wells {
1450 * [0-15] @ 0x100000 gen6,vlv,chv 1501 * [0-15] @ 0x100000 gen6,vlv,chv
1451 * [0-31] @ 0x100000 gen7+ 1502 * [0-31] @ 0x100000 gen7+
1452 */ 1503 */
1453#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4) 1504#define FENCE_REG(i) _MMIO(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
1454#define I830_FENCE_START_MASK 0x07f80000 1505#define I830_FENCE_START_MASK 0x07f80000
1455#define I830_FENCE_TILING_Y_SHIFT 12 1506#define I830_FENCE_TILING_Y_SHIFT 12
1456#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 1507#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
@@ -1463,21 +1514,21 @@ enum skl_disp_power_wells {
1463#define I915_FENCE_START_MASK 0x0ff00000 1514#define I915_FENCE_START_MASK 0x0ff00000
1464#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) 1515#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
1465 1516
1466#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8) 1517#define FENCE_REG_965_LO(i) _MMIO(0x03000 + (i) * 8)
1467#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4) 1518#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
1468#define I965_FENCE_PITCH_SHIFT 2 1519#define I965_FENCE_PITCH_SHIFT 2
1469#define I965_FENCE_TILING_Y_SHIFT 1 1520#define I965_FENCE_TILING_Y_SHIFT 1
1470#define I965_FENCE_REG_VALID (1<<0) 1521#define I965_FENCE_REG_VALID (1<<0)
1471#define I965_FENCE_MAX_PITCH_VAL 0x0400 1522#define I965_FENCE_MAX_PITCH_VAL 0x0400
1472 1523
1473#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8) 1524#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
1474#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4) 1525#define FENCE_REG_GEN6_HI(i) _MMIO(0x100000 + (i) * 8 + 4)
1475#define GEN6_FENCE_PITCH_SHIFT 32 1526#define GEN6_FENCE_PITCH_SHIFT 32
1476#define GEN7_FENCE_MAX_PITCH_VAL 0x0800 1527#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
1477 1528
1478 1529
1479/* control register for cpu gtt access */ 1530/* control register for cpu gtt access */
1480#define TILECTL 0x101000 1531#define TILECTL _MMIO(0x101000)
1481#define TILECTL_SWZCTL (1 << 0) 1532#define TILECTL_SWZCTL (1 << 0)
1482#define TILECTL_TLBPF (1 << 1) 1533#define TILECTL_TLBPF (1 << 1)
1483#define TILECTL_TLB_PREFETCH_DIS (1 << 2) 1534#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
@@ -1486,30 +1537,30 @@ enum skl_disp_power_wells {
1486/* 1537/*
1487 * Instruction and interrupt control regs 1538 * Instruction and interrupt control regs
1488 */ 1539 */
1489#define PGTBL_CTL 0x02020 1540#define PGTBL_CTL _MMIO(0x02020)
1490#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ 1541#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
1491#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ 1542#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
1492#define PGTBL_ER 0x02024 1543#define PGTBL_ER _MMIO(0x02024)
1493#define PRB0_BASE (0x2030-0x30) 1544#define PRB0_BASE (0x2030-0x30)
1494#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */ 1545#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
1495#define PRB2_BASE (0x2050-0x30) /* gen3 */ 1546#define PRB2_BASE (0x2050-0x30) /* gen3 */
1496#define SRB0_BASE (0x2100-0x30) /* gen2 */ 1547#define SRB0_BASE (0x2100-0x30) /* gen2 */
1497#define SRB1_BASE (0x2110-0x30) /* gen2 */ 1548#define SRB1_BASE (0x2110-0x30) /* gen2 */
1498#define SRB2_BASE (0x2120-0x30) /* 830 */ 1549#define SRB2_BASE (0x2120-0x30) /* 830 */
1499#define SRB3_BASE (0x2130-0x30) /* 830 */ 1550#define SRB3_BASE (0x2130-0x30) /* 830 */
1500#define RENDER_RING_BASE 0x02000 1551#define RENDER_RING_BASE 0x02000
1501#define BSD_RING_BASE 0x04000 1552#define BSD_RING_BASE 0x04000
1502#define GEN6_BSD_RING_BASE 0x12000 1553#define GEN6_BSD_RING_BASE 0x12000
1503#define GEN8_BSD2_RING_BASE 0x1c000 1554#define GEN8_BSD2_RING_BASE 0x1c000
1504#define VEBOX_RING_BASE 0x1a000 1555#define VEBOX_RING_BASE 0x1a000
1505#define BLT_RING_BASE 0x22000 1556#define BLT_RING_BASE 0x22000
1506#define RING_TAIL(base) ((base)+0x30) 1557#define RING_TAIL(base) _MMIO((base)+0x30)
1507#define RING_HEAD(base) ((base)+0x34) 1558#define RING_HEAD(base) _MMIO((base)+0x34)
1508#define RING_START(base) ((base)+0x38) 1559#define RING_START(base) _MMIO((base)+0x38)
1509#define RING_CTL(base) ((base)+0x3c) 1560#define RING_CTL(base) _MMIO((base)+0x3c)
1510#define RING_SYNC_0(base) ((base)+0x40) 1561#define RING_SYNC_0(base) _MMIO((base)+0x40)
1511#define RING_SYNC_1(base) ((base)+0x44) 1562#define RING_SYNC_1(base) _MMIO((base)+0x44)
1512#define RING_SYNC_2(base) ((base)+0x48) 1563#define RING_SYNC_2(base) _MMIO((base)+0x48)
1513#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) 1564#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
1514#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) 1565#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
1515#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) 1566#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
@@ -1522,51 +1573,52 @@ enum skl_disp_power_wells {
1522#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) 1573#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
1523#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) 1574#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
1524#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) 1575#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
1525#define GEN6_NOSYNC 0 1576#define GEN6_NOSYNC INVALID_MMIO_REG
1526#define RING_PSMI_CTL(base) ((base)+0x50) 1577#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
1527#define RING_MAX_IDLE(base) ((base)+0x54) 1578#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
1528#define RING_HWS_PGA(base) ((base)+0x80) 1579#define RING_HWS_PGA(base) _MMIO((base)+0x80)
1529#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 1580#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
1530#define RING_RESET_CTL(base) ((base)+0xd0) 1581#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
1531#define RESET_CTL_REQUEST_RESET (1 << 0) 1582#define RESET_CTL_REQUEST_RESET (1 << 0)
1532#define RESET_CTL_READY_TO_RESET (1 << 1) 1583#define RESET_CTL_READY_TO_RESET (1 << 1)
1533 1584
1534#define HSW_GTT_CACHE_EN 0x4024 1585#define HSW_GTT_CACHE_EN _MMIO(0x4024)
1535#define GTT_CACHE_EN_ALL 0xF0007FFF 1586#define GTT_CACHE_EN_ALL 0xF0007FFF
1536#define GEN7_WR_WATERMARK 0x4028 1587#define GEN7_WR_WATERMARK _MMIO(0x4028)
1537#define GEN7_GFX_PRIO_CTRL 0x402C 1588#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
1538#define ARB_MODE 0x4030 1589#define ARB_MODE _MMIO(0x4030)
1539#define ARB_MODE_SWIZZLE_SNB (1<<4) 1590#define ARB_MODE_SWIZZLE_SNB (1<<4)
1540#define ARB_MODE_SWIZZLE_IVB (1<<5) 1591#define ARB_MODE_SWIZZLE_IVB (1<<5)
1541#define GEN7_GFX_PEND_TLB0 0x4034 1592#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
1542#define GEN7_GFX_PEND_TLB1 0x4038 1593#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
1543/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */ 1594/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
1544#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4) 1595#define GEN7_LRA_LIMITS(i) _MMIO(0x403C + (i) * 4)
1545#define GEN7_LRA_LIMITS_REG_NUM 13 1596#define GEN7_LRA_LIMITS_REG_NUM 13
1546#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070 1597#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
1547#define GEN7_GFX_MAX_REQ_COUNT 0x4074 1598#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
1548 1599
1549#define GAMTARBMODE 0x04a08 1600#define GAMTARBMODE _MMIO(0x04a08)
1550#define ARB_MODE_BWGTLB_DISABLE (1<<9) 1601#define ARB_MODE_BWGTLB_DISABLE (1<<9)
1551#define ARB_MODE_SWIZZLE_BDW (1<<1) 1602#define ARB_MODE_SWIZZLE_BDW (1<<1)
1552#define RENDER_HWS_PGA_GEN7 (0x04080) 1603#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
1553#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 1604#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id)
1554#define RING_FAULT_GTTSEL_MASK (1<<11) 1605#define RING_FAULT_GTTSEL_MASK (1<<11)
1555#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) 1606#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
1556#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) 1607#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
1557#define RING_FAULT_VALID (1<<0) 1608#define RING_FAULT_VALID (1<<0)
1558#define DONE_REG 0x40b0 1609#define DONE_REG _MMIO(0x40b0)
1559#define GEN8_PRIVATE_PAT_LO 0x40e0 1610#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
1560#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4) 1611#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
1561#define BSD_HWS_PGA_GEN7 (0x04180) 1612#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
1562#define BLT_HWS_PGA_GEN7 (0x04280) 1613#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
1563#define VEBOX_HWS_PGA_GEN7 (0x04380) 1614#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
1564#define RING_ACTHD(base) ((base)+0x74) 1615#define RING_ACTHD(base) _MMIO((base)+0x74)
1565#define RING_ACTHD_UDW(base) ((base)+0x5c) 1616#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
1566#define RING_NOPID(base) ((base)+0x94) 1617#define RING_NOPID(base) _MMIO((base)+0x94)
1567#define RING_IMR(base) ((base)+0xa8) 1618#define RING_IMR(base) _MMIO((base)+0xa8)
1568#define RING_HWSTAM(base) ((base)+0x98) 1619#define RING_HWSTAM(base) _MMIO((base)+0x98)
1569#define RING_TIMESTAMP(base) ((base)+0x358) 1620#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
1621#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
1570#define TAIL_ADDR 0x001FFFF8 1622#define TAIL_ADDR 0x001FFFF8
1571#define HEAD_WRAP_COUNT 0xFFE00000 1623#define HEAD_WRAP_COUNT 0xFFE00000
1572#define HEAD_WRAP_ONE 0x00200000 1624#define HEAD_WRAP_ONE 0x00200000
@@ -1583,57 +1635,65 @@ enum skl_disp_power_wells {
1583#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ 1635#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
1584#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ 1636#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
1585 1637
1586#define GEN7_TLB_RD_ADDR 0x4700 1638#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
1587 1639
1588#if 0 1640#if 0
1589#define PRB0_TAIL 0x02030 1641#define PRB0_TAIL _MMIO(0x2030)
1590#define PRB0_HEAD 0x02034 1642#define PRB0_HEAD _MMIO(0x2034)
1591#define PRB0_START 0x02038 1643#define PRB0_START _MMIO(0x2038)
1592#define PRB0_CTL 0x0203c 1644#define PRB0_CTL _MMIO(0x203c)
1593#define PRB1_TAIL 0x02040 /* 915+ only */ 1645#define PRB1_TAIL _MMIO(0x2040) /* 915+ only */
1594#define PRB1_HEAD 0x02044 /* 915+ only */ 1646#define PRB1_HEAD _MMIO(0x2044) /* 915+ only */
1595#define PRB1_START 0x02048 /* 915+ only */ 1647#define PRB1_START _MMIO(0x2048) /* 915+ only */
1596#define PRB1_CTL 0x0204c /* 915+ only */ 1648#define PRB1_CTL _MMIO(0x204c) /* 915+ only */
1597#endif 1649#endif
1598#define IPEIR_I965 0x02064 1650#define IPEIR_I965 _MMIO(0x2064)
1599#define IPEHR_I965 0x02068 1651#define IPEHR_I965 _MMIO(0x2068)
1600#define GEN7_SC_INSTDONE 0x07100 1652#define GEN7_SC_INSTDONE _MMIO(0x7100)
1601#define GEN7_SAMPLER_INSTDONE 0x0e160 1653#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
1602#define GEN7_ROW_INSTDONE 0x0e164 1654#define GEN7_ROW_INSTDONE _MMIO(0xe164)
1603#define I915_NUM_INSTDONE_REG 4 1655#define I915_NUM_INSTDONE_REG 4
1604#define RING_IPEIR(base) ((base)+0x64) 1656#define RING_IPEIR(base) _MMIO((base)+0x64)
1605#define RING_IPEHR(base) ((base)+0x68) 1657#define RING_IPEHR(base) _MMIO((base)+0x68)
1606/* 1658/*
1607 * On GEN4, only the render ring INSTDONE exists and has a different 1659 * On GEN4, only the render ring INSTDONE exists and has a different
1608 * layout than the GEN7+ version. 1660 * layout than the GEN7+ version.
1609 * The GEN2 counterpart of this register is GEN2_INSTDONE. 1661 * The GEN2 counterpart of this register is GEN2_INSTDONE.
1610 */ 1662 */
1611#define RING_INSTDONE(base) ((base)+0x6c) 1663#define RING_INSTDONE(base) _MMIO((base)+0x6c)
1612#define RING_INSTPS(base) ((base)+0x70) 1664#define RING_INSTPS(base) _MMIO((base)+0x70)
1613#define RING_DMA_FADD(base) ((base)+0x78) 1665#define RING_DMA_FADD(base) _MMIO((base)+0x78)
1614#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */ 1666#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
1615#define RING_INSTPM(base) ((base)+0xc0) 1667#define RING_INSTPM(base) _MMIO((base)+0xc0)
1616#define RING_MI_MODE(base) ((base)+0x9c) 1668#define RING_MI_MODE(base) _MMIO((base)+0x9c)
1617#define INSTPS 0x02070 /* 965+ only */ 1669#define INSTPS _MMIO(0x2070) /* 965+ only */
1618#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */ 1670#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
1619#define ACTHD_I965 0x02074 1671#define ACTHD_I965 _MMIO(0x2074)
1620#define HWS_PGA 0x02080 1672#define HWS_PGA _MMIO(0x2080)
1621#define HWS_ADDRESS_MASK 0xfffff000 1673#define HWS_ADDRESS_MASK 0xfffff000
1622#define HWS_START_ADDRESS_SHIFT 4 1674#define HWS_START_ADDRESS_SHIFT 4
1623#define PWRCTXA 0x2088 /* 965GM+ only */ 1675#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
1624#define PWRCTX_EN (1<<0) 1676#define PWRCTX_EN (1<<0)
1625#define IPEIR 0x02088 1677#define IPEIR _MMIO(0x2088)
1626#define IPEHR 0x0208c 1678#define IPEHR _MMIO(0x208c)
1627#define GEN2_INSTDONE 0x02090 1679#define GEN2_INSTDONE _MMIO(0x2090)
1628#define NOPID 0x02094 1680#define NOPID _MMIO(0x2094)
1629#define HWSTAM 0x02098 1681#define HWSTAM _MMIO(0x2098)
1630#define DMA_FADD_I8XX 0x020d0 1682#define DMA_FADD_I8XX _MMIO(0x20d0)
1631#define RING_BBSTATE(base) ((base)+0x110) 1683#define RING_BBSTATE(base) _MMIO((base)+0x110)
1632#define RING_BBADDR(base) ((base)+0x140) 1684#define RING_BB_PPGTT (1 << 5)
1633#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */ 1685#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
1634 1686#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
1635#define ERROR_GEN6 0x040a0 1687#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
1636#define GEN7_ERR_INT 0x44040 1688#define RING_BBADDR(base) _MMIO((base)+0x140)
1689#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
1690#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
1691#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
1692#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
1693#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
1694
1695#define ERROR_GEN6 _MMIO(0x40a0)
1696#define GEN7_ERR_INT _MMIO(0x44040)
1637#define ERR_INT_POISON (1<<31) 1697#define ERR_INT_POISON (1<<31)
1638#define ERR_INT_MMIO_UNCLAIMED (1<<13) 1698#define ERR_INT_MMIO_UNCLAIMED (1<<13)
1639#define ERR_INT_PIPE_CRC_DONE_C (1<<8) 1699#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
@@ -1645,13 +1705,13 @@ enum skl_disp_power_wells {
1645#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 1705#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
1646#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) 1706#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
1647 1707
1648#define GEN8_FAULT_TLB_DATA0 0x04b10 1708#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
1649#define GEN8_FAULT_TLB_DATA1 0x04b14 1709#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
1650 1710
1651#define FPGA_DBG 0x42300 1711#define FPGA_DBG _MMIO(0x42300)
1652#define FPGA_DBG_RM_NOCLAIM (1<<31) 1712#define FPGA_DBG_RM_NOCLAIM (1<<31)
1653 1713
1654#define DERRMR 0x44050 1714#define DERRMR _MMIO(0x44050)
1655/* Note that HBLANK events are reserved on bdw+ */ 1715/* Note that HBLANK events are reserved on bdw+ */
1656#define DERRMR_PIPEA_SCANLINE (1<<0) 1716#define DERRMR_PIPEA_SCANLINE (1<<0)
1657#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) 1717#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
@@ -1675,29 +1735,29 @@ enum skl_disp_power_wells {
1675 * for various sorts of correct behavior. The top 16 bits of each are 1735 * for various sorts of correct behavior. The top 16 bits of each are
1676 * the enables for writing to the corresponding low bit. 1736 * the enables for writing to the corresponding low bit.
1677 */ 1737 */
1678#define _3D_CHICKEN 0x02084 1738#define _3D_CHICKEN _MMIO(0x2084)
1679#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) 1739#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
1680#define _3D_CHICKEN2 0x0208c 1740#define _3D_CHICKEN2 _MMIO(0x208c)
1681/* Disables pipelining of read flushes past the SF-WIZ interface. 1741/* Disables pipelining of read flushes past the SF-WIZ interface.
1682 * Required on all Ironlake steppings according to the B-Spec, but the 1742 * Required on all Ironlake steppings according to the B-Spec, but the
1683 * particular danger of not doing so is not specified. 1743 * particular danger of not doing so is not specified.
1684 */ 1744 */
1685# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 1745# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
1686#define _3D_CHICKEN3 0x02090 1746#define _3D_CHICKEN3 _MMIO(0x2090)
1687#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 1747#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
1688#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 1748#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
1689#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ 1749#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
1690#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ 1750#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
1691 1751
1692#define MI_MODE 0x0209c 1752#define MI_MODE _MMIO(0x209c)
1693# define VS_TIMER_DISPATCH (1 << 6) 1753# define VS_TIMER_DISPATCH (1 << 6)
1694# define MI_FLUSH_ENABLE (1 << 12) 1754# define MI_FLUSH_ENABLE (1 << 12)
1695# define ASYNC_FLIP_PERF_DISABLE (1 << 14) 1755# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
1696# define MODE_IDLE (1 << 9) 1756# define MODE_IDLE (1 << 9)
1697# define STOP_RING (1 << 8) 1757# define STOP_RING (1 << 8)
1698 1758
1699#define GEN6_GT_MODE 0x20d0 1759#define GEN6_GT_MODE _MMIO(0x20d0)
1700#define GEN7_GT_MODE 0x7008 1760#define GEN7_GT_MODE _MMIO(0x7008)
1701#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) 1761#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
1702#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) 1762#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
1703#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) 1763#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
@@ -1707,9 +1767,9 @@ enum skl_disp_power_wells {
1707#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) 1767#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
1708#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) 1768#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
1709 1769
1710#define GFX_MODE 0x02520 1770#define GFX_MODE _MMIO(0x2520)
1711#define GFX_MODE_GEN7 0x0229c 1771#define GFX_MODE_GEN7 _MMIO(0x229c)
1712#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) 1772#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
1713#define GFX_RUN_LIST_ENABLE (1<<15) 1773#define GFX_RUN_LIST_ENABLE (1<<15)
1714#define GFX_INTERRUPT_STEERING (1<<14) 1774#define GFX_INTERRUPT_STEERING (1<<14)
1715#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) 1775#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
@@ -1727,36 +1787,36 @@ enum skl_disp_power_wells {
1727#define VLV_DISPLAY_BASE 0x180000 1787#define VLV_DISPLAY_BASE 0x180000
1728#define VLV_MIPI_BASE VLV_DISPLAY_BASE 1788#define VLV_MIPI_BASE VLV_DISPLAY_BASE
1729 1789
1730#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030) 1790#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
1731#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034) 1791#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
1732#define SCPD0 0x0209c /* 915+ only */ 1792#define SCPD0 _MMIO(0x209c) /* 915+ only */
1733#define IER 0x020a0 1793#define IER _MMIO(0x20a0)
1734#define IIR 0x020a4 1794#define IIR _MMIO(0x20a4)
1735#define IMR 0x020a8 1795#define IMR _MMIO(0x20a8)
1736#define ISR 0x020ac 1796#define ISR _MMIO(0x20ac)
1737#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) 1797#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
1738#define GINT_DIS (1<<22) 1798#define GINT_DIS (1<<22)
1739#define GCFG_DIS (1<<8) 1799#define GCFG_DIS (1<<8)
1740#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064) 1800#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
1741#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) 1801#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
1742#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) 1802#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
1743#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) 1803#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
1744#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) 1804#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
1745#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) 1805#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
1746#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) 1806#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
1747#define VLV_PCBR_ADDR_SHIFT 12 1807#define VLV_PCBR_ADDR_SHIFT 12
1748 1808
1749#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ 1809#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
1750#define EIR 0x020b0 1810#define EIR _MMIO(0x20b0)
1751#define EMR 0x020b4 1811#define EMR _MMIO(0x20b4)
1752#define ESR 0x020b8 1812#define ESR _MMIO(0x20b8)
1753#define GM45_ERROR_PAGE_TABLE (1<<5) 1813#define GM45_ERROR_PAGE_TABLE (1<<5)
1754#define GM45_ERROR_MEM_PRIV (1<<4) 1814#define GM45_ERROR_MEM_PRIV (1<<4)
1755#define I915_ERROR_PAGE_TABLE (1<<4) 1815#define I915_ERROR_PAGE_TABLE (1<<4)
1756#define GM45_ERROR_CP_PRIV (1<<3) 1816#define GM45_ERROR_CP_PRIV (1<<3)
1757#define I915_ERROR_MEMORY_REFRESH (1<<1) 1817#define I915_ERROR_MEMORY_REFRESH (1<<1)
1758#define I915_ERROR_INSTRUCTION (1<<0) 1818#define I915_ERROR_INSTRUCTION (1<<0)
1759#define INSTPM 0x020c0 1819#define INSTPM _MMIO(0x20c0)
1760#define INSTPM_SELF_EN (1<<12) /* 915GM only */ 1820#define INSTPM_SELF_EN (1<<12) /* 915GM only */
1761#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts 1821#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
1762 will not assert AGPBUSY# and will only 1822 will not assert AGPBUSY# and will only
@@ -1764,14 +1824,14 @@ enum skl_disp_power_wells {
1764#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ 1824#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
1765#define INSTPM_TLB_INVALIDATE (1<<9) 1825#define INSTPM_TLB_INVALIDATE (1<<9)
1766#define INSTPM_SYNC_FLUSH (1<<5) 1826#define INSTPM_SYNC_FLUSH (1<<5)
1767#define ACTHD 0x020c8 1827#define ACTHD _MMIO(0x20c8)
1768#define MEM_MODE 0x020cc 1828#define MEM_MODE _MMIO(0x20cc)
1769#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */ 1829#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
1770#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */ 1830#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
1771#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */ 1831#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
1772#define FW_BLC 0x020d8 1832#define FW_BLC _MMIO(0x20d8)
1773#define FW_BLC2 0x020dc 1833#define FW_BLC2 _MMIO(0x20dc)
1774#define FW_BLC_SELF 0x020e0 /* 915+ only */ 1834#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
1775#define FW_BLC_SELF_EN_MASK (1<<31) 1835#define FW_BLC_SELF_EN_MASK (1<<31)
1776#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ 1836#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
1777#define FW_BLC_SELF_EN (1<<15) /* 945 only */ 1837#define FW_BLC_SELF_EN (1<<15) /* 945 only */
@@ -1779,7 +1839,7 @@ enum skl_disp_power_wells {
1779#define MM_FIFO_WATERMARK 0x0001F000 1839#define MM_FIFO_WATERMARK 0x0001F000
1780#define LM_BURST_LENGTH 0x00000700 1840#define LM_BURST_LENGTH 0x00000700
1781#define LM_FIFO_WATERMARK 0x0000001F 1841#define LM_FIFO_WATERMARK 0x0000001F
1782#define MI_ARB_STATE 0x020e4 /* 915+ only */ 1842#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
1783 1843
1784/* Make render/texture TLB fetches lower priorty than associated data 1844/* Make render/texture TLB fetches lower priorty than associated data
1785 * fetches. This is not turned on by default 1845 * fetches. This is not turned on by default
@@ -1843,11 +1903,11 @@ enum skl_disp_power_wells {
1843#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ 1903#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
1844#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 1904#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
1845 1905
1846#define MI_STATE 0x020e4 /* gen2 only */ 1906#define MI_STATE _MMIO(0x20e4) /* gen2 only */
1847#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */ 1907#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
1848#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */ 1908#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
1849 1909
1850#define CACHE_MODE_0 0x02120 /* 915+ only */ 1910#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
1851#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) 1911#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
1852#define CM0_IZ_OPT_DISABLE (1<<6) 1912#define CM0_IZ_OPT_DISABLE (1<<6)
1853#define CM0_ZR_OPT_DISABLE (1<<5) 1913#define CM0_ZR_OPT_DISABLE (1<<5)
@@ -1856,32 +1916,32 @@ enum skl_disp_power_wells {
1856#define CM0_COLOR_EVICT_DISABLE (1<<3) 1916#define CM0_COLOR_EVICT_DISABLE (1<<3)
1857#define CM0_DEPTH_WRITE_DISABLE (1<<1) 1917#define CM0_DEPTH_WRITE_DISABLE (1<<1)
1858#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 1918#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
1859#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 1919#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
1860#define GFX_FLSH_CNTL_GEN6 0x101008 1920#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
1861#define GFX_FLSH_CNTL_EN (1<<0) 1921#define GFX_FLSH_CNTL_EN (1<<0)
1862#define ECOSKPD 0x021d0 1922#define ECOSKPD _MMIO(0x21d0)
1863#define ECO_GATING_CX_ONLY (1<<3) 1923#define ECO_GATING_CX_ONLY (1<<3)
1864#define ECO_FLIP_DONE (1<<0) 1924#define ECO_FLIP_DONE (1<<0)
1865 1925
1866#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ 1926#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
1867#define RC_OP_FLUSH_ENABLE (1<<0) 1927#define RC_OP_FLUSH_ENABLE (1<<0)
1868#define HIZ_RAW_STALL_OPT_DISABLE (1<<2) 1928#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
1869#define CACHE_MODE_1 0x7004 /* IVB+ */ 1929#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
1870#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 1930#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
1871#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) 1931#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
1872#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1) 1932#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
1873 1933
1874#define GEN6_BLITTER_ECOSKPD 0x221d0 1934#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
1875#define GEN6_BLITTER_LOCK_SHIFT 16 1935#define GEN6_BLITTER_LOCK_SHIFT 16
1876#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 1936#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
1877 1937
1878#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 1938#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
1879#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) 1939#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
1880#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 1940#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1881#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) 1941#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
1882 1942
1883/* Fuse readout registers for GT */ 1943/* Fuse readout registers for GT */
1884#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) 1944#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
1885#define CHV_FGT_DISABLE_SS0 (1 << 10) 1945#define CHV_FGT_DISABLE_SS0 (1 << 10)
1886#define CHV_FGT_DISABLE_SS1 (1 << 11) 1946#define CHV_FGT_DISABLE_SS1 (1 << 11)
1887#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 1947#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
@@ -1893,7 +1953,7 @@ enum skl_disp_power_wells {
1893#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 1953#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
1894#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) 1954#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
1895 1955
1896#define GEN8_FUSE2 0x9120 1956#define GEN8_FUSE2 _MMIO(0x9120)
1897#define GEN8_F2_SS_DIS_SHIFT 21 1957#define GEN8_F2_SS_DIS_SHIFT 21
1898#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT) 1958#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
1899#define GEN8_F2_S_ENA_SHIFT 25 1959#define GEN8_F2_S_ENA_SHIFT 25
@@ -1902,22 +1962,22 @@ enum skl_disp_power_wells {
1902#define GEN9_F2_SS_DIS_SHIFT 20 1962#define GEN9_F2_SS_DIS_SHIFT 20
1903#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) 1963#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
1904 1964
1905#define GEN8_EU_DISABLE0 0x9134 1965#define GEN8_EU_DISABLE0 _MMIO(0x9134)
1906#define GEN8_EU_DIS0_S0_MASK 0xffffff 1966#define GEN8_EU_DIS0_S0_MASK 0xffffff
1907#define GEN8_EU_DIS0_S1_SHIFT 24 1967#define GEN8_EU_DIS0_S1_SHIFT 24
1908#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT) 1968#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
1909 1969
1910#define GEN8_EU_DISABLE1 0x9138 1970#define GEN8_EU_DISABLE1 _MMIO(0x9138)
1911#define GEN8_EU_DIS1_S1_MASK 0xffff 1971#define GEN8_EU_DIS1_S1_MASK 0xffff
1912#define GEN8_EU_DIS1_S2_SHIFT 16 1972#define GEN8_EU_DIS1_S2_SHIFT 16
1913#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT) 1973#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
1914 1974
1915#define GEN8_EU_DISABLE2 0x913c 1975#define GEN8_EU_DISABLE2 _MMIO(0x913c)
1916#define GEN8_EU_DIS2_S2_MASK 0xff 1976#define GEN8_EU_DIS2_S2_MASK 0xff
1917 1977
1918#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4) 1978#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
1919 1979
1920#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 1980#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
1921#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 1981#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
1922#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) 1982#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
1923#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) 1983#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
@@ -1995,9 +2055,9 @@ enum skl_disp_power_wells {
1995#define I915_ASLE_INTERRUPT (1<<0) 2055#define I915_ASLE_INTERRUPT (1<<0)
1996#define I915_BSD_USER_INTERRUPT (1<<25) 2056#define I915_BSD_USER_INTERRUPT (1<<25)
1997 2057
1998#define GEN6_BSD_RNCID 0x12198 2058#define GEN6_BSD_RNCID _MMIO(0x12198)
1999 2059
2000#define GEN7_FF_THREAD_MODE 0x20a0 2060#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
2001#define GEN7_FF_SCHED_MASK 0x0077070 2061#define GEN7_FF_SCHED_MASK 0x0077070
2002#define GEN8_FF_DS_REF_CNT_FFME (1 << 19) 2062#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
2003#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) 2063#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
@@ -2018,9 +2078,9 @@ enum skl_disp_power_wells {
2018 * Framebuffer compression (915+ only) 2078 * Framebuffer compression (915+ only)
2019 */ 2079 */
2020 2080
2021#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ 2081#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
2022#define FBC_LL_BASE 0x03204 /* 4k page aligned */ 2082#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
2023#define FBC_CONTROL 0x03208 2083#define FBC_CONTROL _MMIO(0x3208)
2024#define FBC_CTL_EN (1<<31) 2084#define FBC_CTL_EN (1<<31)
2025#define FBC_CTL_PERIODIC (1<<30) 2085#define FBC_CTL_PERIODIC (1<<30)
2026#define FBC_CTL_INTERVAL_SHIFT (16) 2086#define FBC_CTL_INTERVAL_SHIFT (16)
@@ -2028,14 +2088,14 @@ enum skl_disp_power_wells {
2028#define FBC_CTL_C3_IDLE (1<<13) 2088#define FBC_CTL_C3_IDLE (1<<13)
2029#define FBC_CTL_STRIDE_SHIFT (5) 2089#define FBC_CTL_STRIDE_SHIFT (5)
2030#define FBC_CTL_FENCENO_SHIFT (0) 2090#define FBC_CTL_FENCENO_SHIFT (0)
2031#define FBC_COMMAND 0x0320c 2091#define FBC_COMMAND _MMIO(0x320c)
2032#define FBC_CMD_COMPRESS (1<<0) 2092#define FBC_CMD_COMPRESS (1<<0)
2033#define FBC_STATUS 0x03210 2093#define FBC_STATUS _MMIO(0x3210)
2034#define FBC_STAT_COMPRESSING (1<<31) 2094#define FBC_STAT_COMPRESSING (1<<31)
2035#define FBC_STAT_COMPRESSED (1<<30) 2095#define FBC_STAT_COMPRESSED (1<<30)
2036#define FBC_STAT_MODIFIED (1<<29) 2096#define FBC_STAT_MODIFIED (1<<29)
2037#define FBC_STAT_CURRENT_LINE_SHIFT (0) 2097#define FBC_STAT_CURRENT_LINE_SHIFT (0)
2038#define FBC_CONTROL2 0x03214 2098#define FBC_CONTROL2 _MMIO(0x3214)
2039#define FBC_CTL_FENCE_DBL (0<<4) 2099#define FBC_CTL_FENCE_DBL (0<<4)
2040#define FBC_CTL_IDLE_IMM (0<<2) 2100#define FBC_CTL_IDLE_IMM (0<<2)
2041#define FBC_CTL_IDLE_FULL (1<<2) 2101#define FBC_CTL_IDLE_FULL (1<<2)
@@ -2043,17 +2103,17 @@ enum skl_disp_power_wells {
2043#define FBC_CTL_IDLE_DEBUG (3<<2) 2103#define FBC_CTL_IDLE_DEBUG (3<<2)
2044#define FBC_CTL_CPU_FENCE (1<<1) 2104#define FBC_CTL_CPU_FENCE (1<<1)
2045#define FBC_CTL_PLANE(plane) ((plane)<<0) 2105#define FBC_CTL_PLANE(plane) ((plane)<<0)
2046#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ 2106#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
2047#define FBC_TAG(i) (0x03300 + (i) * 4) 2107#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
2048 2108
2049#define FBC_STATUS2 0x43214 2109#define FBC_STATUS2 _MMIO(0x43214)
2050#define FBC_COMPRESSION_MASK 0x7ff 2110#define FBC_COMPRESSION_MASK 0x7ff
2051 2111
2052#define FBC_LL_SIZE (1536) 2112#define FBC_LL_SIZE (1536)
2053 2113
2054/* Framebuffer compression for GM45+ */ 2114/* Framebuffer compression for GM45+ */
2055#define DPFC_CB_BASE 0x3200 2115#define DPFC_CB_BASE _MMIO(0x3200)
2056#define DPFC_CONTROL 0x3208 2116#define DPFC_CONTROL _MMIO(0x3208)
2057#define DPFC_CTL_EN (1<<31) 2117#define DPFC_CTL_EN (1<<31)
2058#define DPFC_CTL_PLANE(plane) ((plane)<<30) 2118#define DPFC_CTL_PLANE(plane) ((plane)<<30)
2059#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) 2119#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
@@ -2064,37 +2124,37 @@ enum skl_disp_power_wells {
2064#define DPFC_CTL_LIMIT_1X (0<<6) 2124#define DPFC_CTL_LIMIT_1X (0<<6)
2065#define DPFC_CTL_LIMIT_2X (1<<6) 2125#define DPFC_CTL_LIMIT_2X (1<<6)
2066#define DPFC_CTL_LIMIT_4X (2<<6) 2126#define DPFC_CTL_LIMIT_4X (2<<6)
2067#define DPFC_RECOMP_CTL 0x320c 2127#define DPFC_RECOMP_CTL _MMIO(0x320c)
2068#define DPFC_RECOMP_STALL_EN (1<<27) 2128#define DPFC_RECOMP_STALL_EN (1<<27)
2069#define DPFC_RECOMP_STALL_WM_SHIFT (16) 2129#define DPFC_RECOMP_STALL_WM_SHIFT (16)
2070#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) 2130#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
2071#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) 2131#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
2072#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) 2132#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
2073#define DPFC_STATUS 0x3210 2133#define DPFC_STATUS _MMIO(0x3210)
2074#define DPFC_INVAL_SEG_SHIFT (16) 2134#define DPFC_INVAL_SEG_SHIFT (16)
2075#define DPFC_INVAL_SEG_MASK (0x07ff0000) 2135#define DPFC_INVAL_SEG_MASK (0x07ff0000)
2076#define DPFC_COMP_SEG_SHIFT (0) 2136#define DPFC_COMP_SEG_SHIFT (0)
2077#define DPFC_COMP_SEG_MASK (0x000003ff) 2137#define DPFC_COMP_SEG_MASK (0x000003ff)
2078#define DPFC_STATUS2 0x3214 2138#define DPFC_STATUS2 _MMIO(0x3214)
2079#define DPFC_FENCE_YOFF 0x3218 2139#define DPFC_FENCE_YOFF _MMIO(0x3218)
2080#define DPFC_CHICKEN 0x3224 2140#define DPFC_CHICKEN _MMIO(0x3224)
2081#define DPFC_HT_MODIFY (1<<31) 2141#define DPFC_HT_MODIFY (1<<31)
2082 2142
2083/* Framebuffer compression for Ironlake */ 2143/* Framebuffer compression for Ironlake */
2084#define ILK_DPFC_CB_BASE 0x43200 2144#define ILK_DPFC_CB_BASE _MMIO(0x43200)
2085#define ILK_DPFC_CONTROL 0x43208 2145#define ILK_DPFC_CONTROL _MMIO(0x43208)
2086#define FBC_CTL_FALSE_COLOR (1<<10) 2146#define FBC_CTL_FALSE_COLOR (1<<10)
2087/* The bit 28-8 is reserved */ 2147/* The bit 28-8 is reserved */
2088#define DPFC_RESERVED (0x1FFFFF00) 2148#define DPFC_RESERVED (0x1FFFFF00)
2089#define ILK_DPFC_RECOMP_CTL 0x4320c 2149#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
2090#define ILK_DPFC_STATUS 0x43210 2150#define ILK_DPFC_STATUS _MMIO(0x43210)
2091#define ILK_DPFC_FENCE_YOFF 0x43218 2151#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
2092#define ILK_DPFC_CHICKEN 0x43224 2152#define ILK_DPFC_CHICKEN _MMIO(0x43224)
2093#define ILK_FBC_RT_BASE 0x2128 2153#define ILK_FBC_RT_BASE _MMIO(0x2128)
2094#define ILK_FBC_RT_VALID (1<<0) 2154#define ILK_FBC_RT_VALID (1<<0)
2095#define SNB_FBC_FRONT_BUFFER (1<<1) 2155#define SNB_FBC_FRONT_BUFFER (1<<1)
2096 2156
2097#define ILK_DISPLAY_CHICKEN1 0x42000 2157#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
2098#define ILK_FBCQ_DIS (1<<22) 2158#define ILK_FBCQ_DIS (1<<22)
2099#define ILK_PABSTRETCH_DIS (1<<21) 2159#define ILK_PABSTRETCH_DIS (1<<21)
2100 2160
@@ -2104,31 +2164,31 @@ enum skl_disp_power_wells {
2104 * 2164 *
2105 * The following two registers are of type GTTMMADR 2165 * The following two registers are of type GTTMMADR
2106 */ 2166 */
2107#define SNB_DPFC_CTL_SA 0x100100 2167#define SNB_DPFC_CTL_SA _MMIO(0x100100)
2108#define SNB_CPU_FENCE_ENABLE (1<<29) 2168#define SNB_CPU_FENCE_ENABLE (1<<29)
2109#define DPFC_CPU_FENCE_OFFSET 0x100104 2169#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
2110 2170
2111/* Framebuffer compression for Ivybridge */ 2171/* Framebuffer compression for Ivybridge */
2112#define IVB_FBC_RT_BASE 0x7020 2172#define IVB_FBC_RT_BASE _MMIO(0x7020)
2113 2173
2114#define IPS_CTL 0x43408 2174#define IPS_CTL _MMIO(0x43408)
2115#define IPS_ENABLE (1 << 31) 2175#define IPS_ENABLE (1 << 31)
2116 2176
2117#define MSG_FBC_REND_STATE 0x50380 2177#define MSG_FBC_REND_STATE _MMIO(0x50380)
2118#define FBC_REND_NUKE (1<<2) 2178#define FBC_REND_NUKE (1<<2)
2119#define FBC_REND_CACHE_CLEAN (1<<1) 2179#define FBC_REND_CACHE_CLEAN (1<<1)
2120 2180
2121/* 2181/*
2122 * GPIO regs 2182 * GPIO regs
2123 */ 2183 */
2124#define GPIOA 0x5010 2184#define GPIOA _MMIO(0x5010)
2125#define GPIOB 0x5014 2185#define GPIOB _MMIO(0x5014)
2126#define GPIOC 0x5018 2186#define GPIOC _MMIO(0x5018)
2127#define GPIOD 0x501c 2187#define GPIOD _MMIO(0x501c)
2128#define GPIOE 0x5020 2188#define GPIOE _MMIO(0x5020)
2129#define GPIOF 0x5024 2189#define GPIOF _MMIO(0x5024)
2130#define GPIOG 0x5028 2190#define GPIOG _MMIO(0x5028)
2131#define GPIOH 0x502c 2191#define GPIOH _MMIO(0x502c)
2132# define GPIO_CLOCK_DIR_MASK (1 << 0) 2192# define GPIO_CLOCK_DIR_MASK (1 << 0)
2133# define GPIO_CLOCK_DIR_IN (0 << 1) 2193# define GPIO_CLOCK_DIR_IN (0 << 1)
2134# define GPIO_CLOCK_DIR_OUT (1 << 1) 2194# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2144,7 +2204,7 @@ enum skl_disp_power_wells {
2144# define GPIO_DATA_VAL_IN (1 << 12) 2204# define GPIO_DATA_VAL_IN (1 << 12)
2145# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 2205# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
2146 2206
2147#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ 2207#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
2148#define GMBUS_RATE_100KHZ (0<<8) 2208#define GMBUS_RATE_100KHZ (0<<8)
2149#define GMBUS_RATE_50KHZ (1<<8) 2209#define GMBUS_RATE_50KHZ (1<<8)
2150#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ 2210#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
@@ -2163,7 +2223,7 @@ enum skl_disp_power_wells {
2163#define GMBUS_PIN_2_BXT 2 2223#define GMBUS_PIN_2_BXT 2
2164#define GMBUS_PIN_3_BXT 3 2224#define GMBUS_PIN_3_BXT 3
2165#define GMBUS_NUM_PINS 7 /* including 0 */ 2225#define GMBUS_NUM_PINS 7 /* including 0 */
2166#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */ 2226#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
2167#define GMBUS_SW_CLR_INT (1<<31) 2227#define GMBUS_SW_CLR_INT (1<<31)
2168#define GMBUS_SW_RDY (1<<30) 2228#define GMBUS_SW_RDY (1<<30)
2169#define GMBUS_ENT (1<<29) /* enable timeout */ 2229#define GMBUS_ENT (1<<29) /* enable timeout */
@@ -2177,7 +2237,7 @@ enum skl_disp_power_wells {
2177#define GMBUS_SLAVE_ADDR_SHIFT 1 2237#define GMBUS_SLAVE_ADDR_SHIFT 1
2178#define GMBUS_SLAVE_READ (1<<0) 2238#define GMBUS_SLAVE_READ (1<<0)
2179#define GMBUS_SLAVE_WRITE (0<<0) 2239#define GMBUS_SLAVE_WRITE (0<<0)
2180#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */ 2240#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
2181#define GMBUS_INUSE (1<<15) 2241#define GMBUS_INUSE (1<<15)
2182#define GMBUS_HW_WAIT_PHASE (1<<14) 2242#define GMBUS_HW_WAIT_PHASE (1<<14)
2183#define GMBUS_STALL_TIMEOUT (1<<13) 2243#define GMBUS_STALL_TIMEOUT (1<<13)
@@ -2185,14 +2245,14 @@ enum skl_disp_power_wells {
2185#define GMBUS_HW_RDY (1<<11) 2245#define GMBUS_HW_RDY (1<<11)
2186#define GMBUS_SATOER (1<<10) 2246#define GMBUS_SATOER (1<<10)
2187#define GMBUS_ACTIVE (1<<9) 2247#define GMBUS_ACTIVE (1<<9)
2188#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ 2248#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
2189#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ 2249#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
2190#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) 2250#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
2191#define GMBUS_NAK_EN (1<<3) 2251#define GMBUS_NAK_EN (1<<3)
2192#define GMBUS_IDLE_EN (1<<2) 2252#define GMBUS_IDLE_EN (1<<2)
2193#define GMBUS_HW_WAIT_EN (1<<1) 2253#define GMBUS_HW_WAIT_EN (1<<1)
2194#define GMBUS_HW_RDY_EN (1<<0) 2254#define GMBUS_HW_RDY_EN (1<<0)
2195#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */ 2255#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
2196#define GMBUS_2BYTE_INDEX_EN (1<<31) 2256#define GMBUS_2BYTE_INDEX_EN (1<<31)
2197 2257
2198/* 2258/*
@@ -2201,11 +2261,11 @@ enum skl_disp_power_wells {
2201#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) 2261#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
2202#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) 2262#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
2203#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) 2263#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
2204#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) 2264#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
2205 2265
2206#define VGA0 0x6000 2266#define VGA0 _MMIO(0x6000)
2207#define VGA1 0x6004 2267#define VGA1 _MMIO(0x6004)
2208#define VGA_PD 0x6010 2268#define VGA_PD _MMIO(0x6010)
2209#define VGA0_PD_P2_DIV_4 (1 << 7) 2269#define VGA0_PD_P2_DIV_4 (1 << 7)
2210#define VGA0_PD_P1_DIV_2 (1 << 5) 2270#define VGA0_PD_P1_DIV_2 (1 << 5)
2211#define VGA0_PD_P1_SHIFT 0 2271#define VGA0_PD_P1_SHIFT 0
@@ -2241,9 +2301,9 @@ enum skl_disp_power_wells {
2241#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 2301#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
2242 2302
2243/* Additional CHV pll/phy registers */ 2303/* Additional CHV pll/phy registers */
2244#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) 2304#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
2245#define DPLL_PORTD_READY_MASK (0xf) 2305#define DPLL_PORTD_READY_MASK (0xf)
2246#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) 2306#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
2247#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27)) 2307#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
2248#define PHY_LDO_DELAY_0NS 0x0 2308#define PHY_LDO_DELAY_0NS 0x0
2249#define PHY_LDO_DELAY_200NS 0x1 2309#define PHY_LDO_DELAY_200NS 0x1
@@ -2254,7 +2314,7 @@ enum skl_disp_power_wells {
2254#define PHY_CH_DEEP_PSR 0x7 2314#define PHY_CH_DEEP_PSR 0x7
2255#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) 2315#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
2256#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) 2316#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
2257#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) 2317#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
2258#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) 2318#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
2259#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch)))) 2319#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
2260#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline)))) 2320#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
@@ -2300,7 +2360,7 @@ enum skl_disp_power_wells {
2300#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) 2360#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
2301#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) 2361#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
2302#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) 2362#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
2303#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) 2363#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
2304 2364
2305/* 2365/*
2306 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 2366 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2339,12 +2399,12 @@ enum skl_disp_power_wells {
2339#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 2399#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
2340#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 2400#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
2341 2401
2342#define _FPA0 0x06040 2402#define _FPA0 0x6040
2343#define _FPA1 0x06044 2403#define _FPA1 0x6044
2344#define _FPB0 0x06048 2404#define _FPB0 0x6048
2345#define _FPB1 0x0604c 2405#define _FPB1 0x604c
2346#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) 2406#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
2347#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) 2407#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
2348#define FP_N_DIV_MASK 0x003f0000 2408#define FP_N_DIV_MASK 0x003f0000
2349#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 2409#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
2350#define FP_N_DIV_SHIFT 16 2410#define FP_N_DIV_SHIFT 16
@@ -2353,7 +2413,7 @@ enum skl_disp_power_wells {
2353#define FP_M2_DIV_MASK 0x0000003f 2413#define FP_M2_DIV_MASK 0x0000003f
2354#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff 2414#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
2355#define FP_M2_DIV_SHIFT 0 2415#define FP_M2_DIV_SHIFT 0
2356#define DPLL_TEST 0x606c 2416#define DPLL_TEST _MMIO(0x606c)
2357#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 2417#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
2358#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) 2418#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
2359#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) 2419#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
@@ -2364,12 +2424,12 @@ enum skl_disp_power_wells {
2364#define DPLLA_TEST_N_BYPASS (1 << 3) 2424#define DPLLA_TEST_N_BYPASS (1 << 3)
2365#define DPLLA_TEST_M_BYPASS (1 << 2) 2425#define DPLLA_TEST_M_BYPASS (1 << 2)
2366#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 2426#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
2367#define D_STATE 0x6104 2427#define D_STATE _MMIO(0x6104)
2368#define DSTATE_GFX_RESET_I830 (1<<6) 2428#define DSTATE_GFX_RESET_I830 (1<<6)
2369#define DSTATE_PLL_D3_OFF (1<<3) 2429#define DSTATE_PLL_D3_OFF (1<<3)
2370#define DSTATE_GFX_CLOCK_GATING (1<<1) 2430#define DSTATE_GFX_CLOCK_GATING (1<<1)
2371#define DSTATE_DOT_CLOCK_GATING (1<<0) 2431#define DSTATE_DOT_CLOCK_GATING (1<<0)
2372#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) 2432#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
2373# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ 2433# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
2374# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ 2434# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
2375# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ 2435# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -2408,7 +2468,7 @@ enum skl_disp_power_wells {
2408# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ 2468# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
2409# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ 2469# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
2410 2470
2411#define RENCLK_GATE_D1 0x6204 2471#define RENCLK_GATE_D1 _MMIO(0x6204)
2412# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ 2472# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
2413# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ 2473# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
2414# define PC_FE_CLOCK_GATE_DISABLE (1 << 11) 2474# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
@@ -2472,35 +2532,35 @@ enum skl_disp_power_wells {
2472# define I965_FT_CLOCK_GATE_DISABLE (1 << 1) 2532# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
2473# define I965_DM_CLOCK_GATE_DISABLE (1 << 0) 2533# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
2474 2534
2475#define RENCLK_GATE_D2 0x6208 2535#define RENCLK_GATE_D2 _MMIO(0x6208)
2476#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) 2536#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
2477#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) 2537#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
2478#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) 2538#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
2479 2539
2480#define VDECCLK_GATE_D 0x620C /* g4x only */ 2540#define VDECCLK_GATE_D _MMIO(0x620C) /* g4x only */
2481#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4) 2541#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
2482 2542
2483#define RAMCLK_GATE_D 0x6210 /* CRL only */ 2543#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
2484#define DEUC 0x6214 /* CRL only */ 2544#define DEUC _MMIO(0x6214) /* CRL only */
2485 2545
2486#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) 2546#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
2487#define FW_CSPWRDWNEN (1<<15) 2547#define FW_CSPWRDWNEN (1<<15)
2488 2548
2489#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 2549#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
2490 2550
2491#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508) 2551#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
2492#define CDCLK_FREQ_SHIFT 4 2552#define CDCLK_FREQ_SHIFT 4
2493#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) 2553#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
2494#define CZCLK_FREQ_MASK 0xf 2554#define CZCLK_FREQ_MASK 0xf
2495 2555
2496#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) 2556#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
2497#define PFI_CREDIT_63 (9 << 28) /* chv only */ 2557#define PFI_CREDIT_63 (9 << 28) /* chv only */
2498#define PFI_CREDIT_31 (8 << 28) /* chv only */ 2558#define PFI_CREDIT_31 (8 << 28) /* chv only */
2499#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ 2559#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
2500#define PFI_CREDIT_RESEND (1 << 27) 2560#define PFI_CREDIT_RESEND (1 << 27)
2501#define VGA_FAST_MODE_DISABLE (1 << 14) 2561#define VGA_FAST_MODE_DISABLE (1 << 14)
2502 2562
2503#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) 2563#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
2504 2564
2505/* 2565/*
2506 * Palette regs 2566 * Palette regs
@@ -2508,8 +2568,8 @@ enum skl_disp_power_wells {
2508#define PALETTE_A_OFFSET 0xa000 2568#define PALETTE_A_OFFSET 0xa000
2509#define PALETTE_B_OFFSET 0xa800 2569#define PALETTE_B_OFFSET 0xa800
2510#define CHV_PALETTE_C_OFFSET 0xc000 2570#define CHV_PALETTE_C_OFFSET 0xc000
2511#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \ 2571#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
2512 dev_priv->info.display_mmio_offset + (i) * 4) 2572 dev_priv->info.display_mmio_offset + (i) * 4)
2513 2573
2514/* MCH MMIO space */ 2574/* MCH MMIO space */
2515 2575
@@ -2527,37 +2587,37 @@ enum skl_disp_power_wells {
2527 2587
2528#define MCHBAR_MIRROR_BASE_SNB 0x140000 2588#define MCHBAR_MIRROR_BASE_SNB 0x140000
2529 2589
2530#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34) 2590#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
2531#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48) 2591#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
2532#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16) 2592#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
2533#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4) 2593#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
2534 2594
2535/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ 2595/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
2536#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) 2596#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
2537 2597
2538/* 915-945 and GM965 MCH register controlling DRAM channel access */ 2598/* 915-945 and GM965 MCH register controlling DRAM channel access */
2539#define DCC 0x10200 2599#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
2540#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 2600#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
2541#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 2601#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
2542#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 2602#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
2543#define DCC_ADDRESSING_MODE_MASK (3 << 0) 2603#define DCC_ADDRESSING_MODE_MASK (3 << 0)
2544#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 2604#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
2545#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 2605#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
2546#define DCC2 0x10204 2606#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
2547#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) 2607#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
2548 2608
2549/* Pineview MCH register contains DDR3 setting */ 2609/* Pineview MCH register contains DDR3 setting */
2550#define CSHRDDR3CTL 0x101a8 2610#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
2551#define CSHRDDR3CTL_DDR3 (1 << 2) 2611#define CSHRDDR3CTL_DDR3 (1 << 2)
2552 2612
2553/* 965 MCH register controlling DRAM channel configuration */ 2613/* 965 MCH register controlling DRAM channel configuration */
2554#define C0DRB3 0x10206 2614#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206)
2555#define C1DRB3 0x10606 2615#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606)
2556 2616
2557/* snb MCH registers for reading the DRAM channel configuration */ 2617/* snb MCH registers for reading the DRAM channel configuration */
2558#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) 2618#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
2559#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) 2619#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
2560#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) 2620#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
2561#define MAD_DIMM_ECC_MASK (0x3 << 24) 2621#define MAD_DIMM_ECC_MASK (0x3 << 24)
2562#define MAD_DIMM_ECC_OFF (0x0 << 24) 2622#define MAD_DIMM_ECC_OFF (0x0 << 24)
2563#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) 2623#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
@@ -2577,14 +2637,14 @@ enum skl_disp_power_wells {
2577#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) 2637#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
2578 2638
2579/* snb MCH registers for priority tuning */ 2639/* snb MCH registers for priority tuning */
2580#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) 2640#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
2581#define MCH_SSKPD_WM0_MASK 0x3f 2641#define MCH_SSKPD_WM0_MASK 0x3f
2582#define MCH_SSKPD_WM0_VAL 0xc 2642#define MCH_SSKPD_WM0_VAL 0xc
2583 2643
2584#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) 2644#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
2585 2645
2586/* Clocking configuration register */ 2646/* Clocking configuration register */
2587#define CLKCFG 0x10c00 2647#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
2588#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ 2648#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
2589#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ 2649#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
2590#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ 2650#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
@@ -2600,26 +2660,26 @@ enum skl_disp_power_wells {
2600#define CLKCFG_MEM_800 (3 << 4) 2660#define CLKCFG_MEM_800 (3 << 4)
2601#define CLKCFG_MEM_MASK (7 << 4) 2661#define CLKCFG_MEM_MASK (7 << 4)
2602 2662
2603#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38) 2663#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
2604#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f) 2664#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
2605 2665
2606#define TSC1 0x11001 2666#define TSC1 _MMIO(0x11001)
2607#define TSE (1<<0) 2667#define TSE (1<<0)
2608#define TR1 0x11006 2668#define TR1 _MMIO(0x11006)
2609#define TSFS 0x11020 2669#define TSFS _MMIO(0x11020)
2610#define TSFS_SLOPE_MASK 0x0000ff00 2670#define TSFS_SLOPE_MASK 0x0000ff00
2611#define TSFS_SLOPE_SHIFT 8 2671#define TSFS_SLOPE_SHIFT 8
2612#define TSFS_INTR_MASK 0x000000ff 2672#define TSFS_INTR_MASK 0x000000ff
2613 2673
2614#define CRSTANDVID 0x11100 2674#define CRSTANDVID _MMIO(0x11100)
2615#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ 2675#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
2616#define PXVFREQ_PX_MASK 0x7f000000 2676#define PXVFREQ_PX_MASK 0x7f000000
2617#define PXVFREQ_PX_SHIFT 24 2677#define PXVFREQ_PX_SHIFT 24
2618#define VIDFREQ_BASE 0x11110 2678#define VIDFREQ_BASE _MMIO(0x11110)
2619#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ 2679#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
2620#define VIDFREQ2 0x11114 2680#define VIDFREQ2 _MMIO(0x11114)
2621#define VIDFREQ3 0x11118 2681#define VIDFREQ3 _MMIO(0x11118)
2622#define VIDFREQ4 0x1111c 2682#define VIDFREQ4 _MMIO(0x1111c)
2623#define VIDFREQ_P0_MASK 0x1f000000 2683#define VIDFREQ_P0_MASK 0x1f000000
2624#define VIDFREQ_P0_SHIFT 24 2684#define VIDFREQ_P0_SHIFT 24
2625#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 2685#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
@@ -2631,8 +2691,8 @@ enum skl_disp_power_wells {
2631#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 2691#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
2632#define VIDFREQ_P1_CSCLK_SHIFT 4 2692#define VIDFREQ_P1_CSCLK_SHIFT 4
2633#define VIDFREQ_P1_CRCLK_MASK 0x0000000f 2693#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
2634#define INTTOEXT_BASE_ILK 0x11300 2694#define INTTOEXT_BASE_ILK _MMIO(0x11300)
2635#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ 2695#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
2636#define INTTOEXT_MAP3_SHIFT 24 2696#define INTTOEXT_MAP3_SHIFT 24
2637#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) 2697#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
2638#define INTTOEXT_MAP2_SHIFT 16 2698#define INTTOEXT_MAP2_SHIFT 16
@@ -2641,7 +2701,7 @@ enum skl_disp_power_wells {
2641#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) 2701#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
2642#define INTTOEXT_MAP0_SHIFT 0 2702#define INTTOEXT_MAP0_SHIFT 0
2643#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) 2703#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
2644#define MEMSWCTL 0x11170 /* Ironlake only */ 2704#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
2645#define MEMCTL_CMD_MASK 0xe000 2705#define MEMCTL_CMD_MASK 0xe000
2646#define MEMCTL_CMD_SHIFT 13 2706#define MEMCTL_CMD_SHIFT 13
2647#define MEMCTL_CMD_RCLK_OFF 0 2707#define MEMCTL_CMD_RCLK_OFF 0
@@ -2656,8 +2716,8 @@ enum skl_disp_power_wells {
2656#define MEMCTL_FREQ_SHIFT 8 2716#define MEMCTL_FREQ_SHIFT 8
2657#define MEMCTL_SFCAVM (1<<7) 2717#define MEMCTL_SFCAVM (1<<7)
2658#define MEMCTL_TGT_VID_MASK 0x007f 2718#define MEMCTL_TGT_VID_MASK 0x007f
2659#define MEMIHYST 0x1117c 2719#define MEMIHYST _MMIO(0x1117c)
2660#define MEMINTREN 0x11180 /* 16 bits */ 2720#define MEMINTREN _MMIO(0x11180) /* 16 bits */
2661#define MEMINT_RSEXIT_EN (1<<8) 2721#define MEMINT_RSEXIT_EN (1<<8)
2662#define MEMINT_CX_SUPR_EN (1<<7) 2722#define MEMINT_CX_SUPR_EN (1<<7)
2663#define MEMINT_CONT_BUSY_EN (1<<6) 2723#define MEMINT_CONT_BUSY_EN (1<<6)
@@ -2667,7 +2727,7 @@ enum skl_disp_power_wells {
2667#define MEMINT_UP_EVAL_EN (1<<2) 2727#define MEMINT_UP_EVAL_EN (1<<2)
2668#define MEMINT_DOWN_EVAL_EN (1<<1) 2728#define MEMINT_DOWN_EVAL_EN (1<<1)
2669#define MEMINT_SW_CMD_EN (1<<0) 2729#define MEMINT_SW_CMD_EN (1<<0)
2670#define MEMINTRSTR 0x11182 /* 16 bits */ 2730#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
2671#define MEM_RSEXIT_MASK 0xc000 2731#define MEM_RSEXIT_MASK 0xc000
2672#define MEM_RSEXIT_SHIFT 14 2732#define MEM_RSEXIT_SHIFT 14
2673#define MEM_CONT_BUSY_MASK 0x3000 2733#define MEM_CONT_BUSY_MASK 0x3000
@@ -2687,7 +2747,7 @@ enum skl_disp_power_wells {
2687#define MEM_INT_STEER_CMR 1 2747#define MEM_INT_STEER_CMR 1
2688#define MEM_INT_STEER_SMI 2 2748#define MEM_INT_STEER_SMI 2
2689#define MEM_INT_STEER_SCI 3 2749#define MEM_INT_STEER_SCI 3
2690#define MEMINTRSTS 0x11184 2750#define MEMINTRSTS _MMIO(0x11184)
2691#define MEMINT_RSEXIT (1<<7) 2751#define MEMINT_RSEXIT (1<<7)
2692#define MEMINT_CONT_BUSY (1<<6) 2752#define MEMINT_CONT_BUSY (1<<6)
2693#define MEMINT_AVG_BUSY (1<<5) 2753#define MEMINT_AVG_BUSY (1<<5)
@@ -2696,7 +2756,7 @@ enum skl_disp_power_wells {
2696#define MEMINT_UP_EVAL (1<<2) 2756#define MEMINT_UP_EVAL (1<<2)
2697#define MEMINT_DOWN_EVAL (1<<1) 2757#define MEMINT_DOWN_EVAL (1<<1)
2698#define MEMINT_SW_CMD (1<<0) 2758#define MEMINT_SW_CMD (1<<0)
2699#define MEMMODECTL 0x11190 2759#define MEMMODECTL _MMIO(0x11190)
2700#define MEMMODE_BOOST_EN (1<<31) 2760#define MEMMODE_BOOST_EN (1<<31)
2701#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ 2761#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
2702#define MEMMODE_BOOST_FREQ_SHIFT 24 2762#define MEMMODE_BOOST_FREQ_SHIFT 24
@@ -2713,8 +2773,8 @@ enum skl_disp_power_wells {
2713#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ 2773#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
2714#define MEMMODE_FMAX_SHIFT 4 2774#define MEMMODE_FMAX_SHIFT 4
2715#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ 2775#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
2716#define RCBMAXAVG 0x1119c 2776#define RCBMAXAVG _MMIO(0x1119c)
2717#define MEMSWCTL2 0x1119e /* Cantiga only */ 2777#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
2718#define SWMEMCMD_RENDER_OFF (0 << 13) 2778#define SWMEMCMD_RENDER_OFF (0 << 13)
2719#define SWMEMCMD_RENDER_ON (1 << 13) 2779#define SWMEMCMD_RENDER_ON (1 << 13)
2720#define SWMEMCMD_SWFREQ (2 << 13) 2780#define SWMEMCMD_SWFREQ (2 << 13)
@@ -2726,11 +2786,11 @@ enum skl_disp_power_wells {
2726#define SWFREQ_MASK 0x0380 /* P0-7 */ 2786#define SWFREQ_MASK 0x0380 /* P0-7 */
2727#define SWFREQ_SHIFT 7 2787#define SWFREQ_SHIFT 7
2728#define TARVID_MASK 0x001f 2788#define TARVID_MASK 0x001f
2729#define MEMSTAT_CTG 0x111a0 2789#define MEMSTAT_CTG _MMIO(0x111a0)
2730#define RCBMINAVG 0x111a0 2790#define RCBMINAVG _MMIO(0x111a0)
2731#define RCUPEI 0x111b0 2791#define RCUPEI _MMIO(0x111b0)
2732#define RCDNEI 0x111b4 2792#define RCDNEI _MMIO(0x111b4)
2733#define RSTDBYCTL 0x111b8 2793#define RSTDBYCTL _MMIO(0x111b8)
2734#define RS1EN (1<<31) 2794#define RS1EN (1<<31)
2735#define RS2EN (1<<30) 2795#define RS2EN (1<<30)
2736#define RS3EN (1<<29) 2796#define RS3EN (1<<29)
@@ -2774,10 +2834,10 @@ enum skl_disp_power_wells {
2774#define RS_CSTATE_C367_RS2 (3<<4) 2834#define RS_CSTATE_C367_RS2 (3<<4)
2775#define REDSAVES (1<<3) /* no context save if was idle during rs0 */ 2835#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
2776#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ 2836#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
2777#define VIDCTL 0x111c0 2837#define VIDCTL _MMIO(0x111c0)
2778#define VIDSTS 0x111c8 2838#define VIDSTS _MMIO(0x111c8)
2779#define VIDSTART 0x111cc /* 8 bits */ 2839#define VIDSTART _MMIO(0x111cc) /* 8 bits */
2780#define MEMSTAT_ILK 0x111f8 2840#define MEMSTAT_ILK _MMIO(0x111f8)
2781#define MEMSTAT_VID_MASK 0x7f00 2841#define MEMSTAT_VID_MASK 0x7f00
2782#define MEMSTAT_VID_SHIFT 8 2842#define MEMSTAT_VID_SHIFT 8
2783#define MEMSTAT_PSTATE_MASK 0x00f8 2843#define MEMSTAT_PSTATE_MASK 0x00f8
@@ -2788,55 +2848,55 @@ enum skl_disp_power_wells {
2788#define MEMSTAT_SRC_CTL_TRB 1 2848#define MEMSTAT_SRC_CTL_TRB 1
2789#define MEMSTAT_SRC_CTL_THM 2 2849#define MEMSTAT_SRC_CTL_THM 2
2790#define MEMSTAT_SRC_CTL_STDBY 3 2850#define MEMSTAT_SRC_CTL_STDBY 3
2791#define RCPREVBSYTUPAVG 0x113b8 2851#define RCPREVBSYTUPAVG _MMIO(0x113b8)
2792#define RCPREVBSYTDNAVG 0x113bc 2852#define RCPREVBSYTDNAVG _MMIO(0x113bc)
2793#define PMMISC 0x11214 2853#define PMMISC _MMIO(0x11214)
2794#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ 2854#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
2795#define SDEW 0x1124c 2855#define SDEW _MMIO(0x1124c)
2796#define CSIEW0 0x11250 2856#define CSIEW0 _MMIO(0x11250)
2797#define CSIEW1 0x11254 2857#define CSIEW1 _MMIO(0x11254)
2798#define CSIEW2 0x11258 2858#define CSIEW2 _MMIO(0x11258)
2799#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */ 2859#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
2800#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */ 2860#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
2801#define MCHAFE 0x112c0 2861#define MCHAFE _MMIO(0x112c0)
2802#define CSIEC 0x112e0 2862#define CSIEC _MMIO(0x112e0)
2803#define DMIEC 0x112e4 2863#define DMIEC _MMIO(0x112e4)
2804#define DDREC 0x112e8 2864#define DDREC _MMIO(0x112e8)
2805#define PEG0EC 0x112ec 2865#define PEG0EC _MMIO(0x112ec)
2806#define PEG1EC 0x112f0 2866#define PEG1EC _MMIO(0x112f0)
2807#define GFXEC 0x112f4 2867#define GFXEC _MMIO(0x112f4)
2808#define RPPREVBSYTUPAVG 0x113b8 2868#define RPPREVBSYTUPAVG _MMIO(0x113b8)
2809#define RPPREVBSYTDNAVG 0x113bc 2869#define RPPREVBSYTDNAVG _MMIO(0x113bc)
2810#define ECR 0x11600 2870#define ECR _MMIO(0x11600)
2811#define ECR_GPFE (1<<31) 2871#define ECR_GPFE (1<<31)
2812#define ECR_IMONE (1<<30) 2872#define ECR_IMONE (1<<30)
2813#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ 2873#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
2814#define OGW0 0x11608 2874#define OGW0 _MMIO(0x11608)
2815#define OGW1 0x1160c 2875#define OGW1 _MMIO(0x1160c)
2816#define EG0 0x11610 2876#define EG0 _MMIO(0x11610)
2817#define EG1 0x11614 2877#define EG1 _MMIO(0x11614)
2818#define EG2 0x11618 2878#define EG2 _MMIO(0x11618)
2819#define EG3 0x1161c 2879#define EG3 _MMIO(0x1161c)
2820#define EG4 0x11620 2880#define EG4 _MMIO(0x11620)
2821#define EG5 0x11624 2881#define EG5 _MMIO(0x11624)
2822#define EG6 0x11628 2882#define EG6 _MMIO(0x11628)
2823#define EG7 0x1162c 2883#define EG7 _MMIO(0x1162c)
2824#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */ 2884#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
2825#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */ 2885#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
2826#define LCFUSE02 0x116c0 2886#define LCFUSE02 _MMIO(0x116c0)
2827#define LCFUSE_HIV_MASK 0x000000ff 2887#define LCFUSE_HIV_MASK 0x000000ff
2828#define CSIPLL0 0x12c10 2888#define CSIPLL0 _MMIO(0x12c10)
2829#define DDRMPLL1 0X12c20 2889#define DDRMPLL1 _MMIO(0X12c20)
2830#define PEG_BAND_GAP_DATA 0x14d68 2890#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
2831 2891
2832#define GEN6_GT_THREAD_STATUS_REG 0x13805c 2892#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
2833#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 2893#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
2834 2894
2835#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) 2895#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
2836#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070) 2896#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
2837#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) 2897#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
2838#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) 2898#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
2839#define BXT_RP_STATE_CAP 0x138170 2899#define BXT_RP_STATE_CAP _MMIO(0x138170)
2840 2900
2841#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2901#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
2842#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2902#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
@@ -2850,7 +2910,7 @@ enum skl_disp_power_wells {
2850/* 2910/*
2851 * Logical Context regs 2911 * Logical Context regs
2852 */ 2912 */
2853#define CCID 0x2180 2913#define CCID _MMIO(0x2180)
2854#define CCID_EN (1<<0) 2914#define CCID_EN (1<<0)
2855/* 2915/*
2856 * Notes on SNB/IVB/VLV context size: 2916 * Notes on SNB/IVB/VLV context size:
@@ -2865,7 +2925,7 @@ enum skl_disp_power_wells {
2865 * - GT1 size just indicates how much of render context 2925 * - GT1 size just indicates how much of render context
2866 * doesn't need saving on GT1 2926 * doesn't need saving on GT1
2867 */ 2927 */
2868#define CXT_SIZE 0x21a0 2928#define CXT_SIZE _MMIO(0x21a0)
2869#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) 2929#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
2870#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) 2930#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
2871#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) 2931#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
@@ -2874,7 +2934,7 @@ enum skl_disp_power_wells {
2874#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ 2934#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
2875 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ 2935 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
2876 GEN6_CXT_PIPELINE_SIZE(cxt_reg)) 2936 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
2877#define GEN7_CXT_SIZE 0x21a8 2937#define GEN7_CXT_SIZE _MMIO(0x21a8)
2878#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) 2938#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
2879#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) 2939#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
2880#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) 2940#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
@@ -2894,23 +2954,23 @@ enum skl_disp_power_wells {
2894/* Same as Haswell, but 72064 bytes now. */ 2954/* Same as Haswell, but 72064 bytes now. */
2895#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) 2955#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
2896 2956
2897#define CHV_CLK_CTL1 0x101100 2957#define CHV_CLK_CTL1 _MMIO(0x101100)
2898#define VLV_CLK_CTL2 0x101104 2958#define VLV_CLK_CTL2 _MMIO(0x101104)
2899#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 2959#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
2900 2960
2901/* 2961/*
2902 * Overlay regs 2962 * Overlay regs
2903 */ 2963 */
2904 2964
2905#define OVADD 0x30000 2965#define OVADD _MMIO(0x30000)
2906#define DOVSTA 0x30008 2966#define DOVSTA _MMIO(0x30008)
2907#define OC_BUF (0x3<<20) 2967#define OC_BUF (0x3<<20)
2908#define OGAMC5 0x30010 2968#define OGAMC5 _MMIO(0x30010)
2909#define OGAMC4 0x30014 2969#define OGAMC4 _MMIO(0x30014)
2910#define OGAMC3 0x30018 2970#define OGAMC3 _MMIO(0x30018)
2911#define OGAMC2 0x3001c 2971#define OGAMC2 _MMIO(0x3001c)
2912#define OGAMC1 0x30020 2972#define OGAMC1 _MMIO(0x30020)
2913#define OGAMC0 0x30024 2973#define OGAMC0 _MMIO(0x30024)
2914 2974
2915/* 2975/*
2916 * Display engine regs 2976 * Display engine regs
@@ -2970,28 +3030,18 @@ enum skl_disp_power_wells {
2970#define _PIPE_CRC_RES_4_B_IVB 0x61070 3030#define _PIPE_CRC_RES_4_B_IVB 0x61070
2971#define _PIPE_CRC_RES_5_B_IVB 0x61074 3031#define _PIPE_CRC_RES_5_B_IVB 0x61074
2972 3032
2973#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) 3033#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
2974#define PIPE_CRC_RES_1_IVB(pipe) \ 3034#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
2975 _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) 3035#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
2976#define PIPE_CRC_RES_2_IVB(pipe) \ 3036#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
2977 _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) 3037#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
2978#define PIPE_CRC_RES_3_IVB(pipe) \ 3038#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
2979 _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) 3039
2980#define PIPE_CRC_RES_4_IVB(pipe) \ 3040#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
2981 _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) 3041#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
2982#define PIPE_CRC_RES_5_IVB(pipe) \ 3042#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
2983 _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) 3043#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
2984 3044#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
2985#define PIPE_CRC_RES_RED(pipe) \
2986 _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
2987#define PIPE_CRC_RES_GREEN(pipe) \
2988 _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
2989#define PIPE_CRC_RES_BLUE(pipe) \
2990 _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
2991#define PIPE_CRC_RES_RES1_I915(pipe) \
2992 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
2993#define PIPE_CRC_RES_RES2_G4X(pipe) \
2994 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
2995 3045
2996/* Pipe A timing regs */ 3046/* Pipe A timing regs */
2997#define _HTOTAL_A 0x60000 3047#define _HTOTAL_A 0x60000
@@ -3023,20 +3073,20 @@ enum skl_disp_power_wells {
3023#define CHV_TRANSCODER_C_OFFSET 0x63000 3073#define CHV_TRANSCODER_C_OFFSET 0x63000
3024#define TRANSCODER_EDP_OFFSET 0x6f000 3074#define TRANSCODER_EDP_OFFSET 0x6f000
3025 3075
3026#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ 3076#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
3027 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ 3077 dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
3028 dev_priv->info.display_mmio_offset) 3078 dev_priv->info.display_mmio_offset)
3029 3079
3030#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) 3080#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
3031#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) 3081#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
3032#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) 3082#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
3033#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) 3083#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
3034#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) 3084#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
3035#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) 3085#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
3036#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) 3086#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
3037#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) 3087#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
3038#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) 3088#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
3039#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A) 3089#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
3040 3090
3041/* VLV eDP PSR registers */ 3091/* VLV eDP PSR registers */
3042#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090) 3092#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
@@ -3052,14 +3102,14 @@ enum skl_disp_power_wells {
3052#define VLV_EDP_PSR_DBL_FRAME (1<<10) 3102#define VLV_EDP_PSR_DBL_FRAME (1<<10)
3053#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16) 3103#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
3054#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16 3104#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
3055#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB) 3105#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
3056 3106
3057#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0) 3107#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
3058#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0) 3108#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
3059#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30) 3109#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
3060#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31) 3110#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
3061#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30) 3111#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
3062#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB) 3112#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
3063 3113
3064#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094) 3114#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
3065#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094) 3115#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
@@ -3072,11 +3122,12 @@ enum skl_disp_power_wells {
3072#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0) 3122#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
3073#define VLV_EDP_PSR_EXIT (5<<0) 3123#define VLV_EDP_PSR_EXIT (5<<0)
3074#define VLV_EDP_PSR_IN_TRANS (1<<7) 3124#define VLV_EDP_PSR_IN_TRANS (1<<7)
3075#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB) 3125#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
3076 3126
3077/* HSW+ eDP PSR registers */ 3127/* HSW+ eDP PSR registers */
3078#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 3128#define HSW_EDP_PSR_BASE 0x64800
3079#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) 3129#define BDW_EDP_PSR_BASE 0x6f800
3130#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
3080#define EDP_PSR_ENABLE (1<<31) 3131#define EDP_PSR_ENABLE (1<<31)
3081#define BDW_PSR_SINGLE_FRAME (1<<30) 3132#define BDW_PSR_SINGLE_FRAME (1<<30)
3082#define EDP_PSR_LINK_STANDBY (1<<27) 3133#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -3099,14 +3150,10 @@ enum skl_disp_power_wells {
3099#define EDP_PSR_TP1_TIME_0us (3<<4) 3150#define EDP_PSR_TP1_TIME_0us (3<<4)
3100#define EDP_PSR_IDLE_FRAME_SHIFT 0 3151#define EDP_PSR_IDLE_FRAME_SHIFT 0
3101 3152
3102#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) 3153#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
3103#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) 3154#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
3104#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
3105#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
3106#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
3107#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
3108 3155
3109#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40) 3156#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40)
3110#define EDP_PSR_STATUS_STATE_MASK (7<<29) 3157#define EDP_PSR_STATUS_STATE_MASK (7<<29)
3111#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 3158#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
3112#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 3159#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -3130,15 +3177,15 @@ enum skl_disp_power_wells {
3130#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 3177#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
3131#define EDP_PSR_STATUS_IDLE_MASK 0xf 3178#define EDP_PSR_STATUS_IDLE_MASK 0xf
3132 3179
3133#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44) 3180#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
3134#define EDP_PSR_PERF_CNT_MASK 0xffffff 3181#define EDP_PSR_PERF_CNT_MASK 0xffffff
3135 3182
3136#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60) 3183#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
3137#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 3184#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
3138#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 3185#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
3139#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 3186#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
3140 3187
3141#define EDP_PSR2_CTL 0x6f900 3188#define EDP_PSR2_CTL _MMIO(0x6f900)
3142#define EDP_PSR2_ENABLE (1<<31) 3189#define EDP_PSR2_ENABLE (1<<31)
3143#define EDP_SU_TRACK_ENABLE (1<<30) 3190#define EDP_SU_TRACK_ENABLE (1<<30)
3144#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20) 3191#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
@@ -3153,9 +3200,9 @@ enum skl_disp_power_wells {
3153#define EDP_PSR2_IDLE_MASK 0xf 3200#define EDP_PSR2_IDLE_MASK 0xf
3154 3201
3155/* VGA port control */ 3202/* VGA port control */
3156#define ADPA 0x61100 3203#define ADPA _MMIO(0x61100)
3157#define PCH_ADPA 0xe1100 3204#define PCH_ADPA _MMIO(0xe1100)
3158#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) 3205#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
3159 3206
3160#define ADPA_DAC_ENABLE (1<<31) 3207#define ADPA_DAC_ENABLE (1<<31)
3161#define ADPA_DAC_DISABLE 0 3208#define ADPA_DAC_DISABLE 0
@@ -3201,7 +3248,7 @@ enum skl_disp_power_wells {
3201 3248
3202 3249
3203/* Hotplug control (945+ only) */ 3250/* Hotplug control (945+ only) */
3204#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) 3251#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
3205#define PORTB_HOTPLUG_INT_EN (1 << 29) 3252#define PORTB_HOTPLUG_INT_EN (1 << 29)
3206#define PORTC_HOTPLUG_INT_EN (1 << 28) 3253#define PORTC_HOTPLUG_INT_EN (1 << 28)
3207#define PORTD_HOTPLUG_INT_EN (1 << 27) 3254#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -3231,7 +3278,7 @@ enum skl_disp_power_wells {
3231#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 3278#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
3232#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 3279#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
3233 3280
3234#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) 3281#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3235/* 3282/*
3236 * HDMI/DP bits are gen4+ 3283 * HDMI/DP bits are gen4+
3237 * 3284 *
@@ -3296,21 +3343,23 @@ enum skl_disp_power_wells {
3296 3343
3297/* SDVO and HDMI port control. 3344/* SDVO and HDMI port control.
3298 * The same register may be used for SDVO or HDMI */ 3345 * The same register may be used for SDVO or HDMI */
3299#define GEN3_SDVOB 0x61140 3346#define _GEN3_SDVOB 0x61140
3300#define GEN3_SDVOC 0x61160 3347#define _GEN3_SDVOC 0x61160
3348#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
3349#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
3301#define GEN4_HDMIB GEN3_SDVOB 3350#define GEN4_HDMIB GEN3_SDVOB
3302#define GEN4_HDMIC GEN3_SDVOC 3351#define GEN4_HDMIC GEN3_SDVOC
3303#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB) 3352#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
3304#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC) 3353#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
3305#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C) 3354#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
3306#define PCH_SDVOB 0xe1140 3355#define PCH_SDVOB _MMIO(0xe1140)
3307#define PCH_HDMIB PCH_SDVOB 3356#define PCH_HDMIB PCH_SDVOB
3308#define PCH_HDMIC 0xe1150 3357#define PCH_HDMIC _MMIO(0xe1150)
3309#define PCH_HDMID 0xe1160 3358#define PCH_HDMID _MMIO(0xe1160)
3310 3359
3311#define PORT_DFT_I9XX 0x61150 3360#define PORT_DFT_I9XX _MMIO(0x61150)
3312#define DC_BALANCE_RESET (1 << 25) 3361#define DC_BALANCE_RESET (1 << 25)
3313#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) 3362#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
3314#define DC_BALANCE_RESET_VLV (1 << 31) 3363#define DC_BALANCE_RESET_VLV (1 << 31)
3315#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) 3364#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
3316#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ 3365#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
@@ -3370,9 +3419,12 @@ enum skl_disp_power_wells {
3370 3419
3371 3420
3372/* DVO port control */ 3421/* DVO port control */
3373#define DVOA 0x61120 3422#define _DVOA 0x61120
3374#define DVOB 0x61140 3423#define DVOA _MMIO(_DVOA)
3375#define DVOC 0x61160 3424#define _DVOB 0x61140
3425#define DVOB _MMIO(_DVOB)
3426#define _DVOC 0x61160
3427#define DVOC _MMIO(_DVOC)
3376#define DVO_ENABLE (1 << 31) 3428#define DVO_ENABLE (1 << 31)
3377#define DVO_PIPE_B_SELECT (1 << 30) 3429#define DVO_PIPE_B_SELECT (1 << 30)
3378#define DVO_PIPE_STALL_UNUSED (0 << 28) 3430#define DVO_PIPE_STALL_UNUSED (0 << 28)
@@ -3397,14 +3449,14 @@ enum skl_disp_power_wells {
3397#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ 3449#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
3398#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ 3450#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
3399#define DVO_PRESERVE_MASK (0x7<<24) 3451#define DVO_PRESERVE_MASK (0x7<<24)
3400#define DVOA_SRCDIM 0x61124 3452#define DVOA_SRCDIM _MMIO(0x61124)
3401#define DVOB_SRCDIM 0x61144 3453#define DVOB_SRCDIM _MMIO(0x61144)
3402#define DVOC_SRCDIM 0x61164 3454#define DVOC_SRCDIM _MMIO(0x61164)
3403#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 3455#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
3404#define DVO_SRCDIM_VERTICAL_SHIFT 0 3456#define DVO_SRCDIM_VERTICAL_SHIFT 0
3405 3457
3406/* LVDS port control */ 3458/* LVDS port control */
3407#define LVDS 0x61180 3459#define LVDS _MMIO(0x61180)
3408/* 3460/*
3409 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 3461 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
3410 * the DPLL semantics change when the LVDS is assigned to that pipe. 3462 * the DPLL semantics change when the LVDS is assigned to that pipe.
@@ -3454,13 +3506,13 @@ enum skl_disp_power_wells {
3454#define LVDS_B0B3_POWER_UP (3 << 2) 3506#define LVDS_B0B3_POWER_UP (3 << 2)
3455 3507
3456/* Video Data Island Packet control */ 3508/* Video Data Island Packet control */
3457#define VIDEO_DIP_DATA 0x61178 3509#define VIDEO_DIP_DATA _MMIO(0x61178)
3458/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC 3510/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
3459 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte 3511 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
3460 * of the infoframe structure specified by CEA-861. */ 3512 * of the infoframe structure specified by CEA-861. */
3461#define VIDEO_DIP_DATA_SIZE 32 3513#define VIDEO_DIP_DATA_SIZE 32
3462#define VIDEO_DIP_VSC_DATA_SIZE 36 3514#define VIDEO_DIP_VSC_DATA_SIZE 36
3463#define VIDEO_DIP_CTL 0x61170 3515#define VIDEO_DIP_CTL _MMIO(0x61170)
3464/* Pre HSW: */ 3516/* Pre HSW: */
3465#define VIDEO_DIP_ENABLE (1 << 31) 3517#define VIDEO_DIP_ENABLE (1 << 31)
3466#define VIDEO_DIP_PORT(port) ((port) << 29) 3518#define VIDEO_DIP_PORT(port) ((port) << 29)
@@ -3487,7 +3539,7 @@ enum skl_disp_power_wells {
3487#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) 3539#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
3488 3540
3489/* Panel power sequencing */ 3541/* Panel power sequencing */
3490#define PP_STATUS 0x61200 3542#define PP_STATUS _MMIO(0x61200)
3491#define PP_ON (1 << 31) 3543#define PP_ON (1 << 31)
3492/* 3544/*
3493 * Indicates that all dependencies of the panel are on: 3545 * Indicates that all dependencies of the panel are on:
@@ -3513,14 +3565,14 @@ enum skl_disp_power_wells {
3513#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) 3565#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
3514#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) 3566#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
3515#define PP_SEQUENCE_STATE_RESET (0xf << 0) 3567#define PP_SEQUENCE_STATE_RESET (0xf << 0)
3516#define PP_CONTROL 0x61204 3568#define PP_CONTROL _MMIO(0x61204)
3517#define POWER_TARGET_ON (1 << 0) 3569#define POWER_TARGET_ON (1 << 0)
3518#define PP_ON_DELAYS 0x61208 3570#define PP_ON_DELAYS _MMIO(0x61208)
3519#define PP_OFF_DELAYS 0x6120c 3571#define PP_OFF_DELAYS _MMIO(0x6120c)
3520#define PP_DIVISOR 0x61210 3572#define PP_DIVISOR _MMIO(0x61210)
3521 3573
3522/* Panel fitting */ 3574/* Panel fitting */
3523#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) 3575#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
3524#define PFIT_ENABLE (1 << 31) 3576#define PFIT_ENABLE (1 << 31)
3525#define PFIT_PIPE_MASK (3 << 29) 3577#define PFIT_PIPE_MASK (3 << 29)
3526#define PFIT_PIPE_SHIFT 29 3578#define PFIT_PIPE_SHIFT 29
@@ -3538,7 +3590,7 @@ enum skl_disp_power_wells {
3538#define PFIT_SCALING_PROGRAMMED (1 << 26) 3590#define PFIT_SCALING_PROGRAMMED (1 << 26)
3539#define PFIT_SCALING_PILLAR (2 << 26) 3591#define PFIT_SCALING_PILLAR (2 << 26)
3540#define PFIT_SCALING_LETTER (3 << 26) 3592#define PFIT_SCALING_LETTER (3 << 26)
3541#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) 3593#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
3542/* Pre-965 */ 3594/* Pre-965 */
3543#define PFIT_VERT_SCALE_SHIFT 20 3595#define PFIT_VERT_SCALE_SHIFT 20
3544#define PFIT_VERT_SCALE_MASK 0xfff00000 3596#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -3550,25 +3602,25 @@ enum skl_disp_power_wells {
3550#define PFIT_HORIZ_SCALE_SHIFT_965 0 3602#define PFIT_HORIZ_SCALE_SHIFT_965 0
3551#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff 3603#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
3552 3604
3553#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) 3605#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
3554 3606
3555#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) 3607#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
3556#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) 3608#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
3557#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ 3609#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
3558 _VLV_BLC_PWM_CTL2_B) 3610 _VLV_BLC_PWM_CTL2_B)
3559 3611
3560#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) 3612#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
3561#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) 3613#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
3562#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ 3614#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
3563 _VLV_BLC_PWM_CTL_B) 3615 _VLV_BLC_PWM_CTL_B)
3564 3616
3565#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) 3617#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
3566#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) 3618#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
3567#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ 3619#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
3568 _VLV_BLC_HIST_CTL_B) 3620 _VLV_BLC_HIST_CTL_B)
3569 3621
3570/* Backlight control */ 3622/* Backlight control */
3571#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ 3623#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
3572#define BLM_PWM_ENABLE (1 << 31) 3624#define BLM_PWM_ENABLE (1 << 31)
3573#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ 3625#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
3574#define BLM_PIPE_SELECT (1 << 29) 3626#define BLM_PIPE_SELECT (1 << 29)
@@ -3591,7 +3643,7 @@ enum skl_disp_power_wells {
3591#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) 3643#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
3592#define BLM_PHASE_IN_INCR_SHIFT (0) 3644#define BLM_PHASE_IN_INCR_SHIFT (0)
3593#define BLM_PHASE_IN_INCR_MASK (0xff << 0) 3645#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
3594#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) 3646#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
3595/* 3647/*
3596 * This is the most significant 15 bits of the number of backlight cycles in a 3648 * This is the most significant 15 bits of the number of backlight cycles in a
3597 * complete cycle of the modulated backlight control. 3649 * complete cycle of the modulated backlight control.
@@ -3613,25 +3665,25 @@ enum skl_disp_power_wells {
3613#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) 3665#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
3614#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ 3666#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
3615 3667
3616#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) 3668#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
3617#define BLM_HISTOGRAM_ENABLE (1 << 31) 3669#define BLM_HISTOGRAM_ENABLE (1 << 31)
3618 3670
3619/* New registers for PCH-split platforms. Safe where new bits show up, the 3671/* New registers for PCH-split platforms. Safe where new bits show up, the
3620 * register layout machtes with gen4 BLC_PWM_CTL[12]. */ 3672 * register layout machtes with gen4 BLC_PWM_CTL[12]. */
3621#define BLC_PWM_CPU_CTL2 0x48250 3673#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
3622#define BLC_PWM_CPU_CTL 0x48254 3674#define BLC_PWM_CPU_CTL _MMIO(0x48254)
3623 3675
3624#define HSW_BLC_PWM2_CTL 0x48350 3676#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
3625 3677
3626/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is 3678/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
3627 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ 3679 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
3628#define BLC_PWM_PCH_CTL1 0xc8250 3680#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
3629#define BLM_PCH_PWM_ENABLE (1 << 31) 3681#define BLM_PCH_PWM_ENABLE (1 << 31)
3630#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) 3682#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
3631#define BLM_PCH_POLARITY (1 << 29) 3683#define BLM_PCH_POLARITY (1 << 29)
3632#define BLC_PWM_PCH_CTL2 0xc8254 3684#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
3633 3685
3634#define UTIL_PIN_CTL 0x48400 3686#define UTIL_PIN_CTL _MMIO(0x48400)
3635#define UTIL_PIN_ENABLE (1 << 31) 3687#define UTIL_PIN_ENABLE (1 << 31)
3636 3688
3637#define UTIL_PIN_PIPE(x) ((x) << 29) 3689#define UTIL_PIN_PIPE(x) ((x) << 29)
@@ -3651,18 +3703,18 @@ enum skl_disp_power_wells {
3651#define _BXT_BLC_PWM_FREQ2 0xC8354 3703#define _BXT_BLC_PWM_FREQ2 0xC8354
3652#define _BXT_BLC_PWM_DUTY2 0xC8358 3704#define _BXT_BLC_PWM_DUTY2 0xC8358
3653 3705
3654#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \ 3706#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
3655 _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) 3707 _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
3656#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \ 3708#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
3657 _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) 3709 _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
3658#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \ 3710#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
3659 _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) 3711 _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
3660 3712
3661#define PCH_GTC_CTL 0xe7000 3713#define PCH_GTC_CTL _MMIO(0xe7000)
3662#define PCH_GTC_ENABLE (1 << 31) 3714#define PCH_GTC_ENABLE (1 << 31)
3663 3715
3664/* TV port control */ 3716/* TV port control */
3665#define TV_CTL 0x68000 3717#define TV_CTL _MMIO(0x68000)
3666/* Enables the TV encoder */ 3718/* Enables the TV encoder */
3667# define TV_ENC_ENABLE (1 << 31) 3719# define TV_ENC_ENABLE (1 << 31)
3668/* Sources the TV encoder input from pipe B instead of A. */ 3720/* Sources the TV encoder input from pipe B instead of A. */
@@ -3729,7 +3781,7 @@ enum skl_disp_power_wells {
3729# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) 3781# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
3730# define TV_TEST_MODE_MASK (7 << 0) 3782# define TV_TEST_MODE_MASK (7 << 0)
3731 3783
3732#define TV_DAC 0x68004 3784#define TV_DAC _MMIO(0x68004)
3733# define TV_DAC_SAVE 0x00ffff00 3785# define TV_DAC_SAVE 0x00ffff00
3734/* 3786/*
3735 * Reports that DAC state change logic has reported change (RO). 3787 * Reports that DAC state change logic has reported change (RO).
@@ -3780,13 +3832,13 @@ enum skl_disp_power_wells {
3780 * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with 3832 * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
3781 * -1 (0x3) being the only legal negative value. 3833 * -1 (0x3) being the only legal negative value.
3782 */ 3834 */
3783#define TV_CSC_Y 0x68010 3835#define TV_CSC_Y _MMIO(0x68010)
3784# define TV_RY_MASK 0x07ff0000 3836# define TV_RY_MASK 0x07ff0000
3785# define TV_RY_SHIFT 16 3837# define TV_RY_SHIFT 16
3786# define TV_GY_MASK 0x00000fff 3838# define TV_GY_MASK 0x00000fff
3787# define TV_GY_SHIFT 0 3839# define TV_GY_SHIFT 0
3788 3840
3789#define TV_CSC_Y2 0x68014 3841#define TV_CSC_Y2 _MMIO(0x68014)
3790# define TV_BY_MASK 0x07ff0000 3842# define TV_BY_MASK 0x07ff0000
3791# define TV_BY_SHIFT 16 3843# define TV_BY_SHIFT 16
3792/* 3844/*
@@ -3797,13 +3849,13 @@ enum skl_disp_power_wells {
3797# define TV_AY_MASK 0x000003ff 3849# define TV_AY_MASK 0x000003ff
3798# define TV_AY_SHIFT 0 3850# define TV_AY_SHIFT 0
3799 3851
3800#define TV_CSC_U 0x68018 3852#define TV_CSC_U _MMIO(0x68018)
3801# define TV_RU_MASK 0x07ff0000 3853# define TV_RU_MASK 0x07ff0000
3802# define TV_RU_SHIFT 16 3854# define TV_RU_SHIFT 16
3803# define TV_GU_MASK 0x000007ff 3855# define TV_GU_MASK 0x000007ff
3804# define TV_GU_SHIFT 0 3856# define TV_GU_SHIFT 0
3805 3857
3806#define TV_CSC_U2 0x6801c 3858#define TV_CSC_U2 _MMIO(0x6801c)
3807# define TV_BU_MASK 0x07ff0000 3859# define TV_BU_MASK 0x07ff0000
3808# define TV_BU_SHIFT 16 3860# define TV_BU_SHIFT 16
3809/* 3861/*
@@ -3814,13 +3866,13 @@ enum skl_disp_power_wells {
3814# define TV_AU_MASK 0x000003ff 3866# define TV_AU_MASK 0x000003ff
3815# define TV_AU_SHIFT 0 3867# define TV_AU_SHIFT 0
3816 3868
3817#define TV_CSC_V 0x68020 3869#define TV_CSC_V _MMIO(0x68020)
3818# define TV_RV_MASK 0x0fff0000 3870# define TV_RV_MASK 0x0fff0000
3819# define TV_RV_SHIFT 16 3871# define TV_RV_SHIFT 16
3820# define TV_GV_MASK 0x000007ff 3872# define TV_GV_MASK 0x000007ff
3821# define TV_GV_SHIFT 0 3873# define TV_GV_SHIFT 0
3822 3874
3823#define TV_CSC_V2 0x68024 3875#define TV_CSC_V2 _MMIO(0x68024)
3824# define TV_BV_MASK 0x07ff0000 3876# define TV_BV_MASK 0x07ff0000
3825# define TV_BV_SHIFT 16 3877# define TV_BV_SHIFT 16
3826/* 3878/*
@@ -3831,7 +3883,7 @@ enum skl_disp_power_wells {
3831# define TV_AV_MASK 0x000007ff 3883# define TV_AV_MASK 0x000007ff
3832# define TV_AV_SHIFT 0 3884# define TV_AV_SHIFT 0
3833 3885
3834#define TV_CLR_KNOBS 0x68028 3886#define TV_CLR_KNOBS _MMIO(0x68028)
3835/* 2s-complement brightness adjustment */ 3887/* 2s-complement brightness adjustment */
3836# define TV_BRIGHTNESS_MASK 0xff000000 3888# define TV_BRIGHTNESS_MASK 0xff000000
3837# define TV_BRIGHTNESS_SHIFT 24 3889# define TV_BRIGHTNESS_SHIFT 24
@@ -3845,7 +3897,7 @@ enum skl_disp_power_wells {
3845# define TV_HUE_MASK 0x000000ff 3897# define TV_HUE_MASK 0x000000ff
3846# define TV_HUE_SHIFT 0 3898# define TV_HUE_SHIFT 0
3847 3899
3848#define TV_CLR_LEVEL 0x6802c 3900#define TV_CLR_LEVEL _MMIO(0x6802c)
3849/* Controls the DAC level for black */ 3901/* Controls the DAC level for black */
3850# define TV_BLACK_LEVEL_MASK 0x01ff0000 3902# define TV_BLACK_LEVEL_MASK 0x01ff0000
3851# define TV_BLACK_LEVEL_SHIFT 16 3903# define TV_BLACK_LEVEL_SHIFT 16
@@ -3853,7 +3905,7 @@ enum skl_disp_power_wells {
3853# define TV_BLANK_LEVEL_MASK 0x000001ff 3905# define TV_BLANK_LEVEL_MASK 0x000001ff
3854# define TV_BLANK_LEVEL_SHIFT 0 3906# define TV_BLANK_LEVEL_SHIFT 0
3855 3907
3856#define TV_H_CTL_1 0x68030 3908#define TV_H_CTL_1 _MMIO(0x68030)
3857/* Number of pixels in the hsync. */ 3909/* Number of pixels in the hsync. */
3858# define TV_HSYNC_END_MASK 0x1fff0000 3910# define TV_HSYNC_END_MASK 0x1fff0000
3859# define TV_HSYNC_END_SHIFT 16 3911# define TV_HSYNC_END_SHIFT 16
@@ -3861,7 +3913,7 @@ enum skl_disp_power_wells {
3861# define TV_HTOTAL_MASK 0x00001fff 3913# define TV_HTOTAL_MASK 0x00001fff
3862# define TV_HTOTAL_SHIFT 0 3914# define TV_HTOTAL_SHIFT 0
3863 3915
3864#define TV_H_CTL_2 0x68034 3916#define TV_H_CTL_2 _MMIO(0x68034)
3865/* Enables the colorburst (needed for non-component color) */ 3917/* Enables the colorburst (needed for non-component color) */
3866# define TV_BURST_ENA (1 << 31) 3918# define TV_BURST_ENA (1 << 31)
3867/* Offset of the colorburst from the start of hsync, in pixels minus one. */ 3919/* Offset of the colorburst from the start of hsync, in pixels minus one. */
@@ -3871,7 +3923,7 @@ enum skl_disp_power_wells {
3871# define TV_HBURST_LEN_SHIFT 0 3923# define TV_HBURST_LEN_SHIFT 0
3872# define TV_HBURST_LEN_MASK 0x0001fff 3924# define TV_HBURST_LEN_MASK 0x0001fff
3873 3925
3874#define TV_H_CTL_3 0x68038 3926#define TV_H_CTL_3 _MMIO(0x68038)
3875/* End of hblank, measured in pixels minus one from start of hsync */ 3927/* End of hblank, measured in pixels minus one from start of hsync */
3876# define TV_HBLANK_END_SHIFT 16 3928# define TV_HBLANK_END_SHIFT 16
3877# define TV_HBLANK_END_MASK 0x1fff0000 3929# define TV_HBLANK_END_MASK 0x1fff0000
@@ -3879,7 +3931,7 @@ enum skl_disp_power_wells {
3879# define TV_HBLANK_START_SHIFT 0 3931# define TV_HBLANK_START_SHIFT 0
3880# define TV_HBLANK_START_MASK 0x0001fff 3932# define TV_HBLANK_START_MASK 0x0001fff
3881 3933
3882#define TV_V_CTL_1 0x6803c 3934#define TV_V_CTL_1 _MMIO(0x6803c)
3883/* XXX */ 3935/* XXX */
3884# define TV_NBR_END_SHIFT 16 3936# define TV_NBR_END_SHIFT 16
3885# define TV_NBR_END_MASK 0x07ff0000 3937# define TV_NBR_END_MASK 0x07ff0000
@@ -3890,7 +3942,7 @@ enum skl_disp_power_wells {
3890# define TV_VI_END_F2_SHIFT 0 3942# define TV_VI_END_F2_SHIFT 0
3891# define TV_VI_END_F2_MASK 0x0000003f 3943# define TV_VI_END_F2_MASK 0x0000003f
3892 3944
3893#define TV_V_CTL_2 0x68040 3945#define TV_V_CTL_2 _MMIO(0x68040)
3894/* Length of vsync, in half lines */ 3946/* Length of vsync, in half lines */
3895# define TV_VSYNC_LEN_MASK 0x07ff0000 3947# define TV_VSYNC_LEN_MASK 0x07ff0000
3896# define TV_VSYNC_LEN_SHIFT 16 3948# define TV_VSYNC_LEN_SHIFT 16
@@ -3906,7 +3958,7 @@ enum skl_disp_power_wells {
3906# define TV_VSYNC_START_F2_MASK 0x0000007f 3958# define TV_VSYNC_START_F2_MASK 0x0000007f
3907# define TV_VSYNC_START_F2_SHIFT 0 3959# define TV_VSYNC_START_F2_SHIFT 0
3908 3960
3909#define TV_V_CTL_3 0x68044 3961#define TV_V_CTL_3 _MMIO(0x68044)
3910/* Enables generation of the equalization signal */ 3962/* Enables generation of the equalization signal */
3911# define TV_EQUAL_ENA (1 << 31) 3963# define TV_EQUAL_ENA (1 << 31)
3912/* Length of vsync, in half lines */ 3964/* Length of vsync, in half lines */
@@ -3924,7 +3976,7 @@ enum skl_disp_power_wells {
3924# define TV_VEQ_START_F2_MASK 0x000007f 3976# define TV_VEQ_START_F2_MASK 0x000007f
3925# define TV_VEQ_START_F2_SHIFT 0 3977# define TV_VEQ_START_F2_SHIFT 0
3926 3978
3927#define TV_V_CTL_4 0x68048 3979#define TV_V_CTL_4 _MMIO(0x68048)
3928/* 3980/*
3929 * Offset to start of vertical colorburst, measured in one less than the 3981 * Offset to start of vertical colorburst, measured in one less than the
3930 * number of lines from vertical start. 3982 * number of lines from vertical start.
@@ -3938,7 +3990,7 @@ enum skl_disp_power_wells {
3938# define TV_VBURST_END_F1_MASK 0x000000ff 3990# define TV_VBURST_END_F1_MASK 0x000000ff
3939# define TV_VBURST_END_F1_SHIFT 0 3991# define TV_VBURST_END_F1_SHIFT 0
3940 3992
3941#define TV_V_CTL_5 0x6804c 3993#define TV_V_CTL_5 _MMIO(0x6804c)
3942/* 3994/*
3943 * Offset to start of vertical colorburst, measured in one less than the 3995 * Offset to start of vertical colorburst, measured in one less than the
3944 * number of lines from vertical start. 3996 * number of lines from vertical start.
@@ -3952,7 +4004,7 @@ enum skl_disp_power_wells {
3952# define TV_VBURST_END_F2_MASK 0x000000ff 4004# define TV_VBURST_END_F2_MASK 0x000000ff
3953# define TV_VBURST_END_F2_SHIFT 0 4005# define TV_VBURST_END_F2_SHIFT 0
3954 4006
3955#define TV_V_CTL_6 0x68050 4007#define TV_V_CTL_6 _MMIO(0x68050)
3956/* 4008/*
3957 * Offset to start of vertical colorburst, measured in one less than the 4009 * Offset to start of vertical colorburst, measured in one less than the
3958 * number of lines from vertical start. 4010 * number of lines from vertical start.
@@ -3966,7 +4018,7 @@ enum skl_disp_power_wells {
3966# define TV_VBURST_END_F3_MASK 0x000000ff 4018# define TV_VBURST_END_F3_MASK 0x000000ff
3967# define TV_VBURST_END_F3_SHIFT 0 4019# define TV_VBURST_END_F3_SHIFT 0
3968 4020
3969#define TV_V_CTL_7 0x68054 4021#define TV_V_CTL_7 _MMIO(0x68054)
3970/* 4022/*
3971 * Offset to start of vertical colorburst, measured in one less than the 4023 * Offset to start of vertical colorburst, measured in one less than the
3972 * number of lines from vertical start. 4024 * number of lines from vertical start.
@@ -3980,7 +4032,7 @@ enum skl_disp_power_wells {
3980# define TV_VBURST_END_F4_MASK 0x000000ff 4032# define TV_VBURST_END_F4_MASK 0x000000ff
3981# define TV_VBURST_END_F4_SHIFT 0 4033# define TV_VBURST_END_F4_SHIFT 0
3982 4034
3983#define TV_SC_CTL_1 0x68060 4035#define TV_SC_CTL_1 _MMIO(0x68060)
3984/* Turns on the first subcarrier phase generation DDA */ 4036/* Turns on the first subcarrier phase generation DDA */
3985# define TV_SC_DDA1_EN (1 << 31) 4037# define TV_SC_DDA1_EN (1 << 31)
3986/* Turns on the first subcarrier phase generation DDA */ 4038/* Turns on the first subcarrier phase generation DDA */
@@ -4002,7 +4054,7 @@ enum skl_disp_power_wells {
4002# define TV_SCDDA1_INC_MASK 0x00000fff 4054# define TV_SCDDA1_INC_MASK 0x00000fff
4003# define TV_SCDDA1_INC_SHIFT 0 4055# define TV_SCDDA1_INC_SHIFT 0
4004 4056
4005#define TV_SC_CTL_2 0x68064 4057#define TV_SC_CTL_2 _MMIO(0x68064)
4006/* Sets the rollover for the second subcarrier phase generation DDA */ 4058/* Sets the rollover for the second subcarrier phase generation DDA */
4007# define TV_SCDDA2_SIZE_MASK 0x7fff0000 4059# define TV_SCDDA2_SIZE_MASK 0x7fff0000
4008# define TV_SCDDA2_SIZE_SHIFT 16 4060# define TV_SCDDA2_SIZE_SHIFT 16
@@ -4010,7 +4062,7 @@ enum skl_disp_power_wells {
4010# define TV_SCDDA2_INC_MASK 0x00007fff 4062# define TV_SCDDA2_INC_MASK 0x00007fff
4011# define TV_SCDDA2_INC_SHIFT 0 4063# define TV_SCDDA2_INC_SHIFT 0
4012 4064
4013#define TV_SC_CTL_3 0x68068 4065#define TV_SC_CTL_3 _MMIO(0x68068)
4014/* Sets the rollover for the third subcarrier phase generation DDA */ 4066/* Sets the rollover for the third subcarrier phase generation DDA */
4015# define TV_SCDDA3_SIZE_MASK 0x7fff0000 4067# define TV_SCDDA3_SIZE_MASK 0x7fff0000
4016# define TV_SCDDA3_SIZE_SHIFT 16 4068# define TV_SCDDA3_SIZE_SHIFT 16
@@ -4018,7 +4070,7 @@ enum skl_disp_power_wells {
4018# define TV_SCDDA3_INC_MASK 0x00007fff 4070# define TV_SCDDA3_INC_MASK 0x00007fff
4019# define TV_SCDDA3_INC_SHIFT 0 4071# define TV_SCDDA3_INC_SHIFT 0
4020 4072
4021#define TV_WIN_POS 0x68070 4073#define TV_WIN_POS _MMIO(0x68070)
4022/* X coordinate of the display from the start of horizontal active */ 4074/* X coordinate of the display from the start of horizontal active */
4023# define TV_XPOS_MASK 0x1fff0000 4075# define TV_XPOS_MASK 0x1fff0000
4024# define TV_XPOS_SHIFT 16 4076# define TV_XPOS_SHIFT 16
@@ -4026,7 +4078,7 @@ enum skl_disp_power_wells {
4026# define TV_YPOS_MASK 0x00000fff 4078# define TV_YPOS_MASK 0x00000fff
4027# define TV_YPOS_SHIFT 0 4079# define TV_YPOS_SHIFT 0
4028 4080
4029#define TV_WIN_SIZE 0x68074 4081#define TV_WIN_SIZE _MMIO(0x68074)
4030/* Horizontal size of the display window, measured in pixels*/ 4082/* Horizontal size of the display window, measured in pixels*/
4031# define TV_XSIZE_MASK 0x1fff0000 4083# define TV_XSIZE_MASK 0x1fff0000
4032# define TV_XSIZE_SHIFT 16 4084# define TV_XSIZE_SHIFT 16
@@ -4038,7 +4090,7 @@ enum skl_disp_power_wells {
4038# define TV_YSIZE_MASK 0x00000fff 4090# define TV_YSIZE_MASK 0x00000fff
4039# define TV_YSIZE_SHIFT 0 4091# define TV_YSIZE_SHIFT 0
4040 4092
4041#define TV_FILTER_CTL_1 0x68080 4093#define TV_FILTER_CTL_1 _MMIO(0x68080)
4042/* 4094/*
4043 * Enables automatic scaling calculation. 4095 * Enables automatic scaling calculation.
4044 * 4096 *
@@ -4071,7 +4123,7 @@ enum skl_disp_power_wells {
4071# define TV_HSCALE_FRAC_MASK 0x00003fff 4123# define TV_HSCALE_FRAC_MASK 0x00003fff
4072# define TV_HSCALE_FRAC_SHIFT 0 4124# define TV_HSCALE_FRAC_SHIFT 0
4073 4125
4074#define TV_FILTER_CTL_2 0x68084 4126#define TV_FILTER_CTL_2 _MMIO(0x68084)
4075/* 4127/*
4076 * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 4128 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
4077 * 4129 *
@@ -4087,7 +4139,7 @@ enum skl_disp_power_wells {
4087# define TV_VSCALE_FRAC_MASK 0x00007fff 4139# define TV_VSCALE_FRAC_MASK 0x00007fff
4088# define TV_VSCALE_FRAC_SHIFT 0 4140# define TV_VSCALE_FRAC_SHIFT 0
4089 4141
4090#define TV_FILTER_CTL_3 0x68088 4142#define TV_FILTER_CTL_3 _MMIO(0x68088)
4091/* 4143/*
4092 * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 4144 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
4093 * 4145 *
@@ -4107,7 +4159,7 @@ enum skl_disp_power_wells {
4107# define TV_VSCALE_IP_FRAC_MASK 0x00007fff 4159# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
4108# define TV_VSCALE_IP_FRAC_SHIFT 0 4160# define TV_VSCALE_IP_FRAC_SHIFT 0
4109 4161
4110#define TV_CC_CONTROL 0x68090 4162#define TV_CC_CONTROL _MMIO(0x68090)
4111# define TV_CC_ENABLE (1 << 31) 4163# define TV_CC_ENABLE (1 << 31)
4112/* 4164/*
4113 * Specifies which field to send the CC data in. 4165 * Specifies which field to send the CC data in.
@@ -4123,7 +4175,7 @@ enum skl_disp_power_wells {
4123# define TV_CC_LINE_MASK 0x0000003f 4175# define TV_CC_LINE_MASK 0x0000003f
4124# define TV_CC_LINE_SHIFT 0 4176# define TV_CC_LINE_SHIFT 0
4125 4177
4126#define TV_CC_DATA 0x68094 4178#define TV_CC_DATA _MMIO(0x68094)
4127# define TV_CC_RDY (1 << 31) 4179# define TV_CC_RDY (1 << 31)
4128/* Second word of CC data to be transmitted. */ 4180/* Second word of CC data to be transmitted. */
4129# define TV_CC_DATA_2_MASK 0x007f0000 4181# define TV_CC_DATA_2_MASK 0x007f0000
@@ -4132,20 +4184,20 @@ enum skl_disp_power_wells {
4132# define TV_CC_DATA_1_MASK 0x0000007f 4184# define TV_CC_DATA_1_MASK 0x0000007f
4133# define TV_CC_DATA_1_SHIFT 0 4185# define TV_CC_DATA_1_SHIFT 0
4134 4186
4135#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */ 4187#define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */
4136#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */ 4188#define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */
4137#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */ 4189#define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */
4138#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */ 4190#define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */
4139 4191
4140/* Display Port */ 4192/* Display Port */
4141#define DP_A 0x64000 /* eDP */ 4193#define DP_A _MMIO(0x64000) /* eDP */
4142#define DP_B 0x64100 4194#define DP_B _MMIO(0x64100)
4143#define DP_C 0x64200 4195#define DP_C _MMIO(0x64200)
4144#define DP_D 0x64300 4196#define DP_D _MMIO(0x64300)
4145 4197
4146#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B) 4198#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
4147#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C) 4199#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
4148#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D) 4200#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
4149 4201
4150#define DP_PORT_EN (1 << 31) 4202#define DP_PORT_EN (1 << 31)
4151#define DP_PIPEB_SELECT (1 << 30) 4203#define DP_PIPEB_SELECT (1 << 30)
@@ -4199,7 +4251,7 @@ enum skl_disp_power_wells {
4199 4251
4200/* eDP */ 4252/* eDP */
4201#define DP_PLL_FREQ_270MHZ (0 << 16) 4253#define DP_PLL_FREQ_270MHZ (0 << 16)
4202#define DP_PLL_FREQ_160MHZ (1 << 16) 4254#define DP_PLL_FREQ_162MHZ (1 << 16)
4203#define DP_PLL_FREQ_MASK (3 << 16) 4255#define DP_PLL_FREQ_MASK (3 << 16)
4204 4256
4205/* locked once port is enabled */ 4257/* locked once port is enabled */
@@ -4232,33 +4284,36 @@ enum skl_disp_power_wells {
4232 * is 20 bytes in each direction, hence the 5 fixed 4284 * is 20 bytes in each direction, hence the 5 fixed
4233 * data registers 4285 * data registers
4234 */ 4286 */
4235#define DPA_AUX_CH_CTL 0x64010 4287#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
4236#define DPA_AUX_CH_DATA1 0x64014 4288#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
4237#define DPA_AUX_CH_DATA2 0x64018 4289#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
4238#define DPA_AUX_CH_DATA3 0x6401c 4290#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
4239#define DPA_AUX_CH_DATA4 0x64020 4291#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
4240#define DPA_AUX_CH_DATA5 0x64024 4292#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)
4241 4293
4242#define DPB_AUX_CH_CTL 0x64110 4294#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
4243#define DPB_AUX_CH_DATA1 0x64114 4295#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
4244#define DPB_AUX_CH_DATA2 0x64118 4296#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
4245#define DPB_AUX_CH_DATA3 0x6411c 4297#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
4246#define DPB_AUX_CH_DATA4 0x64120 4298#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
4247#define DPB_AUX_CH_DATA5 0x64124 4299#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)
4248 4300
4249#define DPC_AUX_CH_CTL 0x64210 4301#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
4250#define DPC_AUX_CH_DATA1 0x64214 4302#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
4251#define DPC_AUX_CH_DATA2 0x64218 4303#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
4252#define DPC_AUX_CH_DATA3 0x6421c 4304#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
4253#define DPC_AUX_CH_DATA4 0x64220 4305#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
4254#define DPC_AUX_CH_DATA5 0x64224 4306#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)
4255 4307
4256#define DPD_AUX_CH_CTL 0x64310 4308#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
4257#define DPD_AUX_CH_DATA1 0x64314 4309#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
4258#define DPD_AUX_CH_DATA2 0x64318 4310#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
4259#define DPD_AUX_CH_DATA3 0x6431c 4311#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
4260#define DPD_AUX_CH_DATA4 0x64320 4312#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
4261#define DPD_AUX_CH_DATA5 0x64324 4313#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
4314
4315#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
4316#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
4262 4317
4263#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) 4318#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
4264#define DP_AUX_CH_CTL_DONE (1 << 30) 4319#define DP_AUX_CH_CTL_DONE (1 << 30)
@@ -4335,10 +4390,10 @@ enum skl_disp_power_wells {
4335#define _PIPEB_LINK_N_G4X 0x71064 4390#define _PIPEB_LINK_N_G4X 0x71064
4336#define PIPEA_DP_LINK_N_MASK (0xffffff) 4391#define PIPEA_DP_LINK_N_MASK (0xffffff)
4337 4392
4338#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) 4393#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
4339#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) 4394#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
4340#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) 4395#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
4341#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) 4396#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
4342 4397
4343/* Display & cursor control */ 4398/* Display & cursor control */
4344 4399
@@ -4454,15 +4509,15 @@ enum skl_disp_power_wells {
4454 */ 4509 */
4455#define PIPE_EDP_OFFSET 0x7f000 4510#define PIPE_EDP_OFFSET 0x7f000
4456 4511
4457#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ 4512#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
4458 dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ 4513 dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
4459 dev_priv->info.display_mmio_offset) 4514 dev_priv->info.display_mmio_offset)
4460 4515
4461#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) 4516#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
4462#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) 4517#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
4463#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) 4518#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
4464#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) 4519#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
4465#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) 4520#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
4466 4521
4467#define _PIPE_MISC_A 0x70030 4522#define _PIPE_MISC_A 0x70030
4468#define _PIPE_MISC_B 0x71030 4523#define _PIPE_MISC_B 0x71030
@@ -4474,9 +4529,9 @@ enum skl_disp_power_wells {
4474#define PIPEMISC_DITHER_ENABLE (1<<4) 4529#define PIPEMISC_DITHER_ENABLE (1<<4)
4475#define PIPEMISC_DITHER_TYPE_MASK (3<<2) 4530#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
4476#define PIPEMISC_DITHER_TYPE_SP (0<<2) 4531#define PIPEMISC_DITHER_TYPE_SP (0<<2)
4477#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) 4532#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
4478 4533
4479#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 4534#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
4480#define PIPEB_LINE_COMPARE_INT_EN (1<<29) 4535#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
4481#define PIPEB_HLINE_INT_EN (1<<28) 4536#define PIPEB_HLINE_INT_EN (1<<28)
4482#define PIPEB_VBLANK_INT_EN (1<<27) 4537#define PIPEB_VBLANK_INT_EN (1<<27)
@@ -4497,7 +4552,7 @@ enum skl_disp_power_wells {
4497#define SPRITEE_FLIPDONE_INT_EN (1<<9) 4552#define SPRITEE_FLIPDONE_INT_EN (1<<9)
4498#define PLANEC_FLIPDONE_INT_EN (1<<8) 4553#define PLANEC_FLIPDONE_INT_EN (1<<8)
4499 4554
4500#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ 4555#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
4501#define SPRITEF_INVALID_GTT_INT_EN (1<<27) 4556#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
4502#define SPRITEE_INVALID_GTT_INT_EN (1<<26) 4557#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
4503#define PLANEC_INVALID_GTT_INT_EN (1<<25) 4558#define PLANEC_INVALID_GTT_INT_EN (1<<25)
@@ -4527,7 +4582,7 @@ enum skl_disp_power_wells {
4527#define DPINVGTT_STATUS_MASK 0xff 4582#define DPINVGTT_STATUS_MASK 0xff
4528#define DPINVGTT_STATUS_MASK_CHV 0xfff 4583#define DPINVGTT_STATUS_MASK_CHV 0xfff
4529 4584
4530#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030) 4585#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
4531#define DSPARB_CSTART_MASK (0x7f << 7) 4586#define DSPARB_CSTART_MASK (0x7f << 7)
4532#define DSPARB_CSTART_SHIFT 7 4587#define DSPARB_CSTART_SHIFT 7
4533#define DSPARB_BSTART_MASK (0x7f) 4588#define DSPARB_BSTART_MASK (0x7f)
@@ -4542,7 +4597,7 @@ enum skl_disp_power_wells {
4542#define DSPARB_SPRITEC_MASK_VLV (0xff << 16) 4597#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
4543#define DSPARB_SPRITED_SHIFT_VLV 24 4598#define DSPARB_SPRITED_SHIFT_VLV 24
4544#define DSPARB_SPRITED_MASK_VLV (0xff << 24) 4599#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
4545#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ 4600#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
4546#define DSPARB_SPRITEA_HI_SHIFT_VLV 0 4601#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
4547#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0) 4602#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
4548#define DSPARB_SPRITEB_HI_SHIFT_VLV 4 4603#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
@@ -4555,14 +4610,14 @@ enum skl_disp_power_wells {
4555#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16) 4610#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
4556#define DSPARB_SPRITEF_HI_SHIFT_VLV 20 4611#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
4557#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20) 4612#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
4558#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ 4613#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
4559#define DSPARB_SPRITEE_SHIFT_VLV 0 4614#define DSPARB_SPRITEE_SHIFT_VLV 0
4560#define DSPARB_SPRITEE_MASK_VLV (0xff << 0) 4615#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
4561#define DSPARB_SPRITEF_SHIFT_VLV 8 4616#define DSPARB_SPRITEF_SHIFT_VLV 8
4562#define DSPARB_SPRITEF_MASK_VLV (0xff << 8) 4617#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
4563 4618
4564/* pnv/gen4/g4x/vlv/chv */ 4619/* pnv/gen4/g4x/vlv/chv */
4565#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) 4620#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
4566#define DSPFW_SR_SHIFT 23 4621#define DSPFW_SR_SHIFT 23
4567#define DSPFW_SR_MASK (0x1ff<<23) 4622#define DSPFW_SR_MASK (0x1ff<<23)
4568#define DSPFW_CURSORB_SHIFT 16 4623#define DSPFW_CURSORB_SHIFT 16
@@ -4573,7 +4628,7 @@ enum skl_disp_power_wells {
4573#define DSPFW_PLANEA_SHIFT 0 4628#define DSPFW_PLANEA_SHIFT 0
4574#define DSPFW_PLANEA_MASK (0x7f<<0) 4629#define DSPFW_PLANEA_MASK (0x7f<<0)
4575#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */ 4630#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
4576#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) 4631#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
4577#define DSPFW_FBC_SR_EN (1<<31) /* g4x */ 4632#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
4578#define DSPFW_FBC_SR_SHIFT 28 4633#define DSPFW_FBC_SR_SHIFT 28
4579#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */ 4634#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
@@ -4589,7 +4644,7 @@ enum skl_disp_power_wells {
4589#define DSPFW_SPRITEA_SHIFT 0 4644#define DSPFW_SPRITEA_SHIFT 0
4590#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ 4645#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
4591#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ 4646#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
4592#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) 4647#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
4593#define DSPFW_HPLL_SR_EN (1<<31) 4648#define DSPFW_HPLL_SR_EN (1<<31)
4594#define PINEVIEW_SELF_REFRESH_EN (1<<30) 4649#define PINEVIEW_SELF_REFRESH_EN (1<<30)
4595#define DSPFW_CURSOR_SR_SHIFT 24 4650#define DSPFW_CURSOR_SR_SHIFT 24
@@ -4600,14 +4655,14 @@ enum skl_disp_power_wells {
4600#define DSPFW_HPLL_SR_MASK (0x1ff<<0) 4655#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
4601 4656
4602/* vlv/chv */ 4657/* vlv/chv */
4603#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070) 4658#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
4604#define DSPFW_SPRITEB_WM1_SHIFT 16 4659#define DSPFW_SPRITEB_WM1_SHIFT 16
4605#define DSPFW_SPRITEB_WM1_MASK (0xff<<16) 4660#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
4606#define DSPFW_CURSORA_WM1_SHIFT 8 4661#define DSPFW_CURSORA_WM1_SHIFT 8
4607#define DSPFW_CURSORA_WM1_MASK (0x3f<<8) 4662#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
4608#define DSPFW_SPRITEA_WM1_SHIFT 0 4663#define DSPFW_SPRITEA_WM1_SHIFT 0
4609#define DSPFW_SPRITEA_WM1_MASK (0xff<<0) 4664#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
4610#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074) 4665#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
4611#define DSPFW_PLANEB_WM1_SHIFT 24 4666#define DSPFW_PLANEB_WM1_SHIFT 24
4612#define DSPFW_PLANEB_WM1_MASK (0xff<<24) 4667#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
4613#define DSPFW_PLANEA_WM1_SHIFT 16 4668#define DSPFW_PLANEA_WM1_SHIFT 16
@@ -4616,11 +4671,11 @@ enum skl_disp_power_wells {
4616#define DSPFW_CURSORB_WM1_MASK (0x3f<<8) 4671#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
4617#define DSPFW_CURSOR_SR_WM1_SHIFT 0 4672#define DSPFW_CURSOR_SR_WM1_SHIFT 0
4618#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0) 4673#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
4619#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078) 4674#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
4620#define DSPFW_SR_WM1_SHIFT 0 4675#define DSPFW_SR_WM1_SHIFT 0
4621#define DSPFW_SR_WM1_MASK (0x1ff<<0) 4676#define DSPFW_SR_WM1_MASK (0x1ff<<0)
4622#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c) 4677#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
4623#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */ 4678#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
4624#define DSPFW_SPRITED_WM1_SHIFT 24 4679#define DSPFW_SPRITED_WM1_SHIFT 24
4625#define DSPFW_SPRITED_WM1_MASK (0xff<<24) 4680#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
4626#define DSPFW_SPRITED_SHIFT 16 4681#define DSPFW_SPRITED_SHIFT 16
@@ -4629,7 +4684,7 @@ enum skl_disp_power_wells {
4629#define DSPFW_SPRITEC_WM1_MASK (0xff<<8) 4684#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
4630#define DSPFW_SPRITEC_SHIFT 0 4685#define DSPFW_SPRITEC_SHIFT 0
4631#define DSPFW_SPRITEC_MASK_VLV (0xff<<0) 4686#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
4632#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) 4687#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
4633#define DSPFW_SPRITEF_WM1_SHIFT 24 4688#define DSPFW_SPRITEF_WM1_SHIFT 24
4634#define DSPFW_SPRITEF_WM1_MASK (0xff<<24) 4689#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
4635#define DSPFW_SPRITEF_SHIFT 16 4690#define DSPFW_SPRITEF_SHIFT 16
@@ -4638,7 +4693,7 @@ enum skl_disp_power_wells {
4638#define DSPFW_SPRITEE_WM1_MASK (0xff<<8) 4693#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
4639#define DSPFW_SPRITEE_SHIFT 0 4694#define DSPFW_SPRITEE_SHIFT 0
4640#define DSPFW_SPRITEE_MASK_VLV (0xff<<0) 4695#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
4641#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ 4696#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
4642#define DSPFW_PLANEC_WM1_SHIFT 24 4697#define DSPFW_PLANEC_WM1_SHIFT 24
4643#define DSPFW_PLANEC_WM1_MASK (0xff<<24) 4698#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
4644#define DSPFW_PLANEC_SHIFT 16 4699#define DSPFW_PLANEC_SHIFT 16
@@ -4649,7 +4704,7 @@ enum skl_disp_power_wells {
4649#define DSPFW_CURSORC_MASK (0x3f<<0) 4704#define DSPFW_CURSORC_MASK (0x3f<<0)
4650 4705
4651/* vlv/chv high order bits */ 4706/* vlv/chv high order bits */
4652#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) 4707#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
4653#define DSPFW_SR_HI_SHIFT 24 4708#define DSPFW_SR_HI_SHIFT 24
4654#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ 4709#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
4655#define DSPFW_SPRITEF_HI_SHIFT 23 4710#define DSPFW_SPRITEF_HI_SHIFT 23
@@ -4670,7 +4725,7 @@ enum skl_disp_power_wells {
4670#define DSPFW_SPRITEA_HI_MASK (1<<4) 4725#define DSPFW_SPRITEA_HI_MASK (1<<4)
4671#define DSPFW_PLANEA_HI_SHIFT 0 4726#define DSPFW_PLANEA_HI_SHIFT 0
4672#define DSPFW_PLANEA_HI_MASK (1<<0) 4727#define DSPFW_PLANEA_HI_MASK (1<<0)
4673#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) 4728#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
4674#define DSPFW_SR_WM1_HI_SHIFT 24 4729#define DSPFW_SR_WM1_HI_SHIFT 24
4675#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ 4730#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
4676#define DSPFW_SPRITEF_WM1_HI_SHIFT 23 4731#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
@@ -4693,7 +4748,7 @@ enum skl_disp_power_wells {
4693#define DSPFW_PLANEA_WM1_HI_MASK (1<<0) 4748#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
4694 4749
4695/* drain latency register values*/ 4750/* drain latency register values*/
4696#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) 4751#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
4697#define DDL_CURSOR_SHIFT 24 4752#define DDL_CURSOR_SHIFT 24
4698#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) 4753#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
4699#define DDL_PLANE_SHIFT 0 4754#define DDL_PLANE_SHIFT 0
@@ -4701,7 +4756,7 @@ enum skl_disp_power_wells {
4701#define DDL_PRECISION_LOW (0<<7) 4756#define DDL_PRECISION_LOW (0<<7)
4702#define DRAIN_LATENCY_MASK 0x7f 4757#define DRAIN_LATENCY_MASK 0x7f
4703 4758
4704#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) 4759#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
4705#define CBR_PND_DEADLINE_DISABLE (1<<31) 4760#define CBR_PND_DEADLINE_DISABLE (1<<31)
4706#define CBR_PWM_CLOCK_MUX_SELECT (1<<30) 4761#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
4707 4762
@@ -4739,51 +4794,51 @@ enum skl_disp_power_wells {
4739#define I965_CURSOR_DFT_WM 8 4794#define I965_CURSOR_DFT_WM 8
4740 4795
4741/* Watermark register definitions for SKL */ 4796/* Watermark register definitions for SKL */
4742#define CUR_WM_A_0 0x70140 4797#define _CUR_WM_A_0 0x70140
4743#define CUR_WM_B_0 0x71140 4798#define _CUR_WM_B_0 0x71140
4744#define PLANE_WM_1_A_0 0x70240 4799#define _PLANE_WM_1_A_0 0x70240
4745#define PLANE_WM_1_B_0 0x71240 4800#define _PLANE_WM_1_B_0 0x71240
4746#define PLANE_WM_2_A_0 0x70340 4801#define _PLANE_WM_2_A_0 0x70340
4747#define PLANE_WM_2_B_0 0x71340 4802#define _PLANE_WM_2_B_0 0x71340
4748#define PLANE_WM_TRANS_1_A_0 0x70268 4803#define _PLANE_WM_TRANS_1_A_0 0x70268
4749#define PLANE_WM_TRANS_1_B_0 0x71268 4804#define _PLANE_WM_TRANS_1_B_0 0x71268
4750#define PLANE_WM_TRANS_2_A_0 0x70368 4805#define _PLANE_WM_TRANS_2_A_0 0x70368
4751#define PLANE_WM_TRANS_2_B_0 0x71368 4806#define _PLANE_WM_TRANS_2_B_0 0x71368
4752#define CUR_WM_TRANS_A_0 0x70168 4807#define _CUR_WM_TRANS_A_0 0x70168
4753#define CUR_WM_TRANS_B_0 0x71168 4808#define _CUR_WM_TRANS_B_0 0x71168
4754#define PLANE_WM_EN (1 << 31) 4809#define PLANE_WM_EN (1 << 31)
4755#define PLANE_WM_LINES_SHIFT 14 4810#define PLANE_WM_LINES_SHIFT 14
4756#define PLANE_WM_LINES_MASK 0x1f 4811#define PLANE_WM_LINES_MASK 0x1f
4757#define PLANE_WM_BLOCKS_MASK 0x3ff 4812#define PLANE_WM_BLOCKS_MASK 0x3ff
4758 4813
4759#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0) 4814#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
4760#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level))) 4815#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
4761#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0) 4816#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0)
4762 4817
4763#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0) 4818#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
4764#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0) 4819#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
4765#define _PLANE_WM_BASE(pipe, plane) \ 4820#define _PLANE_WM_BASE(pipe, plane) \
4766 _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe)) 4821 _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
4767#define PLANE_WM(pipe, plane, level) \ 4822#define PLANE_WM(pipe, plane, level) \
4768 (_PLANE_WM_BASE(pipe, plane) + ((4) * (level))) 4823 _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
4769#define _PLANE_WM_TRANS_1(pipe) \ 4824#define _PLANE_WM_TRANS_1(pipe) \
4770 _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0) 4825 _PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0)
4771#define _PLANE_WM_TRANS_2(pipe) \ 4826#define _PLANE_WM_TRANS_2(pipe) \
4772 _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0) 4827 _PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0)
4773#define PLANE_WM_TRANS(pipe, plane) \ 4828#define PLANE_WM_TRANS(pipe, plane) \
4774 _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)) 4829 _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
4775 4830
4776/* define the Watermark register on Ironlake */ 4831/* define the Watermark register on Ironlake */
4777#define WM0_PIPEA_ILK 0x45100 4832#define WM0_PIPEA_ILK _MMIO(0x45100)
4778#define WM0_PIPE_PLANE_MASK (0xffff<<16) 4833#define WM0_PIPE_PLANE_MASK (0xffff<<16)
4779#define WM0_PIPE_PLANE_SHIFT 16 4834#define WM0_PIPE_PLANE_SHIFT 16
4780#define WM0_PIPE_SPRITE_MASK (0xff<<8) 4835#define WM0_PIPE_SPRITE_MASK (0xff<<8)
4781#define WM0_PIPE_SPRITE_SHIFT 8 4836#define WM0_PIPE_SPRITE_SHIFT 8
4782#define WM0_PIPE_CURSOR_MASK (0xff) 4837#define WM0_PIPE_CURSOR_MASK (0xff)
4783 4838
4784#define WM0_PIPEB_ILK 0x45104 4839#define WM0_PIPEB_ILK _MMIO(0x45104)
4785#define WM0_PIPEC_IVB 0x45200 4840#define WM0_PIPEC_IVB _MMIO(0x45200)
4786#define WM1_LP_ILK 0x45108 4841#define WM1_LP_ILK _MMIO(0x45108)
4787#define WM1_LP_SR_EN (1<<31) 4842#define WM1_LP_SR_EN (1<<31)
4788#define WM1_LP_LATENCY_SHIFT 24 4843#define WM1_LP_LATENCY_SHIFT 24
4789#define WM1_LP_LATENCY_MASK (0x7f<<24) 4844#define WM1_LP_LATENCY_MASK (0x7f<<24)
@@ -4793,13 +4848,13 @@ enum skl_disp_power_wells {
4793#define WM1_LP_SR_MASK (0x7ff<<8) 4848#define WM1_LP_SR_MASK (0x7ff<<8)
4794#define WM1_LP_SR_SHIFT 8 4849#define WM1_LP_SR_SHIFT 8
4795#define WM1_LP_CURSOR_MASK (0xff) 4850#define WM1_LP_CURSOR_MASK (0xff)
4796#define WM2_LP_ILK 0x4510c 4851#define WM2_LP_ILK _MMIO(0x4510c)
4797#define WM2_LP_EN (1<<31) 4852#define WM2_LP_EN (1<<31)
4798#define WM3_LP_ILK 0x45110 4853#define WM3_LP_ILK _MMIO(0x45110)
4799#define WM3_LP_EN (1<<31) 4854#define WM3_LP_EN (1<<31)
4800#define WM1S_LP_ILK 0x45120 4855#define WM1S_LP_ILK _MMIO(0x45120)
4801#define WM2S_LP_IVB 0x45124 4856#define WM2S_LP_IVB _MMIO(0x45124)
4802#define WM3S_LP_IVB 0x45128 4857#define WM3S_LP_IVB _MMIO(0x45128)
4803#define WM1S_LP_EN (1<<31) 4858#define WM1S_LP_EN (1<<31)
4804 4859
4805#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ 4860#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
@@ -4807,7 +4862,7 @@ enum skl_disp_power_wells {
4807 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur)) 4862 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
4808 4863
4809/* Memory latency timer register */ 4864/* Memory latency timer register */
4810#define MLTR_ILK 0x11222 4865#define MLTR_ILK _MMIO(0x11222)
4811#define MLTR_WM1_SHIFT 0 4866#define MLTR_WM1_SHIFT 0
4812#define MLTR_WM2_SHIFT 8 4867#define MLTR_WM2_SHIFT 8
4813/* the unit of memory self-refresh latency time is 0.5us */ 4868/* the unit of memory self-refresh latency time is 0.5us */
@@ -4815,7 +4870,7 @@ enum skl_disp_power_wells {
4815 4870
4816 4871
4817/* the address where we get all kinds of latency value */ 4872/* the address where we get all kinds of latency value */
4818#define SSKPD 0x5d10 4873#define SSKPD _MMIO(0x5d10)
4819#define SSKPD_WM_MASK 0x3f 4874#define SSKPD_WM_MASK 0x3f
4820#define SSKPD_WM0_SHIFT 0 4875#define SSKPD_WM0_SHIFT 0
4821#define SSKPD_WM1_SHIFT 8 4876#define SSKPD_WM1_SHIFT 8
@@ -4848,8 +4903,8 @@ enum skl_disp_power_wells {
4848/* GM45+ just has to be different */ 4903/* GM45+ just has to be different */
4849#define _PIPEA_FRMCOUNT_G4X 0x70040 4904#define _PIPEA_FRMCOUNT_G4X 0x70040
4850#define _PIPEA_FLIPCOUNT_G4X 0x70044 4905#define _PIPEA_FLIPCOUNT_G4X 0x70044
4851#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X) 4906#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
4852#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X) 4907#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
4853 4908
4854/* Cursor A & B regs */ 4909/* Cursor A & B regs */
4855#define _CURACNTR 0x70080 4910#define _CURACNTR 0x70080
@@ -4887,7 +4942,7 @@ enum skl_disp_power_wells {
4887#define CURSOR_POS_SIGN 0x8000 4942#define CURSOR_POS_SIGN 0x8000
4888#define CURSOR_X_SHIFT 0 4943#define CURSOR_X_SHIFT 0
4889#define CURSOR_Y_SHIFT 16 4944#define CURSOR_Y_SHIFT 16
4890#define CURSIZE 0x700a0 4945#define CURSIZE _MMIO(0x700a0)
4891#define _CURBCNTR 0x700c0 4946#define _CURBCNTR 0x700c0
4892#define _CURBBASE 0x700c4 4947#define _CURBBASE 0x700c4
4893#define _CURBPOS 0x700c8 4948#define _CURBPOS 0x700c8
@@ -4896,7 +4951,7 @@ enum skl_disp_power_wells {
4896#define _CURBBASE_IVB 0x71084 4951#define _CURBBASE_IVB 0x71084
4897#define _CURBPOS_IVB 0x71088 4952#define _CURBPOS_IVB 0x71088
4898 4953
4899#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \ 4954#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
4900 dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ 4955 dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
4901 dev_priv->info.display_mmio_offset) 4956 dev_priv->info.display_mmio_offset)
4902 4957
@@ -4957,16 +5012,16 @@ enum skl_disp_power_wells {
4957#define _DSPAOFFSET 0x701A4 /* HSW */ 5012#define _DSPAOFFSET 0x701A4 /* HSW */
4958#define _DSPASURFLIVE 0x701AC 5013#define _DSPASURFLIVE 0x701AC
4959 5014
4960#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) 5015#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
4961#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) 5016#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
4962#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) 5017#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
4963#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) 5018#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
4964#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) 5019#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
4965#define DSPSURF(plane) _PIPE2(plane, _DSPASURF) 5020#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
4966#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) 5021#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
4967#define DSPLINOFF(plane) DSPADDR(plane) 5022#define DSPLINOFF(plane) DSPADDR(plane)
4968#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) 5023#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
4969#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) 5024#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
4970 5025
4971/* CHV pipe B blender and primary plane */ 5026/* CHV pipe B blender and primary plane */
4972#define _CHV_BLEND_A 0x60a00 5027#define _CHV_BLEND_A 0x60a00
@@ -4980,11 +5035,11 @@ enum skl_disp_power_wells {
4980#define _PRIMCNSTALPHA_A 0x60a10 5035#define _PRIMCNSTALPHA_A 0x60a10
4981#define PRIM_CONST_ALPHA_ENABLE (1<<31) 5036#define PRIM_CONST_ALPHA_ENABLE (1<<31)
4982 5037
4983#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A) 5038#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
4984#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A) 5039#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
4985#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A) 5040#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
4986#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A) 5041#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
4987#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A) 5042#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)
4988 5043
4989/* Display/Sprite base address macros */ 5044/* Display/Sprite base address macros */
4990#define DISP_BASEADDR_MASK (0xfffff000) 5045#define DISP_BASEADDR_MASK (0xfffff000)
@@ -5002,9 +5057,10 @@ enum skl_disp_power_wells {
5002 * [10:1f] all 5057 * [10:1f] all
5003 * [30:32] all 5058 * [30:32] all
5004 */ 5059 */
5005#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4) 5060#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
5006#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4) 5061#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
5007#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4) 5062#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
5063#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
5008 5064
5009/* Pipe B */ 5065/* Pipe B */
5010#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) 5066#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
@@ -5086,18 +5142,18 @@ enum skl_disp_power_wells {
5086#define _DVSBSCALE 0x73204 5142#define _DVSBSCALE 0x73204
5087#define _DVSBGAMC 0x73300 5143#define _DVSBGAMC 0x73300
5088 5144
5089#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR) 5145#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
5090#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) 5146#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
5091#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) 5147#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
5092#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS) 5148#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
5093#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF) 5149#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
5094#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) 5150#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
5095#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE) 5151#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
5096#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE) 5152#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
5097#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) 5153#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
5098#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) 5154#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
5099#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) 5155#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
5100#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) 5156#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
5101 5157
5102#define _SPRA_CTL 0x70280 5158#define _SPRA_CTL 0x70280
5103#define SPRITE_ENABLE (1<<31) 5159#define SPRITE_ENABLE (1<<31)
@@ -5160,20 +5216,20 @@ enum skl_disp_power_wells {
5160#define _SPRB_SCALE 0x71304 5216#define _SPRB_SCALE 0x71304
5161#define _SPRB_GAMC 0x71400 5217#define _SPRB_GAMC 0x71400
5162 5218
5163#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL) 5219#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
5164#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) 5220#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
5165#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) 5221#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
5166#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS) 5222#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
5167#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) 5223#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
5168#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) 5224#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
5169#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) 5225#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
5170#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) 5226#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
5171#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) 5227#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
5172#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) 5228#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
5173#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) 5229#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
5174#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) 5230#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
5175#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 5231#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
5176#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) 5232#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
5177 5233
5178#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) 5234#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
5179#define SP_ENABLE (1<<31) 5235#define SP_ENABLE (1<<31)
@@ -5223,18 +5279,18 @@ enum skl_disp_power_wells {
5223#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) 5279#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
5224#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) 5280#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
5225 5281
5226#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) 5282#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
5227#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) 5283#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
5228#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) 5284#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
5229#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) 5285#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
5230#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) 5286#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
5231#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) 5287#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
5232#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) 5288#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
5233#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) 5289#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
5234#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) 5290#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
5235#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) 5291#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
5236#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) 5292#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
5237#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) 5293#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
5238 5294
5239/* 5295/*
5240 * CHV pipe B sprite CSC 5296 * CHV pipe B sprite CSC
@@ -5243,29 +5299,29 @@ enum skl_disp_power_wells {
5243 * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| 5299 * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
5244 * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| 5300 * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
5245 */ 5301 */
5246#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000) 5302#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
5247#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000) 5303#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
5248#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000) 5304#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
5249#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ 5305#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
5250#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ 5306#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
5251 5307
5252#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000) 5308#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
5253#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000) 5309#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
5254#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000) 5310#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
5255#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000) 5311#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
5256#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000) 5312#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
5257#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ 5313#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
5258#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ 5314#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
5259 5315
5260#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000) 5316#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
5261#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000) 5317#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
5262#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000) 5318#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
5263#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ 5319#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
5264#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ 5320#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
5265 5321
5266#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000) 5322#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
5267#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000) 5323#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
5268#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000) 5324#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
5269#define SPCSC_OMAX(x) ((x) << 16) /* u10 */ 5325#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
5270#define SPCSC_OMIN(x) ((x) << 0) /* u10 */ 5326#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
5271 5327
@@ -5346,7 +5402,7 @@ enum skl_disp_power_wells {
5346#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B) 5402#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
5347#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B) 5403#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
5348#define PLANE_CTL(pipe, plane) \ 5404#define PLANE_CTL(pipe, plane) \
5349 _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe)) 5405 _MMIO_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
5350 5406
5351#define _PLANE_STRIDE_1_B 0x71188 5407#define _PLANE_STRIDE_1_B 0x71188
5352#define _PLANE_STRIDE_2_B 0x71288 5408#define _PLANE_STRIDE_2_B 0x71288
@@ -5358,7 +5414,7 @@ enum skl_disp_power_wells {
5358#define _PLANE_STRIDE_3(pipe) \ 5414#define _PLANE_STRIDE_3(pipe) \
5359 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) 5415 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
5360#define PLANE_STRIDE(pipe, plane) \ 5416#define PLANE_STRIDE(pipe, plane) \
5361 _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) 5417 _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
5362 5418
5363#define _PLANE_POS_1_B 0x7118c 5419#define _PLANE_POS_1_B 0x7118c
5364#define _PLANE_POS_2_B 0x7128c 5420#define _PLANE_POS_2_B 0x7128c
@@ -5367,7 +5423,7 @@ enum skl_disp_power_wells {
5367#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B) 5423#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
5368#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B) 5424#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
5369#define PLANE_POS(pipe, plane) \ 5425#define PLANE_POS(pipe, plane) \
5370 _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe)) 5426 _MMIO_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
5371 5427
5372#define _PLANE_SIZE_1_B 0x71190 5428#define _PLANE_SIZE_1_B 0x71190
5373#define _PLANE_SIZE_2_B 0x71290 5429#define _PLANE_SIZE_2_B 0x71290
@@ -5376,7 +5432,7 @@ enum skl_disp_power_wells {
5376#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B) 5432#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
5377#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B) 5433#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
5378#define PLANE_SIZE(pipe, plane) \ 5434#define PLANE_SIZE(pipe, plane) \
5379 _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe)) 5435 _MMIO_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
5380 5436
5381#define _PLANE_SURF_1_B 0x7119c 5437#define _PLANE_SURF_1_B 0x7119c
5382#define _PLANE_SURF_2_B 0x7129c 5438#define _PLANE_SURF_2_B 0x7129c
@@ -5385,35 +5441,35 @@ enum skl_disp_power_wells {
5385#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B) 5441#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
5386#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) 5442#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
5387#define PLANE_SURF(pipe, plane) \ 5443#define PLANE_SURF(pipe, plane) \
5388 _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) 5444 _MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
5389 5445
5390#define _PLANE_OFFSET_1_B 0x711a4 5446#define _PLANE_OFFSET_1_B 0x711a4
5391#define _PLANE_OFFSET_2_B 0x712a4 5447#define _PLANE_OFFSET_2_B 0x712a4
5392#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B) 5448#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
5393#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B) 5449#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
5394#define PLANE_OFFSET(pipe, plane) \ 5450#define PLANE_OFFSET(pipe, plane) \
5395 _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe)) 5451 _MMIO_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
5396 5452
5397#define _PLANE_KEYVAL_1_B 0x71194 5453#define _PLANE_KEYVAL_1_B 0x71194
5398#define _PLANE_KEYVAL_2_B 0x71294 5454#define _PLANE_KEYVAL_2_B 0x71294
5399#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B) 5455#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
5400#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B) 5456#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
5401#define PLANE_KEYVAL(pipe, plane) \ 5457#define PLANE_KEYVAL(pipe, plane) \
5402 _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe)) 5458 _MMIO_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
5403 5459
5404#define _PLANE_KEYMSK_1_B 0x71198 5460#define _PLANE_KEYMSK_1_B 0x71198
5405#define _PLANE_KEYMSK_2_B 0x71298 5461#define _PLANE_KEYMSK_2_B 0x71298
5406#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B) 5462#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
5407#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B) 5463#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
5408#define PLANE_KEYMSK(pipe, plane) \ 5464#define PLANE_KEYMSK(pipe, plane) \
5409 _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe)) 5465 _MMIO_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
5410 5466
5411#define _PLANE_KEYMAX_1_B 0x711a0 5467#define _PLANE_KEYMAX_1_B 0x711a0
5412#define _PLANE_KEYMAX_2_B 0x712a0 5468#define _PLANE_KEYMAX_2_B 0x712a0
5413#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B) 5469#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
5414#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B) 5470#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
5415#define PLANE_KEYMAX(pipe, plane) \ 5471#define PLANE_KEYMAX(pipe, plane) \
5416 _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) 5472 _MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
5417 5473
5418#define _PLANE_BUF_CFG_1_B 0x7127c 5474#define _PLANE_BUF_CFG_1_B 0x7127c
5419#define _PLANE_BUF_CFG_2_B 0x7137c 5475#define _PLANE_BUF_CFG_2_B 0x7137c
@@ -5422,7 +5478,7 @@ enum skl_disp_power_wells {
5422#define _PLANE_BUF_CFG_2(pipe) \ 5478#define _PLANE_BUF_CFG_2(pipe) \
5423 _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B) 5479 _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
5424#define PLANE_BUF_CFG(pipe, plane) \ 5480#define PLANE_BUF_CFG(pipe, plane) \
5425 _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe)) 5481 _MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
5426 5482
5427#define _PLANE_NV12_BUF_CFG_1_B 0x71278 5483#define _PLANE_NV12_BUF_CFG_1_B 0x71278
5428#define _PLANE_NV12_BUF_CFG_2_B 0x71378 5484#define _PLANE_NV12_BUF_CFG_2_B 0x71378
@@ -5431,26 +5487,26 @@ enum skl_disp_power_wells {
5431#define _PLANE_NV12_BUF_CFG_2(pipe) \ 5487#define _PLANE_NV12_BUF_CFG_2(pipe) \
5432 _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B) 5488 _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
5433#define PLANE_NV12_BUF_CFG(pipe, plane) \ 5489#define PLANE_NV12_BUF_CFG(pipe, plane) \
5434 _PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe)) 5490 _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
5435 5491
5436/* SKL new cursor registers */ 5492/* SKL new cursor registers */
5437#define _CUR_BUF_CFG_A 0x7017c 5493#define _CUR_BUF_CFG_A 0x7017c
5438#define _CUR_BUF_CFG_B 0x7117c 5494#define _CUR_BUF_CFG_B 0x7117c
5439#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B) 5495#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
5440 5496
5441/* VBIOS regs */ 5497/* VBIOS regs */
5442#define VGACNTRL 0x71400 5498#define VGACNTRL _MMIO(0x71400)
5443# define VGA_DISP_DISABLE (1 << 31) 5499# define VGA_DISP_DISABLE (1 << 31)
5444# define VGA_2X_MODE (1 << 30) 5500# define VGA_2X_MODE (1 << 30)
5445# define VGA_PIPE_B_SELECT (1 << 29) 5501# define VGA_PIPE_B_SELECT (1 << 29)
5446 5502
5447#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) 5503#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
5448 5504
5449/* Ironlake */ 5505/* Ironlake */
5450 5506
5451#define CPU_VGACNTRL 0x41000 5507#define CPU_VGACNTRL _MMIO(0x41000)
5452 5508
5453#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 5509#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
5454#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) 5510#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
5455#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */ 5511#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
5456#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */ 5512#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
@@ -5463,26 +5519,26 @@ enum skl_disp_power_wells {
5463#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0) 5519#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
5464 5520
5465/* refresh rate hardware control */ 5521/* refresh rate hardware control */
5466#define RR_HW_CTL 0x45300 5522#define RR_HW_CTL _MMIO(0x45300)
5467#define RR_HW_LOW_POWER_FRAMES_MASK 0xff 5523#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
5468#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 5524#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
5469 5525
5470#define FDI_PLL_BIOS_0 0x46000 5526#define FDI_PLL_BIOS_0 _MMIO(0x46000)
5471#define FDI_PLL_FB_CLOCK_MASK 0xff 5527#define FDI_PLL_FB_CLOCK_MASK 0xff
5472#define FDI_PLL_BIOS_1 0x46004 5528#define FDI_PLL_BIOS_1 _MMIO(0x46004)
5473#define FDI_PLL_BIOS_2 0x46008 5529#define FDI_PLL_BIOS_2 _MMIO(0x46008)
5474#define DISPLAY_PORT_PLL_BIOS_0 0x4600c 5530#define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c)
5475#define DISPLAY_PORT_PLL_BIOS_1 0x46010 5531#define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010)
5476#define DISPLAY_PORT_PLL_BIOS_2 0x46014 5532#define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014)
5477 5533
5478#define PCH_3DCGDIS0 0x46020 5534#define PCH_3DCGDIS0 _MMIO(0x46020)
5479# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 5535# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
5480# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 5536# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
5481 5537
5482#define PCH_3DCGDIS1 0x46024 5538#define PCH_3DCGDIS1 _MMIO(0x46024)
5483# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) 5539# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
5484 5540
5485#define FDI_PLL_FREQ_CTL 0x46030 5541#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
5486#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) 5542#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
5487#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 5543#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
5488#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 5544#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -5519,14 +5575,14 @@ enum skl_disp_power_wells {
5519#define _PIPEB_LINK_M2 0x61048 5575#define _PIPEB_LINK_M2 0x61048
5520#define _PIPEB_LINK_N2 0x6104c 5576#define _PIPEB_LINK_N2 0x6104c
5521 5577
5522#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) 5578#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
5523#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) 5579#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
5524#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) 5580#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
5525#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) 5581#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
5526#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) 5582#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
5527#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) 5583#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
5528#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) 5584#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
5529#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) 5585#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
5530 5586
5531/* CPU panel fitter */ 5587/* CPU panel fitter */
5532/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 5588/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -5549,11 +5605,11 @@ enum skl_disp_power_wells {
5549#define _PFA_HSCALE 0x68090 5605#define _PFA_HSCALE 0x68090
5550#define _PFB_HSCALE 0x68890 5606#define _PFB_HSCALE 0x68890
5551 5607
5552#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) 5608#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
5553#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) 5609#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
5554#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) 5610#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
5555#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) 5611#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
5556#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) 5612#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
5557 5613
5558#define _PSA_CTL 0x68180 5614#define _PSA_CTL 0x68180
5559#define _PSB_CTL 0x68980 5615#define _PSB_CTL 0x68980
@@ -5563,9 +5619,9 @@ enum skl_disp_power_wells {
5563#define _PSA_WIN_POS 0x68170 5619#define _PSA_WIN_POS 0x68170
5564#define _PSB_WIN_POS 0x68970 5620#define _PSB_WIN_POS 0x68970
5565 5621
5566#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL) 5622#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL)
5567#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ) 5623#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
5568#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS) 5624#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
5569 5625
5570/* 5626/*
5571 * Skylake scalers 5627 * Skylake scalers
@@ -5654,48 +5710,63 @@ enum skl_disp_power_wells {
5654#define _PS_ECC_STAT_1C 0x691D0 5710#define _PS_ECC_STAT_1C 0x691D0
5655 5711
5656#define _ID(id, a, b) ((a) + (id)*((b)-(a))) 5712#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
5657#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \ 5713#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
5658 _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ 5714 _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
5659 _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) 5715 _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
5660#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \ 5716#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
5661 _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ 5717 _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
5662 _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) 5718 _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
5663#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \ 5719#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
5664 _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ 5720 _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
5665 _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) 5721 _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
5666#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \ 5722#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
5667 _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ 5723 _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
5668 _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) 5724 _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
5669#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \ 5725#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
5670 _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ 5726 _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
5671 _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) 5727 _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
5672#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \ 5728#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
5673 _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ 5729 _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
5674 _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) 5730 _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
5675#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \ 5731#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
5676 _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ 5732 _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
5677 _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) 5733 _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
5678#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \ 5734#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
5679 _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ 5735 _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
5680 _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) 5736 _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
5681#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \ 5737#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
5682 _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ 5738 _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
5683 _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B) 5739 _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
5684 5740
5685/* legacy palette */ 5741/* legacy palette */
5686#define _LGC_PALETTE_A 0x4a000 5742#define _LGC_PALETTE_A 0x4a000
5687#define _LGC_PALETTE_B 0x4a800 5743#define _LGC_PALETTE_B 0x4a800
5688#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) 5744#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
5689 5745
5690#define _GAMMA_MODE_A 0x4a480 5746#define _GAMMA_MODE_A 0x4a480
5691#define _GAMMA_MODE_B 0x4ac80 5747#define _GAMMA_MODE_B 0x4ac80
5692#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) 5748#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
5693#define GAMMA_MODE_MODE_MASK (3 << 0) 5749#define GAMMA_MODE_MODE_MASK (3 << 0)
5694#define GAMMA_MODE_MODE_8BIT (0 << 0) 5750#define GAMMA_MODE_MODE_8BIT (0 << 0)
5695#define GAMMA_MODE_MODE_10BIT (1 << 0) 5751#define GAMMA_MODE_MODE_10BIT (1 << 0)
5696#define GAMMA_MODE_MODE_12BIT (2 << 0) 5752#define GAMMA_MODE_MODE_12BIT (2 << 0)
5697#define GAMMA_MODE_MODE_SPLIT (3 << 0) 5753#define GAMMA_MODE_MODE_SPLIT (3 << 0)
5698 5754
5755/* DMC/CSR */
5756#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
5757#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
5758#define CSR_HTP_ADDR_SKL 0x00500034
5759#define CSR_SSP_BASE _MMIO(0x8F074)
5760#define CSR_HTP_SKL _MMIO(0x8F004)
5761#define CSR_LAST_WRITE _MMIO(0x8F034)
5762#define CSR_LAST_WRITE_VALUE 0xc003b400
5763/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
5764#define CSR_MMIO_START_RANGE 0x80000
5765#define CSR_MMIO_END_RANGE 0x8FFFF
5766#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
5767#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
5768#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
5769
5699/* interrupts */ 5770/* interrupts */
5700#define DE_MASTER_IRQ_CONTROL (1 << 31) 5771#define DE_MASTER_IRQ_CONTROL (1 << 31)
5701#define DE_SPRITEB_FLIP_DONE (1 << 29) 5772#define DE_SPRITEB_FLIP_DONE (1 << 29)
@@ -5747,20 +5818,20 @@ enum skl_disp_power_wells {
5747#define DE_PIPEA_VBLANK_IVB (1<<0) 5818#define DE_PIPEA_VBLANK_IVB (1<<0)
5748#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5)) 5819#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
5749 5820
5750#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 5821#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
5751#define MASTER_INTERRUPT_ENABLE (1<<31) 5822#define MASTER_INTERRUPT_ENABLE (1<<31)
5752 5823
5753#define DEISR 0x44000 5824#define DEISR _MMIO(0x44000)
5754#define DEIMR 0x44004 5825#define DEIMR _MMIO(0x44004)
5755#define DEIIR 0x44008 5826#define DEIIR _MMIO(0x44008)
5756#define DEIER 0x4400c 5827#define DEIER _MMIO(0x4400c)
5757 5828
5758#define GTISR 0x44010 5829#define GTISR _MMIO(0x44010)
5759#define GTIMR 0x44014 5830#define GTIMR _MMIO(0x44014)
5760#define GTIIR 0x44018 5831#define GTIIR _MMIO(0x44018)
5761#define GTIER 0x4401c 5832#define GTIER _MMIO(0x4401c)
5762 5833
5763#define GEN8_MASTER_IRQ 0x44200 5834#define GEN8_MASTER_IRQ _MMIO(0x44200)
5764#define GEN8_MASTER_IRQ_CONTROL (1<<31) 5835#define GEN8_MASTER_IRQ_CONTROL (1<<31)
5765#define GEN8_PCU_IRQ (1<<30) 5836#define GEN8_PCU_IRQ (1<<30)
5766#define GEN8_DE_PCH_IRQ (1<<23) 5837#define GEN8_DE_PCH_IRQ (1<<23)
@@ -5777,10 +5848,10 @@ enum skl_disp_power_wells {
5777#define GEN8_GT_BCS_IRQ (1<<1) 5848#define GEN8_GT_BCS_IRQ (1<<1)
5778#define GEN8_GT_RCS_IRQ (1<<0) 5849#define GEN8_GT_RCS_IRQ (1<<0)
5779 5850
5780#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which))) 5851#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
5781#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which))) 5852#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
5782#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) 5853#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
5783#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) 5854#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
5784 5855
5785#define GEN8_RCS_IRQ_SHIFT 0 5856#define GEN8_RCS_IRQ_SHIFT 0
5786#define GEN8_BCS_IRQ_SHIFT 16 5857#define GEN8_BCS_IRQ_SHIFT 16
@@ -5789,10 +5860,10 @@ enum skl_disp_power_wells {
5789#define GEN8_VECS_IRQ_SHIFT 0 5860#define GEN8_VECS_IRQ_SHIFT 0
5790#define GEN8_WD_IRQ_SHIFT 16 5861#define GEN8_WD_IRQ_SHIFT 16
5791 5862
5792#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) 5863#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
5793#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) 5864#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
5794#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe))) 5865#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
5795#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe))) 5866#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
5796#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31) 5867#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
5797#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29) 5868#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
5798#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) 5869#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
@@ -5825,10 +5896,10 @@ enum skl_disp_power_wells {
5825 GEN9_PIPE_PLANE2_FAULT | \ 5896 GEN9_PIPE_PLANE2_FAULT | \
5826 GEN9_PIPE_PLANE1_FAULT) 5897 GEN9_PIPE_PLANE1_FAULT)
5827 5898
5828#define GEN8_DE_PORT_ISR 0x44440 5899#define GEN8_DE_PORT_ISR _MMIO(0x44440)
5829#define GEN8_DE_PORT_IMR 0x44444 5900#define GEN8_DE_PORT_IMR _MMIO(0x44444)
5830#define GEN8_DE_PORT_IIR 0x44448 5901#define GEN8_DE_PORT_IIR _MMIO(0x44448)
5831#define GEN8_DE_PORT_IER 0x4444c 5902#define GEN8_DE_PORT_IER _MMIO(0x4444c)
5832#define GEN9_AUX_CHANNEL_D (1 << 27) 5903#define GEN9_AUX_CHANNEL_D (1 << 27)
5833#define GEN9_AUX_CHANNEL_C (1 << 26) 5904#define GEN9_AUX_CHANNEL_C (1 << 26)
5834#define GEN9_AUX_CHANNEL_B (1 << 25) 5905#define GEN9_AUX_CHANNEL_B (1 << 25)
@@ -5842,23 +5913,23 @@ enum skl_disp_power_wells {
5842#define BXT_DE_PORT_GMBUS (1 << 1) 5913#define BXT_DE_PORT_GMBUS (1 << 1)
5843#define GEN8_AUX_CHANNEL_A (1 << 0) 5914#define GEN8_AUX_CHANNEL_A (1 << 0)
5844 5915
5845#define GEN8_DE_MISC_ISR 0x44460 5916#define GEN8_DE_MISC_ISR _MMIO(0x44460)
5846#define GEN8_DE_MISC_IMR 0x44464 5917#define GEN8_DE_MISC_IMR _MMIO(0x44464)
5847#define GEN8_DE_MISC_IIR 0x44468 5918#define GEN8_DE_MISC_IIR _MMIO(0x44468)
5848#define GEN8_DE_MISC_IER 0x4446c 5919#define GEN8_DE_MISC_IER _MMIO(0x4446c)
5849#define GEN8_DE_MISC_GSE (1 << 27) 5920#define GEN8_DE_MISC_GSE (1 << 27)
5850 5921
5851#define GEN8_PCU_ISR 0x444e0 5922#define GEN8_PCU_ISR _MMIO(0x444e0)
5852#define GEN8_PCU_IMR 0x444e4 5923#define GEN8_PCU_IMR _MMIO(0x444e4)
5853#define GEN8_PCU_IIR 0x444e8 5924#define GEN8_PCU_IIR _MMIO(0x444e8)
5854#define GEN8_PCU_IER 0x444ec 5925#define GEN8_PCU_IER _MMIO(0x444ec)
5855 5926
5856#define ILK_DISPLAY_CHICKEN2 0x42004 5927#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
5857/* Required on all Ironlake and Sandybridge according to the B-Spec. */ 5928/* Required on all Ironlake and Sandybridge according to the B-Spec. */
5858#define ILK_ELPIN_409_SELECT (1 << 25) 5929#define ILK_ELPIN_409_SELECT (1 << 25)
5859#define ILK_DPARB_GATE (1<<22) 5930#define ILK_DPARB_GATE (1<<22)
5860#define ILK_VSDPFD_FULL (1<<21) 5931#define ILK_VSDPFD_FULL (1<<21)
5861#define FUSE_STRAP 0x42014 5932#define FUSE_STRAP _MMIO(0x42014)
5862#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) 5933#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
5863#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) 5934#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
5864#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) 5935#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
@@ -5867,18 +5938,18 @@ enum skl_disp_power_wells {
5867#define HSW_CDCLK_LIMIT (1 << 24) 5938#define HSW_CDCLK_LIMIT (1 << 24)
5868#define ILK_DESKTOP (1 << 23) 5939#define ILK_DESKTOP (1 << 23)
5869 5940
5870#define ILK_DSPCLK_GATE_D 0x42020 5941#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
5871#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) 5942#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
5872#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) 5943#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
5873#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) 5944#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
5874#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) 5945#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
5875#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) 5946#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
5876 5947
5877#define IVB_CHICKEN3 0x4200c 5948#define IVB_CHICKEN3 _MMIO(0x4200c)
5878# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) 5949# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
5879# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) 5950# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
5880 5951
5881#define CHICKEN_PAR1_1 0x42080 5952#define CHICKEN_PAR1_1 _MMIO(0x42080)
5882#define DPA_MASK_VBLANK_SRD (1 << 15) 5953#define DPA_MASK_VBLANK_SRD (1 << 15)
5883#define FORCE_ARB_IDLE_PLANES (1 << 14) 5954#define FORCE_ARB_IDLE_PLANES (1 << 14)
5884 5955
@@ -5886,70 +5957,70 @@ enum skl_disp_power_wells {
5886#define _CHICKEN_PIPESL_1_B 0x420b4 5957#define _CHICKEN_PIPESL_1_B 0x420b4
5887#define HSW_FBCQ_DIS (1 << 22) 5958#define HSW_FBCQ_DIS (1 << 22)
5888#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) 5959#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
5889#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) 5960#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
5890 5961
5891#define DISP_ARB_CTL 0x45000 5962#define DISP_ARB_CTL _MMIO(0x45000)
5892#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 5963#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
5893#define DISP_FBC_WM_DIS (1<<15) 5964#define DISP_FBC_WM_DIS (1<<15)
5894#define DISP_ARB_CTL2 0x45004 5965#define DISP_ARB_CTL2 _MMIO(0x45004)
5895#define DISP_DATA_PARTITION_5_6 (1<<6) 5966#define DISP_DATA_PARTITION_5_6 (1<<6)
5896#define DBUF_CTL 0x45008 5967#define DBUF_CTL _MMIO(0x45008)
5897#define DBUF_POWER_REQUEST (1<<31) 5968#define DBUF_POWER_REQUEST (1<<31)
5898#define DBUF_POWER_STATE (1<<30) 5969#define DBUF_POWER_STATE (1<<30)
5899#define GEN7_MSG_CTL 0x45010 5970#define GEN7_MSG_CTL _MMIO(0x45010)
5900#define WAIT_FOR_PCH_RESET_ACK (1<<1) 5971#define WAIT_FOR_PCH_RESET_ACK (1<<1)
5901#define WAIT_FOR_PCH_FLR_ACK (1<<0) 5972#define WAIT_FOR_PCH_FLR_ACK (1<<0)
5902#define HSW_NDE_RSTWRN_OPT 0x46408 5973#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
5903#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 5974#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
5904 5975
5905#define SKL_DFSM 0x51000 5976#define SKL_DFSM _MMIO(0x51000)
5906#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) 5977#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
5907#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) 5978#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
5908#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) 5979#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
5909#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) 5980#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
5910#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) 5981#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
5911 5982
5912#define FF_SLICE_CS_CHICKEN2 0x20e4 5983#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
5913#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) 5984#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
5914 5985
5915/* GEN7 chicken */ 5986/* GEN7 chicken */
5916#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 5987#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
5917# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 5988# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
5918# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) 5989# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
5919#define COMMON_SLICE_CHICKEN2 0x7014 5990#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
5920# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 5991# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
5921 5992
5922#define HIZ_CHICKEN 0x7018 5993#define HIZ_CHICKEN _MMIO(0x7018)
5923# define CHV_HZ_8X8_MODE_IN_1X (1<<15) 5994# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
5924# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3) 5995# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
5925 5996
5926#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308 5997#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
5927#define DISABLE_PIXEL_MASK_CAMMING (1<<14) 5998#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
5928 5999
5929#define GEN7_L3SQCREG1 0xB010 6000#define GEN7_L3SQCREG1 _MMIO(0xB010)
5930#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 6001#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
5931 6002
5932#define GEN8_L3SQCREG1 0xB100 6003#define GEN8_L3SQCREG1 _MMIO(0xB100)
5933#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 6004#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
5934 6005
5935#define GEN7_L3CNTLREG1 0xB01C 6006#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
5936#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 6007#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
5937#define GEN7_L3AGDIS (1<<19) 6008#define GEN7_L3AGDIS (1<<19)
5938#define GEN7_L3CNTLREG2 0xB020 6009#define GEN7_L3CNTLREG2 _MMIO(0xB020)
5939#define GEN7_L3CNTLREG3 0xB024 6010#define GEN7_L3CNTLREG3 _MMIO(0xB024)
5940 6011
5941#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 6012#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
5942#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 6013#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
5943 6014
5944#define GEN7_L3SQCREG4 0xb034 6015#define GEN7_L3SQCREG4 _MMIO(0xb034)
5945#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) 6016#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
5946 6017
5947#define GEN8_L3SQCREG4 0xb118 6018#define GEN8_L3SQCREG4 _MMIO(0xb118)
5948#define GEN8_LQSC_RO_PERF_DIS (1<<27) 6019#define GEN8_LQSC_RO_PERF_DIS (1<<27)
5949#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21) 6020#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
5950 6021
5951/* GEN8 chicken */ 6022/* GEN8 chicken */
5952#define HDC_CHICKEN0 0x7300 6023#define HDC_CHICKEN0 _MMIO(0x7300)
5953#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) 6024#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
5954#define HDC_FENCE_DEST_SLM_DISABLE (1<<14) 6025#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
5955#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) 6026#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@@ -5958,17 +6029,17 @@ enum skl_disp_power_wells {
5958#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) 6029#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
5959 6030
5960/* GEN9 chicken */ 6031/* GEN9 chicken */
5961#define SLICE_ECO_CHICKEN0 0x7308 6032#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
5962#define PIXEL_MASK_CAMMING_DISABLE (1 << 14) 6033#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
5963 6034
5964/* WaCatErrorRejectionIssue */ 6035/* WaCatErrorRejectionIssue */
5965#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 6036#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
5966#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 6037#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
5967 6038
5968#define HSW_SCRATCH1 0xb038 6039#define HSW_SCRATCH1 _MMIO(0xb038)
5969#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) 6040#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
5970 6041
5971#define BDW_SCRATCH1 0xb11c 6042#define BDW_SCRATCH1 _MMIO(0xb11c)
5972#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2) 6043#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
5973 6044
5974/* PCH */ 6045/* PCH */
@@ -6062,12 +6133,12 @@ enum skl_disp_power_wells {
6062 SDE_FDI_RXB_CPT | \ 6133 SDE_FDI_RXB_CPT | \
6063 SDE_FDI_RXA_CPT) 6134 SDE_FDI_RXA_CPT)
6064 6135
6065#define SDEISR 0xc4000 6136#define SDEISR _MMIO(0xc4000)
6066#define SDEIMR 0xc4004 6137#define SDEIMR _MMIO(0xc4004)
6067#define SDEIIR 0xc4008 6138#define SDEIIR _MMIO(0xc4008)
6068#define SDEIER 0xc400c 6139#define SDEIER _MMIO(0xc400c)
6069 6140
6070#define SERR_INT 0xc4040 6141#define SERR_INT _MMIO(0xc4040)
6071#define SERR_INT_POISON (1<<31) 6142#define SERR_INT_POISON (1<<31)
6072#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) 6143#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
6073#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) 6144#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
@@ -6075,7 +6146,7 @@ enum skl_disp_power_wells {
6075#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) 6146#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
6076 6147
6077/* digital port hotplug */ 6148/* digital port hotplug */
6078#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 6149#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
6079#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */ 6150#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
6080#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */ 6151#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
6081#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */ 6152#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
@@ -6112,42 +6183,42 @@ enum skl_disp_power_wells {
6112#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 6183#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
6113#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) 6184#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
6114 6185
6115#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */ 6186#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
6116#define PORTE_HOTPLUG_ENABLE (1 << 4) 6187#define PORTE_HOTPLUG_ENABLE (1 << 4)
6117#define PORTE_HOTPLUG_STATUS_MASK (3 << 0) 6188#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
6118#define PORTE_HOTPLUG_NO_DETECT (0 << 0) 6189#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
6119#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) 6190#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
6120#define PORTE_HOTPLUG_LONG_DETECT (2 << 0) 6191#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
6121 6192
6122#define PCH_GPIOA 0xc5010 6193#define PCH_GPIOA _MMIO(0xc5010)
6123#define PCH_GPIOB 0xc5014 6194#define PCH_GPIOB _MMIO(0xc5014)
6124#define PCH_GPIOC 0xc5018 6195#define PCH_GPIOC _MMIO(0xc5018)
6125#define PCH_GPIOD 0xc501c 6196#define PCH_GPIOD _MMIO(0xc501c)
6126#define PCH_GPIOE 0xc5020 6197#define PCH_GPIOE _MMIO(0xc5020)
6127#define PCH_GPIOF 0xc5024 6198#define PCH_GPIOF _MMIO(0xc5024)
6128 6199
6129#define PCH_GMBUS0 0xc5100 6200#define PCH_GMBUS0 _MMIO(0xc5100)
6130#define PCH_GMBUS1 0xc5104 6201#define PCH_GMBUS1 _MMIO(0xc5104)
6131#define PCH_GMBUS2 0xc5108 6202#define PCH_GMBUS2 _MMIO(0xc5108)
6132#define PCH_GMBUS3 0xc510c 6203#define PCH_GMBUS3 _MMIO(0xc510c)
6133#define PCH_GMBUS4 0xc5110 6204#define PCH_GMBUS4 _MMIO(0xc5110)
6134#define PCH_GMBUS5 0xc5120 6205#define PCH_GMBUS5 _MMIO(0xc5120)
6135 6206
6136#define _PCH_DPLL_A 0xc6014 6207#define _PCH_DPLL_A 0xc6014
6137#define _PCH_DPLL_B 0xc6018 6208#define _PCH_DPLL_B 0xc6018
6138#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) 6209#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
6139 6210
6140#define _PCH_FPA0 0xc6040 6211#define _PCH_FPA0 0xc6040
6141#define FP_CB_TUNE (0x3<<22) 6212#define FP_CB_TUNE (0x3<<22)
6142#define _PCH_FPA1 0xc6044 6213#define _PCH_FPA1 0xc6044
6143#define _PCH_FPB0 0xc6048 6214#define _PCH_FPB0 0xc6048
6144#define _PCH_FPB1 0xc604c 6215#define _PCH_FPB1 0xc604c
6145#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) 6216#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
6146#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) 6217#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
6147 6218
6148#define PCH_DPLL_TEST 0xc606c 6219#define PCH_DPLL_TEST _MMIO(0xc606c)
6149 6220
6150#define PCH_DREF_CONTROL 0xC6200 6221#define PCH_DREF_CONTROL _MMIO(0xC6200)
6151#define DREF_CONTROL_MASK 0x7fc3 6222#define DREF_CONTROL_MASK 0x7fc3
6152#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13) 6223#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
6153#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13) 6224#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
@@ -6170,19 +6241,19 @@ enum skl_disp_power_wells {
6170#define DREF_SSC4_DISABLE (0) 6241#define DREF_SSC4_DISABLE (0)
6171#define DREF_SSC4_ENABLE (1) 6242#define DREF_SSC4_ENABLE (1)
6172 6243
6173#define PCH_RAWCLK_FREQ 0xc6204 6244#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
6174#define FDL_TP1_TIMER_SHIFT 12 6245#define FDL_TP1_TIMER_SHIFT 12
6175#define FDL_TP1_TIMER_MASK (3<<12) 6246#define FDL_TP1_TIMER_MASK (3<<12)
6176#define FDL_TP2_TIMER_SHIFT 10 6247#define FDL_TP2_TIMER_SHIFT 10
6177#define FDL_TP2_TIMER_MASK (3<<10) 6248#define FDL_TP2_TIMER_MASK (3<<10)
6178#define RAWCLK_FREQ_MASK 0x3ff 6249#define RAWCLK_FREQ_MASK 0x3ff
6179 6250
6180#define PCH_DPLL_TMR_CFG 0xc6208 6251#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
6181 6252
6182#define PCH_SSC4_PARMS 0xc6210 6253#define PCH_SSC4_PARMS _MMIO(0xc6210)
6183#define PCH_SSC4_AUX_PARMS 0xc6214 6254#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
6184 6255
6185#define PCH_DPLL_SEL 0xc7000 6256#define PCH_DPLL_SEL _MMIO(0xc7000)
6186#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4)) 6257#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
6187#define TRANS_DPLLA_SEL(pipe) 0 6258#define TRANS_DPLLA_SEL(pipe) 0
6188#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3)) 6259#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
@@ -6230,79 +6301,73 @@ enum skl_disp_power_wells {
6230#define _VIDEO_DIP_DATA_B 0xe1208 6301#define _VIDEO_DIP_DATA_B 0xe1208
6231#define _VIDEO_DIP_GCP_B 0xe1210 6302#define _VIDEO_DIP_GCP_B 0xe1210
6232 6303
6233#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) 6304#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
6234#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 6305#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
6235#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 6306#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
6236 6307
6237/* Per-transcoder DIP controls (VLV) */ 6308/* Per-transcoder DIP controls (VLV) */
6238#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) 6309#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
6239#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) 6310#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
6240#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) 6311#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
6241 6312
6242#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) 6313#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
6243#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) 6314#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
6244#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) 6315#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
6245 6316
6246#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) 6317#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
6247#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) 6318#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
6248#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) 6319#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
6249 6320
6250#define VLV_TVIDEO_DIP_CTL(pipe) \ 6321#define VLV_TVIDEO_DIP_CTL(pipe) \
6251 _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \ 6322 _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
6252 VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C) 6323 _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
6253#define VLV_TVIDEO_DIP_DATA(pipe) \ 6324#define VLV_TVIDEO_DIP_DATA(pipe) \
6254 _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \ 6325 _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
6255 VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C) 6326 _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
6256#define VLV_TVIDEO_DIP_GCP(pipe) \ 6327#define VLV_TVIDEO_DIP_GCP(pipe) \
6257 _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ 6328 _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
6258 VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C) 6329 _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
6259 6330
6260/* Haswell DIP controls */ 6331/* Haswell DIP controls */
6261#define HSW_VIDEO_DIP_CTL_A 0x60200 6332
6262#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220 6333#define _HSW_VIDEO_DIP_CTL_A 0x60200
6263#define HSW_VIDEO_DIP_VS_DATA_A 0x60260 6334#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
6264#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 6335#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
6265#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 6336#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
6266#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320 6337#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
6267#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240 6338#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
6268#define HSW_VIDEO_DIP_VS_ECC_A 0x60280 6339#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
6269#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 6340#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
6270#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300 6341#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
6271#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344 6342#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
6272#define HSW_VIDEO_DIP_GCP_A 0x60210 6343#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
6273 6344#define _HSW_VIDEO_DIP_GCP_A 0x60210
6274#define HSW_VIDEO_DIP_CTL_B 0x61200 6345
6275#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220 6346#define _HSW_VIDEO_DIP_CTL_B 0x61200
6276#define HSW_VIDEO_DIP_VS_DATA_B 0x61260 6347#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
6277#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 6348#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
6278#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 6349#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
6279#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320 6350#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
6280#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240 6351#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
6281#define HSW_VIDEO_DIP_VS_ECC_B 0x61280 6352#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
6282#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 6353#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
6283#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300 6354#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
6284#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344 6355#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
6285#define HSW_VIDEO_DIP_GCP_B 0x61210 6356#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
6286 6357#define _HSW_VIDEO_DIP_GCP_B 0x61210
6287#define HSW_TVIDEO_DIP_CTL(trans) \ 6358
6288 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) 6359#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
6289#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \ 6360#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
6290 (_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4) 6361#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
6291#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \ 6362#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
6292 (_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4) 6363#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
6293#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \ 6364#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
6294 (_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4) 6365
6295#define HSW_TVIDEO_DIP_GCP(trans) \ 6366#define _HSW_STEREO_3D_CTL_A 0x70020
6296 _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) 6367#define S3D_ENABLE (1<<31)
6297#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \ 6368#define _HSW_STEREO_3D_CTL_B 0x71020
6298 (_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4) 6369
6299 6370#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
6300#define HSW_STEREO_3D_CTL_A 0x70020
6301#define S3D_ENABLE (1<<31)
6302#define HSW_STEREO_3D_CTL_B 0x71020
6303
6304#define HSW_STEREO_3D_CTL(trans) \
6305 _PIPE2(trans, HSW_STEREO_3D_CTL_A)
6306 6371
6307#define _PCH_TRANS_HTOTAL_B 0xe1000 6372#define _PCH_TRANS_HTOTAL_B 0xe1000
6308#define _PCH_TRANS_HBLANK_B 0xe1004 6373#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -6310,16 +6375,15 @@ enum skl_disp_power_wells {
6310#define _PCH_TRANS_VTOTAL_B 0xe100c 6375#define _PCH_TRANS_VTOTAL_B 0xe100c
6311#define _PCH_TRANS_VBLANK_B 0xe1010 6376#define _PCH_TRANS_VBLANK_B 0xe1010
6312#define _PCH_TRANS_VSYNC_B 0xe1014 6377#define _PCH_TRANS_VSYNC_B 0xe1014
6313#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 6378#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
6314 6379
6315#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) 6380#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
6316#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) 6381#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
6317#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) 6382#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
6318#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) 6383#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
6319#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) 6384#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
6320#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) 6385#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
6321#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \ 6386#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
6322 _PCH_TRANS_VSYNCSHIFT_B)
6323 6387
6324#define _PCH_TRANSB_DATA_M1 0xe1030 6388#define _PCH_TRANSB_DATA_M1 0xe1030
6325#define _PCH_TRANSB_DATA_N1 0xe1034 6389#define _PCH_TRANSB_DATA_N1 0xe1034
@@ -6330,19 +6394,19 @@ enum skl_disp_power_wells {
6330#define _PCH_TRANSB_LINK_M2 0xe1048 6394#define _PCH_TRANSB_LINK_M2 0xe1048
6331#define _PCH_TRANSB_LINK_N2 0xe104c 6395#define _PCH_TRANSB_LINK_N2 0xe104c
6332 6396
6333#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) 6397#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
6334#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) 6398#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
6335#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) 6399#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
6336#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) 6400#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
6337#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) 6401#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
6338#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) 6402#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
6339#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) 6403#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
6340#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) 6404#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
6341 6405
6342#define _PCH_TRANSACONF 0xf0008 6406#define _PCH_TRANSACONF 0xf0008
6343#define _PCH_TRANSBCONF 0xf1008 6407#define _PCH_TRANSBCONF 0xf1008
6344#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) 6408#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
6345#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */ 6409#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
6346#define TRANS_DISABLE (0<<31) 6410#define TRANS_DISABLE (0<<31)
6347#define TRANS_ENABLE (1<<31) 6411#define TRANS_ENABLE (1<<31)
6348#define TRANS_STATE_MASK (1<<30) 6412#define TRANS_STATE_MASK (1<<30)
@@ -6363,47 +6427,47 @@ enum skl_disp_power_wells {
6363 6427
6364#define _TRANSA_CHICKEN1 0xf0060 6428#define _TRANSA_CHICKEN1 0xf0060
6365#define _TRANSB_CHICKEN1 0xf1060 6429#define _TRANSB_CHICKEN1 0xf1060
6366#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) 6430#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
6367#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10) 6431#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
6368#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) 6432#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
6369#define _TRANSA_CHICKEN2 0xf0064 6433#define _TRANSA_CHICKEN2 0xf0064
6370#define _TRANSB_CHICKEN2 0xf1064 6434#define _TRANSB_CHICKEN2 0xf1064
6371#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) 6435#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
6372#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) 6436#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
6373#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) 6437#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
6374#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27) 6438#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
6375#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26) 6439#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
6376#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25) 6440#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
6377 6441
6378#define SOUTH_CHICKEN1 0xc2000 6442#define SOUTH_CHICKEN1 _MMIO(0xc2000)
6379#define FDIA_PHASE_SYNC_SHIFT_OVR 19 6443#define FDIA_PHASE_SYNC_SHIFT_OVR 19
6380#define FDIA_PHASE_SYNC_SHIFT_EN 18 6444#define FDIA_PHASE_SYNC_SHIFT_EN 18
6381#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 6445#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
6382#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 6446#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
6383#define FDI_BC_BIFURCATION_SELECT (1 << 12) 6447#define FDI_BC_BIFURCATION_SELECT (1 << 12)
6384#define SPT_PWM_GRANULARITY (1<<0) 6448#define SPT_PWM_GRANULARITY (1<<0)
6385#define SOUTH_CHICKEN2 0xc2004 6449#define SOUTH_CHICKEN2 _MMIO(0xc2004)
6386#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) 6450#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
6387#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) 6451#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
6388#define LPT_PWM_GRANULARITY (1<<5) 6452#define LPT_PWM_GRANULARITY (1<<5)
6389#define DPLS_EDP_PPS_FIX_DIS (1<<0) 6453#define DPLS_EDP_PPS_FIX_DIS (1<<0)
6390 6454
6391#define _FDI_RXA_CHICKEN 0xc200c 6455#define _FDI_RXA_CHICKEN 0xc200c
6392#define _FDI_RXB_CHICKEN 0xc2010 6456#define _FDI_RXB_CHICKEN 0xc2010
6393#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) 6457#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
6394#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) 6458#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
6395#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 6459#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
6396 6460
6397#define SOUTH_DSPCLK_GATE_D 0xc2020 6461#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
6398#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) 6462#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
6399#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 6463#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
6400#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) 6464#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
6401#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) 6465#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
6402 6466
6403/* CPU: FDI_TX */ 6467/* CPU: FDI_TX */
6404#define _FDI_TXA_CTL 0x60100 6468#define _FDI_TXA_CTL 0x60100
6405#define _FDI_TXB_CTL 0x61100 6469#define _FDI_TXB_CTL 0x61100
6406#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) 6470#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
6407#define FDI_TX_DISABLE (0<<31) 6471#define FDI_TX_DISABLE (0<<31)
6408#define FDI_TX_ENABLE (1<<31) 6472#define FDI_TX_ENABLE (1<<31)
6409#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) 6473#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -6453,7 +6517,7 @@ enum skl_disp_power_wells {
6453/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 6517/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
6454#define _FDI_RXA_CTL 0xf000c 6518#define _FDI_RXA_CTL 0xf000c
6455#define _FDI_RXB_CTL 0xf100c 6519#define _FDI_RXB_CTL 0xf100c
6456#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 6520#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
6457#define FDI_RX_ENABLE (1<<31) 6521#define FDI_RX_ENABLE (1<<31)
6458/* train, dp width same as FDI_TX */ 6522/* train, dp width same as FDI_TX */
6459#define FDI_FS_ERRC_ENABLE (1<<27) 6523#define FDI_FS_ERRC_ENABLE (1<<27)
@@ -6489,14 +6553,14 @@ enum skl_disp_power_wells {
6489#define FDI_RX_TP1_TO_TP2_48 (2<<20) 6553#define FDI_RX_TP1_TO_TP2_48 (2<<20)
6490#define FDI_RX_TP1_TO_TP2_64 (3<<20) 6554#define FDI_RX_TP1_TO_TP2_64 (3<<20)
6491#define FDI_RX_FDI_DELAY_90 (0x90<<0) 6555#define FDI_RX_FDI_DELAY_90 (0x90<<0)
6492#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) 6556#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
6493 6557
6494#define _FDI_RXA_TUSIZE1 0xf0030 6558#define _FDI_RXA_TUSIZE1 0xf0030
6495#define _FDI_RXA_TUSIZE2 0xf0038 6559#define _FDI_RXA_TUSIZE2 0xf0038
6496#define _FDI_RXB_TUSIZE1 0xf1030 6560#define _FDI_RXB_TUSIZE1 0xf1030
6497#define _FDI_RXB_TUSIZE2 0xf1038 6561#define _FDI_RXB_TUSIZE2 0xf1038
6498#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 6562#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
6499#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 6563#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
6500 6564
6501/* FDI_RX interrupt register format */ 6565/* FDI_RX interrupt register format */
6502#define FDI_RX_INTER_LANE_ALIGN (1<<10) 6566#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -6511,44 +6575,41 @@ enum skl_disp_power_wells {
6511#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) 6575#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
6512#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) 6576#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
6513 6577
6514#define _FDI_RXA_IIR 0xf0014 6578#define _FDI_RXA_IIR 0xf0014
6515#define _FDI_RXA_IMR 0xf0018 6579#define _FDI_RXA_IMR 0xf0018
6516#define _FDI_RXB_IIR 0xf1014 6580#define _FDI_RXB_IIR 0xf1014
6517#define _FDI_RXB_IMR 0xf1018 6581#define _FDI_RXB_IMR 0xf1018
6518#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) 6582#define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
6519#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) 6583#define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
6520 6584
6521#define FDI_PLL_CTL_1 0xfe000 6585#define FDI_PLL_CTL_1 _MMIO(0xfe000)
6522#define FDI_PLL_CTL_2 0xfe004 6586#define FDI_PLL_CTL_2 _MMIO(0xfe004)
6523 6587
6524#define PCH_LVDS 0xe1180 6588#define PCH_LVDS _MMIO(0xe1180)
6525#define LVDS_DETECTED (1 << 1) 6589#define LVDS_DETECTED (1 << 1)
6526 6590
6527/* vlv has 2 sets of panel control regs. */ 6591/* vlv has 2 sets of panel control regs. */
6528#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 6592#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
6529#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 6593#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
6530#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 6594#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
6531#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) 6595#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
6532#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 6596#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
6533#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 6597#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
6534 6598
6535#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) 6599#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
6536#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) 6600#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
6537#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) 6601#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
6538#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) 6602#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
6539#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) 6603#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
6540 6604
6541#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS) 6605#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
6542#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL) 6606#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
6543#define VLV_PIPE_PP_ON_DELAYS(pipe) \ 6607#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
6544 _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS) 6608#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
6545#define VLV_PIPE_PP_OFF_DELAYS(pipe) \ 6609#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
6546 _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS) 6610
6547#define VLV_PIPE_PP_DIVISOR(pipe) \ 6611#define _PCH_PP_STATUS 0xc7200
6548 _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR) 6612#define _PCH_PP_CONTROL 0xc7204
6549
6550#define PCH_PP_STATUS 0xc7200
6551#define PCH_PP_CONTROL 0xc7204
6552#define PANEL_UNLOCK_REGS (0xabcd << 16) 6613#define PANEL_UNLOCK_REGS (0xabcd << 16)
6553#define PANEL_UNLOCK_MASK (0xffff << 16) 6614#define PANEL_UNLOCK_MASK (0xffff << 16)
6554#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0) 6615#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
@@ -6558,7 +6619,7 @@ enum skl_disp_power_wells {
6558#define PANEL_POWER_RESET (1 << 1) 6619#define PANEL_POWER_RESET (1 << 1)
6559#define PANEL_POWER_OFF (0 << 0) 6620#define PANEL_POWER_OFF (0 << 0)
6560#define PANEL_POWER_ON (1 << 0) 6621#define PANEL_POWER_ON (1 << 0)
6561#define PCH_PP_ON_DELAYS 0xc7208 6622#define _PCH_PP_ON_DELAYS 0xc7208
6562#define PANEL_PORT_SELECT_MASK (3 << 30) 6623#define PANEL_PORT_SELECT_MASK (3 << 30)
6563#define PANEL_PORT_SELECT_LVDS (0 << 30) 6624#define PANEL_PORT_SELECT_LVDS (0 << 30)
6564#define PANEL_PORT_SELECT_DPA (1 << 30) 6625#define PANEL_PORT_SELECT_DPA (1 << 30)
@@ -6569,52 +6630,64 @@ enum skl_disp_power_wells {
6569#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) 6630#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
6570#define PANEL_LIGHT_ON_DELAY_SHIFT 0 6631#define PANEL_LIGHT_ON_DELAY_SHIFT 0
6571 6632
6572#define PCH_PP_OFF_DELAYS 0xc720c 6633#define _PCH_PP_OFF_DELAYS 0xc720c
6573#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 6634#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
6574#define PANEL_POWER_DOWN_DELAY_SHIFT 16 6635#define PANEL_POWER_DOWN_DELAY_SHIFT 16
6575#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 6636#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
6576#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 6637#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
6577 6638
6578#define PCH_PP_DIVISOR 0xc7210 6639#define _PCH_PP_DIVISOR 0xc7210
6579#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) 6640#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
6580#define PP_REFERENCE_DIVIDER_SHIFT 8 6641#define PP_REFERENCE_DIVIDER_SHIFT 8
6581#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) 6642#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
6582#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 6643#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
6583 6644
6645#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS)
6646#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL)
6647#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS)
6648#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS)
6649#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR)
6650
6584/* BXT PPS changes - 2nd set of PPS registers */ 6651/* BXT PPS changes - 2nd set of PPS registers */
6585#define _BXT_PP_STATUS2 0xc7300 6652#define _BXT_PP_STATUS2 0xc7300
6586#define _BXT_PP_CONTROL2 0xc7304 6653#define _BXT_PP_CONTROL2 0xc7304
6587#define _BXT_PP_ON_DELAYS2 0xc7308 6654#define _BXT_PP_ON_DELAYS2 0xc7308
6588#define _BXT_PP_OFF_DELAYS2 0xc730c 6655#define _BXT_PP_OFF_DELAYS2 0xc730c
6589 6656
6590#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2) 6657#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
6591#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2) 6658#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
6592#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) 6659#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
6593#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) 6660#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
6594 6661
6595#define PCH_DP_B 0xe4100 6662#define _PCH_DP_B 0xe4100
6596#define PCH_DPB_AUX_CH_CTL 0xe4110 6663#define PCH_DP_B _MMIO(_PCH_DP_B)
6597#define PCH_DPB_AUX_CH_DATA1 0xe4114 6664#define _PCH_DPB_AUX_CH_CTL 0xe4110
6598#define PCH_DPB_AUX_CH_DATA2 0xe4118 6665#define _PCH_DPB_AUX_CH_DATA1 0xe4114
6599#define PCH_DPB_AUX_CH_DATA3 0xe411c 6666#define _PCH_DPB_AUX_CH_DATA2 0xe4118
6600#define PCH_DPB_AUX_CH_DATA4 0xe4120 6667#define _PCH_DPB_AUX_CH_DATA3 0xe411c
6601#define PCH_DPB_AUX_CH_DATA5 0xe4124 6668#define _PCH_DPB_AUX_CH_DATA4 0xe4120
6602 6669#define _PCH_DPB_AUX_CH_DATA5 0xe4124
6603#define PCH_DP_C 0xe4200 6670
6604#define PCH_DPC_AUX_CH_CTL 0xe4210 6671#define _PCH_DP_C 0xe4200
6605#define PCH_DPC_AUX_CH_DATA1 0xe4214 6672#define PCH_DP_C _MMIO(_PCH_DP_C)
6606#define PCH_DPC_AUX_CH_DATA2 0xe4218 6673#define _PCH_DPC_AUX_CH_CTL 0xe4210
6607#define PCH_DPC_AUX_CH_DATA3 0xe421c 6674#define _PCH_DPC_AUX_CH_DATA1 0xe4214
6608#define PCH_DPC_AUX_CH_DATA4 0xe4220 6675#define _PCH_DPC_AUX_CH_DATA2 0xe4218
6609#define PCH_DPC_AUX_CH_DATA5 0xe4224 6676#define _PCH_DPC_AUX_CH_DATA3 0xe421c
6610 6677#define _PCH_DPC_AUX_CH_DATA4 0xe4220
6611#define PCH_DP_D 0xe4300 6678#define _PCH_DPC_AUX_CH_DATA5 0xe4224
6612#define PCH_DPD_AUX_CH_CTL 0xe4310 6679
6613#define PCH_DPD_AUX_CH_DATA1 0xe4314 6680#define _PCH_DP_D 0xe4300
6614#define PCH_DPD_AUX_CH_DATA2 0xe4318 6681#define PCH_DP_D _MMIO(_PCH_DP_D)
6615#define PCH_DPD_AUX_CH_DATA3 0xe431c 6682#define _PCH_DPD_AUX_CH_CTL 0xe4310
6616#define PCH_DPD_AUX_CH_DATA4 0xe4320 6683#define _PCH_DPD_AUX_CH_DATA1 0xe4314
6617#define PCH_DPD_AUX_CH_DATA5 0xe4324 6684#define _PCH_DPD_AUX_CH_DATA2 0xe4318
6685#define _PCH_DPD_AUX_CH_DATA3 0xe431c
6686#define _PCH_DPD_AUX_CH_DATA4 0xe4320
6687#define _PCH_DPD_AUX_CH_DATA5 0xe4324
6688
6689#define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
6690#define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
6618 6691
6619/* CPT */ 6692/* CPT */
6620#define PORT_TRANS_A_SEL_CPT 0 6693#define PORT_TRANS_A_SEL_CPT 0
@@ -6627,10 +6700,10 @@ enum skl_disp_power_wells {
6627#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24) 6700#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
6628#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16) 6701#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
6629 6702
6630#define TRANS_DP_CTL_A 0xe0300 6703#define _TRANS_DP_CTL_A 0xe0300
6631#define TRANS_DP_CTL_B 0xe1300 6704#define _TRANS_DP_CTL_B 0xe1300
6632#define TRANS_DP_CTL_C 0xe2300 6705#define _TRANS_DP_CTL_C 0xe2300
6633#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) 6706#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
6634#define TRANS_DP_OUTPUT_ENABLE (1<<31) 6707#define TRANS_DP_OUTPUT_ENABLE (1<<31)
6635#define TRANS_DP_PORT_SEL_B (0<<29) 6708#define TRANS_DP_PORT_SEL_B (0<<29)
6636#define TRANS_DP_PORT_SEL_C (1<<29) 6709#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -6683,40 +6756,40 @@ enum skl_disp_power_wells {
6683 6756
6684#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) 6757#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
6685 6758
6686#define VLV_PMWGICZ 0x1300a4 6759#define VLV_PMWGICZ _MMIO(0x1300a4)
6687 6760
6688#define FORCEWAKE 0xA18C 6761#define FORCEWAKE _MMIO(0xA18C)
6689#define FORCEWAKE_VLV 0x1300b0 6762#define FORCEWAKE_VLV _MMIO(0x1300b0)
6690#define FORCEWAKE_ACK_VLV 0x1300b4 6763#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
6691#define FORCEWAKE_MEDIA_VLV 0x1300b8 6764#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
6692#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc 6765#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
6693#define FORCEWAKE_ACK_HSW 0x130044 6766#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
6694#define FORCEWAKE_ACK 0x130090 6767#define FORCEWAKE_ACK _MMIO(0x130090)
6695#define VLV_GTLC_WAKE_CTRL 0x130090 6768#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
6696#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25) 6769#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
6697#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24) 6770#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
6698#define VLV_GTLC_ALLOWWAKEREQ (1 << 0) 6771#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
6699 6772
6700#define VLV_GTLC_PW_STATUS 0x130094 6773#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
6701#define VLV_GTLC_ALLOWWAKEACK (1 << 0) 6774#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
6702#define VLV_GTLC_ALLOWWAKEERR (1 << 1) 6775#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
6703#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) 6776#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
6704#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 6777#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
6705#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 6778#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
6706#define FORCEWAKE_MEDIA_GEN9 0xa270 6779#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
6707#define FORCEWAKE_RENDER_GEN9 0xa278 6780#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
6708#define FORCEWAKE_BLITTER_GEN9 0xa188 6781#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188)
6709#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88 6782#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
6710#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84 6783#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
6711#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044 6784#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
6712#define FORCEWAKE_KERNEL 0x1 6785#define FORCEWAKE_KERNEL 0x1
6713#define FORCEWAKE_USER 0x2 6786#define FORCEWAKE_USER 0x2
6714#define FORCEWAKE_MT_ACK 0x130040 6787#define FORCEWAKE_MT_ACK _MMIO(0x130040)
6715#define ECOBUS 0xa180 6788#define ECOBUS _MMIO(0xa180)
6716#define FORCEWAKE_MT_ENABLE (1<<5) 6789#define FORCEWAKE_MT_ENABLE (1<<5)
6717#define VLV_SPAREG2H 0xA194 6790#define VLV_SPAREG2H _MMIO(0xA194)
6718 6791
6719#define GTFIFODBG 0x120000 6792#define GTFIFODBG _MMIO(0x120000)
6720#define GT_FIFO_SBDROPERR (1<<6) 6793#define GT_FIFO_SBDROPERR (1<<6)
6721#define GT_FIFO_BLOBDROPERR (1<<5) 6794#define GT_FIFO_BLOBDROPERR (1<<5)
6722#define GT_FIFO_SB_READ_ABORTERR (1<<4) 6795#define GT_FIFO_SB_READ_ABORTERR (1<<4)
@@ -6725,23 +6798,23 @@ enum skl_disp_power_wells {
6725#define GT_FIFO_IAWRERR (1<<1) 6798#define GT_FIFO_IAWRERR (1<<1)
6726#define GT_FIFO_IARDERR (1<<0) 6799#define GT_FIFO_IARDERR (1<<0)
6727 6800
6728#define GTFIFOCTL 0x120008 6801#define GTFIFOCTL _MMIO(0x120008)
6729#define GT_FIFO_FREE_ENTRIES_MASK 0x7f 6802#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
6730#define GT_FIFO_NUM_RESERVED_ENTRIES 20 6803#define GT_FIFO_NUM_RESERVED_ENTRIES 20
6731#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) 6804#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
6732#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) 6805#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
6733 6806
6734#define HSW_IDICR 0x9008 6807#define HSW_IDICR _MMIO(0x9008)
6735#define IDIHASHMSK(x) (((x) & 0x3f) << 16) 6808#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
6736#define HSW_EDRAM_PRESENT 0x120010 6809#define HSW_EDRAM_PRESENT _MMIO(0x120010)
6737#define EDRAM_ENABLED 0x1 6810#define EDRAM_ENABLED 0x1
6738 6811
6739#define GEN6_UCGCTL1 0x9400 6812#define GEN6_UCGCTL1 _MMIO(0x9400)
6740# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) 6813# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
6741# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) 6814# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
6742# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) 6815# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
6743 6816
6744#define GEN6_UCGCTL2 0x9404 6817#define GEN6_UCGCTL2 _MMIO(0x9404)
6745# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31) 6818# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
6746# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) 6819# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
6747# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) 6820# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
@@ -6749,30 +6822,30 @@ enum skl_disp_power_wells {
6749# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) 6822# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
6750# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) 6823# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
6751 6824
6752#define GEN6_UCGCTL3 0x9408 6825#define GEN6_UCGCTL3 _MMIO(0x9408)
6753 6826
6754#define GEN7_UCGCTL4 0x940c 6827#define GEN7_UCGCTL4 _MMIO(0x940c)
6755#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) 6828#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
6756 6829
6757#define GEN6_RCGCTL1 0x9410 6830#define GEN6_RCGCTL1 _MMIO(0x9410)
6758#define GEN6_RCGCTL2 0x9414 6831#define GEN6_RCGCTL2 _MMIO(0x9414)
6759#define GEN6_RSTCTL 0x9420 6832#define GEN6_RSTCTL _MMIO(0x9420)
6760 6833
6761#define GEN8_UCGCTL6 0x9430 6834#define GEN8_UCGCTL6 _MMIO(0x9430)
6762#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24) 6835#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
6763#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) 6836#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
6764#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28) 6837#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
6765 6838
6766#define GEN6_GFXPAUSE 0xA000 6839#define GEN6_GFXPAUSE _MMIO(0xA000)
6767#define GEN6_RPNSWREQ 0xA008 6840#define GEN6_RPNSWREQ _MMIO(0xA008)
6768#define GEN6_TURBO_DISABLE (1<<31) 6841#define GEN6_TURBO_DISABLE (1<<31)
6769#define GEN6_FREQUENCY(x) ((x)<<25) 6842#define GEN6_FREQUENCY(x) ((x)<<25)
6770#define HSW_FREQUENCY(x) ((x)<<24) 6843#define HSW_FREQUENCY(x) ((x)<<24)
6771#define GEN9_FREQUENCY(x) ((x)<<23) 6844#define GEN9_FREQUENCY(x) ((x)<<23)
6772#define GEN6_OFFSET(x) ((x)<<19) 6845#define GEN6_OFFSET(x) ((x)<<19)
6773#define GEN6_AGGRESSIVE_TURBO (0<<15) 6846#define GEN6_AGGRESSIVE_TURBO (0<<15)
6774#define GEN6_RC_VIDEO_FREQ 0xA00C 6847#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
6775#define GEN6_RC_CONTROL 0xA090 6848#define GEN6_RC_CONTROL _MMIO(0xA090)
6776#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) 6849#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
6777#define GEN6_RC_CTL_RC6p_ENABLE (1<<17) 6850#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
6778#define GEN6_RC_CTL_RC6_ENABLE (1<<18) 6851#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
@@ -6782,16 +6855,16 @@ enum skl_disp_power_wells {
6782#define GEN7_RC_CTL_TO_MODE (1<<28) 6855#define GEN7_RC_CTL_TO_MODE (1<<28)
6783#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) 6856#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
6784#define GEN6_RC_CTL_HW_ENABLE (1<<31) 6857#define GEN6_RC_CTL_HW_ENABLE (1<<31)
6785#define GEN6_RP_DOWN_TIMEOUT 0xA010 6858#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
6786#define GEN6_RP_INTERRUPT_LIMITS 0xA014 6859#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
6787#define GEN6_RPSTAT1 0xA01C 6860#define GEN6_RPSTAT1 _MMIO(0xA01C)
6788#define GEN6_CAGF_SHIFT 8 6861#define GEN6_CAGF_SHIFT 8
6789#define HSW_CAGF_SHIFT 7 6862#define HSW_CAGF_SHIFT 7
6790#define GEN9_CAGF_SHIFT 23 6863#define GEN9_CAGF_SHIFT 23
6791#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) 6864#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
6792#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) 6865#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
6793#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) 6866#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
6794#define GEN6_RP_CONTROL 0xA024 6867#define GEN6_RP_CONTROL _MMIO(0xA024)
6795#define GEN6_RP_MEDIA_TURBO (1<<11) 6868#define GEN6_RP_MEDIA_TURBO (1<<11)
6796#define GEN6_RP_MEDIA_MODE_MASK (3<<9) 6869#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
6797#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) 6870#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
@@ -6805,53 +6878,53 @@ enum skl_disp_power_wells {
6805#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 6878#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
6806#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0) 6879#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
6807#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 6880#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
6808#define GEN6_RP_UP_THRESHOLD 0xA02C 6881#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
6809#define GEN6_RP_DOWN_THRESHOLD 0xA030 6882#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
6810#define GEN6_RP_CUR_UP_EI 0xA050 6883#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
6811#define GEN6_CURICONT_MASK 0xffffff 6884#define GEN6_CURICONT_MASK 0xffffff
6812#define GEN6_RP_CUR_UP 0xA054 6885#define GEN6_RP_CUR_UP _MMIO(0xA054)
6813#define GEN6_CURBSYTAVG_MASK 0xffffff 6886#define GEN6_CURBSYTAVG_MASK 0xffffff
6814#define GEN6_RP_PREV_UP 0xA058 6887#define GEN6_RP_PREV_UP _MMIO(0xA058)
6815#define GEN6_RP_CUR_DOWN_EI 0xA05C 6888#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C)
6816#define GEN6_CURIAVG_MASK 0xffffff 6889#define GEN6_CURIAVG_MASK 0xffffff
6817#define GEN6_RP_CUR_DOWN 0xA060 6890#define GEN6_RP_CUR_DOWN _MMIO(0xA060)
6818#define GEN6_RP_PREV_DOWN 0xA064 6891#define GEN6_RP_PREV_DOWN _MMIO(0xA064)
6819#define GEN6_RP_UP_EI 0xA068 6892#define GEN6_RP_UP_EI _MMIO(0xA068)
6820#define GEN6_RP_DOWN_EI 0xA06C 6893#define GEN6_RP_DOWN_EI _MMIO(0xA06C)
6821#define GEN6_RP_IDLE_HYSTERSIS 0xA070 6894#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070)
6822#define GEN6_RPDEUHWTC 0xA080 6895#define GEN6_RPDEUHWTC _MMIO(0xA080)
6823#define GEN6_RPDEUC 0xA084 6896#define GEN6_RPDEUC _MMIO(0xA084)
6824#define GEN6_RPDEUCSW 0xA088 6897#define GEN6_RPDEUCSW _MMIO(0xA088)
6825#define GEN6_RC_STATE 0xA094 6898#define GEN6_RC_STATE _MMIO(0xA094)
6826#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 6899#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
6827#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C 6900#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
6828#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 6901#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
6829#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 6902#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
6830#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC 6903#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
6831#define GEN6_RC_SLEEP 0xA0B0 6904#define GEN6_RC_SLEEP _MMIO(0xA0B0)
6832#define GEN6_RCUBMABDTMR 0xA0B0 6905#define GEN6_RCUBMABDTMR _MMIO(0xA0B0)
6833#define GEN6_RC1e_THRESHOLD 0xA0B4 6906#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4)
6834#define GEN6_RC6_THRESHOLD 0xA0B8 6907#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8)
6835#define GEN6_RC6p_THRESHOLD 0xA0BC 6908#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC)
6836#define VLV_RCEDATA 0xA0BC 6909#define VLV_RCEDATA _MMIO(0xA0BC)
6837#define GEN6_RC6pp_THRESHOLD 0xA0C0 6910#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
6838#define GEN6_PMINTRMSK 0xA168 6911#define GEN6_PMINTRMSK _MMIO(0xA168)
6839#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) 6912#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
6840#define VLV_PWRDWNUPCTL 0xA294 6913#define VLV_PWRDWNUPCTL _MMIO(0xA294)
6841#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4 6914#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
6842#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8 6915#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
6843#define GEN9_PG_ENABLE 0xA210 6916#define GEN9_PG_ENABLE _MMIO(0xA210)
6844#define GEN9_RENDER_PG_ENABLE (1<<0) 6917#define GEN9_RENDER_PG_ENABLE (1<<0)
6845#define GEN9_MEDIA_PG_ENABLE (1<<1) 6918#define GEN9_MEDIA_PG_ENABLE (1<<1)
6846 6919
6847#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C) 6920#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
6848#define PIXEL_OVERLAP_CNT_MASK (3 << 30) 6921#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
6849#define PIXEL_OVERLAP_CNT_SHIFT 30 6922#define PIXEL_OVERLAP_CNT_SHIFT 30
6850 6923
6851#define GEN6_PMISR 0x44020 6924#define GEN6_PMISR _MMIO(0x44020)
6852#define GEN6_PMIMR 0x44024 /* rps_lock */ 6925#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
6853#define GEN6_PMIIR 0x44028 6926#define GEN6_PMIIR _MMIO(0x44028)
6854#define GEN6_PMIER 0x4402C 6927#define GEN6_PMIER _MMIO(0x4402C)
6855#define GEN6_PM_MBOX_EVENT (1<<25) 6928#define GEN6_PM_MBOX_EVENT (1<<25)
6856#define GEN6_PM_THERMAL_EVENT (1<<24) 6929#define GEN6_PM_THERMAL_EVENT (1<<24)
6857#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) 6930#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
@@ -6863,30 +6936,30 @@ enum skl_disp_power_wells {
6863 GEN6_PM_RP_DOWN_THRESHOLD | \ 6936 GEN6_PM_RP_DOWN_THRESHOLD | \
6864 GEN6_PM_RP_DOWN_TIMEOUT) 6937 GEN6_PM_RP_DOWN_TIMEOUT)
6865 6938
6866#define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4) 6939#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4)
6867#define GEN7_GT_SCRATCH_REG_NUM 8 6940#define GEN7_GT_SCRATCH_REG_NUM 8
6868 6941
6869#define VLV_GTLC_SURVIVABILITY_REG 0x130098 6942#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
6870#define VLV_GFX_CLK_STATUS_BIT (1<<3) 6943#define VLV_GFX_CLK_STATUS_BIT (1<<3)
6871#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) 6944#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
6872 6945
6873#define GEN6_GT_GFX_RC6_LOCKED 0x138104 6946#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
6874#define VLV_COUNTER_CONTROL 0x138104 6947#define VLV_COUNTER_CONTROL _MMIO(0x138104)
6875#define VLV_COUNT_RANGE_HIGH (1<<15) 6948#define VLV_COUNT_RANGE_HIGH (1<<15)
6876#define VLV_MEDIA_RC0_COUNT_EN (1<<5) 6949#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
6877#define VLV_RENDER_RC0_COUNT_EN (1<<4) 6950#define VLV_RENDER_RC0_COUNT_EN (1<<4)
6878#define VLV_MEDIA_RC6_COUNT_EN (1<<1) 6951#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
6879#define VLV_RENDER_RC6_COUNT_EN (1<<0) 6952#define VLV_RENDER_RC6_COUNT_EN (1<<0)
6880#define GEN6_GT_GFX_RC6 0x138108 6953#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
6881#define VLV_GT_RENDER_RC6 0x138108 6954#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
6882#define VLV_GT_MEDIA_RC6 0x13810C 6955#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
6883 6956
6884#define GEN6_GT_GFX_RC6p 0x13810C 6957#define GEN6_GT_GFX_RC6p _MMIO(0x13810C)
6885#define GEN6_GT_GFX_RC6pp 0x138110 6958#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
6886#define VLV_RENDER_C0_COUNT 0x138118 6959#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
6887#define VLV_MEDIA_C0_COUNT 0x13811C 6960#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
6888 6961
6889#define GEN6_PCODE_MAILBOX 0x138124 6962#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
6890#define GEN6_PCODE_READY (1<<31) 6963#define GEN6_PCODE_READY (1<<31)
6891#define GEN6_PCODE_WRITE_RC6VIDS 0x4 6964#define GEN6_PCODE_WRITE_RC6VIDS 0x4
6892#define GEN6_PCODE_READ_RC6VIDS 0x5 6965#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -6909,12 +6982,12 @@ enum skl_disp_power_wells {
6909#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 6982#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
6910#define DISPLAY_IPS_CONTROL 0x19 6983#define DISPLAY_IPS_CONTROL 0x19
6911#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A 6984#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
6912#define GEN6_PCODE_DATA 0x138128 6985#define GEN6_PCODE_DATA _MMIO(0x138128)
6913#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 6986#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
6914#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 6987#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
6915#define GEN6_PCODE_DATA1 0x13812C 6988#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
6916 6989
6917#define GEN6_GT_CORE_STATUS 0x138060 6990#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
6918#define GEN6_CORE_CPD_STATE_MASK (7<<4) 6991#define GEN6_CORE_CPD_STATE_MASK (7<<4)
6919#define GEN6_RCn_MASK 7 6992#define GEN6_RCn_MASK 7
6920#define GEN6_RC0 0 6993#define GEN6_RC0 0
@@ -6922,26 +6995,26 @@ enum skl_disp_power_wells {
6922#define GEN6_RC6 3 6995#define GEN6_RC6 3
6923#define GEN6_RC7 4 6996#define GEN6_RC7 4
6924 6997
6925#define GEN8_GT_SLICE_INFO 0x138064 6998#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
6926#define GEN8_LSLICESTAT_MASK 0x7 6999#define GEN8_LSLICESTAT_MASK 0x7
6927 7000
6928#define CHV_POWER_SS0_SIG1 0xa720 7001#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
6929#define CHV_POWER_SS1_SIG1 0xa728 7002#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
6930#define CHV_SS_PG_ENABLE (1<<1) 7003#define CHV_SS_PG_ENABLE (1<<1)
6931#define CHV_EU08_PG_ENABLE (1<<9) 7004#define CHV_EU08_PG_ENABLE (1<<9)
6932#define CHV_EU19_PG_ENABLE (1<<17) 7005#define CHV_EU19_PG_ENABLE (1<<17)
6933#define CHV_EU210_PG_ENABLE (1<<25) 7006#define CHV_EU210_PG_ENABLE (1<<25)
6934 7007
6935#define CHV_POWER_SS0_SIG2 0xa724 7008#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
6936#define CHV_POWER_SS1_SIG2 0xa72c 7009#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
6937#define CHV_EU311_PG_ENABLE (1<<1) 7010#define CHV_EU311_PG_ENABLE (1<<1)
6938 7011
6939#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4) 7012#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
6940#define GEN9_PGCTL_SLICE_ACK (1 << 0) 7013#define GEN9_PGCTL_SLICE_ACK (1 << 0)
6941#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2)) 7014#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
6942 7015
6943#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8) 7016#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
6944#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8) 7017#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
6945#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) 7018#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
6946#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) 7019#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
6947#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) 7020#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
@@ -6951,18 +7024,17 @@ enum skl_disp_power_wells {
6951#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12) 7024#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
6952#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) 7025#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
6953 7026
6954#define GEN7_MISCCPCTL (0x9424) 7027#define GEN7_MISCCPCTL _MMIO(0x9424)
6955#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) 7028#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
6956#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2) 7029#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
6957#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4) 7030#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
6958#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6) 7031#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
6959 7032
6960#define GEN8_GARBCNTL 0xB004 7033#define GEN8_GARBCNTL _MMIO(0xB004)
6961#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) 7034#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
6962 7035
6963/* IVYBRIDGE DPF */ 7036/* IVYBRIDGE DPF */
6964#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ 7037#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
6965#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
6966#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) 7038#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
6967#define GEN7_PARITY_ERROR_VALID (1<<13) 7039#define GEN7_PARITY_ERROR_VALID (1<<13)
6968#define GEN7_L3CDERRST1_BANK_MASK (3<<11) 7040#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
@@ -6975,119 +7047,102 @@ enum skl_disp_power_wells {
6975 ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) 7047 ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
6976#define GEN7_L3CDERRST1_ENABLE (1<<7) 7048#define GEN7_L3CDERRST1_ENABLE (1<<7)
6977 7049
6978#define GEN7_L3LOG_BASE 0xB070 7050#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
6979#define HSW_L3LOG_BASE_SLICE1 0xB270
6980#define GEN7_L3LOG_SIZE 0x80 7051#define GEN7_L3LOG_SIZE 0x80
6981 7052
6982#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ 7053#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
6983#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 7054#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
6984#define GEN7_MAX_PS_THREAD_DEP (8<<12) 7055#define GEN7_MAX_PS_THREAD_DEP (8<<12)
6985#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) 7056#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
6986#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4) 7057#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
6987#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 7058#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
6988 7059
6989#define GEN9_HALF_SLICE_CHICKEN5 0xe188 7060#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
6990#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) 7061#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
6991#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3) 7062#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
6992 7063
6993#define GEN8_ROW_CHICKEN 0xe4f0 7064#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
6994#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) 7065#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
6995#define STALL_DOP_GATING_DISABLE (1<<5) 7066#define STALL_DOP_GATING_DISABLE (1<<5)
6996 7067
6997#define GEN7_ROW_CHICKEN2 0xe4f4 7068#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
6998#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 7069#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
6999#define DOP_CLOCK_GATING_DISABLE (1<<0) 7070#define DOP_CLOCK_GATING_DISABLE (1<<0)
7000 7071
7001#define HSW_ROW_CHICKEN3 0xe49c 7072#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
7002#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) 7073#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
7003 7074
7004#define HALF_SLICE_CHICKEN2 0xe180 7075#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
7005#define GEN8_ST_PO_DISABLE (1<<13) 7076#define GEN8_ST_PO_DISABLE (1<<13)
7006 7077
7007#define HALF_SLICE_CHICKEN3 0xe184 7078#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
7008#define HSW_SAMPLE_C_PERFORMANCE (1<<9) 7079#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
7009#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 7080#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
7010#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) 7081#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
7011#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 7082#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
7012 7083
7013#define GEN9_HALF_SLICE_CHICKEN7 0xe194 7084#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
7014#define GEN9_ENABLE_YV12_BUGFIX (1<<4) 7085#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
7015 7086
7016/* Audio */ 7087/* Audio */
7017#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) 7088#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
7018#define INTEL_AUDIO_DEVCL 0x808629FB 7089#define INTEL_AUDIO_DEVCL 0x808629FB
7019#define INTEL_AUDIO_DEVBLC 0x80862801 7090#define INTEL_AUDIO_DEVBLC 0x80862801
7020#define INTEL_AUDIO_DEVCTG 0x80862802 7091#define INTEL_AUDIO_DEVCTG 0x80862802
7021 7092
7022#define G4X_AUD_CNTL_ST 0x620B4 7093#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
7023#define G4X_ELDV_DEVCL_DEVBLC (1 << 13) 7094#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
7024#define G4X_ELDV_DEVCTG (1 << 14) 7095#define G4X_ELDV_DEVCTG (1 << 14)
7025#define G4X_ELD_ADDR_MASK (0xf << 5) 7096#define G4X_ELD_ADDR_MASK (0xf << 5)
7026#define G4X_ELD_ACK (1 << 4) 7097#define G4X_ELD_ACK (1 << 4)
7027#define G4X_HDMIW_HDMIEDID 0x6210C 7098#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
7028 7099
7029#define _IBX_HDMIW_HDMIEDID_A 0xE2050 7100#define _IBX_HDMIW_HDMIEDID_A 0xE2050
7030#define _IBX_HDMIW_HDMIEDID_B 0xE2150 7101#define _IBX_HDMIW_HDMIEDID_B 0xE2150
7031#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 7102#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
7032 _IBX_HDMIW_HDMIEDID_A, \ 7103 _IBX_HDMIW_HDMIEDID_B)
7033 _IBX_HDMIW_HDMIEDID_B)
7034#define _IBX_AUD_CNTL_ST_A 0xE20B4 7104#define _IBX_AUD_CNTL_ST_A 0xE20B4
7035#define _IBX_AUD_CNTL_ST_B 0xE21B4 7105#define _IBX_AUD_CNTL_ST_B 0xE21B4
7036#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 7106#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
7037 _IBX_AUD_CNTL_ST_A, \ 7107 _IBX_AUD_CNTL_ST_B)
7038 _IBX_AUD_CNTL_ST_B)
7039#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10) 7108#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
7040#define IBX_ELD_ADDRESS_MASK (0x1f << 5) 7109#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
7041#define IBX_ELD_ACK (1 << 4) 7110#define IBX_ELD_ACK (1 << 4)
7042#define IBX_AUD_CNTL_ST2 0xE20C0 7111#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
7043#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4)) 7112#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
7044#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4)) 7113#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
7045 7114
7046#define _CPT_HDMIW_HDMIEDID_A 0xE5050 7115#define _CPT_HDMIW_HDMIEDID_A 0xE5050
7047#define _CPT_HDMIW_HDMIEDID_B 0xE5150 7116#define _CPT_HDMIW_HDMIEDID_B 0xE5150
7048#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 7117#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
7049 _CPT_HDMIW_HDMIEDID_A, \
7050 _CPT_HDMIW_HDMIEDID_B)
7051#define _CPT_AUD_CNTL_ST_A 0xE50B4 7118#define _CPT_AUD_CNTL_ST_A 0xE50B4
7052#define _CPT_AUD_CNTL_ST_B 0xE51B4 7119#define _CPT_AUD_CNTL_ST_B 0xE51B4
7053#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 7120#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
7054 _CPT_AUD_CNTL_ST_A, \ 7121#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
7055 _CPT_AUD_CNTL_ST_B)
7056#define CPT_AUD_CNTRL_ST2 0xE50C0
7057 7122
7058#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) 7123#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
7059#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) 7124#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
7060#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 7125#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
7061 _VLV_HDMIW_HDMIEDID_A, \
7062 _VLV_HDMIW_HDMIEDID_B)
7063#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) 7126#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
7064#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) 7127#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
7065#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 7128#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
7066 _VLV_AUD_CNTL_ST_A, \ 7129#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
7067 _VLV_AUD_CNTL_ST_B)
7068#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
7069 7130
7070/* These are the 4 32-bit write offset registers for each stream 7131/* These are the 4 32-bit write offset registers for each stream
7071 * output buffer. It determines the offset from the 7132 * output buffer. It determines the offset from the
7072 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 7133 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
7073 */ 7134 */
7074#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) 7135#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
7075 7136
7076#define _IBX_AUD_CONFIG_A 0xe2000 7137#define _IBX_AUD_CONFIG_A 0xe2000
7077#define _IBX_AUD_CONFIG_B 0xe2100 7138#define _IBX_AUD_CONFIG_B 0xe2100
7078#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ 7139#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
7079 _IBX_AUD_CONFIG_A, \
7080 _IBX_AUD_CONFIG_B)
7081#define _CPT_AUD_CONFIG_A 0xe5000 7140#define _CPT_AUD_CONFIG_A 0xe5000
7082#define _CPT_AUD_CONFIG_B 0xe5100 7141#define _CPT_AUD_CONFIG_B 0xe5100
7083#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ 7142#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
7084 _CPT_AUD_CONFIG_A, \
7085 _CPT_AUD_CONFIG_B)
7086#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) 7143#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
7087#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) 7144#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
7088#define VLV_AUD_CFG(pipe) _PIPE(pipe, \ 7145#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
7089 _VLV_AUD_CONFIG_A, \
7090 _VLV_AUD_CONFIG_B)
7091 7146
7092#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 7147#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
7093#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 7148#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
@@ -7112,72 +7167,62 @@ enum skl_disp_power_wells {
7112/* HSW Audio */ 7167/* HSW Audio */
7113#define _HSW_AUD_CONFIG_A 0x65000 7168#define _HSW_AUD_CONFIG_A 0x65000
7114#define _HSW_AUD_CONFIG_B 0x65100 7169#define _HSW_AUD_CONFIG_B 0x65100
7115#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ 7170#define HSW_AUD_CFG(pipe) _MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
7116 _HSW_AUD_CONFIG_A, \
7117 _HSW_AUD_CONFIG_B)
7118 7171
7119#define _HSW_AUD_MISC_CTRL_A 0x65010 7172#define _HSW_AUD_MISC_CTRL_A 0x65010
7120#define _HSW_AUD_MISC_CTRL_B 0x65110 7173#define _HSW_AUD_MISC_CTRL_B 0x65110
7121#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ 7174#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
7122 _HSW_AUD_MISC_CTRL_A, \
7123 _HSW_AUD_MISC_CTRL_B)
7124 7175
7125#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 7176#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
7126#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 7177#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
7127#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ 7178#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
7128 _HSW_AUD_DIP_ELD_CTRL_ST_A, \
7129 _HSW_AUD_DIP_ELD_CTRL_ST_B)
7130 7179
7131/* Audio Digital Converter */ 7180/* Audio Digital Converter */
7132#define _HSW_AUD_DIG_CNVT_1 0x65080 7181#define _HSW_AUD_DIG_CNVT_1 0x65080
7133#define _HSW_AUD_DIG_CNVT_2 0x65180 7182#define _HSW_AUD_DIG_CNVT_2 0x65180
7134#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ 7183#define AUD_DIG_CNVT(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
7135 _HSW_AUD_DIG_CNVT_1, \
7136 _HSW_AUD_DIG_CNVT_2)
7137#define DIP_PORT_SEL_MASK 0x3 7184#define DIP_PORT_SEL_MASK 0x3
7138 7185
7139#define _HSW_AUD_EDID_DATA_A 0x65050 7186#define _HSW_AUD_EDID_DATA_A 0x65050
7140#define _HSW_AUD_EDID_DATA_B 0x65150 7187#define _HSW_AUD_EDID_DATA_B 0x65150
7141#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ 7188#define HSW_AUD_EDID_DATA(pipe) _MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
7142 _HSW_AUD_EDID_DATA_A, \
7143 _HSW_AUD_EDID_DATA_B)
7144 7189
7145#define HSW_AUD_PIPE_CONV_CFG 0x6507c 7190#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
7146#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 7191#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
7147#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4)) 7192#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
7148#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4)) 7193#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
7149#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4)) 7194#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
7150#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4)) 7195#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
7151 7196
7152#define HSW_AUD_CHICKENBIT 0x65f10 7197#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
7153#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) 7198#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
7154 7199
7155/* HSW Power Wells */ 7200/* HSW Power Wells */
7156#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ 7201#define HSW_PWR_WELL_BIOS _MMIO(0x45400) /* CTL1 */
7157#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ 7202#define HSW_PWR_WELL_DRIVER _MMIO(0x45404) /* CTL2 */
7158#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ 7203#define HSW_PWR_WELL_KVMR _MMIO(0x45408) /* CTL3 */
7159#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ 7204#define HSW_PWR_WELL_DEBUG _MMIO(0x4540C) /* CTL4 */
7160#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) 7205#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
7161#define HSW_PWR_WELL_STATE_ENABLED (1<<30) 7206#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
7162#define HSW_PWR_WELL_CTL5 0x45410 7207#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
7163#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) 7208#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
7164#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) 7209#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
7165#define HSW_PWR_WELL_FORCE_ON (1<<19) 7210#define HSW_PWR_WELL_FORCE_ON (1<<19)
7166#define HSW_PWR_WELL_CTL6 0x45414 7211#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
7167 7212
7168/* SKL Fuse Status */ 7213/* SKL Fuse Status */
7169#define SKL_FUSE_STATUS 0x42000 7214#define SKL_FUSE_STATUS _MMIO(0x42000)
7170#define SKL_FUSE_DOWNLOAD_STATUS (1<<31) 7215#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
7171#define SKL_FUSE_PG0_DIST_STATUS (1<<27) 7216#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
7172#define SKL_FUSE_PG1_DIST_STATUS (1<<26) 7217#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
7173#define SKL_FUSE_PG2_DIST_STATUS (1<<25) 7218#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
7174 7219
7175/* Per-pipe DDI Function Control */ 7220/* Per-pipe DDI Function Control */
7176#define TRANS_DDI_FUNC_CTL_A 0x60400 7221#define _TRANS_DDI_FUNC_CTL_A 0x60400
7177#define TRANS_DDI_FUNC_CTL_B 0x61400 7222#define _TRANS_DDI_FUNC_CTL_B 0x61400
7178#define TRANS_DDI_FUNC_CTL_C 0x62400 7223#define _TRANS_DDI_FUNC_CTL_C 0x62400
7179#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 7224#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
7180#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) 7225#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
7181 7226
7182#define TRANS_DDI_FUNC_ENABLE (1<<31) 7227#define TRANS_DDI_FUNC_ENABLE (1<<31)
7183/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 7228/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
@@ -7207,9 +7252,9 @@ enum skl_disp_power_wells {
7207#define TRANS_DDI_BFI_ENABLE (1<<4) 7252#define TRANS_DDI_BFI_ENABLE (1<<4)
7208 7253
7209/* DisplayPort Transport Control */ 7254/* DisplayPort Transport Control */
7210#define DP_TP_CTL_A 0x64040 7255#define _DP_TP_CTL_A 0x64040
7211#define DP_TP_CTL_B 0x64140 7256#define _DP_TP_CTL_B 0x64140
7212#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) 7257#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
7213#define DP_TP_CTL_ENABLE (1<<31) 7258#define DP_TP_CTL_ENABLE (1<<31)
7214#define DP_TP_CTL_MODE_SST (0<<27) 7259#define DP_TP_CTL_MODE_SST (0<<27)
7215#define DP_TP_CTL_MODE_MST (1<<27) 7260#define DP_TP_CTL_MODE_MST (1<<27)
@@ -7225,9 +7270,9 @@ enum skl_disp_power_wells {
7225#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) 7270#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
7226 7271
7227/* DisplayPort Transport Status */ 7272/* DisplayPort Transport Status */
7228#define DP_TP_STATUS_A 0x64044 7273#define _DP_TP_STATUS_A 0x64044
7229#define DP_TP_STATUS_B 0x64144 7274#define _DP_TP_STATUS_B 0x64144
7230#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) 7275#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
7231#define DP_TP_STATUS_IDLE_DONE (1<<25) 7276#define DP_TP_STATUS_IDLE_DONE (1<<25)
7232#define DP_TP_STATUS_ACT_SENT (1<<24) 7277#define DP_TP_STATUS_ACT_SENT (1<<24)
7233#define DP_TP_STATUS_MODE_STATUS_MST (1<<23) 7278#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
@@ -7237,9 +7282,9 @@ enum skl_disp_power_wells {
7237#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0) 7282#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
7238 7283
7239/* DDI Buffer Control */ 7284/* DDI Buffer Control */
7240#define DDI_BUF_CTL_A 0x64000 7285#define _DDI_BUF_CTL_A 0x64000
7241#define DDI_BUF_CTL_B 0x64100 7286#define _DDI_BUF_CTL_B 0x64100
7242#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) 7287#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
7243#define DDI_BUF_CTL_ENABLE (1<<31) 7288#define DDI_BUF_CTL_ENABLE (1<<31)
7244#define DDI_BUF_TRANS_SELECT(n) ((n) << 24) 7289#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
7245#define DDI_BUF_EMP_MASK (0xf<<24) 7290#define DDI_BUF_EMP_MASK (0xf<<24)
@@ -7252,17 +7297,17 @@ enum skl_disp_power_wells {
7252#define DDI_INIT_DISPLAY_DETECTED (1<<0) 7297#define DDI_INIT_DISPLAY_DETECTED (1<<0)
7253 7298
7254/* DDI Buffer Translations */ 7299/* DDI Buffer Translations */
7255#define DDI_BUF_TRANS_A 0x64E00 7300#define _DDI_BUF_TRANS_A 0x64E00
7256#define DDI_BUF_TRANS_B 0x64E60 7301#define _DDI_BUF_TRANS_B 0x64E60
7257#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8) 7302#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
7258#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4) 7303#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
7259 7304
7260/* Sideband Interface (SBI) is programmed indirectly, via 7305/* Sideband Interface (SBI) is programmed indirectly, via
7261 * SBI_ADDR, which contains the register offset; and SBI_DATA, 7306 * SBI_ADDR, which contains the register offset; and SBI_DATA,
7262 * which contains the payload */ 7307 * which contains the payload */
7263#define SBI_ADDR 0xC6000 7308#define SBI_ADDR _MMIO(0xC6000)
7264#define SBI_DATA 0xC6004 7309#define SBI_DATA _MMIO(0xC6004)
7265#define SBI_CTL_STAT 0xC6008 7310#define SBI_CTL_STAT _MMIO(0xC6008)
7266#define SBI_CTL_DEST_ICLK (0x0<<16) 7311#define SBI_CTL_DEST_ICLK (0x0<<16)
7267#define SBI_CTL_DEST_MPHY (0x1<<16) 7312#define SBI_CTL_DEST_MPHY (0x1<<16)
7268#define SBI_CTL_OP_IORD (0x2<<8) 7313#define SBI_CTL_OP_IORD (0x2<<8)
@@ -7293,12 +7338,12 @@ enum skl_disp_power_wells {
7293#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) 7338#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
7294 7339
7295/* LPT PIXCLK_GATE */ 7340/* LPT PIXCLK_GATE */
7296#define PIXCLK_GATE 0xC6020 7341#define PIXCLK_GATE _MMIO(0xC6020)
7297#define PIXCLK_GATE_UNGATE (1<<0) 7342#define PIXCLK_GATE_UNGATE (1<<0)
7298#define PIXCLK_GATE_GATE (0<<0) 7343#define PIXCLK_GATE_GATE (0<<0)
7299 7344
7300/* SPLL */ 7345/* SPLL */
7301#define SPLL_CTL 0x46020 7346#define SPLL_CTL _MMIO(0x46020)
7302#define SPLL_PLL_ENABLE (1<<31) 7347#define SPLL_PLL_ENABLE (1<<31)
7303#define SPLL_PLL_SSC (1<<28) 7348#define SPLL_PLL_SSC (1<<28)
7304#define SPLL_PLL_NON_SSC (2<<28) 7349#define SPLL_PLL_NON_SSC (2<<28)
@@ -7310,9 +7355,9 @@ enum skl_disp_power_wells {
7310#define SPLL_PLL_FREQ_MASK (3<<26) 7355#define SPLL_PLL_FREQ_MASK (3<<26)
7311 7356
7312/* WRPLL */ 7357/* WRPLL */
7313#define WRPLL_CTL1 0x46040 7358#define _WRPLL_CTL1 0x46040
7314#define WRPLL_CTL2 0x46060 7359#define _WRPLL_CTL2 0x46060
7315#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2) 7360#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
7316#define WRPLL_PLL_ENABLE (1<<31) 7361#define WRPLL_PLL_ENABLE (1<<31)
7317#define WRPLL_PLL_SSC (1<<28) 7362#define WRPLL_PLL_SSC (1<<28)
7318#define WRPLL_PLL_NON_SSC (2<<28) 7363#define WRPLL_PLL_NON_SSC (2<<28)
@@ -7329,9 +7374,9 @@ enum skl_disp_power_wells {
7329#define WRPLL_DIVIDER_FB_MASK (0xff<<16) 7374#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
7330 7375
7331/* Port clock selection */ 7376/* Port clock selection */
7332#define PORT_CLK_SEL_A 0x46100 7377#define _PORT_CLK_SEL_A 0x46100
7333#define PORT_CLK_SEL_B 0x46104 7378#define _PORT_CLK_SEL_B 0x46104
7334#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) 7379#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
7335#define PORT_CLK_SEL_LCPLL_2700 (0<<29) 7380#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
7336#define PORT_CLK_SEL_LCPLL_1350 (1<<29) 7381#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
7337#define PORT_CLK_SEL_LCPLL_810 (2<<29) 7382#define PORT_CLK_SEL_LCPLL_810 (2<<29)
@@ -7343,18 +7388,18 @@ enum skl_disp_power_wells {
7343#define PORT_CLK_SEL_MASK (7<<29) 7388#define PORT_CLK_SEL_MASK (7<<29)
7344 7389
7345/* Transcoder clock selection */ 7390/* Transcoder clock selection */
7346#define TRANS_CLK_SEL_A 0x46140 7391#define _TRANS_CLK_SEL_A 0x46140
7347#define TRANS_CLK_SEL_B 0x46144 7392#define _TRANS_CLK_SEL_B 0x46144
7348#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) 7393#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
7349/* For each transcoder, we need to select the corresponding port clock */ 7394/* For each transcoder, we need to select the corresponding port clock */
7350#define TRANS_CLK_SEL_DISABLED (0x0<<29) 7395#define TRANS_CLK_SEL_DISABLED (0x0<<29)
7351#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) 7396#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
7352 7397
7353#define TRANSA_MSA_MISC 0x60410 7398#define _TRANSA_MSA_MISC 0x60410
7354#define TRANSB_MSA_MISC 0x61410 7399#define _TRANSB_MSA_MISC 0x61410
7355#define TRANSC_MSA_MISC 0x62410 7400#define _TRANSC_MSA_MISC 0x62410
7356#define TRANS_EDP_MSA_MISC 0x6f410 7401#define _TRANS_EDP_MSA_MISC 0x6f410
7357#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC) 7402#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
7358 7403
7359#define TRANS_MSA_SYNC_CLK (1<<0) 7404#define TRANS_MSA_SYNC_CLK (1<<0)
7360#define TRANS_MSA_6_BPC (0<<5) 7405#define TRANS_MSA_6_BPC (0<<5)
@@ -7364,7 +7409,7 @@ enum skl_disp_power_wells {
7364#define TRANS_MSA_16_BPC (4<<5) 7409#define TRANS_MSA_16_BPC (4<<5)
7365 7410
7366/* LCPLL Control */ 7411/* LCPLL Control */
7367#define LCPLL_CTL 0x130040 7412#define LCPLL_CTL _MMIO(0x130040)
7368#define LCPLL_PLL_DISABLE (1<<31) 7413#define LCPLL_PLL_DISABLE (1<<31)
7369#define LCPLL_PLL_LOCK (1<<30) 7414#define LCPLL_PLL_LOCK (1<<30)
7370#define LCPLL_CLK_FREQ_MASK (3<<26) 7415#define LCPLL_CLK_FREQ_MASK (3<<26)
@@ -7384,7 +7429,7 @@ enum skl_disp_power_wells {
7384 */ 7429 */
7385 7430
7386/* CDCLK_CTL */ 7431/* CDCLK_CTL */
7387#define CDCLK_CTL 0x46000 7432#define CDCLK_CTL _MMIO(0x46000)
7388#define CDCLK_FREQ_SEL_MASK (3<<26) 7433#define CDCLK_FREQ_SEL_MASK (3<<26)
7389#define CDCLK_FREQ_450_432 (0<<26) 7434#define CDCLK_FREQ_450_432 (0<<26)
7390#define CDCLK_FREQ_540 (1<<26) 7435#define CDCLK_FREQ_540 (1<<26)
@@ -7400,12 +7445,12 @@ enum skl_disp_power_wells {
7400#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 7445#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
7401 7446
7402/* LCPLL_CTL */ 7447/* LCPLL_CTL */
7403#define LCPLL1_CTL 0x46010 7448#define LCPLL1_CTL _MMIO(0x46010)
7404#define LCPLL2_CTL 0x46014 7449#define LCPLL2_CTL _MMIO(0x46014)
7405#define LCPLL_PLL_ENABLE (1<<31) 7450#define LCPLL_PLL_ENABLE (1<<31)
7406 7451
7407/* DPLL control1 */ 7452/* DPLL control1 */
7408#define DPLL_CTRL1 0x6C058 7453#define DPLL_CTRL1 _MMIO(0x6C058)
7409#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5)) 7454#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
7410#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4)) 7455#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
7411#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1)) 7456#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
@@ -7420,7 +7465,7 @@ enum skl_disp_power_wells {
7420#define DPLL_CTRL1_LINK_RATE_2160 5 7465#define DPLL_CTRL1_LINK_RATE_2160 5
7421 7466
7422/* DPLL control2 */ 7467/* DPLL control2 */
7423#define DPLL_CTRL2 0x6C05C 7468#define DPLL_CTRL2 _MMIO(0x6C05C)
7424#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15)) 7469#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
7425#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) 7470#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
7426#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) 7471#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
@@ -7428,21 +7473,21 @@ enum skl_disp_power_wells {
7428#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) 7473#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
7429 7474
7430/* DPLL Status */ 7475/* DPLL Status */
7431#define DPLL_STATUS 0x6C060 7476#define DPLL_STATUS _MMIO(0x6C060)
7432#define DPLL_LOCK(id) (1<<((id)*8)) 7477#define DPLL_LOCK(id) (1<<((id)*8))
7433 7478
7434/* DPLL cfg */ 7479/* DPLL cfg */
7435#define DPLL1_CFGCR1 0x6C040 7480#define _DPLL1_CFGCR1 0x6C040
7436#define DPLL2_CFGCR1 0x6C048 7481#define _DPLL2_CFGCR1 0x6C048
7437#define DPLL3_CFGCR1 0x6C050 7482#define _DPLL3_CFGCR1 0x6C050
7438#define DPLL_CFGCR1_FREQ_ENABLE (1<<31) 7483#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
7439#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) 7484#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
7440#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9) 7485#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
7441#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) 7486#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
7442 7487
7443#define DPLL1_CFGCR2 0x6C044 7488#define _DPLL1_CFGCR2 0x6C044
7444#define DPLL2_CFGCR2 0x6C04C 7489#define _DPLL2_CFGCR2 0x6C04C
7445#define DPLL3_CFGCR2 0x6C054 7490#define _DPLL3_CFGCR2 0x6C054
7446#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) 7491#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
7447#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8) 7492#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
7448#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7) 7493#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
@@ -7460,58 +7505,58 @@ enum skl_disp_power_wells {
7460#define DPLL_CFGCR2_PDIV_7 (4<<2) 7505#define DPLL_CFGCR2_PDIV_7 (4<<2)
7461#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) 7506#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
7462 7507
7463#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8) 7508#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
7464#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8) 7509#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
7465 7510
7466/* BXT display engine PLL */ 7511/* BXT display engine PLL */
7467#define BXT_DE_PLL_CTL 0x6d000 7512#define BXT_DE_PLL_CTL _MMIO(0x6d000)
7468#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ 7513#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
7469#define BXT_DE_PLL_RATIO_MASK 0xff 7514#define BXT_DE_PLL_RATIO_MASK 0xff
7470 7515
7471#define BXT_DE_PLL_ENABLE 0x46070 7516#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
7472#define BXT_DE_PLL_PLL_ENABLE (1 << 31) 7517#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
7473#define BXT_DE_PLL_LOCK (1 << 30) 7518#define BXT_DE_PLL_LOCK (1 << 30)
7474 7519
7475/* GEN9 DC */ 7520/* GEN9 DC */
7476#define DC_STATE_EN 0x45504 7521#define DC_STATE_EN _MMIO(0x45504)
7522#define DC_STATE_DISABLE 0
7477#define DC_STATE_EN_UPTO_DC5 (1<<0) 7523#define DC_STATE_EN_UPTO_DC5 (1<<0)
7478#define DC_STATE_EN_DC9 (1<<3) 7524#define DC_STATE_EN_DC9 (1<<3)
7479#define DC_STATE_EN_UPTO_DC6 (2<<0) 7525#define DC_STATE_EN_UPTO_DC6 (2<<0)
7480#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 7526#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
7481 7527
7482#define DC_STATE_DEBUG 0x45520 7528#define DC_STATE_DEBUG _MMIO(0x45520)
7483#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1) 7529#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
7484 7530
7485/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, 7531/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
7486 * since on HSW we can't write to it using I915_WRITE. */ 7532 * since on HSW we can't write to it using I915_WRITE. */
7487#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) 7533#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
7488#define D_COMP_BDW 0x138144 7534#define D_COMP_BDW _MMIO(0x138144)
7489#define D_COMP_RCOMP_IN_PROGRESS (1<<9) 7535#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
7490#define D_COMP_COMP_FORCE (1<<8) 7536#define D_COMP_COMP_FORCE (1<<8)
7491#define D_COMP_COMP_DISABLE (1<<0) 7537#define D_COMP_COMP_DISABLE (1<<0)
7492 7538
7493/* Pipe WM_LINETIME - watermark line time */ 7539/* Pipe WM_LINETIME - watermark line time */
7494#define PIPE_WM_LINETIME_A 0x45270 7540#define _PIPE_WM_LINETIME_A 0x45270
7495#define PIPE_WM_LINETIME_B 0x45274 7541#define _PIPE_WM_LINETIME_B 0x45274
7496#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ 7542#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
7497 PIPE_WM_LINETIME_B)
7498#define PIPE_WM_LINETIME_MASK (0x1ff) 7543#define PIPE_WM_LINETIME_MASK (0x1ff)
7499#define PIPE_WM_LINETIME_TIME(x) ((x)) 7544#define PIPE_WM_LINETIME_TIME(x) ((x))
7500#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) 7545#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
7501#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) 7546#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
7502 7547
7503/* SFUSE_STRAP */ 7548/* SFUSE_STRAP */
7504#define SFUSE_STRAP 0xc2014 7549#define SFUSE_STRAP _MMIO(0xc2014)
7505#define SFUSE_STRAP_FUSE_LOCK (1<<13) 7550#define SFUSE_STRAP_FUSE_LOCK (1<<13)
7506#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) 7551#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
7507#define SFUSE_STRAP_DDIB_DETECTED (1<<2) 7552#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
7508#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 7553#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
7509#define SFUSE_STRAP_DDID_DETECTED (1<<0) 7554#define SFUSE_STRAP_DDID_DETECTED (1<<0)
7510 7555
7511#define WM_MISC 0x45260 7556#define WM_MISC _MMIO(0x45260)
7512#define WM_MISC_DATA_PARTITION_5_6 (1 << 0) 7557#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
7513 7558
7514#define WM_DBG 0x45280 7559#define WM_DBG _MMIO(0x45280)
7515#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) 7560#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
7516#define WM_DBG_DISALLOW_MAXFIFO (1<<1) 7561#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
7517#define WM_DBG_DISALLOW_SPRITE (1<<2) 7562#define WM_DBG_DISALLOW_SPRITE (1<<2)
@@ -7548,28 +7593,29 @@ enum skl_disp_power_wells {
7548#define _PIPE_B_CSC_POSTOFF_ME 0x49144 7593#define _PIPE_B_CSC_POSTOFF_ME 0x49144
7549#define _PIPE_B_CSC_POSTOFF_LO 0x49148 7594#define _PIPE_B_CSC_POSTOFF_LO 0x49148
7550 7595
7551#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) 7596#define PIPE_CSC_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
7552#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) 7597#define PIPE_CSC_COEFF_BY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
7553#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) 7598#define PIPE_CSC_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
7554#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) 7599#define PIPE_CSC_COEFF_BU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
7555#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) 7600#define PIPE_CSC_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
7556#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) 7601#define PIPE_CSC_COEFF_BV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
7557#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) 7602#define PIPE_CSC_MODE(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
7558#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) 7603#define PIPE_CSC_PREOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
7559#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) 7604#define PIPE_CSC_PREOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
7560#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) 7605#define PIPE_CSC_PREOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
7561#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) 7606#define PIPE_CSC_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
7562#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 7607#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
7563#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 7608#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
7564 7609
7565/* MIPI DSI registers */ 7610/* MIPI DSI registers */
7566 7611
7567#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ 7612#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
7613#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
7568 7614
7569/* BXT MIPI clock controls */ 7615/* BXT MIPI clock controls */
7570#define BXT_MAX_VAR_OUTPUT_KHZ 39500 7616#define BXT_MAX_VAR_OUTPUT_KHZ 39500
7571 7617
7572#define BXT_MIPI_CLOCK_CTL 0x46090 7618#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
7573#define BXT_MIPI1_DIV_SHIFT 26 7619#define BXT_MIPI1_DIV_SHIFT 26
7574#define BXT_MIPI2_DIV_SHIFT 10 7620#define BXT_MIPI2_DIV_SHIFT 10
7575#define BXT_MIPI_DIV_SHIFT(port) \ 7621#define BXT_MIPI_DIV_SHIFT(port) \
@@ -7631,20 +7677,20 @@ enum skl_disp_power_wells {
7631/* BXT MIPI mode configure */ 7677/* BXT MIPI mode configure */
7632#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 7678#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
7633#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 7679#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
7634#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \ 7680#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
7635 _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) 7681 _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
7636 7682
7637#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC 7683#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
7638#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC 7684#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
7639#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \ 7685#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
7640 _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) 7686 _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
7641 7687
7642#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 7688#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
7643#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 7689#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
7644#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \ 7690#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
7645 _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) 7691 _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
7646 7692
7647#define BXT_DSI_PLL_CTL 0x161000 7693#define BXT_DSI_PLL_CTL _MMIO(0x161000)
7648#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16 7694#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
7649#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT) 7695#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
7650#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT) 7696#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
@@ -7662,19 +7708,18 @@ enum skl_disp_power_wells {
7662#define BXT_DSI_PLL_RATIO_MASK 0xFF 7708#define BXT_DSI_PLL_RATIO_MASK 0xFF
7663#define BXT_REF_CLOCK_KHZ 19500 7709#define BXT_REF_CLOCK_KHZ 19500
7664 7710
7665#define BXT_DSI_PLL_ENABLE 0x46080 7711#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
7666#define BXT_DSI_PLL_DO_ENABLE (1 << 31) 7712#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
7667#define BXT_DSI_PLL_LOCKED (1 << 30) 7713#define BXT_DSI_PLL_LOCKED (1 << 30)
7668 7714
7669#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) 7715#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
7670#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) 7716#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
7671#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) 7717#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
7672 7718
7673 /* BXT port control */ 7719 /* BXT port control */
7674#define _BXT_MIPIA_PORT_CTRL 0x6B0C0 7720#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
7675#define _BXT_MIPIC_PORT_CTRL 0x6B8C0 7721#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
7676#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \ 7722#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
7677 _BXT_MIPIC_PORT_CTRL)
7678 7723
7679#define DPI_ENABLE (1 << 31) /* A + C */ 7724#define DPI_ENABLE (1 << 31) /* A + C */
7680#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 7725#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
@@ -7718,8 +7763,7 @@ enum skl_disp_power_wells {
7718 7763
7719#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) 7764#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
7720#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) 7765#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
7721#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \ 7766#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
7722 _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
7723#define TEARING_EFFECT_DELAY_SHIFT 0 7767#define TEARING_EFFECT_DELAY_SHIFT 0
7724#define TEARING_EFFECT_DELAY_MASK (0xffff << 0) 7768#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
7725 7769
@@ -7730,8 +7774,7 @@ enum skl_disp_power_wells {
7730 7774
7731#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) 7775#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
7732#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) 7776#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
7733#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \ 7777#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
7734 _MIPIC_DEVICE_READY)
7735#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ 7778#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
7736#define ULPS_STATE_MASK (3 << 1) 7779#define ULPS_STATE_MASK (3 << 1)
7737#define ULPS_STATE_ENTER (2 << 1) 7780#define ULPS_STATE_ENTER (2 << 1)
@@ -7741,12 +7784,10 @@ enum skl_disp_power_wells {
7741 7784
7742#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) 7785#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
7743#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) 7786#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
7744#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \ 7787#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
7745 _MIPIC_INTR_STAT)
7746#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) 7788#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
7747#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) 7789#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
7748#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \ 7790#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
7749 _MIPIC_INTR_EN)
7750#define TEARING_EFFECT (1 << 31) 7791#define TEARING_EFFECT (1 << 31)
7751#define SPL_PKT_SENT_INTERRUPT (1 << 30) 7792#define SPL_PKT_SENT_INTERRUPT (1 << 30)
7752#define GEN_READ_DATA_AVAIL (1 << 29) 7793#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -7782,8 +7823,7 @@ enum skl_disp_power_wells {
7782 7823
7783#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) 7824#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
7784#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) 7825#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
7785#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \ 7826#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
7786 _MIPIC_DSI_FUNC_PRG)
7787#define CMD_MODE_DATA_WIDTH_MASK (7 << 13) 7827#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
7788#define CMD_MODE_NOT_SUPPORTED (0 << 13) 7828#define CMD_MODE_NOT_SUPPORTED (0 << 13)
7789#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) 7829#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -7806,32 +7846,27 @@ enum skl_disp_power_wells {
7806 7846
7807#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) 7847#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
7808#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) 7848#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
7809#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \ 7849#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
7810 _MIPIC_HS_TX_TIMEOUT)
7811#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff 7850#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
7812 7851
7813#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) 7852#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
7814#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) 7853#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
7815#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \ 7854#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
7816 _MIPIC_LP_RX_TIMEOUT)
7817#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff 7855#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
7818 7856
7819#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) 7857#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
7820#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) 7858#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
7821#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \ 7859#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
7822 _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
7823#define TURN_AROUND_TIMEOUT_MASK 0x3f 7860#define TURN_AROUND_TIMEOUT_MASK 0x3f
7824 7861
7825#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) 7862#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
7826#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) 7863#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
7827#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \ 7864#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
7828 _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
7829#define DEVICE_RESET_TIMER_MASK 0xffff 7865#define DEVICE_RESET_TIMER_MASK 0xffff
7830 7866
7831#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) 7867#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
7832#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) 7868#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
7833#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \ 7869#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
7834 _MIPIC_DPI_RESOLUTION)
7835#define VERTICAL_ADDRESS_SHIFT 16 7870#define VERTICAL_ADDRESS_SHIFT 16
7836#define VERTICAL_ADDRESS_MASK (0xffff << 16) 7871#define VERTICAL_ADDRESS_MASK (0xffff << 16)
7837#define HORIZONTAL_ADDRESS_SHIFT 0 7872#define HORIZONTAL_ADDRESS_SHIFT 0
@@ -7839,8 +7874,7 @@ enum skl_disp_power_wells {
7839 7874
7840#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) 7875#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
7841#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) 7876#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
7842#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \ 7877#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
7843 _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
7844#define DBI_FIFO_EMPTY_HALF (0 << 0) 7878#define DBI_FIFO_EMPTY_HALF (0 << 0)
7845#define DBI_FIFO_EMPTY_QUARTER (1 << 0) 7879#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
7846#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) 7880#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
@@ -7848,50 +7882,41 @@ enum skl_disp_power_wells {
7848/* regs below are bits 15:0 */ 7882/* regs below are bits 15:0 */
7849#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) 7883#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
7850#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) 7884#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
7851#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ 7885#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
7852 _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
7853 7886
7854#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) 7887#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
7855#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) 7888#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
7856#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \ 7889#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
7857 _MIPIC_HBP_COUNT)
7858 7890
7859#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) 7891#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
7860#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) 7892#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
7861#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \ 7893#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
7862 _MIPIC_HFP_COUNT)
7863 7894
7864#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) 7895#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
7865#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) 7896#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
7866#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \ 7897#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
7867 _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
7868 7898
7869#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) 7899#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
7870#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) 7900#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
7871#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ 7901#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
7872 _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
7873 7902
7874#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) 7903#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
7875#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) 7904#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
7876#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \ 7905#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
7877 _MIPIC_VBP_COUNT)
7878 7906
7879#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) 7907#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
7880#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) 7908#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
7881#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \ 7909#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
7882 _MIPIC_VFP_COUNT)
7883 7910
7884#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) 7911#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
7885#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) 7912#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
7886#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \ 7913#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
7887 _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
7888 7914
7889/* regs above are bits 15:0 */ 7915/* regs above are bits 15:0 */
7890 7916
7891#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) 7917#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
7892#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) 7918#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
7893#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \ 7919#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
7894 _MIPIC_DPI_CONTROL)
7895#define DPI_LP_MODE (1 << 6) 7920#define DPI_LP_MODE (1 << 6)
7896#define BACKLIGHT_OFF (1 << 5) 7921#define BACKLIGHT_OFF (1 << 5)
7897#define BACKLIGHT_ON (1 << 4) 7922#define BACKLIGHT_ON (1 << 4)
@@ -7902,29 +7927,26 @@ enum skl_disp_power_wells {
7902 7927
7903#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) 7928#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
7904#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) 7929#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
7905#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \ 7930#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
7906 _MIPIC_DPI_DATA)
7907#define COMMAND_BYTE_SHIFT 0 7931#define COMMAND_BYTE_SHIFT 0
7908#define COMMAND_BYTE_MASK (0x3f << 0) 7932#define COMMAND_BYTE_MASK (0x3f << 0)
7909 7933
7910#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) 7934#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
7911#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) 7935#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
7912#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \ 7936#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
7913 _MIPIC_INIT_COUNT)
7914#define MASTER_INIT_TIMER_SHIFT 0 7937#define MASTER_INIT_TIMER_SHIFT 0
7915#define MASTER_INIT_TIMER_MASK (0xffff << 0) 7938#define MASTER_INIT_TIMER_MASK (0xffff << 0)
7916 7939
7917#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) 7940#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
7918#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) 7941#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
7919#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \ 7942#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
7920 _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) 7943 _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
7921#define MAX_RETURN_PKT_SIZE_SHIFT 0 7944#define MAX_RETURN_PKT_SIZE_SHIFT 0
7922#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) 7945#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
7923 7946
7924#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) 7947#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
7925#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) 7948#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
7926#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \ 7949#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
7927 _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
7928#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) 7950#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
7929#define DISABLE_VIDEO_BTA (1 << 3) 7951#define DISABLE_VIDEO_BTA (1 << 3)
7930#define IP_TG_CONFIG (1 << 2) 7952#define IP_TG_CONFIG (1 << 2)
@@ -7934,8 +7956,7 @@ enum skl_disp_power_wells {
7934 7956
7935#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) 7957#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
7936#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) 7958#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
7937#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \ 7959#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
7938 _MIPIC_EOT_DISABLE)
7939#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) 7960#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
7940#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) 7961#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
7941#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) 7962#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -7947,31 +7968,26 @@ enum skl_disp_power_wells {
7947 7968
7948#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) 7969#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
7949#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) 7970#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
7950#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \ 7971#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
7951 _MIPIC_LP_BYTECLK)
7952#define LP_BYTECLK_SHIFT 0 7972#define LP_BYTECLK_SHIFT 0
7953#define LP_BYTECLK_MASK (0xffff << 0) 7973#define LP_BYTECLK_MASK (0xffff << 0)
7954 7974
7955/* bits 31:0 */ 7975/* bits 31:0 */
7956#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) 7976#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
7957#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) 7977#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
7958#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \ 7978#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
7959 _MIPIC_LP_GEN_DATA)
7960 7979
7961/* bits 31:0 */ 7980/* bits 31:0 */
7962#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) 7981#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
7963#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) 7982#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
7964#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \ 7983#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
7965 _MIPIC_HS_GEN_DATA)
7966 7984
7967#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) 7985#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
7968#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) 7986#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
7969#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \ 7987#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
7970 _MIPIC_LP_GEN_CTRL)
7971#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) 7988#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
7972#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) 7989#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
7973#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \ 7990#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
7974 _MIPIC_HS_GEN_CTRL)
7975#define LONG_PACKET_WORD_COUNT_SHIFT 8 7991#define LONG_PACKET_WORD_COUNT_SHIFT 8
7976#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) 7992#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
7977#define SHORT_PACKET_PARAM_SHIFT 8 7993#define SHORT_PACKET_PARAM_SHIFT 8
@@ -7984,8 +8000,7 @@ enum skl_disp_power_wells {
7984 8000
7985#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) 8001#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
7986#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) 8002#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
7987#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \ 8003#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
7988 _MIPIC_GEN_FIFO_STAT)
7989#define DPI_FIFO_EMPTY (1 << 28) 8004#define DPI_FIFO_EMPTY (1 << 28)
7990#define DBI_FIFO_EMPTY (1 << 27) 8005#define DBI_FIFO_EMPTY (1 << 27)
7991#define LP_CTRL_FIFO_EMPTY (1 << 26) 8006#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -8003,16 +8018,14 @@ enum skl_disp_power_wells {
8003 8018
8004#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) 8019#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
8005#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) 8020#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
8006#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \ 8021#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
8007 _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
8008#define DBI_HS_LP_MODE_MASK (1 << 0) 8022#define DBI_HS_LP_MODE_MASK (1 << 0)
8009#define DBI_LP_MODE (1 << 0) 8023#define DBI_LP_MODE (1 << 0)
8010#define DBI_HS_MODE (0 << 0) 8024#define DBI_HS_MODE (0 << 0)
8011 8025
8012#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) 8026#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
8013#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) 8027#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
8014#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \ 8028#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
8015 _MIPIC_DPHY_PARAM)
8016#define EXIT_ZERO_COUNT_SHIFT 24 8029#define EXIT_ZERO_COUNT_SHIFT 24
8017#define EXIT_ZERO_COUNT_MASK (0x3f << 24) 8030#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
8018#define TRAIL_COUNT_SHIFT 16 8031#define TRAIL_COUNT_SHIFT 16
@@ -8025,15 +8038,11 @@ enum skl_disp_power_wells {
8025/* bits 31:0 */ 8038/* bits 31:0 */
8026#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) 8039#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
8027#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) 8040#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
8028#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \ 8041#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
8029 _MIPIC_DBI_BW_CTRL) 8042
8030 8043#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
8031#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ 8044#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
8032 + 0xb088) 8045#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
8033#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
8034 + 0xb888)
8035#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
8036 _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
8037#define LP_HS_SSW_CNT_SHIFT 16 8046#define LP_HS_SSW_CNT_SHIFT 16
8038#define LP_HS_SSW_CNT_MASK (0xffff << 16) 8047#define LP_HS_SSW_CNT_MASK (0xffff << 16)
8039#define HS_LP_PWR_SW_CNT_SHIFT 0 8048#define HS_LP_PWR_SW_CNT_SHIFT 0
@@ -8041,19 +8050,16 @@ enum skl_disp_power_wells {
8041 8050
8042#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) 8051#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
8043#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) 8052#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
8044#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \ 8053#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
8045 _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
8046#define STOP_STATE_STALL_COUNTER_SHIFT 0 8054#define STOP_STATE_STALL_COUNTER_SHIFT 0
8047#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) 8055#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
8048 8056
8049#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) 8057#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
8050#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) 8058#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
8051#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \ 8059#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
8052 _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
8053#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) 8060#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
8054#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) 8061#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
8055#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \ 8062#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
8056 _MIPIC_INTR_EN_REG_1)
8057#define RX_CONTENTION_DETECTED (1 << 0) 8063#define RX_CONTENTION_DETECTED (1 << 0)
8058 8064
8059/* XXX: only pipe A ?!? */ 8065/* XXX: only pipe A ?!? */
@@ -8073,8 +8079,7 @@ enum skl_disp_power_wells {
8073 8079
8074#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) 8080#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
8075#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) 8081#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
8076#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \ 8082#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
8077 _MIPIC_CTRL)
8078#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ 8083#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
8079#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) 8084#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
8080#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) 8085#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -8093,23 +8098,20 @@ enum skl_disp_power_wells {
8093 8098
8094#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) 8099#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
8095#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) 8100#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
8096#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \ 8101#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
8097 _MIPIC_DATA_ADDRESS)
8098#define DATA_MEM_ADDRESS_SHIFT 5 8102#define DATA_MEM_ADDRESS_SHIFT 5
8099#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) 8103#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
8100#define DATA_VALID (1 << 0) 8104#define DATA_VALID (1 << 0)
8101 8105
8102#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) 8106#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
8103#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) 8107#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
8104#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \ 8108#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
8105 _MIPIC_DATA_LENGTH)
8106#define DATA_LENGTH_SHIFT 0 8109#define DATA_LENGTH_SHIFT 0
8107#define DATA_LENGTH_MASK (0xfffff << 0) 8110#define DATA_LENGTH_MASK (0xfffff << 0)
8108 8111
8109#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) 8112#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
8110#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) 8113#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
8111#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \ 8114#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
8112 _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
8113#define COMMAND_MEM_ADDRESS_SHIFT 5 8115#define COMMAND_MEM_ADDRESS_SHIFT 5
8114#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) 8116#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
8115#define AUTO_PWG_ENABLE (1 << 2) 8117#define AUTO_PWG_ENABLE (1 << 2)
@@ -8118,21 +8120,17 @@ enum skl_disp_power_wells {
8118 8120
8119#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) 8121#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
8120#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) 8122#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
8121#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \ 8123#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
8122 _MIPIC_COMMAND_LENGTH)
8123#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ 8124#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
8124#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) 8125#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
8125 8126
8126#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) 8127#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
8127#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) 8128#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
8128#define MIPI_READ_DATA_RETURN(port, n) \ 8129#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
8129 (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
8130 + 4 * (n)) /* n: 0...7 */
8131 8130
8132#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) 8131#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
8133#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) 8132#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
8134#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \ 8133#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
8135 _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
8136#define READ_DATA_VALID(n) (1 << (n)) 8134#define READ_DATA_VALID(n) (1 << (n))
8137 8135
8138/* For UMS only (deprecated): */ 8136/* For UMS only (deprecated): */
@@ -8140,12 +8138,12 @@ enum skl_disp_power_wells {
8140#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) 8138#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
8141 8139
8142/* MOCS (Memory Object Control State) registers */ 8140/* MOCS (Memory Object Control State) registers */
8143#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */ 8141#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
8144 8142
8145#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/ 8143#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
8146#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/ 8144#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
8147#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/ 8145#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
8148#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/ 8146#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
8149#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/ 8147#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
8150 8148
8151#endif /* _I915_REG_H_ */ 8149#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 50ce9ce2b269..f929c61f0fa2 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -35,7 +35,8 @@
35#define dev_to_drm_minor(d) dev_get_drvdata((d)) 35#define dev_to_drm_minor(d) dev_get_drvdata((d))
36 36
37#ifdef CONFIG_PM 37#ifdef CONFIG_PM
38static u32 calc_residency(struct drm_device *dev, const u32 reg) 38static u32 calc_residency(struct drm_device *dev,
39 i915_reg_t reg)
39{ 40{
40 struct drm_i915_private *dev_priv = dev->dev_private; 41 struct drm_i915_private *dev_priv = dev->dev_private;
41 u64 raw_time; /* 32b value may overflow during fixed point math */ 42 u64 raw_time; /* 32b value may overflow during fixed point math */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 04fe8491c8b6..52b2d409945d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete,
664); 664);
665 665
666TRACE_EVENT_CONDITION(i915_reg_rw, 666TRACE_EVENT_CONDITION(i915_reg_rw,
667 TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), 667 TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
668 668
669 TP_ARGS(write, reg, val, len, trace), 669 TP_ARGS(write, reg, val, len, trace),
670 670
@@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw,
679 679
680 TP_fast_assign( 680 TP_fast_assign(
681 __entry->val = (u64)val; 681 __entry->val = (u64)val;
682 __entry->reg = reg; 682 __entry->reg = i915_mmio_reg_offset(reg);
683 __entry->write = write; 683 __entry->write = write;
684 __entry->len = len; 684 __entry->len = len;
685 ), 685 ),
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5eee75bff170..dea7429be4d0 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev)
69 if (!IS_HASWELL(dev)) 69 if (!IS_HASWELL(dev))
70 return; 70 return;
71 71
72 magic = readq(dev_priv->regs + vgtif_reg(magic)); 72 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
73 if (magic != VGT_MAGIC) 73 if (magic != VGT_MAGIC)
74 return; 74 return;
75 75
76 version = INTEL_VGT_IF_VERSION_ENCODE( 76 version = INTEL_VGT_IF_VERSION_ENCODE(
77 readw(dev_priv->regs + vgtif_reg(version_major)), 77 __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
78 readw(dev_priv->regs + vgtif_reg(version_minor))); 78 __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
79 if (version != INTEL_VGT_IF_VERSION) { 79 if (version != INTEL_VGT_IF_VERSION) {
80 DRM_INFO("VGT interface version mismatch!\n"); 80 DRM_INFO("VGT interface version mismatch!\n");
81 return; 81 return;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 21c97f44d637..3c83b47b5f69 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -92,14 +92,10 @@ struct vgt_if {
92 uint32_t g2v_notify; 92 uint32_t g2v_notify;
93 uint32_t rsv6[7]; 93 uint32_t rsv6[7];
94 94
95 uint32_t pdp0_lo; 95 struct {
96 uint32_t pdp0_hi; 96 uint32_t lo;
97 uint32_t pdp1_lo; 97 uint32_t hi;
98 uint32_t pdp1_hi; 98 } pdp[4];
99 uint32_t pdp2_lo;
100 uint32_t pdp2_hi;
101 uint32_t pdp3_lo;
102 uint32_t pdp3_hi;
103 99
104 uint32_t execlist_context_descriptor_lo; 100 uint32_t execlist_context_descriptor_lo;
105 uint32_t execlist_context_descriptor_hi; 101 uint32_t execlist_context_descriptor_hi;
@@ -108,7 +104,7 @@ struct vgt_if {
108} __packed; 104} __packed;
109 105
110#define vgtif_reg(x) \ 106#define vgtif_reg(x) \
111 (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) 107 _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
112 108
113/* vGPU display status to be used by the host side */ 109/* vGPU display status to be used by the host side */
114#define VGT_DRV_DISPLAY_NOT_READY 0 110#define VGT_DRV_DISPLAY_NOT_READY 0
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index f1975f267710..643f342de33b 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); 94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
95 95
96 crtc_state->update_pipe = false; 96 crtc_state->update_pipe = false;
97 crtc_state->disable_lp_wm = false;
97 98
98 return &crtc_state->base; 99 return &crtc_state->base;
99} 100}
@@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
205 * but since this plane is unchanged just do the 206 * but since this plane is unchanged just do the
206 * minimum required validation. 207 * minimum required validation.
207 */ 208 */
208 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
209 intel_crtc->atomic.wait_for_flips = true;
210 crtc_state->base.planes_changed = true; 209 crtc_state->base.planes_changed = true;
211 } 210 }
212 211
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index a11980696595..c6bb0fc1edfb 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
84 state = &intel_state->base; 84 state = &intel_state->base;
85 85
86 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 __drm_atomic_helper_plane_duplicate_state(plane, state);
87 intel_state->wait_req = NULL;
87 88
88 return state; 89 return state;
89} 90}
@@ -100,6 +101,7 @@ void
100intel_plane_destroy_state(struct drm_plane *plane, 101intel_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 102 struct drm_plane_state *state)
102{ 103{
104 WARN_ON(state && to_intel_plane_state(state)->wait_req);
103 drm_atomic_helper_plane_destroy_state(plane, state); 105 drm_atomic_helper_plane_destroy_state(plane, state);
104} 106}
105 107
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 4dccd9b003a1..9aa83e71b792 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
161} 161}
162 162
163static bool intel_eld_uptodate(struct drm_connector *connector, 163static bool intel_eld_uptodate(struct drm_connector *connector,
164 int reg_eldv, uint32_t bits_eldv, 164 i915_reg_t reg_eldv, uint32_t bits_eldv,
165 int reg_elda, uint32_t bits_elda, 165 i915_reg_t reg_elda, uint32_t bits_elda,
166 int reg_edid) 166 i915_reg_t reg_edid)
167{ 167{
168 struct drm_i915_private *dev_priv = connector->dev->dev_private; 168 struct drm_i915_private *dev_priv = connector->dev->dev_private;
169 uint8_t *eld = connector->eld; 169 uint8_t *eld = connector->eld;
@@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
364 enum port port = intel_dig_port->port; 364 enum port port = intel_dig_port->port;
365 enum pipe pipe = intel_crtc->pipe; 365 enum pipe pipe = intel_crtc->pipe;
366 uint32_t tmp, eldv; 366 uint32_t tmp, eldv;
367 int aud_config; 367 i915_reg_t aud_config, aud_cntrl_st2;
368 int aud_cntrl_st2;
369 368
370 DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", 369 DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
371 port_name(port), pipe_name(pipe)); 370 port_name(port), pipe_name(pipe));
@@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
416 uint32_t eldv; 415 uint32_t eldv;
417 uint32_t tmp; 416 uint32_t tmp;
418 int len, i; 417 int len, i;
419 int hdmiw_hdmiedid; 418 i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
420 int aud_config;
421 int aud_cntl_st;
422 int aud_cntrl_st2;
423 419
424 DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", 420 DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
425 port_name(port), pipe_name(pipe), drm_eld_size(eld)); 421 port_name(port), pipe_name(pipe), drm_eld_size(eld));
@@ -591,7 +587,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
591 struct drm_i915_private *dev_priv = dev_to_i915(dev); 587 struct drm_i915_private *dev_priv = dev_to_i915(dev);
592 u32 tmp; 588 u32 tmp;
593 589
594 if (!IS_SKYLAKE(dev_priv)) 590 if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
595 return; 591 return;
596 592
597 /* 593 /*
@@ -642,10 +638,11 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
642 u32 tmp; 638 u32 tmp;
643 int n; 639 int n;
644 640
645 /* HSW, BDW SKL need this fix */ 641 /* HSW, BDW, SKL, KBL need this fix */
646 if (!IS_SKYLAKE(dev_priv) && 642 if (!IS_SKYLAKE(dev_priv) &&
647 !IS_BROADWELL(dev_priv) && 643 !IS_KABYLAKE(dev_priv) &&
648 !IS_HASWELL(dev_priv)) 644 !IS_BROADWELL(dev_priv) &&
645 !IS_HASWELL(dev_priv))
649 return 0; 646 return 0;
650 647
651 mutex_lock(&dev_priv->av_mutex); 648 mutex_lock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6a2c76e367a5..27b3e610e8f0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -50,7 +50,7 @@ struct intel_crt {
50 * encoder's enable/disable callbacks */ 50 * encoder's enable/disable callbacks */
51 struct intel_connector *connector; 51 struct intel_connector *connector;
52 bool force_hotplug_required; 52 bool force_hotplug_required;
53 u32 adpa_reg; 53 i915_reg_t adpa_reg;
54}; 54};
55 55
56static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) 56static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
@@ -480,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt)
480 uint32_t vsample; 480 uint32_t vsample;
481 uint32_t vblank, vblank_start, vblank_end; 481 uint32_t vblank, vblank_start, vblank_end;
482 uint32_t dsl; 482 uint32_t dsl;
483 uint32_t bclrpat_reg; 483 i915_reg_t bclrpat_reg, vtotal_reg,
484 uint32_t vtotal_reg; 484 vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
485 uint32_t vblank_reg;
486 uint32_t vsync_reg;
487 uint32_t pipeconf_reg;
488 uint32_t pipe_dsl_reg;
489 uint8_t st00; 485 uint8_t st00;
490 enum drm_connector_status status; 486 enum drm_connector_status status;
491 487
@@ -518,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt)
518 /* Wait for next Vblank to substitue 514 /* Wait for next Vblank to substitue
519 * border color for Color info */ 515 * border color for Color info */
520 intel_wait_for_vblank(dev, pipe); 516 intel_wait_for_vblank(dev, pipe);
521 st00 = I915_READ8(VGA_MSR_WRITE); 517 st00 = I915_READ8(_VGA_MSR_WRITE);
522 status = ((st00 & (1 << 4)) != 0) ? 518 status = ((st00 & (1 << 4)) != 0) ?
523 connector_status_connected : 519 connector_status_connected :
524 connector_status_disconnected; 520 connector_status_disconnected;
@@ -563,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt)
563 do { 559 do {
564 count++; 560 count++;
565 /* Read the ST00 VGA status register */ 561 /* Read the ST00 VGA status register */
566 st00 = I915_READ8(VGA_MSR_WRITE); 562 st00 = I915_READ8(_VGA_MSR_WRITE);
567 if (st00 & (1 << 4)) 563 if (st00 & (1 << 4))
568 detect++; 564 detect++;
569 } while ((I915_READ(pipe_dsl_reg) == dsl)); 565 } while ((I915_READ(pipe_dsl_reg) == dsl));
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9e530a739354..6c6a6695e99c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -47,21 +47,10 @@
47MODULE_FIRMWARE(I915_CSR_SKL); 47MODULE_FIRMWARE(I915_CSR_SKL);
48MODULE_FIRMWARE(I915_CSR_BXT); 48MODULE_FIRMWARE(I915_CSR_BXT);
49 49
50/* 50#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
51* SKL CSR registers for DC5 and DC6 51
52*/
53#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
54#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
55#define CSR_HTP_ADDR_SKL 0x00500034
56#define CSR_SSP_BASE 0x8F074
57#define CSR_HTP_SKL 0x8F004
58#define CSR_LAST_WRITE 0x8F034
59#define CSR_LAST_WRITE_VALUE 0xc003b400
60/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
61#define CSR_MAX_FW_SIZE 0x2FFF 52#define CSR_MAX_FW_SIZE 0x2FFF
62#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF 53#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
63#define CSR_MMIO_START_RANGE 0x80000
64#define CSR_MMIO_END_RANGE 0x8FFFF
65 54
66struct intel_css_header { 55struct intel_css_header {
67 /* 0x09 for DMC */ 56 /* 0x09 for DMC */
@@ -178,166 +167,134 @@ struct stepping_info {
178}; 167};
179 168
180static const struct stepping_info skl_stepping_info[] = { 169static const struct stepping_info skl_stepping_info[] = {
181 {'A', '0'}, {'B', '0'}, {'C', '0'}, 170 {'A', '0'}, {'B', '0'}, {'C', '0'},
182 {'D', '0'}, {'E', '0'}, {'F', '0'}, 171 {'D', '0'}, {'E', '0'}, {'F', '0'},
183 {'G', '0'}, {'H', '0'}, {'I', '0'} 172 {'G', '0'}, {'H', '0'}, {'I', '0'}
184}; 173};
185 174
186static struct stepping_info bxt_stepping_info[] = { 175static const struct stepping_info bxt_stepping_info[] = {
187 {'A', '0'}, {'A', '1'}, {'A', '2'}, 176 {'A', '0'}, {'A', '1'}, {'A', '2'},
188 {'B', '0'}, {'B', '1'}, {'B', '2'} 177 {'B', '0'}, {'B', '1'}, {'B', '2'}
189}; 178};
190 179
191static char intel_get_stepping(struct drm_device *dev) 180static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
192{
193 if (IS_SKYLAKE(dev) && (dev->pdev->revision <
194 ARRAY_SIZE(skl_stepping_info)))
195 return skl_stepping_info[dev->pdev->revision].stepping;
196 else if (IS_BROXTON(dev) && (dev->pdev->revision <
197 ARRAY_SIZE(bxt_stepping_info)))
198 return bxt_stepping_info[dev->pdev->revision].stepping;
199 else
200 return -ENODATA;
201}
202
203static char intel_get_substepping(struct drm_device *dev)
204{ 181{
205 if (IS_SKYLAKE(dev) && (dev->pdev->revision < 182 const struct stepping_info *si;
206 ARRAY_SIZE(skl_stepping_info))) 183 unsigned int size;
207 return skl_stepping_info[dev->pdev->revision].substepping; 184
208 else if (IS_BROXTON(dev) && (dev->pdev->revision < 185 if (IS_SKYLAKE(dev)) {
209 ARRAY_SIZE(bxt_stepping_info))) 186 size = ARRAY_SIZE(skl_stepping_info);
210 return bxt_stepping_info[dev->pdev->revision].substepping; 187 si = skl_stepping_info;
211 else 188 } else if (IS_BROXTON(dev)) {
212 return -ENODATA; 189 size = ARRAY_SIZE(bxt_stepping_info);
213} 190 si = bxt_stepping_info;
214 191 } else {
215/** 192 return NULL;
216 * intel_csr_load_status_get() - to get firmware loading status. 193 }
217 * @dev_priv: i915 device.
218 *
219 * This function helps to get the firmware loading status.
220 *
221 * Return: Firmware loading status.
222 */
223enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
224{
225 enum csr_state state;
226 194
227 mutex_lock(&dev_priv->csr_lock); 195 if (INTEL_REVID(dev) < size)
228 state = dev_priv->csr.state; 196 return si + INTEL_REVID(dev);
229 mutex_unlock(&dev_priv->csr_lock);
230 197
231 return state; 198 return NULL;
232}
233
234/**
235 * intel_csr_load_status_set() - help to set firmware loading status.
236 * @dev_priv: i915 device.
237 * @state: enumeration of firmware loading status.
238 *
239 * Set the firmware loading status.
240 */
241void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
242 enum csr_state state)
243{
244 mutex_lock(&dev_priv->csr_lock);
245 dev_priv->csr.state = state;
246 mutex_unlock(&dev_priv->csr_lock);
247} 199}
248 200
249/** 201/**
250 * intel_csr_load_program() - write the firmware from memory to register. 202 * intel_csr_load_program() - write the firmware from memory to register.
251 * @dev: drm device. 203 * @dev_priv: i915 drm device.
252 * 204 *
253 * CSR firmware is read from a .bin file and kept in internal memory one time. 205 * CSR firmware is read from a .bin file and kept in internal memory one time.
254 * Everytime display comes back from low power state this function is called to 206 * Everytime display comes back from low power state this function is called to
255 * copy the firmware from internal memory to registers. 207 * copy the firmware from internal memory to registers.
256 */ 208 */
257void intel_csr_load_program(struct drm_device *dev) 209void intel_csr_load_program(struct drm_i915_private *dev_priv)
258{ 210{
259 struct drm_i915_private *dev_priv = dev->dev_private;
260 u32 *payload = dev_priv->csr.dmc_payload; 211 u32 *payload = dev_priv->csr.dmc_payload;
261 uint32_t i, fw_size; 212 uint32_t i, fw_size;
262 213
263 if (!IS_GEN9(dev)) { 214 if (!IS_GEN9(dev_priv)) {
264 DRM_ERROR("No CSR support available for this platform\n"); 215 DRM_ERROR("No CSR support available for this platform\n");
265 return; 216 return;
266 } 217 }
267 218
268 /* 219 if (!dev_priv->csr.dmc_payload) {
269 * FIXME: Firmware gets lost on S3/S4, but not when entering system 220 DRM_ERROR("Tried to program CSR with empty payload\n");
270 * standby or suspend-to-idle (which is just like forced runtime pm).
271 * Unfortunately the ACPI subsystem doesn't yet give us a way to
272 * differentiate this, hence figure it out with this hack.
273 */
274 if (I915_READ(CSR_PROGRAM(0)))
275 return; 221 return;
222 }
276 223
277 mutex_lock(&dev_priv->csr_lock);
278 fw_size = dev_priv->csr.dmc_fw_size; 224 fw_size = dev_priv->csr.dmc_fw_size;
279 for (i = 0; i < fw_size; i++) 225 for (i = 0; i < fw_size; i++)
280 I915_WRITE(CSR_PROGRAM(i), payload[i]); 226 I915_WRITE(CSR_PROGRAM(i), payload[i]);
281 227
282 for (i = 0; i < dev_priv->csr.mmio_count; i++) { 228 for (i = 0; i < dev_priv->csr.mmio_count; i++) {
283 I915_WRITE(dev_priv->csr.mmioaddr[i], 229 I915_WRITE(dev_priv->csr.mmioaddr[i],
284 dev_priv->csr.mmiodata[i]); 230 dev_priv->csr.mmiodata[i]);
285 } 231 }
286
287 dev_priv->csr.state = FW_LOADED;
288 mutex_unlock(&dev_priv->csr_lock);
289} 232}
290 233
291static void finish_csr_load(const struct firmware *fw, void *context) 234static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
235 const struct firmware *fw)
292{ 236{
293 struct drm_i915_private *dev_priv = context;
294 struct drm_device *dev = dev_priv->dev; 237 struct drm_device *dev = dev_priv->dev;
295 struct intel_css_header *css_header; 238 struct intel_css_header *css_header;
296 struct intel_package_header *package_header; 239 struct intel_package_header *package_header;
297 struct intel_dmc_header *dmc_header; 240 struct intel_dmc_header *dmc_header;
298 struct intel_csr *csr = &dev_priv->csr; 241 struct intel_csr *csr = &dev_priv->csr;
299 char stepping = intel_get_stepping(dev); 242 const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
300 char substepping = intel_get_substepping(dev); 243 char stepping, substepping;
301 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; 244 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
302 uint32_t i; 245 uint32_t i;
303 uint32_t *dmc_payload; 246 uint32_t *dmc_payload;
304 bool fw_loaded = false;
305 247
306 if (!fw) { 248 if (!fw)
307 i915_firmware_load_error_print(csr->fw_path, 0); 249 return NULL;
308 goto out;
309 }
310 250
311 if ((stepping == -ENODATA) || (substepping == -ENODATA)) { 251 if (!stepping_info) {
312 DRM_ERROR("Unknown stepping info, firmware loading failed\n"); 252 DRM_ERROR("Unknown stepping info, firmware loading failed\n");
313 goto out; 253 return NULL;
314 } 254 }
315 255
256 stepping = stepping_info->stepping;
257 substepping = stepping_info->substepping;
258
316 /* Extract CSS Header information*/ 259 /* Extract CSS Header information*/
317 css_header = (struct intel_css_header *)fw->data; 260 css_header = (struct intel_css_header *)fw->data;
318 if (sizeof(struct intel_css_header) != 261 if (sizeof(struct intel_css_header) !=
319 (css_header->header_len * 4)) { 262 (css_header->header_len * 4)) {
320 DRM_ERROR("Firmware has wrong CSS header length %u bytes\n", 263 DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
321 (css_header->header_len * 4)); 264 (css_header->header_len * 4));
322 goto out; 265 return NULL;
323 } 266 }
267
268 csr->version = css_header->version;
269
270 if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
271 DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
272 " please upgrade to v%u.%u or later"
273 " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
274 CSR_VERSION_MAJOR(csr->version),
275 CSR_VERSION_MINOR(csr->version),
276 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
277 CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
278 return NULL;
279 }
280
324 readcount += sizeof(struct intel_css_header); 281 readcount += sizeof(struct intel_css_header);
325 282
326 /* Extract Package Header information*/ 283 /* Extract Package Header information*/
327 package_header = (struct intel_package_header *) 284 package_header = (struct intel_package_header *)
328 &fw->data[readcount]; 285 &fw->data[readcount];
329 if (sizeof(struct intel_package_header) != 286 if (sizeof(struct intel_package_header) !=
330 (package_header->header_len * 4)) { 287 (package_header->header_len * 4)) {
331 DRM_ERROR("Firmware has wrong package header length %u bytes\n", 288 DRM_ERROR("Firmware has wrong package header length %u bytes\n",
332 (package_header->header_len * 4)); 289 (package_header->header_len * 4));
333 goto out; 290 return NULL;
334 } 291 }
335 readcount += sizeof(struct intel_package_header); 292 readcount += sizeof(struct intel_package_header);
336 293
337 /* Search for dmc_offset to find firware binary. */ 294 /* Search for dmc_offset to find firware binary. */
338 for (i = 0; i < package_header->num_entries; i++) { 295 for (i = 0; i < package_header->num_entries; i++) {
339 if (package_header->fw_info[i].substepping == '*' && 296 if (package_header->fw_info[i].substepping == '*' &&
340 stepping == package_header->fw_info[i].stepping) { 297 stepping == package_header->fw_info[i].stepping) {
341 dmc_offset = package_header->fw_info[i].offset; 298 dmc_offset = package_header->fw_info[i].offset;
342 break; 299 break;
343 } else if (stepping == package_header->fw_info[i].stepping && 300 } else if (stepping == package_header->fw_info[i].stepping &&
@@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context)
345 dmc_offset = package_header->fw_info[i].offset; 302 dmc_offset = package_header->fw_info[i].offset;
346 break; 303 break;
347 } else if (package_header->fw_info[i].stepping == '*' && 304 } else if (package_header->fw_info[i].stepping == '*' &&
348 package_header->fw_info[i].substepping == '*') 305 package_header->fw_info[i].substepping == '*')
349 dmc_offset = package_header->fw_info[i].offset; 306 dmc_offset = package_header->fw_info[i].offset;
350 } 307 }
351 if (dmc_offset == CSR_DEFAULT_FW_OFFSET) { 308 if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
352 DRM_ERROR("Firmware not supported for %c stepping\n", stepping); 309 DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
353 goto out; 310 return NULL;
354 } 311 }
355 readcount += dmc_offset; 312 readcount += dmc_offset;
356 313
@@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context)
358 dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; 315 dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
359 if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) { 316 if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
360 DRM_ERROR("Firmware has wrong dmc header length %u bytes\n", 317 DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
361 (dmc_header->header_len)); 318 (dmc_header->header_len));
362 goto out; 319 return NULL;
363 } 320 }
364 readcount += sizeof(struct intel_dmc_header); 321 readcount += sizeof(struct intel_dmc_header);
365 322
366 /* Cache the dmc header info. */ 323 /* Cache the dmc header info. */
367 if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) { 324 if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
368 DRM_ERROR("Firmware has wrong mmio count %u\n", 325 DRM_ERROR("Firmware has wrong mmio count %u\n",
369 dmc_header->mmio_count); 326 dmc_header->mmio_count);
370 goto out; 327 return NULL;
371 } 328 }
372 csr->mmio_count = dmc_header->mmio_count; 329 csr->mmio_count = dmc_header->mmio_count;
373 for (i = 0; i < dmc_header->mmio_count; i++) { 330 for (i = 0; i < dmc_header->mmio_count; i++) {
374 if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE || 331 if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
375 dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { 332 dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
376 DRM_ERROR(" Firmware has wrong mmio address 0x%x\n", 333 DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
377 dmc_header->mmioaddr[i]); 334 dmc_header->mmioaddr[i]);
378 goto out; 335 return NULL;
379 } 336 }
380 csr->mmioaddr[i] = dmc_header->mmioaddr[i]; 337 csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
381 csr->mmiodata[i] = dmc_header->mmiodata[i]; 338 csr->mmiodata[i] = dmc_header->mmiodata[i];
382 } 339 }
383 340
@@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context)
385 nbytes = dmc_header->fw_size * 4; 342 nbytes = dmc_header->fw_size * 4;
386 if (nbytes > CSR_MAX_FW_SIZE) { 343 if (nbytes > CSR_MAX_FW_SIZE) {
387 DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes); 344 DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
388 goto out; 345 return NULL;
389 } 346 }
390 csr->dmc_fw_size = dmc_header->fw_size; 347 csr->dmc_fw_size = dmc_header->fw_size;
391 348
392 csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL); 349 dmc_payload = kmalloc(nbytes, GFP_KERNEL);
393 if (!csr->dmc_payload) { 350 if (!dmc_payload) {
394 DRM_ERROR("Memory allocation failed for dmc payload\n"); 351 DRM_ERROR("Memory allocation failed for dmc payload\n");
395 goto out; 352 return NULL;
396 } 353 }
397 354
398 dmc_payload = csr->dmc_payload;
399 memcpy(dmc_payload, &fw->data[readcount], nbytes); 355 memcpy(dmc_payload, &fw->data[readcount], nbytes);
400 356
357 return dmc_payload;
358}
359
360static void csr_load_work_fn(struct work_struct *work)
361{
362 struct drm_i915_private *dev_priv;
363 struct intel_csr *csr;
364 const struct firmware *fw;
365 int ret;
366
367 dev_priv = container_of(work, typeof(*dev_priv), csr.work);
368 csr = &dev_priv->csr;
369
370 ret = request_firmware(&fw, dev_priv->csr.fw_path,
371 &dev_priv->dev->pdev->dev);
372 if (!fw)
373 goto out;
374
375 dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
376 if (!dev_priv->csr.dmc_payload)
377 goto out;
378
401 /* load csr program during system boot, as needed for DC states */ 379 /* load csr program during system boot, as needed for DC states */
402 intel_csr_load_program(dev); 380 intel_csr_load_program(dev_priv);
403 fw_loaded = true;
404 381
405 DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
406out: 382out:
407 if (fw_loaded) 383 if (dev_priv->csr.dmc_payload) {
408 intel_runtime_pm_put(dev_priv); 384 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
409 else 385
410 intel_csr_load_status_set(dev_priv, FW_FAILED); 386 DRM_INFO("Finished loading %s (v%u.%u)\n",
387 dev_priv->csr.fw_path,
388 CSR_VERSION_MAJOR(csr->version),
389 CSR_VERSION_MINOR(csr->version));
390 } else {
391 DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
392 }
411 393
412 release_firmware(fw); 394 release_firmware(fw);
413} 395}
414 396
415/** 397/**
416 * intel_csr_ucode_init() - initialize the firmware loading. 398 * intel_csr_ucode_init() - initialize the firmware loading.
417 * @dev: drm device. 399 * @dev_priv: i915 drm device.
418 * 400 *
419 * This function is called at the time of loading the display driver to read 401 * This function is called at the time of loading the display driver to read
420 * firmware from a .bin file and copied into a internal memory. 402 * firmware from a .bin file and copied into a internal memory.
421 */ 403 */
422void intel_csr_ucode_init(struct drm_device *dev) 404void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
423{ 405{
424 struct drm_i915_private *dev_priv = dev->dev_private;
425 struct intel_csr *csr = &dev_priv->csr; 406 struct intel_csr *csr = &dev_priv->csr;
426 int ret;
427 407
428 if (!HAS_CSR(dev)) 408 INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
409
410 if (!HAS_CSR(dev_priv))
429 return; 411 return;
430 412
431 if (IS_SKYLAKE(dev)) 413 if (IS_SKYLAKE(dev_priv))
432 csr->fw_path = I915_CSR_SKL; 414 csr->fw_path = I915_CSR_SKL;
433 else if (IS_BROXTON(dev_priv)) 415 else if (IS_BROXTON(dev_priv))
434 csr->fw_path = I915_CSR_BXT; 416 csr->fw_path = I915_CSR_BXT;
435 else { 417 else {
436 DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); 418 DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
437 intel_csr_load_status_set(dev_priv, FW_FAILED);
438 return; 419 return;
439 } 420 }
440 421
@@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev)
444 * Obtain a runtime pm reference, until CSR is loaded, 425 * Obtain a runtime pm reference, until CSR is loaded,
445 * to avoid entering runtime-suspend. 426 * to avoid entering runtime-suspend.
446 */ 427 */
447 intel_runtime_pm_get(dev_priv); 428 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
448 429
449 /* CSR supported for platform, load firmware */ 430 schedule_work(&dev_priv->csr.work);
450 ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
451 &dev_priv->dev->pdev->dev,
452 GFP_KERNEL, dev_priv,
453 finish_csr_load);
454 if (ret) {
455 i915_firmware_load_error_print(csr->fw_path, ret);
456 intel_csr_load_status_set(dev_priv, FW_FAILED);
457 }
458} 431}
459 432
460/** 433/**
461 * intel_csr_ucode_fini() - unload the CSR firmware. 434 * intel_csr_ucode_fini() - unload the CSR firmware.
462 * @dev: drm device. 435 * @dev_priv: i915 drm device.
463 * 436 *
464 * Firmmware unloading includes freeing the internal momory and reset the 437 * Firmmware unloading includes freeing the internal momory and reset the
465 * firmware loading status. 438 * firmware loading status.
466 */ 439 */
467void intel_csr_ucode_fini(struct drm_device *dev) 440void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
468{ 441{
469 struct drm_i915_private *dev_priv = dev->dev_private; 442 if (!HAS_CSR(dev_priv))
470
471 if (!HAS_CSR(dev))
472 return; 443 return;
473 444
474 intel_csr_load_status_set(dev_priv, FW_FAILED); 445 flush_work(&dev_priv->csr.work);
475 kfree(dev_priv->csr.dmc_payload);
476}
477 446
478void assert_csr_loaded(struct drm_i915_private *dev_priv) 447 kfree(dev_priv->csr.dmc_payload);
479{
480 WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
481 "CSR is not loaded.\n");
482 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
483 "CSR program storage start is NULL\n");
484 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
485 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
486} 448}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a6752a61d99f..76ce7c2960b6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
133 { 0x00002016, 0x000000A0, 0x0 }, 133 { 0x00002016, 0x000000A0, 0x0 },
134 { 0x00005012, 0x0000009B, 0x0 }, 134 { 0x00005012, 0x0000009B, 0x0 },
135 { 0x00007011, 0x00000088, 0x0 }, 135 { 0x00007011, 0x00000088, 0x0 },
136 { 0x00009010, 0x000000C7, 0x0 }, 136 { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
137 { 0x00002016, 0x0000009B, 0x0 }, 137 { 0x00002016, 0x0000009B, 0x0 },
138 { 0x00005012, 0x00000088, 0x0 }, 138 { 0x00005012, 0x00000088, 0x0 },
139 { 0x00007011, 0x000000C7, 0x0 }, 139 { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
140 { 0x00002016, 0x000000DF, 0x0 }, 140 { 0x00002016, 0x000000DF, 0x0 },
141 { 0x00005012, 0x000000C7, 0x0 }, 141 { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
142}; 142};
143 143
144/* Skylake U */ 144/* Skylake U */
@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
146 { 0x0000201B, 0x000000A2, 0x0 }, 146 { 0x0000201B, 0x000000A2, 0x0 },
147 { 0x00005012, 0x00000088, 0x0 }, 147 { 0x00005012, 0x00000088, 0x0 },
148 { 0x00007011, 0x00000087, 0x0 }, 148 { 0x00007011, 0x00000087, 0x0 },
149 { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */ 149 { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
150 { 0x0000201B, 0x0000009D, 0x0 }, 150 { 0x0000201B, 0x0000009D, 0x0 },
151 { 0x00005012, 0x000000C7, 0x0 }, 151 { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
152 { 0x00007011, 0x000000C7, 0x0 }, 152 { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
153 { 0x00002016, 0x00000088, 0x0 }, 153 { 0x00002016, 0x00000088, 0x0 },
154 { 0x00005012, 0x000000C7, 0x0 }, 154 { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
155}; 155};
156 156
157/* Skylake Y */ 157/* Skylake Y */
@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
159 { 0x00000018, 0x000000A2, 0x0 }, 159 { 0x00000018, 0x000000A2, 0x0 },
160 { 0x00005012, 0x00000088, 0x0 }, 160 { 0x00005012, 0x00000088, 0x0 },
161 { 0x00007011, 0x00000087, 0x0 }, 161 { 0x00007011, 0x00000087, 0x0 },
162 { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */ 162 { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
163 { 0x00000018, 0x0000009D, 0x0 }, 163 { 0x00000018, 0x0000009D, 0x0 },
164 { 0x00005012, 0x000000C7, 0x0 }, 164 { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
165 { 0x00007011, 0x000000C7, 0x0 }, 165 { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
166 { 0x00000018, 0x00000088, 0x0 }, 166 { 0x00000018, 0x00000088, 0x0 },
167 { 0x00005012, 0x000000C7, 0x0 }, 167 { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
168}; 168};
169 169
170/* 170/*
@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
345static bool 345static bool
346intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) 346intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
347{ 347{
348 return intel_dig_port->hdmi.hdmi_reg; 348 return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
349} 349}
350 350
351static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, 351static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
448 bxt_ddi_vswing_sequence(dev, hdmi_level, port, 448 bxt_ddi_vswing_sequence(dev, hdmi_level, port,
449 INTEL_OUTPUT_HDMI); 449 INTEL_OUTPUT_HDMI);
450 return; 450 return;
451 } else if (IS_SKYLAKE(dev)) { 451 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
452 ddi_translations_fdi = NULL; 452 ddi_translations_fdi = NULL;
453 ddi_translations_dp = 453 ddi_translations_dp =
454 skl_get_buf_trans_dp(dev, &n_dp_entries); 454 skl_get_buf_trans_dp(dev, &n_dp_entries);
@@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev)
576static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, 576static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
577 enum port port) 577 enum port port)
578{ 578{
579 uint32_t reg = DDI_BUF_CTL(port); 579 i915_reg_t reg = DDI_BUF_CTL(port);
580 int i; 580 int i;
581 581
582 for (i = 0; i < 16; i++) { 582 for (i = 0; i < 16; i++) {
@@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
931 /* Otherwise a < c && b >= d, do nothing */ 931 /* Otherwise a < c && b >= d, do nothing */
932} 932}
933 933
934static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) 934static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
935 i915_reg_t reg)
935{ 936{
936 int refclk = LC_FREQ; 937 int refclk = LC_FREQ;
937 int n, p, r; 938 int n, p, r;
@@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
967static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, 968static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
968 uint32_t dpll) 969 uint32_t dpll)
969{ 970{
970 uint32_t cfgcr1_reg, cfgcr2_reg; 971 i915_reg_t cfgcr1_reg, cfgcr2_reg;
971 uint32_t cfgcr1_val, cfgcr2_val; 972 uint32_t cfgcr1_val, cfgcr2_val;
972 uint32_t p0, p1, p2, dco_freq; 973 uint32_t p0, p1, p2, dco_freq;
973 974
@@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
1112 link_clock = 270000; 1113 link_clock = 270000;
1113 break; 1114 break;
1114 case PORT_CLK_SEL_WRPLL1: 1115 case PORT_CLK_SEL_WRPLL1:
1115 link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); 1116 link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
1116 break; 1117 break;
1117 case PORT_CLK_SEL_WRPLL2: 1118 case PORT_CLK_SEL_WRPLL2:
1118 link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); 1119 link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
1119 break; 1120 break;
1120 case PORT_CLK_SEL_SPLL: 1121 case PORT_CLK_SEL_SPLL:
1121 pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; 1122 pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
1184 1185
1185 if (INTEL_INFO(dev)->gen <= 8) 1186 if (INTEL_INFO(dev)->gen <= 8)
1186 hsw_ddi_clock_get(encoder, pipe_config); 1187 hsw_ddi_clock_get(encoder, pipe_config);
1187 else if (IS_SKYLAKE(dev)) 1188 else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
1188 skl_ddi_clock_get(encoder, pipe_config); 1189 skl_ddi_clock_get(encoder, pipe_config);
1189 else if (IS_BROXTON(dev)) 1190 else if (IS_BROXTON(dev))
1190 bxt_ddi_clock_get(encoder, pipe_config); 1191 bxt_ddi_clock_get(encoder, pipe_config);
@@ -1780,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1780 struct intel_encoder *intel_encoder = 1781 struct intel_encoder *intel_encoder =
1781 intel_ddi_get_crtc_new_encoder(crtc_state); 1782 intel_ddi_get_crtc_new_encoder(crtc_state);
1782 1783
1783 if (IS_SKYLAKE(dev)) 1784 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
1784 return skl_ddi_pll_select(intel_crtc, crtc_state, 1785 return skl_ddi_pll_select(intel_crtc, crtc_state,
1785 intel_encoder); 1786 intel_encoder);
1786 else if (IS_BROXTON(dev)) 1787 else if (IS_BROXTON(dev))
@@ -1942,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1942void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, 1943void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1943 enum transcoder cpu_transcoder) 1944 enum transcoder cpu_transcoder)
1944{ 1945{
1945 uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1946 i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1946 uint32_t val = I915_READ(reg); 1947 uint32_t val = I915_READ(reg);
1947 1948
1948 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); 1949 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -2097,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
2097 iboost = dp_iboost; 2098 iboost = dp_iboost;
2098 } else { 2099 } else {
2099 ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); 2100 ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
2100 iboost = ddi_translations[port].i_boost; 2101 iboost = ddi_translations[level].i_boost;
2101 } 2102 }
2102 } else if (type == INTEL_OUTPUT_EDP) { 2103 } else if (type == INTEL_OUTPUT_EDP) {
2103 if (dp_iboost) { 2104 if (dp_iboost) {
2104 iboost = dp_iboost; 2105 iboost = dp_iboost;
2105 } else { 2106 } else {
2106 ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); 2107 ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
2107 iboost = ddi_translations[port].i_boost; 2108 iboost = ddi_translations[level].i_boost;
2108 } 2109 }
2109 } else if (type == INTEL_OUTPUT_HDMI) { 2110 } else if (type == INTEL_OUTPUT_HDMI) {
2110 if (hdmi_iboost) { 2111 if (hdmi_iboost) {
2111 iboost = hdmi_iboost; 2112 iboost = hdmi_iboost;
2112 } else { 2113 } else {
2113 ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); 2114 ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
2114 iboost = ddi_translations[port].i_boost; 2115 iboost = ddi_translations[level].i_boost;
2115 } 2116 }
2116 } else { 2117 } else {
2117 return; 2118 return;
@@ -2263,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
2263 2264
2264 level = translate_signal_level(signal_levels); 2265 level = translate_signal_level(signal_levels);
2265 2266
2266 if (IS_SKYLAKE(dev)) 2267 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
2267 skl_ddi_set_iboost(dev, level, port, encoder->type); 2268 skl_ddi_set_iboost(dev, level, port, encoder->type);
2268 else if (IS_BROXTON(dev)) 2269 else if (IS_BROXTON(dev))
2269 bxt_ddi_vswing_sequence(dev, level, port, encoder->type); 2270 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
@@ -2271,30 +2272,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
2271 return DDI_BUF_TRANS_SELECT(level); 2272 return DDI_BUF_TRANS_SELECT(level);
2272} 2273}
2273 2274
2274static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) 2275void intel_ddi_clk_select(struct intel_encoder *encoder,
2276 const struct intel_crtc_state *pipe_config)
2275{ 2277{
2276 struct drm_encoder *encoder = &intel_encoder->base; 2278 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2277 struct drm_device *dev = encoder->dev; 2279 enum port port = intel_ddi_get_encoder_port(encoder);
2278 struct drm_i915_private *dev_priv = dev->dev_private;
2279 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
2280 enum port port = intel_ddi_get_encoder_port(intel_encoder);
2281 int type = intel_encoder->type;
2282 int hdmi_level;
2283
2284 if (type == INTEL_OUTPUT_EDP) {
2285 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2286 intel_edp_panel_on(intel_dp);
2287 }
2288 2280
2289 if (IS_SKYLAKE(dev)) { 2281 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2290 uint32_t dpll = crtc->config->ddi_pll_sel; 2282 uint32_t dpll = pipe_config->ddi_pll_sel;
2291 uint32_t val; 2283 uint32_t val;
2292 2284
2293 /* 2285 /*
2294 * DPLL0 is used for eDP and is the only "private" DPLL (as 2286 * DPLL0 is used for eDP and is the only "private" DPLL (as
2295 * opposed to shared) on SKL 2287 * opposed to shared) on SKL
2296 */ 2288 */
2297 if (type == INTEL_OUTPUT_EDP) { 2289 if (encoder->type == INTEL_OUTPUT_EDP) {
2298 WARN_ON(dpll != SKL_DPLL0); 2290 WARN_ON(dpll != SKL_DPLL0);
2299 2291
2300 val = I915_READ(DPLL_CTRL1); 2292 val = I915_READ(DPLL_CTRL1);
@@ -2302,7 +2294,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
2302 val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | 2294 val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
2303 DPLL_CTRL1_SSC(dpll) | 2295 DPLL_CTRL1_SSC(dpll) |
2304 DPLL_CTRL1_LINK_RATE_MASK(dpll)); 2296 DPLL_CTRL1_LINK_RATE_MASK(dpll));
2305 val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6); 2297 val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
2306 2298
2307 I915_WRITE(DPLL_CTRL1, val); 2299 I915_WRITE(DPLL_CTRL1, val);
2308 POSTING_READ(DPLL_CTRL1); 2300 POSTING_READ(DPLL_CTRL1);
@@ -2318,10 +2310,28 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
2318 2310
2319 I915_WRITE(DPLL_CTRL2, val); 2311 I915_WRITE(DPLL_CTRL2, val);
2320 2312
2321 } else if (INTEL_INFO(dev)->gen < 9) { 2313 } else if (INTEL_INFO(dev_priv)->gen < 9) {
2322 WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE); 2314 WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
2323 I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel); 2315 I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
2324 } 2316 }
2317}
2318
2319static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
2320{
2321 struct drm_encoder *encoder = &intel_encoder->base;
2322 struct drm_device *dev = encoder->dev;
2323 struct drm_i915_private *dev_priv = dev->dev_private;
2324 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
2325 enum port port = intel_ddi_get_encoder_port(intel_encoder);
2326 int type = intel_encoder->type;
2327 int hdmi_level;
2328
2329 if (type == INTEL_OUTPUT_EDP) {
2330 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2331 intel_edp_panel_on(intel_dp);
2332 }
2333
2334 intel_ddi_clk_select(intel_encoder, crtc->config);
2325 2335
2326 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 2336 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
2327 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2337 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2381,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
2381 intel_edp_panel_off(intel_dp); 2391 intel_edp_panel_off(intel_dp);
2382 } 2392 }
2383 2393
2384 if (IS_SKYLAKE(dev)) 2394 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
2385 I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | 2395 I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
2386 DPLL_CTRL2_DDI_CLK_OFF(port))); 2396 DPLL_CTRL2_DDI_CLK_OFF(port)));
2387 else if (INTEL_INFO(dev)->gen < 9) 2397 else if (INTEL_INFO(dev)->gen < 9)
@@ -2553,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = {
2553}; 2563};
2554 2564
2555struct skl_dpll_regs { 2565struct skl_dpll_regs {
2556 u32 ctl, cfgcr1, cfgcr2; 2566 i915_reg_t ctl, cfgcr1, cfgcr2;
2557}; 2567};
2558 2568
2559/* this array is indexed by the *shared* pll id */ 2569/* this array is indexed by the *shared* pll id */
@@ -2566,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
2566 }, 2576 },
2567 { 2577 {
2568 /* DPLL 2 */ 2578 /* DPLL 2 */
2569 .ctl = WRPLL_CTL1, 2579 .ctl = WRPLL_CTL(0),
2570 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), 2580 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
2571 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), 2581 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
2572 }, 2582 },
2573 { 2583 {
2574 /* DPLL 3 */ 2584 /* DPLL 3 */
2575 .ctl = WRPLL_CTL2, 2585 .ctl = WRPLL_CTL(1),
2576 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), 2586 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
2577 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), 2587 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
2578 }, 2588 },
@@ -2992,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev)
2992 struct drm_i915_private *dev_priv = dev->dev_private; 3002 struct drm_i915_private *dev_priv = dev->dev_private;
2993 uint32_t val = I915_READ(LCPLL_CTL); 3003 uint32_t val = I915_READ(LCPLL_CTL);
2994 3004
2995 if (IS_SKYLAKE(dev)) 3005 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
2996 skl_shared_dplls_init(dev_priv); 3006 skl_shared_dplls_init(dev_priv);
2997 else if (IS_BROXTON(dev)) 3007 else if (IS_BROXTON(dev))
2998 bxt_shared_dplls_init(dev_priv); 3008 bxt_shared_dplls_init(dev_priv);
2999 else 3009 else
3000 hsw_shared_dplls_init(dev_priv); 3010 hsw_shared_dplls_init(dev_priv);
3001 3011
3002 if (IS_SKYLAKE(dev)) { 3012 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
3003 int cdclk_freq; 3013 int cdclk_freq;
3004 3014
3005 cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 3015 cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
3006 dev_priv->skl_boot_cdclk = cdclk_freq; 3016 dev_priv->skl_boot_cdclk = cdclk_freq;
3017 if (skl_sanitize_cdclk(dev_priv))
3018 DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
3007 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) 3019 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
3008 DRM_ERROR("LCPLL1 is disabled\n"); 3020 DRM_ERROR("LCPLL1 is disabled\n");
3009 else
3010 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
3011 } else if (IS_BROXTON(dev)) { 3021 } else if (IS_BROXTON(dev)) {
3012 broxton_init_cdclk(dev); 3022 broxton_init_cdclk(dev);
3013 broxton_ddi_phy_init(dev); 3023 broxton_ddi_phy_init(dev);
@@ -3026,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev)
3026 } 3036 }
3027} 3037}
3028 3038
3029void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) 3039void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
3030{ 3040{
3031 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 3041 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3032 struct intel_dp *intel_dp = &intel_dig_port->dp; 3042 struct drm_i915_private *dev_priv =
3033 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 3043 to_i915(intel_dig_port->base.base.dev);
3034 enum port port = intel_dig_port->port; 3044 enum port port = intel_dig_port->port;
3035 uint32_t val; 3045 uint32_t val;
3036 bool wait = false; 3046 bool wait = false;
@@ -3289,6 +3299,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3289 (DDI_BUF_PORT_REVERSAL | 3299 (DDI_BUF_PORT_REVERSAL |
3290 DDI_A_4_LANES); 3300 DDI_A_4_LANES);
3291 3301
3302 /*
3303 * Bspec says that DDI_A_4_LANES is the only supported configuration
3304 * for Broxton. Yet some BIOS fail to set this bit on port A if eDP
3305 * wasn't lit up at boot. Force this bit on in our internal
3306 * configuration so that we use the proper lane count for our
3307 * calculations.
3308 */
3309 if (IS_BROXTON(dev) && port == PORT_A) {
3310 if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
3311 DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
3312 intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
3313 }
3314 }
3315
3292 intel_encoder->type = INTEL_OUTPUT_UNKNOWN; 3316 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
3293 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 3317 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3294 intel_encoder->cloneable = 0; 3318 intel_encoder->cloneable = 0;
@@ -3302,8 +3326,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3302 * On BXT A0/A1, sw needs to activate DDIA HPD logic and 3326 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
3303 * interrupts to check the external panel connection. 3327 * interrupts to check the external panel connection.
3304 */ 3328 */
3305 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0) 3329 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
3306 && port == PORT_B)
3307 dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port; 3330 dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
3308 else 3331 else
3309 dev_priv->hotplug.irq_port[port] = intel_dig_port; 3332 dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bea7f3aef2b0..9228ec018e98 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1095,7 +1095,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1095static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 1095static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1096{ 1096{
1097 struct drm_i915_private *dev_priv = dev->dev_private; 1097 struct drm_i915_private *dev_priv = dev->dev_private;
1098 u32 reg = PIPEDSL(pipe); 1098 i915_reg_t reg = PIPEDSL(pipe);
1099 u32 line1, line2; 1099 u32 line1, line2;
1100 u32 line_mask; 1100 u32 line_mask;
1101 1101
@@ -1135,7 +1135,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1135 enum pipe pipe = crtc->pipe; 1135 enum pipe pipe = crtc->pipe;
1136 1136
1137 if (INTEL_INFO(dev)->gen >= 4) { 1137 if (INTEL_INFO(dev)->gen >= 4) {
1138 int reg = PIPECONF(cpu_transcoder); 1138 i915_reg_t reg = PIPECONF(cpu_transcoder);
1139 1139
1140 /* Wait for the Pipe State to go off */ 1140 /* Wait for the Pipe State to go off */
1141 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1141 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1285,7 +1285,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1285 enum pipe pipe) 1285 enum pipe pipe)
1286{ 1286{
1287 struct drm_device *dev = dev_priv->dev; 1287 struct drm_device *dev = dev_priv->dev;
1288 int pp_reg; 1288 i915_reg_t pp_reg;
1289 u32 val; 1289 u32 val;
1290 enum pipe panel_pipe = PIPE_A; 1290 enum pipe panel_pipe = PIPE_A;
1291 bool locked = true; 1291 bool locked = true;
@@ -1480,8 +1480,7 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1480 return false; 1480 return false;
1481 1481
1482 if (HAS_PCH_CPT(dev_priv->dev)) { 1482 if (HAS_PCH_CPT(dev_priv->dev)) {
1483 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1483 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1484 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1485 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1484 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1486 return false; 1485 return false;
1487 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1486 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
@@ -1545,12 +1544,13 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1545} 1544}
1546 1545
1547static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1546static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1548 enum pipe pipe, int reg, u32 port_sel) 1547 enum pipe pipe, i915_reg_t reg,
1548 u32 port_sel)
1549{ 1549{
1550 u32 val = I915_READ(reg); 1550 u32 val = I915_READ(reg);
1551 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1551 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1552 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1552 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1553 reg, pipe_name(pipe)); 1553 i915_mmio_reg_offset(reg), pipe_name(pipe));
1554 1554
1555 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1555 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1556 && (val & DP_PIPEB_SELECT), 1556 && (val & DP_PIPEB_SELECT),
@@ -1558,12 +1558,12 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1558} 1558}
1559 1559
1560static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1560static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1561 enum pipe pipe, int reg) 1561 enum pipe pipe, i915_reg_t reg)
1562{ 1562{
1563 u32 val = I915_READ(reg); 1563 u32 val = I915_READ(reg);
1564 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1564 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1565 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1565 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1566 reg, pipe_name(pipe)); 1566 i915_mmio_reg_offset(reg), pipe_name(pipe));
1567 1567
1568 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1568 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1569 && (val & SDVO_PIPE_B_SELECT), 1569 && (val & SDVO_PIPE_B_SELECT),
@@ -1599,7 +1599,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
1599{ 1599{
1600 struct drm_device *dev = crtc->base.dev; 1600 struct drm_device *dev = crtc->base.dev;
1601 struct drm_i915_private *dev_priv = dev->dev_private; 1601 struct drm_i915_private *dev_priv = dev->dev_private;
1602 int reg = DPLL(crtc->pipe); 1602 i915_reg_t reg = DPLL(crtc->pipe);
1603 u32 dpll = pipe_config->dpll_hw_state.dpll; 1603 u32 dpll = pipe_config->dpll_hw_state.dpll;
1604 1604
1605 assert_pipe_disabled(dev_priv, crtc->pipe); 1605 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1688,7 +1688,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1688{ 1688{
1689 struct drm_device *dev = crtc->base.dev; 1689 struct drm_device *dev = crtc->base.dev;
1690 struct drm_i915_private *dev_priv = dev->dev_private; 1690 struct drm_i915_private *dev_priv = dev->dev_private;
1691 int reg = DPLL(crtc->pipe); 1691 i915_reg_t reg = DPLL(crtc->pipe);
1692 u32 dpll = crtc->config->dpll_hw_state.dpll; 1692 u32 dpll = crtc->config->dpll_hw_state.dpll;
1693 1693
1694 assert_pipe_disabled(dev_priv, crtc->pipe); 1694 assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1837,7 +1837,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1837 unsigned int expected_mask) 1837 unsigned int expected_mask)
1838{ 1838{
1839 u32 port_mask; 1839 u32 port_mask;
1840 int dpll_reg; 1840 i915_reg_t dpll_reg;
1841 1841
1842 switch (dport->port) { 1842 switch (dport->port) {
1843 case PORT_B: 1843 case PORT_B:
@@ -1962,7 +1962,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1962 struct drm_device *dev = dev_priv->dev; 1962 struct drm_device *dev = dev_priv->dev;
1963 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1963 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1965 uint32_t reg, val, pipeconf_val; 1965 i915_reg_t reg;
1966 uint32_t val, pipeconf_val;
1966 1967
1967 /* PCH only available on ILK+ */ 1968 /* PCH only available on ILK+ */
1968 BUG_ON(!HAS_PCH_SPLIT(dev)); 1969 BUG_ON(!HAS_PCH_SPLIT(dev));
@@ -2051,7 +2052,8 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2051 enum pipe pipe) 2052 enum pipe pipe)
2052{ 2053{
2053 struct drm_device *dev = dev_priv->dev; 2054 struct drm_device *dev = dev_priv->dev;
2054 uint32_t reg, val; 2055 i915_reg_t reg;
2056 uint32_t val;
2055 2057
2056 /* FDI relies on the transcoder */ 2058 /* FDI relies on the transcoder */
2057 assert_fdi_tx_disabled(dev_priv, pipe); 2059 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -2068,7 +2070,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2068 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 2070 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2069 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 2071 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2070 2072
2071 if (!HAS_PCH_IBX(dev)) { 2073 if (HAS_PCH_CPT(dev)) {
2072 /* Workaround: Clear the timing override chicken bit again. */ 2074 /* Workaround: Clear the timing override chicken bit again. */
2073 reg = TRANS_CHICKEN2(pipe); 2075 reg = TRANS_CHICKEN2(pipe);
2074 val = I915_READ(reg); 2076 val = I915_READ(reg);
@@ -2106,10 +2108,9 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2106 struct drm_device *dev = crtc->base.dev; 2108 struct drm_device *dev = crtc->base.dev;
2107 struct drm_i915_private *dev_priv = dev->dev_private; 2109 struct drm_i915_private *dev_priv = dev->dev_private;
2108 enum pipe pipe = crtc->pipe; 2110 enum pipe pipe = crtc->pipe;
2109 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2111 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2110 pipe);
2111 enum pipe pch_transcoder; 2112 enum pipe pch_transcoder;
2112 int reg; 2113 i915_reg_t reg;
2113 u32 val; 2114 u32 val;
2114 2115
2115 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 2116 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
@@ -2170,7 +2171,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
2170 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2171 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2171 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2172 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2172 enum pipe pipe = crtc->pipe; 2173 enum pipe pipe = crtc->pipe;
2173 int reg; 2174 i915_reg_t reg;
2174 u32 val; 2175 u32 val;
2175 2176
2176 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2177 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
@@ -2269,20 +2270,20 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
2269 fb_format_modifier, 0)); 2270 fb_format_modifier, 0));
2270} 2271}
2271 2272
2272static int 2273static void
2273intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, 2274intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2274 const struct drm_plane_state *plane_state) 2275 const struct drm_plane_state *plane_state)
2275{ 2276{
2276 struct intel_rotation_info *info = &view->rotation_info; 2277 struct intel_rotation_info *info = &view->params.rotation_info;
2277 unsigned int tile_height, tile_pitch; 2278 unsigned int tile_height, tile_pitch;
2278 2279
2279 *view = i915_ggtt_view_normal; 2280 *view = i915_ggtt_view_normal;
2280 2281
2281 if (!plane_state) 2282 if (!plane_state)
2282 return 0; 2283 return;
2283 2284
2284 if (!intel_rotation_90_or_270(plane_state->rotation)) 2285 if (!intel_rotation_90_or_270(plane_state->rotation))
2285 return 0; 2286 return;
2286 2287
2287 *view = i915_ggtt_view_rotated; 2288 *view = i915_ggtt_view_rotated;
2288 2289
@@ -2309,8 +2310,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2309 info->size_uv = info->width_pages_uv * info->height_pages_uv * 2310 info->size_uv = info->width_pages_uv * info->height_pages_uv *
2310 PAGE_SIZE; 2311 PAGE_SIZE;
2311 } 2312 }
2312
2313 return 0;
2314} 2313}
2315 2314
2316static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) 2315static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
@@ -2329,9 +2328,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2329int 2328int
2330intel_pin_and_fence_fb_obj(struct drm_plane *plane, 2329intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2331 struct drm_framebuffer *fb, 2330 struct drm_framebuffer *fb,
2332 const struct drm_plane_state *plane_state, 2331 const struct drm_plane_state *plane_state)
2333 struct intel_engine_cs *pipelined,
2334 struct drm_i915_gem_request **pipelined_request)
2335{ 2332{
2336 struct drm_device *dev = fb->dev; 2333 struct drm_device *dev = fb->dev;
2337 struct drm_i915_private *dev_priv = dev->dev_private; 2334 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2366,9 +2363,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2366 return -EINVAL; 2363 return -EINVAL;
2367 } 2364 }
2368 2365
2369 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2366 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2370 if (ret)
2371 return ret;
2372 2367
2373 /* Note that the w/a also requires 64 PTE of padding following the 2368 /* Note that the w/a also requires 64 PTE of padding following the
2374 * bo. We currently fill all unused PTE with the shadow page and so 2369 * bo. We currently fill all unused PTE with the shadow page and so
@@ -2387,11 +2382,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2387 */ 2382 */
2388 intel_runtime_pm_get(dev_priv); 2383 intel_runtime_pm_get(dev_priv);
2389 2384
2390 dev_priv->mm.interruptible = false; 2385 ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2391 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, 2386 &view);
2392 pipelined_request, &view);
2393 if (ret) 2387 if (ret)
2394 goto err_interruptible; 2388 goto err_pm;
2395 2389
2396 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2390 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2397 * fence, whereas 965+ only requires a fence if using 2391 * fence, whereas 965+ only requires a fence if using
@@ -2417,14 +2411,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2417 i915_gem_object_pin_fence(obj); 2411 i915_gem_object_pin_fence(obj);
2418 } 2412 }
2419 2413
2420 dev_priv->mm.interruptible = true;
2421 intel_runtime_pm_put(dev_priv); 2414 intel_runtime_pm_put(dev_priv);
2422 return 0; 2415 return 0;
2423 2416
2424err_unpin: 2417err_unpin:
2425 i915_gem_object_unpin_from_display_plane(obj, &view); 2418 i915_gem_object_unpin_from_display_plane(obj, &view);
2426err_interruptible: 2419err_pm:
2427 dev_priv->mm.interruptible = true;
2428 intel_runtime_pm_put(dev_priv); 2420 intel_runtime_pm_put(dev_priv);
2429 return ret; 2421 return ret;
2430} 2422}
@@ -2434,12 +2426,10 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2434{ 2426{
2435 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2427 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2436 struct i915_ggtt_view view; 2428 struct i915_ggtt_view view;
2437 int ret;
2438 2429
2439 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); 2430 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2440 2431
2441 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2432 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2442 WARN_ONCE(ret, "Couldn't get view from plane state!");
2443 2433
2444 if (view.type == I915_GGTT_VIEW_NORMAL) 2434 if (view.type == I915_GGTT_VIEW_NORMAL)
2445 i915_gem_object_unpin_fence(obj); 2435 i915_gem_object_unpin_fence(obj);
@@ -2680,7 +2670,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2680 int plane = intel_crtc->plane; 2670 int plane = intel_crtc->plane;
2681 unsigned long linear_offset; 2671 unsigned long linear_offset;
2682 u32 dspcntr; 2672 u32 dspcntr;
2683 u32 reg = DSPCNTR(plane); 2673 i915_reg_t reg = DSPCNTR(plane);
2684 int pixel_size; 2674 int pixel_size;
2685 2675
2686 if (!visible || !fb) { 2676 if (!visible || !fb) {
@@ -2810,7 +2800,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2810 int plane = intel_crtc->plane; 2800 int plane = intel_crtc->plane;
2811 unsigned long linear_offset; 2801 unsigned long linear_offset;
2812 u32 dspcntr; 2802 u32 dspcntr;
2813 u32 reg = DSPCNTR(plane); 2803 i915_reg_t reg = DSPCNTR(plane);
2814 int pixel_size; 2804 int pixel_size;
2815 2805
2816 if (!visible || !fb) { 2806 if (!visible || !fb) {
@@ -2935,30 +2925,32 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2935 } 2925 }
2936} 2926}
2937 2927
2938unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 2928u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2939 struct drm_i915_gem_object *obj, 2929 struct drm_i915_gem_object *obj,
2940 unsigned int plane) 2930 unsigned int plane)
2941{ 2931{
2942 const struct i915_ggtt_view *view = &i915_ggtt_view_normal; 2932 struct i915_ggtt_view view;
2943 struct i915_vma *vma; 2933 struct i915_vma *vma;
2944 unsigned char *offset; 2934 u64 offset;
2945 2935
2946 if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) 2936 intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
2947 view = &i915_ggtt_view_rotated; 2937 intel_plane->base.state);
2948 2938
2949 vma = i915_gem_obj_to_ggtt_view(obj, view); 2939 vma = i915_gem_obj_to_ggtt_view(obj, &view);
2950 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", 2940 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2951 view->type)) 2941 view.type))
2952 return -1; 2942 return -1;
2953 2943
2954 offset = (unsigned char *)vma->node.start; 2944 offset = vma->node.start;
2955 2945
2956 if (plane == 1) { 2946 if (plane == 1) {
2957 offset += vma->ggtt_view.rotation_info.uv_start_page * 2947 offset += vma->ggtt_view.params.rotation_info.uv_start_page *
2958 PAGE_SIZE; 2948 PAGE_SIZE;
2959 } 2949 }
2960 2950
2961 return (unsigned long)offset; 2951 WARN_ON(upper_32_bits(offset));
2952
2953 return lower_32_bits(offset);
2962} 2954}
2963 2955
2964static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2956static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3084,7 +3076,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
3084 u32 tile_height, plane_offset, plane_size; 3076 u32 tile_height, plane_offset, plane_size;
3085 unsigned int rotation; 3077 unsigned int rotation;
3086 int x_offset, y_offset; 3078 int x_offset, y_offset;
3087 unsigned long surf_addr; 3079 u32 surf_addr;
3088 struct intel_crtc_state *crtc_state = intel_crtc->config; 3080 struct intel_crtc_state *crtc_state = intel_crtc->config;
3089 struct intel_plane_state *plane_state; 3081 struct intel_plane_state *plane_state;
3090 int src_x = 0, src_y = 0, src_w = 0, src_h = 0; 3082 int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
@@ -3212,10 +3204,9 @@ static void intel_update_primary_planes(struct drm_device *dev)
3212 struct intel_plane_state *plane_state; 3204 struct intel_plane_state *plane_state;
3213 3205
3214 drm_modeset_lock_crtc(crtc, &plane->base); 3206 drm_modeset_lock_crtc(crtc, &plane->base);
3215
3216 plane_state = to_intel_plane_state(plane->base.state); 3207 plane_state = to_intel_plane_state(plane->base.state);
3217 3208
3218 if (plane_state->base.fb) 3209 if (crtc->state->active && plane_state->base.fb)
3219 plane->commit_plane(&plane->base, plane_state); 3210 plane->commit_plane(&plane->base, plane_state);
3220 3211
3221 drm_modeset_unlock_crtc(crtc); 3212 drm_modeset_unlock_crtc(crtc);
@@ -3291,32 +3282,6 @@ void intel_finish_reset(struct drm_device *dev)
3291 drm_modeset_unlock_all(dev); 3282 drm_modeset_unlock_all(dev);
3292} 3283}
3293 3284
3294static void
3295intel_finish_fb(struct drm_framebuffer *old_fb)
3296{
3297 struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3298 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3299 bool was_interruptible = dev_priv->mm.interruptible;
3300 int ret;
3301
3302 /* Big Hammer, we also need to ensure that any pending
3303 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
3304 * current scanout is retired before unpinning the old
3305 * framebuffer. Note that we rely on userspace rendering
3306 * into the buffer attached to the pipe they are waiting
3307 * on. If not, userspace generates a GPU hang with IPEHR
3308 * point to the MI_WAIT_FOR_EVENT.
3309 *
3310 * This should only fail upon a hung GPU, in which case we
3311 * can safely continue.
3312 */
3313 dev_priv->mm.interruptible = false;
3314 ret = i915_gem_object_wait_rendering(obj, true);
3315 dev_priv->mm.interruptible = was_interruptible;
3316
3317 WARN_ON(ret);
3318}
3319
3320static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3285static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3321{ 3286{
3322 struct drm_device *dev = crtc->dev; 3287 struct drm_device *dev = crtc->dev;
@@ -3386,7 +3351,8 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
3386 struct drm_i915_private *dev_priv = dev->dev_private; 3351 struct drm_i915_private *dev_priv = dev->dev_private;
3387 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3388 int pipe = intel_crtc->pipe; 3353 int pipe = intel_crtc->pipe;
3389 u32 reg, temp; 3354 i915_reg_t reg;
3355 u32 temp;
3390 3356
3391 /* enable normal train */ 3357 /* enable normal train */
3392 reg = FDI_TX_CTL(pipe); 3358 reg = FDI_TX_CTL(pipe);
@@ -3428,7 +3394,8 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3428 struct drm_i915_private *dev_priv = dev->dev_private; 3394 struct drm_i915_private *dev_priv = dev->dev_private;
3429 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3395 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3430 int pipe = intel_crtc->pipe; 3396 int pipe = intel_crtc->pipe;
3431 u32 reg, temp, tries; 3397 i915_reg_t reg;
3398 u32 temp, tries;
3432 3399
3433 /* FDI needs bits from pipe first */ 3400 /* FDI needs bits from pipe first */
3434 assert_pipe_enabled(dev_priv, pipe); 3401 assert_pipe_enabled(dev_priv, pipe);
@@ -3528,7 +3495,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
3528 struct drm_i915_private *dev_priv = dev->dev_private; 3495 struct drm_i915_private *dev_priv = dev->dev_private;
3529 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3496 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3530 int pipe = intel_crtc->pipe; 3497 int pipe = intel_crtc->pipe;
3531 u32 reg, temp, i, retry; 3498 i915_reg_t reg;
3499 u32 temp, i, retry;
3532 3500
3533 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3501 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3534 for train result */ 3502 for train result */
@@ -3660,7 +3628,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3660 struct drm_i915_private *dev_priv = dev->dev_private; 3628 struct drm_i915_private *dev_priv = dev->dev_private;
3661 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3662 int pipe = intel_crtc->pipe; 3630 int pipe = intel_crtc->pipe;
3663 u32 reg, temp, i, j; 3631 i915_reg_t reg;
3632 u32 temp, i, j;
3664 3633
3665 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3634 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3666 for train result */ 3635 for train result */
@@ -3777,8 +3746,8 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3777 struct drm_device *dev = intel_crtc->base.dev; 3746 struct drm_device *dev = intel_crtc->base.dev;
3778 struct drm_i915_private *dev_priv = dev->dev_private; 3747 struct drm_i915_private *dev_priv = dev->dev_private;
3779 int pipe = intel_crtc->pipe; 3748 int pipe = intel_crtc->pipe;
3780 u32 reg, temp; 3749 i915_reg_t reg;
3781 3750 u32 temp;
3782 3751
3783 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 3752 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3784 reg = FDI_RX_CTL(pipe); 3753 reg = FDI_RX_CTL(pipe);
@@ -3814,7 +3783,8 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3814 struct drm_device *dev = intel_crtc->base.dev; 3783 struct drm_device *dev = intel_crtc->base.dev;
3815 struct drm_i915_private *dev_priv = dev->dev_private; 3784 struct drm_i915_private *dev_priv = dev->dev_private;
3816 int pipe = intel_crtc->pipe; 3785 int pipe = intel_crtc->pipe;
3817 u32 reg, temp; 3786 i915_reg_t reg;
3787 u32 temp;
3818 3788
3819 /* Switch from PCDclk to Rawclk */ 3789 /* Switch from PCDclk to Rawclk */
3820 reg = FDI_RX_CTL(pipe); 3790 reg = FDI_RX_CTL(pipe);
@@ -3844,7 +3814,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
3844 struct drm_i915_private *dev_priv = dev->dev_private; 3814 struct drm_i915_private *dev_priv = dev->dev_private;
3845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3846 int pipe = intel_crtc->pipe; 3816 int pipe = intel_crtc->pipe;
3847 u32 reg, temp; 3817 i915_reg_t reg;
3818 u32 temp;
3848 3819
3849 /* disable CPU FDI tx and PCH FDI rx */ 3820 /* disable CPU FDI tx and PCH FDI rx */
3850 reg = FDI_TX_CTL(pipe); 3821 reg = FDI_TX_CTL(pipe);
@@ -3937,15 +3908,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3937 work->pending_flip_obj); 3908 work->pending_flip_obj);
3938} 3909}
3939 3910
3940void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3911static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3941{ 3912{
3942 struct drm_device *dev = crtc->dev; 3913 struct drm_device *dev = crtc->dev;
3943 struct drm_i915_private *dev_priv = dev->dev_private; 3914 struct drm_i915_private *dev_priv = dev->dev_private;
3915 long ret;
3944 3916
3945 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3917 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3946 if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, 3918
3947 !intel_crtc_has_pending_flip(crtc), 3919 ret = wait_event_interruptible_timeout(
3948 60*HZ) == 0)) { 3920 dev_priv->pending_flip_queue,
3921 !intel_crtc_has_pending_flip(crtc),
3922 60*HZ);
3923
3924 if (ret < 0)
3925 return ret;
3926
3927 if (ret == 0) {
3949 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3928 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3950 3929
3951 spin_lock_irq(&dev->event_lock); 3930 spin_lock_irq(&dev->event_lock);
@@ -3956,11 +3935,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3956 spin_unlock_irq(&dev->event_lock); 3935 spin_unlock_irq(&dev->event_lock);
3957 } 3936 }
3958 3937
3959 if (crtc->primary->fb) { 3938 return 0;
3960 mutex_lock(&dev->struct_mutex);
3961 intel_finish_fb(crtc->primary->fb);
3962 mutex_unlock(&dev->struct_mutex);
3963 }
3964} 3939}
3965 3940
3966/* Program iCLKIP clock to the desired frequency */ 3941/* Program iCLKIP clock to the desired frequency */
@@ -4120,6 +4095,22 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4120 } 4095 }
4121} 4096}
4122 4097
4098/* Return which DP Port should be selected for Transcoder DP control */
4099static enum port
4100intel_trans_dp_port_sel(struct drm_crtc *crtc)
4101{
4102 struct drm_device *dev = crtc->dev;
4103 struct intel_encoder *encoder;
4104
4105 for_each_encoder_on_crtc(dev, crtc, encoder) {
4106 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4107 encoder->type == INTEL_OUTPUT_EDP)
4108 return enc_to_dig_port(&encoder->base)->port;
4109 }
4110
4111 return -1;
4112}
4113
4123/* 4114/*
4124 * Enable PCH resources required for PCH ports: 4115 * Enable PCH resources required for PCH ports:
4125 * - PCH PLLs 4116 * - PCH PLLs
@@ -4134,7 +4125,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4134 struct drm_i915_private *dev_priv = dev->dev_private; 4125 struct drm_i915_private *dev_priv = dev->dev_private;
4135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4126 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4136 int pipe = intel_crtc->pipe; 4127 int pipe = intel_crtc->pipe;
4137 u32 reg, temp; 4128 u32 temp;
4138 4129
4139 assert_pch_transcoder_disabled(dev_priv, pipe); 4130 assert_pch_transcoder_disabled(dev_priv, pipe);
4140 4131
@@ -4181,8 +4172,10 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4181 4172
4182 /* For PCH DP, enable TRANS_DP_CTL */ 4173 /* For PCH DP, enable TRANS_DP_CTL */
4183 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { 4174 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4175 const struct drm_display_mode *adjusted_mode =
4176 &intel_crtc->config->base.adjusted_mode;
4184 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4177 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4185 reg = TRANS_DP_CTL(pipe); 4178 i915_reg_t reg = TRANS_DP_CTL(pipe);
4186 temp = I915_READ(reg); 4179 temp = I915_READ(reg);
4187 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4180 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4188 TRANS_DP_SYNC_MASK | 4181 TRANS_DP_SYNC_MASK |
@@ -4190,19 +4183,19 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4190 temp |= TRANS_DP_OUTPUT_ENABLE; 4183 temp |= TRANS_DP_OUTPUT_ENABLE;
4191 temp |= bpc << 9; /* same format but at 11:9 */ 4184 temp |= bpc << 9; /* same format but at 11:9 */
4192 4185
4193 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 4186 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4194 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4187 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4195 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 4188 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4196 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4189 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4197 4190
4198 switch (intel_trans_dp_port_sel(crtc)) { 4191 switch (intel_trans_dp_port_sel(crtc)) {
4199 case PCH_DP_B: 4192 case PORT_B:
4200 temp |= TRANS_DP_PORT_SEL_B; 4193 temp |= TRANS_DP_PORT_SEL_B;
4201 break; 4194 break;
4202 case PCH_DP_C: 4195 case PORT_C:
4203 temp |= TRANS_DP_PORT_SEL_C; 4196 temp |= TRANS_DP_PORT_SEL_C;
4204 break; 4197 break;
4205 case PCH_DP_D: 4198 case PORT_D:
4206 temp |= TRANS_DP_PORT_SEL_D; 4199 temp |= TRANS_DP_PORT_SEL_D;
4207 break; 4200 break;
4208 default: 4201 default:
@@ -4342,7 +4335,7 @@ static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4342static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4335static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4343{ 4336{
4344 struct drm_i915_private *dev_priv = dev->dev_private; 4337 struct drm_i915_private *dev_priv = dev->dev_private;
4345 int dslreg = PIPEDSL(pipe); 4338 i915_reg_t dslreg = PIPEDSL(pipe);
4346 u32 temp; 4339 u32 temp;
4347 4340
4348 temp = I915_READ(dslreg); 4341 temp = I915_READ(dslreg);
@@ -4652,7 +4645,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4652 } 4645 }
4653 4646
4654 for (i = 0; i < 256; i++) { 4647 for (i = 0; i < 256; i++) {
4655 u32 palreg; 4648 i915_reg_t palreg;
4656 4649
4657 if (HAS_GMCH_DISPLAY(dev)) 4650 if (HAS_GMCH_DISPLAY(dev))
4658 palreg = PALETTE(pipe, i); 4651 palreg = PALETTE(pipe, i);
@@ -4731,9 +4724,9 @@ intel_post_enable_primary(struct drm_crtc *crtc)
4731 if (IS_GEN2(dev)) 4724 if (IS_GEN2(dev))
4732 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4725 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4733 4726
4734 /* Underruns don't raise interrupts, so check manually. */ 4727 /* Underruns don't always raise interrupts, so check manually. */
4735 if (HAS_GMCH_DISPLAY(dev)) 4728 intel_check_cpu_fifo_underruns(dev_priv);
4736 i9xx_check_fifo_underruns(dev_priv); 4729 intel_check_pch_fifo_underruns(dev_priv);
4737} 4730}
4738 4731
4739/** 4732/**
@@ -4792,7 +4785,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
4792 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4785 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4793 struct drm_device *dev = crtc->base.dev; 4786 struct drm_device *dev = crtc->base.dev;
4794 struct drm_i915_private *dev_priv = dev->dev_private; 4787 struct drm_i915_private *dev_priv = dev->dev_private;
4795 struct drm_plane *plane;
4796 4788
4797 if (atomic->wait_vblank) 4789 if (atomic->wait_vblank)
4798 intel_wait_for_vblank(dev, crtc->pipe); 4790 intel_wait_for_vblank(dev, crtc->pipe);
@@ -4811,10 +4803,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
4811 if (atomic->post_enable_primary) 4803 if (atomic->post_enable_primary)
4812 intel_post_enable_primary(&crtc->base); 4804 intel_post_enable_primary(&crtc->base);
4813 4805
4814 drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
4815 intel_update_sprite_watermarks(plane, &crtc->base,
4816 0, 0, 0, false, false);
4817
4818 memset(atomic, 0, sizeof(*atomic)); 4806 memset(atomic, 0, sizeof(*atomic));
4819} 4807}
4820 4808
@@ -4823,20 +4811,6 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
4823 struct drm_device *dev = crtc->base.dev; 4811 struct drm_device *dev = crtc->base.dev;
4824 struct drm_i915_private *dev_priv = dev->dev_private; 4812 struct drm_i915_private *dev_priv = dev->dev_private;
4825 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4813 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4826 struct drm_plane *p;
4827
4828 /* Track fb's for any planes being disabled */
4829 drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
4830 struct intel_plane *plane = to_intel_plane(p);
4831
4832 mutex_lock(&dev->struct_mutex);
4833 i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
4834 plane->frontbuffer_bit);
4835 mutex_unlock(&dev->struct_mutex);
4836 }
4837
4838 if (atomic->wait_for_flips)
4839 intel_crtc_wait_for_pending_flips(&crtc->base);
4840 4814
4841 if (atomic->disable_fbc) 4815 if (atomic->disable_fbc)
4842 intel_fbc_disable_crtc(crtc); 4816 intel_fbc_disable_crtc(crtc);
@@ -4885,6 +4859,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4885 return; 4859 return;
4886 4860
4887 if (intel_crtc->config->has_pch_encoder) 4861 if (intel_crtc->config->has_pch_encoder)
4862 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4863
4864 if (intel_crtc->config->has_pch_encoder)
4888 intel_prepare_shared_dpll(intel_crtc); 4865 intel_prepare_shared_dpll(intel_crtc);
4889 4866
4890 if (intel_crtc->config->has_dp_encoder) 4867 if (intel_crtc->config->has_dp_encoder)
@@ -4902,7 +4879,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4902 intel_crtc->active = true; 4879 intel_crtc->active = true;
4903 4880
4904 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4881 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4905 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4906 4882
4907 for_each_encoder_on_crtc(dev, crtc, encoder) 4883 for_each_encoder_on_crtc(dev, crtc, encoder)
4908 if (encoder->pre_enable) 4884 if (encoder->pre_enable)
@@ -4940,6 +4916,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4940 4916
4941 if (HAS_PCH_CPT(dev)) 4917 if (HAS_PCH_CPT(dev))
4942 cpt_verify_modeset(dev, intel_crtc->pipe); 4918 cpt_verify_modeset(dev, intel_crtc->pipe);
4919
4920 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4921 if (intel_crtc->config->has_pch_encoder)
4922 intel_wait_for_vblank(dev, pipe);
4923 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4943} 4924}
4944 4925
4945/* IPS only exists on ULT machines and is tied to pipe A. */ 4926/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4962,6 +4943,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4962 if (WARN_ON(intel_crtc->active)) 4943 if (WARN_ON(intel_crtc->active))
4963 return; 4944 return;
4964 4945
4946 if (intel_crtc->config->has_pch_encoder)
4947 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4948 false);
4949
4965 if (intel_crtc_to_shared_dpll(intel_crtc)) 4950 if (intel_crtc_to_shared_dpll(intel_crtc))
4966 intel_enable_shared_dpll(intel_crtc); 4951 intel_enable_shared_dpll(intel_crtc);
4967 4952
@@ -4994,11 +4979,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4994 encoder->pre_enable(encoder); 4979 encoder->pre_enable(encoder);
4995 } 4980 }
4996 4981
4997 if (intel_crtc->config->has_pch_encoder) { 4982 if (intel_crtc->config->has_pch_encoder)
4998 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4999 true);
5000 dev_priv->display.fdi_link_train(crtc); 4983 dev_priv->display.fdi_link_train(crtc);
5001 }
5002 4984
5003 if (!is_dsi) 4985 if (!is_dsi)
5004 intel_ddi_enable_pipe_clock(intel_crtc); 4986 intel_ddi_enable_pipe_clock(intel_crtc);
@@ -5035,6 +5017,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
5035 intel_opregion_notify_encoder(encoder, true); 5017 intel_opregion_notify_encoder(encoder, true);
5036 } 5018 }
5037 5019
5020 if (intel_crtc->config->has_pch_encoder)
5021 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5022 true);
5023
5038 /* If we change the relative order between pipe/planes enabling, we need 5024 /* If we change the relative order between pipe/planes enabling, we need
5039 * to change the workaround. */ 5025 * to change the workaround. */
5040 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 5026 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
@@ -5066,7 +5052,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5066 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5052 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5067 struct intel_encoder *encoder; 5053 struct intel_encoder *encoder;
5068 int pipe = intel_crtc->pipe; 5054 int pipe = intel_crtc->pipe;
5069 u32 reg, temp; 5055
5056 if (intel_crtc->config->has_pch_encoder)
5057 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5070 5058
5071 for_each_encoder_on_crtc(dev, crtc, encoder) 5059 for_each_encoder_on_crtc(dev, crtc, encoder)
5072 encoder->disable(encoder); 5060 encoder->disable(encoder);
@@ -5074,9 +5062,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5074 drm_crtc_vblank_off(crtc); 5062 drm_crtc_vblank_off(crtc);
5075 assert_vblank_disabled(crtc); 5063 assert_vblank_disabled(crtc);
5076 5064
5077 if (intel_crtc->config->has_pch_encoder)
5078 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5079
5080 intel_disable_pipe(intel_crtc); 5065 intel_disable_pipe(intel_crtc);
5081 5066
5082 ironlake_pfit_disable(intel_crtc, false); 5067 ironlake_pfit_disable(intel_crtc, false);
@@ -5092,6 +5077,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5092 ironlake_disable_pch_transcoder(dev_priv, pipe); 5077 ironlake_disable_pch_transcoder(dev_priv, pipe);
5093 5078
5094 if (HAS_PCH_CPT(dev)) { 5079 if (HAS_PCH_CPT(dev)) {
5080 i915_reg_t reg;
5081 u32 temp;
5082
5095 /* disable TRANS_DP_CTL */ 5083 /* disable TRANS_DP_CTL */
5096 reg = TRANS_DP_CTL(pipe); 5084 reg = TRANS_DP_CTL(pipe);
5097 temp = I915_READ(reg); 5085 temp = I915_READ(reg);
@@ -5108,6 +5096,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5108 5096
5109 ironlake_fdi_pll_disable(intel_crtc); 5097 ironlake_fdi_pll_disable(intel_crtc);
5110 } 5098 }
5099
5100 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5111} 5101}
5112 5102
5113static void haswell_crtc_disable(struct drm_crtc *crtc) 5103static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5119,6 +5109,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5119 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5109 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5120 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 5110 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5121 5111
5112 if (intel_crtc->config->has_pch_encoder)
5113 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5114 false);
5115
5122 for_each_encoder_on_crtc(dev, crtc, encoder) { 5116 for_each_encoder_on_crtc(dev, crtc, encoder) {
5123 intel_opregion_notify_encoder(encoder, false); 5117 intel_opregion_notify_encoder(encoder, false);
5124 encoder->disable(encoder); 5118 encoder->disable(encoder);
@@ -5127,9 +5121,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5127 drm_crtc_vblank_off(crtc); 5121 drm_crtc_vblank_off(crtc);
5128 assert_vblank_disabled(crtc); 5122 assert_vblank_disabled(crtc);
5129 5123
5130 if (intel_crtc->config->has_pch_encoder)
5131 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5132 false);
5133 intel_disable_pipe(intel_crtc); 5124 intel_disable_pipe(intel_crtc);
5134 5125
5135 if (intel_crtc->config->dp_encoder_is_mst) 5126 if (intel_crtc->config->dp_encoder_is_mst)
@@ -5154,6 +5145,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5154 for_each_encoder_on_crtc(dev, crtc, encoder) 5145 for_each_encoder_on_crtc(dev, crtc, encoder)
5155 if (encoder->post_disable) 5146 if (encoder->post_disable)
5156 encoder->post_disable(encoder); 5147 encoder->post_disable(encoder);
5148
5149 if (intel_crtc->config->has_pch_encoder)
5150 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5151 true);
5157} 5152}
5158 5153
5159static void i9xx_pfit_enable(struct intel_crtc *crtc) 5154static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5184,21 +5179,41 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
5184{ 5179{
5185 switch (port) { 5180 switch (port) {
5186 case PORT_A: 5181 case PORT_A:
5187 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 5182 return POWER_DOMAIN_PORT_DDI_A_LANES;
5188 case PORT_B: 5183 case PORT_B:
5189 return POWER_DOMAIN_PORT_DDI_B_4_LANES; 5184 return POWER_DOMAIN_PORT_DDI_B_LANES;
5190 case PORT_C: 5185 case PORT_C:
5191 return POWER_DOMAIN_PORT_DDI_C_4_LANES; 5186 return POWER_DOMAIN_PORT_DDI_C_LANES;
5192 case PORT_D: 5187 case PORT_D:
5193 return POWER_DOMAIN_PORT_DDI_D_4_LANES; 5188 return POWER_DOMAIN_PORT_DDI_D_LANES;
5194 case PORT_E: 5189 case PORT_E:
5195 return POWER_DOMAIN_PORT_DDI_E_2_LANES; 5190 return POWER_DOMAIN_PORT_DDI_E_LANES;
5196 default: 5191 default:
5197 WARN_ON_ONCE(1); 5192 MISSING_CASE(port);
5198 return POWER_DOMAIN_PORT_OTHER; 5193 return POWER_DOMAIN_PORT_OTHER;
5199 } 5194 }
5200} 5195}
5201 5196
5197static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5198{
5199 switch (port) {
5200 case PORT_A:
5201 return POWER_DOMAIN_AUX_A;
5202 case PORT_B:
5203 return POWER_DOMAIN_AUX_B;
5204 case PORT_C:
5205 return POWER_DOMAIN_AUX_C;
5206 case PORT_D:
5207 return POWER_DOMAIN_AUX_D;
5208 case PORT_E:
5209 /* FIXME: Check VBT for actual wiring of PORT E */
5210 return POWER_DOMAIN_AUX_D;
5211 default:
5212 MISSING_CASE(port);
5213 return POWER_DOMAIN_AUX_A;
5214 }
5215}
5216
5202#define for_each_power_domain(domain, mask) \ 5217#define for_each_power_domain(domain, mask) \
5203 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 5218 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
5204 if ((1 << (domain)) & (mask)) 5219 if ((1 << (domain)) & (mask))
@@ -5230,6 +5245,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5230 } 5245 }
5231} 5246}
5232 5247
5248enum intel_display_power_domain
5249intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5250{
5251 struct drm_device *dev = intel_encoder->base.dev;
5252 struct intel_digital_port *intel_dig_port;
5253
5254 switch (intel_encoder->type) {
5255 case INTEL_OUTPUT_UNKNOWN:
5256 case INTEL_OUTPUT_HDMI:
5257 /*
5258 * Only DDI platforms should ever use these output types.
5259 * We can get here after the HDMI detect code has already set
5260 * the type of the shared encoder. Since we can't be sure
5261 * what's the status of the given connectors, play safe and
5262 * run the DP detection too.
5263 */
5264 WARN_ON_ONCE(!HAS_DDI(dev));
5265 case INTEL_OUTPUT_DISPLAYPORT:
5266 case INTEL_OUTPUT_EDP:
5267 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5268 return port_to_aux_power_domain(intel_dig_port->port);
5269 case INTEL_OUTPUT_DP_MST:
5270 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5271 return port_to_aux_power_domain(intel_dig_port->port);
5272 default:
5273 MISSING_CASE(intel_encoder->type);
5274 return POWER_DOMAIN_AUX_A;
5275 }
5276}
5277
5233static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) 5278static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5234{ 5279{
5235 struct drm_device *dev = crtc->dev; 5280 struct drm_device *dev = crtc->dev;
@@ -5237,13 +5282,11 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5237 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5282 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5238 enum pipe pipe = intel_crtc->pipe; 5283 enum pipe pipe = intel_crtc->pipe;
5239 unsigned long mask; 5284 unsigned long mask;
5240 enum transcoder transcoder; 5285 enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
5241 5286
5242 if (!crtc->state->active) 5287 if (!crtc->state->active)
5243 return 0; 5288 return 0;
5244 5289
5245 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
5246
5247 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5290 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5248 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5291 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5249 if (intel_crtc->config->pch_pfit.enabled || 5292 if (intel_crtc->config->pch_pfit.enabled ||
@@ -5330,7 +5373,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
5330{ 5373{
5331 struct drm_i915_private *dev_priv = dev->dev_private; 5374 struct drm_i915_private *dev_priv = dev->dev_private;
5332 5375
5333 if (IS_SKYLAKE(dev)) { 5376 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5334 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5377 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5335 5378
5336 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 5379 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
@@ -5747,32 +5790,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5747 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5790 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5748 DRM_ERROR("DBuf power disable timeout\n"); 5791 DRM_ERROR("DBuf power disable timeout\n");
5749 5792
5750 /* 5793 /* disable DPLL0 */
5751 * DMC assumes ownership of LCPLL and will get confused if we touch it. 5794 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5752 */ 5795 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5753 if (dev_priv->csr.dmc_payload) { 5796 DRM_ERROR("Couldn't disable DPLL0\n");
5754 /* disable DPLL0 */
5755 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
5756 ~LCPLL_PLL_ENABLE);
5757 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5758 DRM_ERROR("Couldn't disable DPLL0\n");
5759 }
5760
5761 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5762} 5797}
5763 5798
5764void skl_init_cdclk(struct drm_i915_private *dev_priv) 5799void skl_init_cdclk(struct drm_i915_private *dev_priv)
5765{ 5800{
5766 u32 val;
5767 unsigned int required_vco; 5801 unsigned int required_vco;
5768 5802
5769 /* enable PCH reset handshake */
5770 val = I915_READ(HSW_NDE_RSTWRN_OPT);
5771 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
5772
5773 /* enable PG1 and Misc I/O */
5774 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5775
5776 /* DPLL0 not enabled (happens on early BIOS versions) */ 5803 /* DPLL0 not enabled (happens on early BIOS versions) */
5777 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { 5804 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5778 /* enable DPLL0 */ 5805 /* enable DPLL0 */
@@ -5793,6 +5820,45 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
5793 DRM_ERROR("DBuf power enable timeout\n"); 5820 DRM_ERROR("DBuf power enable timeout\n");
5794} 5821}
5795 5822
5823int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5824{
5825 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5826 uint32_t cdctl = I915_READ(CDCLK_CTL);
5827 int freq = dev_priv->skl_boot_cdclk;
5828
5829 /*
5830 * check if the pre-os intialized the display
5831 * There is SWF18 scratchpad register defined which is set by the
5832 * pre-os which can be used by the OS drivers to check the status
5833 */
5834 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5835 goto sanitize;
5836
5837 /* Is PLL enabled and locked ? */
5838 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5839 goto sanitize;
5840
5841 /* DPLL okay; verify the cdclock
5842 *
5843 * Noticed in some instances that the freq selection is correct but
5844 * decimal part is programmed wrong from BIOS where pre-os does not
5845 * enable display. Verify the same as well.
5846 */
5847 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5848 /* All well; nothing to sanitize */
5849 return false;
5850sanitize:
5851 /*
5852 * As of now initialize with max cdclk till
5853 * we get dynamic cdclk support
5854 * */
5855 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5856 skl_init_cdclk(dev_priv);
5857
5858 /* we did have to sanitize */
5859 return true;
5860}
5861
5796/* Adjust CDclk dividers to allow high res or save power if possible */ 5862/* Adjust CDclk dividers to allow high res or save power if possible */
5797static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5863static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5798{ 5864{
@@ -6257,7 +6323,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6257 return; 6323 return;
6258 6324
6259 if (to_intel_plane_state(crtc->primary->state)->visible) { 6325 if (to_intel_plane_state(crtc->primary->state)->visible) {
6260 intel_crtc_wait_for_pending_flips(crtc); 6326 WARN_ON(intel_crtc->unpin_work);
6327
6261 intel_pre_disable_primary(crtc); 6328 intel_pre_disable_primary(crtc);
6262 } 6329 }
6263 6330
@@ -6575,6 +6642,15 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
6575 pipe_config_supports_ips(dev_priv, pipe_config); 6642 pipe_config_supports_ips(dev_priv, pipe_config);
6576} 6643}
6577 6644
6645static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6646{
6647 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6648
6649 /* GDG double wide on either pipe, otherwise pipe A only */
6650 return INTEL_INFO(dev_priv)->gen < 4 &&
6651 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6652}
6653
6578static int intel_crtc_compute_config(struct intel_crtc *crtc, 6654static int intel_crtc_compute_config(struct intel_crtc *crtc,
6579 struct intel_crtc_state *pipe_config) 6655 struct intel_crtc_state *pipe_config)
6580{ 6656{
@@ -6584,23 +6660,24 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6584 6660
6585 /* FIXME should check pixel clock limits on all platforms */ 6661 /* FIXME should check pixel clock limits on all platforms */
6586 if (INTEL_INFO(dev)->gen < 4) { 6662 if (INTEL_INFO(dev)->gen < 4) {
6587 int clock_limit = dev_priv->max_cdclk_freq; 6663 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6588 6664
6589 /* 6665 /*
6590 * Enable pixel doubling when the dot clock 6666 * Enable double wide mode when the dot clock
6591 * is > 90% of the (display) core speed. 6667 * is > 90% of the (display) core speed.
6592 *
6593 * GDG double wide on either pipe,
6594 * otherwise pipe A only.
6595 */ 6668 */
6596 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && 6669 if (intel_crtc_supports_double_wide(crtc) &&
6597 adjusted_mode->crtc_clock > clock_limit * 9 / 10) { 6670 adjusted_mode->crtc_clock > clock_limit) {
6598 clock_limit *= 2; 6671 clock_limit *= 2;
6599 pipe_config->double_wide = true; 6672 pipe_config->double_wide = true;
6600 } 6673 }
6601 6674
6602 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10) 6675 if (adjusted_mode->crtc_clock > clock_limit) {
6676 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6677 adjusted_mode->crtc_clock, clock_limit,
6678 yesno(pipe_config->double_wide));
6603 return -EINVAL; 6679 return -EINVAL;
6680 }
6604 } 6681 }
6605 6682
6606 /* 6683 /*
@@ -7365,7 +7442,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
7365 struct drm_device *dev = crtc->base.dev; 7442 struct drm_device *dev = crtc->base.dev;
7366 struct drm_i915_private *dev_priv = dev->dev_private; 7443 struct drm_i915_private *dev_priv = dev->dev_private;
7367 int pipe = crtc->pipe; 7444 int pipe = crtc->pipe;
7368 int dpll_reg = DPLL(crtc->pipe); 7445 i915_reg_t dpll_reg = DPLL(crtc->pipe);
7369 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7446 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7370 u32 loopfilter, tribuf_calcntr; 7447 u32 loopfilter, tribuf_calcntr;
7371 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7448 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
@@ -9283,8 +9360,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9283 9360
9284 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 9361 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9285 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 9362 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9286 I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 9363 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9287 I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 9364 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9288 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 9365 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9289 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 9366 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9290 "CPU PWM1 enabled\n"); 9367 "CPU PWM1 enabled\n");
@@ -9746,7 +9823,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9746 9823
9747 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9824 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9748 9825
9749 if (IS_SKYLAKE(dev)) 9826 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9750 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9827 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9751 else if (IS_BROXTON(dev)) 9828 else if (IS_BROXTON(dev))
9752 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9829 bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10092,20 +10169,17 @@ __intel_framebuffer_create(struct drm_device *dev,
10092 int ret; 10169 int ret;
10093 10170
10094 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10171 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10095 if (!intel_fb) { 10172 if (!intel_fb)
10096 drm_gem_object_unreference(&obj->base);
10097 return ERR_PTR(-ENOMEM); 10173 return ERR_PTR(-ENOMEM);
10098 }
10099 10174
10100 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 10175 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10101 if (ret) 10176 if (ret)
10102 goto err; 10177 goto err;
10103 10178
10104 return &intel_fb->base; 10179 return &intel_fb->base;
10180
10105err: 10181err:
10106 drm_gem_object_unreference(&obj->base);
10107 kfree(intel_fb); 10182 kfree(intel_fb);
10108
10109 return ERR_PTR(ret); 10183 return ERR_PTR(ret);
10110} 10184}
10111 10185
@@ -10145,6 +10219,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10145 struct drm_display_mode *mode, 10219 struct drm_display_mode *mode,
10146 int depth, int bpp) 10220 int depth, int bpp)
10147{ 10221{
10222 struct drm_framebuffer *fb;
10148 struct drm_i915_gem_object *obj; 10223 struct drm_i915_gem_object *obj;
10149 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10224 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10150 10225
@@ -10159,7 +10234,11 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10159 bpp); 10234 bpp);
10160 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 10235 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10161 10236
10162 return intel_framebuffer_create(dev, &mode_cmd, obj); 10237 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10238 if (IS_ERR(fb))
10239 drm_gem_object_unreference_unlocked(&obj->base);
10240
10241 return fb;
10163} 10242}
10164 10243
10165static struct drm_framebuffer * 10244static struct drm_framebuffer *
@@ -11062,7 +11141,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11062 */ 11141 */
11063 if (ring->id == RCS) { 11142 if (ring->id == RCS) {
11064 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 11143 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11065 intel_ring_emit(ring, DERRMR); 11144 intel_ring_emit_reg(ring, DERRMR);
11066 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 11145 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11067 DERRMR_PIPEB_PRI_FLIP_DONE | 11146 DERRMR_PIPEB_PRI_FLIP_DONE |
11068 DERRMR_PIPEC_PRI_FLIP_DONE)); 11147 DERRMR_PIPEC_PRI_FLIP_DONE));
@@ -11072,7 +11151,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11072 else 11151 else
11073 intel_ring_emit(ring, MI_STORE_REGISTER_MEM | 11152 intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11074 MI_SRM_LRM_GLOBAL_GTT); 11153 MI_SRM_LRM_GLOBAL_GTT);
11075 intel_ring_emit(ring, DERRMR); 11154 intel_ring_emit_reg(ring, DERRMR);
11076 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 11155 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11077 if (IS_GEN8(dev)) { 11156 if (IS_GEN8(dev)) {
11078 intel_ring_emit(ring, 0); 11157 intel_ring_emit(ring, 0);
@@ -11117,13 +11196,14 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
11117} 11196}
11118 11197
11119static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11198static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11199 unsigned int rotation,
11120 struct intel_unpin_work *work) 11200 struct intel_unpin_work *work)
11121{ 11201{
11122 struct drm_device *dev = intel_crtc->base.dev; 11202 struct drm_device *dev = intel_crtc->base.dev;
11123 struct drm_i915_private *dev_priv = dev->dev_private; 11203 struct drm_i915_private *dev_priv = dev->dev_private;
11124 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11204 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11125 const enum pipe pipe = intel_crtc->pipe; 11205 const enum pipe pipe = intel_crtc->pipe;
11126 u32 ctl, stride; 11206 u32 ctl, stride, tile_height;
11127 11207
11128 ctl = I915_READ(PLANE_CTL(pipe, 0)); 11208 ctl = I915_READ(PLANE_CTL(pipe, 0));
11129 ctl &= ~PLANE_CTL_TILED_MASK; 11209 ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11147,9 +11227,16 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11147 * The stride is either expressed as a multiple of 64 bytes chunks for 11227 * The stride is either expressed as a multiple of 64 bytes chunks for
11148 * linear buffers or in number of tiles for tiled buffers. 11228 * linear buffers or in number of tiles for tiled buffers.
11149 */ 11229 */
11150 stride = fb->pitches[0] / 11230 if (intel_rotation_90_or_270(rotation)) {
11151 intel_fb_stride_alignment(dev, fb->modifier[0], 11231 /* stride = Surface height in tiles */
11152 fb->pixel_format); 11232 tile_height = intel_tile_height(dev, fb->pixel_format,
11233 fb->modifier[0], 0);
11234 stride = DIV_ROUND_UP(fb->height, tile_height);
11235 } else {
11236 stride = fb->pitches[0] /
11237 intel_fb_stride_alignment(dev, fb->modifier[0],
11238 fb->pixel_format);
11239 }
11153 11240
11154 /* 11241 /*
11155 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on 11242 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
@@ -11170,10 +11257,9 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11170 struct intel_framebuffer *intel_fb = 11257 struct intel_framebuffer *intel_fb =
11171 to_intel_framebuffer(intel_crtc->base.primary->fb); 11258 to_intel_framebuffer(intel_crtc->base.primary->fb);
11172 struct drm_i915_gem_object *obj = intel_fb->obj; 11259 struct drm_i915_gem_object *obj = intel_fb->obj;
11260 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11173 u32 dspcntr; 11261 u32 dspcntr;
11174 u32 reg;
11175 11262
11176 reg = DSPCNTR(intel_crtc->plane);
11177 dspcntr = I915_READ(reg); 11263 dspcntr = I915_READ(reg);
11178 11264
11179 if (obj->tiling_mode != I915_TILING_NONE) 11265 if (obj->tiling_mode != I915_TILING_NONE)
@@ -11207,7 +11293,7 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11207 intel_pipe_update_start(crtc); 11293 intel_pipe_update_start(crtc);
11208 11294
11209 if (INTEL_INFO(mmio_flip->i915)->gen >= 9) 11295 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11210 skl_do_mmio_flip(crtc, work); 11296 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11211 else 11297 else
11212 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 11298 /* use_mmio_flip() retricts MMIO flips to ilk+ */
11213 ilk_do_mmio_flip(crtc, work); 11299 ilk_do_mmio_flip(crtc, work);
@@ -11234,10 +11320,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11234 11320
11235static int intel_queue_mmio_flip(struct drm_device *dev, 11321static int intel_queue_mmio_flip(struct drm_device *dev,
11236 struct drm_crtc *crtc, 11322 struct drm_crtc *crtc,
11237 struct drm_framebuffer *fb, 11323 struct drm_i915_gem_object *obj)
11238 struct drm_i915_gem_object *obj,
11239 struct intel_engine_cs *ring,
11240 uint32_t flags)
11241{ 11324{
11242 struct intel_mmio_flip *mmio_flip; 11325 struct intel_mmio_flip *mmio_flip;
11243 11326
@@ -11248,6 +11331,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
11248 mmio_flip->i915 = to_i915(dev); 11331 mmio_flip->i915 = to_i915(dev);
11249 mmio_flip->req = i915_gem_request_reference(obj->last_write_req); 11332 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11250 mmio_flip->crtc = to_intel_crtc(crtc); 11333 mmio_flip->crtc = to_intel_crtc(crtc);
11334 mmio_flip->rotation = crtc->primary->state->rotation;
11251 11335
11252 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11336 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11253 schedule_work(&mmio_flip->work); 11337 schedule_work(&mmio_flip->work);
@@ -11453,9 +11537,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11453 * synchronisation, so all we want here is to pin the framebuffer 11537 * synchronisation, so all we want here is to pin the framebuffer
11454 * into the display plane and skip any waits. 11538 * into the display plane and skip any waits.
11455 */ 11539 */
11540 if (!mmio_flip) {
11541 ret = i915_gem_object_sync(obj, ring, &request);
11542 if (ret)
11543 goto cleanup_pending;
11544 }
11545
11456 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, 11546 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11457 crtc->primary->state, 11547 crtc->primary->state);
11458 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
11459 if (ret) 11548 if (ret)
11460 goto cleanup_pending; 11549 goto cleanup_pending;
11461 11550
@@ -11464,8 +11553,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11464 work->gtt_offset += intel_crtc->dspaddr_offset; 11553 work->gtt_offset += intel_crtc->dspaddr_offset;
11465 11554
11466 if (mmio_flip) { 11555 if (mmio_flip) {
11467 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 11556 ret = intel_queue_mmio_flip(dev, crtc, obj);
11468 page_flip_flags);
11469 if (ret) 11557 if (ret)
11470 goto cleanup_unpin; 11558 goto cleanup_unpin;
11471 11559
@@ -11579,18 +11667,32 @@ retry:
11579static bool intel_wm_need_update(struct drm_plane *plane, 11667static bool intel_wm_need_update(struct drm_plane *plane,
11580 struct drm_plane_state *state) 11668 struct drm_plane_state *state)
11581{ 11669{
11582 /* Update watermarks on tiling changes. */ 11670 struct intel_plane_state *new = to_intel_plane_state(state);
11671 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11672
11673 /* Update watermarks on tiling or size changes. */
11583 if (!plane->state->fb || !state->fb || 11674 if (!plane->state->fb || !state->fb ||
11584 plane->state->fb->modifier[0] != state->fb->modifier[0] || 11675 plane->state->fb->modifier[0] != state->fb->modifier[0] ||
11585 plane->state->rotation != state->rotation) 11676 plane->state->rotation != state->rotation ||
11586 return true; 11677 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11587 11678 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11588 if (plane->state->crtc_w != state->crtc_w) 11679 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11680 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11589 return true; 11681 return true;
11590 11682
11591 return false; 11683 return false;
11592} 11684}
11593 11685
11686static bool needs_scaling(struct intel_plane_state *state)
11687{
11688 int src_w = drm_rect_width(&state->src) >> 16;
11689 int src_h = drm_rect_height(&state->src) >> 16;
11690 int dst_w = drm_rect_width(&state->dst);
11691 int dst_h = drm_rect_height(&state->dst);
11692
11693 return (src_w != dst_w || src_h != dst_h);
11694}
11695
11594int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, 11696int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11595 struct drm_plane_state *plane_state) 11697 struct drm_plane_state *plane_state)
11596{ 11698{
@@ -11606,7 +11708,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11606 bool mode_changed = needs_modeset(crtc_state); 11708 bool mode_changed = needs_modeset(crtc_state);
11607 bool was_crtc_enabled = crtc->state->active; 11709 bool was_crtc_enabled = crtc->state->active;
11608 bool is_crtc_enabled = crtc_state->active; 11710 bool is_crtc_enabled = crtc_state->active;
11609
11610 bool turn_off, turn_on, visible, was_visible; 11711 bool turn_off, turn_on, visible, was_visible;
11611 struct drm_framebuffer *fb = plane_state->fb; 11712 struct drm_framebuffer *fb = plane_state->fb;
11612 11713
@@ -11619,14 +11720,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11619 return ret; 11720 return ret;
11620 } 11721 }
11621 11722
11622 /*
11623 * Disabling a plane is always okay; we just need to update
11624 * fb tracking in a special way since cleanup_fb() won't
11625 * get called by the plane helpers.
11626 */
11627 if (old_plane_state->base.fb && !fb)
11628 intel_crtc->atomic.disabled_planes |= 1 << i;
11629
11630 was_visible = old_plane_state->visible; 11723 was_visible = old_plane_state->visible;
11631 visible = to_intel_plane_state(plane_state)->visible; 11724 visible = to_intel_plane_state(plane_state)->visible;
11632 11725
@@ -11676,7 +11769,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11676 11769
11677 switch (plane->type) { 11770 switch (plane->type) {
11678 case DRM_PLANE_TYPE_PRIMARY: 11771 case DRM_PLANE_TYPE_PRIMARY:
11679 intel_crtc->atomic.wait_for_flips = true;
11680 intel_crtc->atomic.pre_disable_primary = turn_off; 11772 intel_crtc->atomic.pre_disable_primary = turn_off;
11681 intel_crtc->atomic.post_enable_primary = turn_on; 11773 intel_crtc->atomic.post_enable_primary = turn_on;
11682 11774
@@ -11724,11 +11816,23 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11724 case DRM_PLANE_TYPE_CURSOR: 11816 case DRM_PLANE_TYPE_CURSOR:
11725 break; 11817 break;
11726 case DRM_PLANE_TYPE_OVERLAY: 11818 case DRM_PLANE_TYPE_OVERLAY:
11727 if (turn_off && !mode_changed) { 11819 /*
11820 * WaCxSRDisabledForSpriteScaling:ivb
11821 *
11822 * cstate->update_wm was already set above, so this flag will
11823 * take effect when we commit and program watermarks.
11824 */
11825 if (IS_IVYBRIDGE(dev) &&
11826 needs_scaling(to_intel_plane_state(plane_state)) &&
11827 !needs_scaling(old_plane_state)) {
11828 to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
11829 } else if (turn_off && !mode_changed) {
11728 intel_crtc->atomic.wait_vblank = true; 11830 intel_crtc->atomic.wait_vblank = true;
11729 intel_crtc->atomic.update_sprite_watermarks |= 11831 intel_crtc->atomic.update_sprite_watermarks |=
11730 1 << i; 11832 1 << i;
11731 } 11833 }
11834
11835 break;
11732 } 11836 }
11733 return 0; 11837 return 0;
11734} 11838}
@@ -11813,6 +11917,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11813 } 11917 }
11814 11918
11815 ret = 0; 11919 ret = 0;
11920 if (dev_priv->display.compute_pipe_wm) {
11921 ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
11922 if (ret)
11923 return ret;
11924 }
11925
11816 if (INTEL_INFO(dev)->gen >= 9) { 11926 if (INTEL_INFO(dev)->gen >= 9) {
11817 if (mode_changed) 11927 if (mode_changed)
11818 ret = skl_update_scaler_crtc(pipe_config); 11928 ret = skl_update_scaler_crtc(pipe_config);
@@ -12002,7 +12112,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12002 pipe_config->dpll_hw_state.pll9, 12112 pipe_config->dpll_hw_state.pll9,
12003 pipe_config->dpll_hw_state.pll10, 12113 pipe_config->dpll_hw_state.pll10,
12004 pipe_config->dpll_hw_state.pcsdw12); 12114 pipe_config->dpll_hw_state.pcsdw12);
12005 } else if (IS_SKYLAKE(dev)) { 12115 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12006 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " 12116 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12007 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 12117 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12008 pipe_config->ddi_pll_sel, 12118 pipe_config->ddi_pll_sel,
@@ -12256,6 +12366,18 @@ intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12256 crtc->hwmode = crtc->state->adjusted_mode; 12366 crtc->hwmode = crtc->state->adjusted_mode;
12257 else 12367 else
12258 crtc->hwmode.crtc_clock = 0; 12368 crtc->hwmode.crtc_clock = 0;
12369
12370 /*
12371 * Update legacy state to satisfy fbc code. This can
12372 * be removed when fbc uses the atomic state.
12373 */
12374 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12375 struct drm_plane_state *plane_state = crtc->primary->state;
12376
12377 crtc->primary->fb = plane_state->fb;
12378 crtc->x = plane_state->src_x >> 16;
12379 crtc->y = plane_state->src_y >> 16;
12380 }
12259 } 12381 }
12260} 12382}
12261 12383
@@ -13020,6 +13142,45 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13020 return 0; 13142 return 0;
13021} 13143}
13022 13144
13145/*
13146 * Handle calculation of various watermark data at the end of the atomic check
13147 * phase. The code here should be run after the per-crtc and per-plane 'check'
13148 * handlers to ensure that all derived state has been updated.
13149 */
13150static void calc_watermark_data(struct drm_atomic_state *state)
13151{
13152 struct drm_device *dev = state->dev;
13153 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13154 struct drm_crtc *crtc;
13155 struct drm_crtc_state *cstate;
13156 struct drm_plane *plane;
13157 struct drm_plane_state *pstate;
13158
13159 /*
13160 * Calculate watermark configuration details now that derived
13161 * plane/crtc state is all properly updated.
13162 */
13163 drm_for_each_crtc(crtc, dev) {
13164 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13165 crtc->state;
13166
13167 if (cstate->active)
13168 intel_state->wm_config.num_pipes_active++;
13169 }
13170 drm_for_each_legacy_plane(plane, dev) {
13171 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13172 plane->state;
13173
13174 if (!to_intel_plane_state(pstate)->visible)
13175 continue;
13176
13177 intel_state->wm_config.sprites_enabled = true;
13178 if (pstate->crtc_w != pstate->src_w >> 16 ||
13179 pstate->crtc_h != pstate->src_h >> 16)
13180 intel_state->wm_config.sprites_scaled = true;
13181 }
13182}
13183
13023/** 13184/**
13024 * intel_atomic_check - validate state object 13185 * intel_atomic_check - validate state object
13025 * @dev: drm device 13186 * @dev: drm device
@@ -13028,6 +13189,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13028static int intel_atomic_check(struct drm_device *dev, 13189static int intel_atomic_check(struct drm_device *dev,
13029 struct drm_atomic_state *state) 13190 struct drm_atomic_state *state)
13030{ 13191{
13192 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13031 struct drm_crtc *crtc; 13193 struct drm_crtc *crtc;
13032 struct drm_crtc_state *crtc_state; 13194 struct drm_crtc_state *crtc_state;
13033 int ret, i; 13195 int ret, i;
@@ -13095,10 +13257,81 @@ static int intel_atomic_check(struct drm_device *dev,
13095 if (ret) 13257 if (ret)
13096 return ret; 13258 return ret;
13097 } else 13259 } else
13098 to_intel_atomic_state(state)->cdclk = 13260 intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
13099 to_i915(state->dev)->cdclk_freq; 13261
13262 ret = drm_atomic_helper_check_planes(state->dev, state);
13263 if (ret)
13264 return ret;
13265
13266 calc_watermark_data(state);
13267
13268 return 0;
13269}
13270
13271static int intel_atomic_prepare_commit(struct drm_device *dev,
13272 struct drm_atomic_state *state,
13273 bool async)
13274{
13275 struct drm_i915_private *dev_priv = dev->dev_private;
13276 struct drm_plane_state *plane_state;
13277 struct drm_crtc_state *crtc_state;
13278 struct drm_plane *plane;
13279 struct drm_crtc *crtc;
13280 int i, ret;
13281
13282 if (async) {
13283 DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13284 return -EINVAL;
13285 }
13100 13286
13101 return drm_atomic_helper_check_planes(state->dev, state); 13287 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13288 ret = intel_crtc_wait_for_pending_flips(crtc);
13289 if (ret)
13290 return ret;
13291
13292 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13293 flush_workqueue(dev_priv->wq);
13294 }
13295
13296 ret = mutex_lock_interruptible(&dev->struct_mutex);
13297 if (ret)
13298 return ret;
13299
13300 ret = drm_atomic_helper_prepare_planes(dev, state);
13301 if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13302 u32 reset_counter;
13303
13304 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13305 mutex_unlock(&dev->struct_mutex);
13306
13307 for_each_plane_in_state(state, plane, plane_state, i) {
13308 struct intel_plane_state *intel_plane_state =
13309 to_intel_plane_state(plane_state);
13310
13311 if (!intel_plane_state->wait_req)
13312 continue;
13313
13314 ret = __i915_wait_request(intel_plane_state->wait_req,
13315 reset_counter, true,
13316 NULL, NULL);
13317
13318 /* Swallow -EIO errors to allow updates during hw lockup. */
13319 if (ret == -EIO)
13320 ret = 0;
13321
13322 if (ret)
13323 break;
13324 }
13325
13326 if (!ret)
13327 return 0;
13328
13329 mutex_lock(&dev->struct_mutex);
13330 drm_atomic_helper_cleanup_planes(dev, state);
13331 }
13332
13333 mutex_unlock(&dev->struct_mutex);
13334 return ret;
13102} 13335}
13103 13336
13104/** 13337/**
@@ -13122,22 +13355,20 @@ static int intel_atomic_commit(struct drm_device *dev,
13122 bool async) 13355 bool async)
13123{ 13356{
13124 struct drm_i915_private *dev_priv = dev->dev_private; 13357 struct drm_i915_private *dev_priv = dev->dev_private;
13125 struct drm_crtc *crtc;
13126 struct drm_crtc_state *crtc_state; 13358 struct drm_crtc_state *crtc_state;
13359 struct drm_crtc *crtc;
13127 int ret = 0; 13360 int ret = 0;
13128 int i; 13361 int i;
13129 bool any_ms = false; 13362 bool any_ms = false;
13130 13363
13131 if (async) { 13364 ret = intel_atomic_prepare_commit(dev, state, async);
13132 DRM_DEBUG_KMS("i915 does not yet support async commit\n"); 13365 if (ret) {
13133 return -EINVAL; 13366 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13134 }
13135
13136 ret = drm_atomic_helper_prepare_planes(dev, state);
13137 if (ret)
13138 return ret; 13367 return ret;
13368 }
13139 13369
13140 drm_atomic_helper_swap_state(dev, state); 13370 drm_atomic_helper_swap_state(dev, state);
13371 dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
13141 13372
13142 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13373 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13143 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13374 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -13175,6 +13406,9 @@ static int intel_atomic_commit(struct drm_device *dev,
13175 to_intel_crtc_state(crtc->state)->update_pipe; 13406 to_intel_crtc_state(crtc->state)->update_pipe;
13176 unsigned long put_domains = 0; 13407 unsigned long put_domains = 0;
13177 13408
13409 if (modeset)
13410 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13411
13178 if (modeset && crtc->state->active) { 13412 if (modeset && crtc->state->active) {
13179 update_scanline_offset(to_intel_crtc(crtc)); 13413 update_scanline_offset(to_intel_crtc(crtc));
13180 dev_priv->display.crtc_enable(crtc); 13414 dev_priv->display.crtc_enable(crtc);
@@ -13190,18 +13424,26 @@ static int intel_atomic_commit(struct drm_device *dev,
13190 if (!modeset) 13424 if (!modeset)
13191 intel_pre_plane_update(intel_crtc); 13425 intel_pre_plane_update(intel_crtc);
13192 13426
13193 drm_atomic_helper_commit_planes_on_crtc(crtc_state); 13427 if (crtc->state->active &&
13428 (crtc->state->planes_changed || update_pipe))
13429 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13194 13430
13195 if (put_domains) 13431 if (put_domains)
13196 modeset_put_power_domains(dev_priv, put_domains); 13432 modeset_put_power_domains(dev_priv, put_domains);
13197 13433
13198 intel_post_plane_update(intel_crtc); 13434 intel_post_plane_update(intel_crtc);
13435
13436 if (modeset)
13437 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13199 } 13438 }
13200 13439
13201 /* FIXME: add subpixel order */ 13440 /* FIXME: add subpixel order */
13202 13441
13203 drm_atomic_helper_wait_for_vblanks(dev, state); 13442 drm_atomic_helper_wait_for_vblanks(dev, state);
13443
13444 mutex_lock(&dev->struct_mutex);
13204 drm_atomic_helper_cleanup_planes(dev, state); 13445 drm_atomic_helper_cleanup_planes(dev, state);
13446 mutex_unlock(&dev->struct_mutex);
13205 13447
13206 if (any_ms) 13448 if (any_ms)
13207 intel_modeset_check_state(dev, state); 13449 intel_modeset_check_state(dev, state);
@@ -13370,6 +13612,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
13370 * bits. Some older platforms need special physical address handling for 13612 * bits. Some older platforms need special physical address handling for
13371 * cursor planes. 13613 * cursor planes.
13372 * 13614 *
13615 * Must be called with struct_mutex held.
13616 *
13373 * Returns 0 on success, negative error code on failure. 13617 * Returns 0 on success, negative error code on failure.
13374 */ 13618 */
13375int 13619int
@@ -13380,28 +13624,58 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13380 struct drm_framebuffer *fb = new_state->fb; 13624 struct drm_framebuffer *fb = new_state->fb;
13381 struct intel_plane *intel_plane = to_intel_plane(plane); 13625 struct intel_plane *intel_plane = to_intel_plane(plane);
13382 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13626 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13383 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 13627 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13384 int ret = 0; 13628 int ret = 0;
13385 13629
13386 if (!obj) 13630 if (!obj && !old_obj)
13387 return 0; 13631 return 0;
13388 13632
13389 mutex_lock(&dev->struct_mutex); 13633 if (old_obj) {
13634 struct drm_crtc_state *crtc_state =
13635 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13636
13637 /* Big Hammer, we also need to ensure that any pending
13638 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13639 * current scanout is retired before unpinning the old
13640 * framebuffer. Note that we rely on userspace rendering
13641 * into the buffer attached to the pipe they are waiting
13642 * on. If not, userspace generates a GPU hang with IPEHR
13643 * point to the MI_WAIT_FOR_EVENT.
13644 *
13645 * This should only fail upon a hung GPU, in which case we
13646 * can safely continue.
13647 */
13648 if (needs_modeset(crtc_state))
13649 ret = i915_gem_object_wait_rendering(old_obj, true);
13390 13650
13391 if (plane->type == DRM_PLANE_TYPE_CURSOR && 13651 /* Swallow -EIO errors to allow updates during hw lockup. */
13652 if (ret && ret != -EIO)
13653 return ret;
13654 }
13655
13656 if (!obj) {
13657 ret = 0;
13658 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13392 INTEL_INFO(dev)->cursor_needs_physical) { 13659 INTEL_INFO(dev)->cursor_needs_physical) {
13393 int align = IS_I830(dev) ? 16 * 1024 : 256; 13660 int align = IS_I830(dev) ? 16 * 1024 : 256;
13394 ret = i915_gem_object_attach_phys(obj, align); 13661 ret = i915_gem_object_attach_phys(obj, align);
13395 if (ret) 13662 if (ret)
13396 DRM_DEBUG_KMS("failed to attach phys object\n"); 13663 DRM_DEBUG_KMS("failed to attach phys object\n");
13397 } else { 13664 } else {
13398 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); 13665 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
13399 } 13666 }
13400 13667
13401 if (ret == 0) 13668 if (ret == 0) {
13402 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); 13669 if (obj) {
13670 struct intel_plane_state *plane_state =
13671 to_intel_plane_state(new_state);
13403 13672
13404 mutex_unlock(&dev->struct_mutex); 13673 i915_gem_request_assign(&plane_state->wait_req,
13674 obj->last_write_req);
13675 }
13676
13677 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13678 }
13405 13679
13406 return ret; 13680 return ret;
13407} 13681}
@@ -13412,23 +13686,35 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13412 * @fb: old framebuffer that was on plane 13686 * @fb: old framebuffer that was on plane
13413 * 13687 *
13414 * Cleans up a framebuffer that has just been removed from a plane. 13688 * Cleans up a framebuffer that has just been removed from a plane.
13689 *
13690 * Must be called with struct_mutex held.
13415 */ 13691 */
13416void 13692void
13417intel_cleanup_plane_fb(struct drm_plane *plane, 13693intel_cleanup_plane_fb(struct drm_plane *plane,
13418 const struct drm_plane_state *old_state) 13694 const struct drm_plane_state *old_state)
13419{ 13695{
13420 struct drm_device *dev = plane->dev; 13696 struct drm_device *dev = plane->dev;
13421 struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb); 13697 struct intel_plane *intel_plane = to_intel_plane(plane);
13698 struct intel_plane_state *old_intel_state;
13699 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13700 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13422 13701
13423 if (!obj) 13702 old_intel_state = to_intel_plane_state(old_state);
13703
13704 if (!obj && !old_obj)
13424 return; 13705 return;
13425 13706
13426 if (plane->type != DRM_PLANE_TYPE_CURSOR || 13707 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13427 !INTEL_INFO(dev)->cursor_needs_physical) { 13708 !INTEL_INFO(dev)->cursor_needs_physical))
13428 mutex_lock(&dev->struct_mutex);
13429 intel_unpin_fb_obj(old_state->fb, old_state); 13709 intel_unpin_fb_obj(old_state->fb, old_state);
13430 mutex_unlock(&dev->struct_mutex); 13710
13431 } 13711 /* prepare_fb aborted? */
13712 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13713 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13714 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13715
13716 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13717
13432} 13718}
13433 13719
13434int 13720int
@@ -13447,7 +13733,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
13447 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13733 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13448 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; 13734 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13449 13735
13450 if (!crtc_clock || !cdclk) 13736 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13451 return DRM_PLANE_HELPER_NO_SCALING; 13737 return DRM_PLANE_HELPER_NO_SCALING;
13452 13738
13453 /* 13739 /*
@@ -13495,18 +13781,8 @@ intel_commit_primary_plane(struct drm_plane *plane,
13495 struct drm_framebuffer *fb = state->base.fb; 13781 struct drm_framebuffer *fb = state->base.fb;
13496 struct drm_device *dev = plane->dev; 13782 struct drm_device *dev = plane->dev;
13497 struct drm_i915_private *dev_priv = dev->dev_private; 13783 struct drm_i915_private *dev_priv = dev->dev_private;
13498 struct intel_crtc *intel_crtc;
13499 struct drm_rect *src = &state->src;
13500 13784
13501 crtc = crtc ? crtc : plane->crtc; 13785 crtc = crtc ? crtc : plane->crtc;
13502 intel_crtc = to_intel_crtc(crtc);
13503
13504 plane->fb = fb;
13505 crtc->x = src->x1 >> 16;
13506 crtc->y = src->y1 >> 16;
13507
13508 if (!crtc->state->active)
13509 return;
13510 13786
13511 dev_priv->display.update_primary_plane(crtc, fb, 13787 dev_priv->display.update_primary_plane(crtc, fb,
13512 state->src.x1 >> 16, 13788 state->src.x1 >> 16,
@@ -13536,8 +13812,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13536 intel_update_watermarks(crtc); 13812 intel_update_watermarks(crtc);
13537 13813
13538 /* Perform vblank evasion around commit operation */ 13814 /* Perform vblank evasion around commit operation */
13539 if (crtc->state->active) 13815 intel_pipe_update_start(intel_crtc);
13540 intel_pipe_update_start(intel_crtc);
13541 13816
13542 if (modeset) 13817 if (modeset)
13543 return; 13818 return;
@@ -13553,8 +13828,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13553{ 13828{
13554 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13555 13830
13556 if (crtc->state->active) 13831 intel_pipe_update_end(intel_crtc);
13557 intel_pipe_update_end(intel_crtc);
13558} 13832}
13559 13833
13560/** 13834/**
@@ -13737,8 +14011,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
13737 intel_crtc->cursor_bo = obj; 14011 intel_crtc->cursor_bo = obj;
13738 14012
13739update: 14013update:
13740 if (crtc->state->active) 14014 intel_crtc_update_cursor(crtc, state->visible);
13741 intel_crtc_update_cursor(crtc, state->visible);
13742} 14015}
13743 14016
13744static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 14017static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -14010,7 +14283,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14010 */ 14283 */
14011 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14284 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14012 /* WaIgnoreDDIAStrap: skl */ 14285 /* WaIgnoreDDIAStrap: skl */
14013 if (found || IS_SKYLAKE(dev)) 14286 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14014 intel_ddi_init(dev, PORT_A); 14287 intel_ddi_init(dev, PORT_A);
14015 14288
14016 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14289 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -14026,7 +14299,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14026 /* 14299 /*
14027 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 14300 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14028 */ 14301 */
14029 if (IS_SKYLAKE(dev) && 14302 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14030 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14303 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14031 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14304 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14032 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14305 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
@@ -14041,7 +14314,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14041 14314
14042 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14315 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14043 /* PCH SDVOB multiplex with HDMIB */ 14316 /* PCH SDVOB multiplex with HDMIB */
14044 found = intel_sdvo_init(dev, PCH_SDVOB, true); 14317 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14045 if (!found) 14318 if (!found)
14046 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 14319 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14047 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14320 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -14097,7 +14370,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14097 14370
14098 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14371 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14099 DRM_DEBUG_KMS("probing SDVOB\n"); 14372 DRM_DEBUG_KMS("probing SDVOB\n");
14100 found = intel_sdvo_init(dev, GEN3_SDVOB, true); 14373 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14101 if (!found && IS_G4X(dev)) { 14374 if (!found && IS_G4X(dev)) {
14102 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14375 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14103 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 14376 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
@@ -14111,7 +14384,7 @@ static void intel_setup_outputs(struct drm_device *dev)
14111 14384
14112 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14385 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14113 DRM_DEBUG_KMS("probing SDVOC\n"); 14386 DRM_DEBUG_KMS("probing SDVOC\n");
14114 found = intel_sdvo_init(dev, GEN3_SDVOC, false); 14387 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14115 } 14388 }
14116 14389
14117 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14390 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
@@ -14379,6 +14652,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
14379 struct drm_file *filp, 14652 struct drm_file *filp,
14380 const struct drm_mode_fb_cmd2 *user_mode_cmd) 14653 const struct drm_mode_fb_cmd2 *user_mode_cmd)
14381{ 14654{
14655 struct drm_framebuffer *fb;
14382 struct drm_i915_gem_object *obj; 14656 struct drm_i915_gem_object *obj;
14383 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14657 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14384 14658
@@ -14387,7 +14661,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
14387 if (&obj->base == NULL) 14661 if (&obj->base == NULL)
14388 return ERR_PTR(-ENOENT); 14662 return ERR_PTR(-ENOENT);
14389 14663
14390 return intel_framebuffer_create(dev, &mode_cmd, obj); 14664 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14665 if (IS_ERR(fb))
14666 drm_gem_object_unreference_unlocked(&obj->base);
14667
14668 return fb;
14391} 14669}
14392 14670
14393#ifndef CONFIG_DRM_FBDEV_EMULATION 14671#ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14472,7 +14750,7 @@ static void intel_init_display(struct drm_device *dev)
14472 } 14750 }
14473 14751
14474 /* Returns the core display clock speed */ 14752 /* Returns the core display clock speed */
14475 if (IS_SKYLAKE(dev)) 14753 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14476 dev_priv->display.get_display_clock_speed = 14754 dev_priv->display.get_display_clock_speed =
14477 skylake_get_display_clock_speed; 14755 skylake_get_display_clock_speed;
14478 else if (IS_BROXTON(dev)) 14756 else if (IS_BROXTON(dev))
@@ -14761,7 +15039,7 @@ static void i915_disable_vga(struct drm_device *dev)
14761{ 15039{
14762 struct drm_i915_private *dev_priv = dev->dev_private; 15040 struct drm_i915_private *dev_priv = dev->dev_private;
14763 u8 sr1; 15041 u8 sr1;
14764 u32 vga_reg = i915_vgacntrl_reg(dev); 15042 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
14765 15043
14766 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 15044 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
14767 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 15045 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -14877,9 +15155,6 @@ void intel_modeset_init(struct drm_device *dev)
14877 i915_disable_vga(dev); 15155 i915_disable_vga(dev);
14878 intel_setup_outputs(dev); 15156 intel_setup_outputs(dev);
14879 15157
14880 /* Just in case the BIOS is doing something questionable. */
14881 intel_fbc_disable(dev_priv);
14882
14883 drm_modeset_lock_all(dev); 15158 drm_modeset_lock_all(dev);
14884 intel_modeset_setup_hw_state(dev); 15159 intel_modeset_setup_hw_state(dev);
14885 drm_modeset_unlock_all(dev); 15160 drm_modeset_unlock_all(dev);
@@ -14966,10 +15241,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14966{ 15241{
14967 struct drm_device *dev = crtc->base.dev; 15242 struct drm_device *dev = crtc->base.dev;
14968 struct drm_i915_private *dev_priv = dev->dev_private; 15243 struct drm_i915_private *dev_priv = dev->dev_private;
14969 u32 reg; 15244 i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
14970 15245
14971 /* Clear any frame start delays used for debugging left by the BIOS */ 15246 /* Clear any frame start delays used for debugging left by the BIOS */
14972 reg = PIPECONF(crtc->config->cpu_transcoder);
14973 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15247 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
14974 15248
14975 /* restore vblank interrupts to correct state */ 15249 /* restore vblank interrupts to correct state */
@@ -15123,7 +15397,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15123void i915_redisable_vga_power_on(struct drm_device *dev) 15397void i915_redisable_vga_power_on(struct drm_device *dev)
15124{ 15398{
15125 struct drm_i915_private *dev_priv = dev->dev_private; 15399 struct drm_i915_private *dev_priv = dev->dev_private;
15126 u32 vga_reg = i915_vgacntrl_reg(dev); 15400 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15127 15401
15128 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15402 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15129 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15403 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
@@ -15162,7 +15436,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
15162 struct intel_plane_state *plane_state = 15436 struct intel_plane_state *plane_state =
15163 to_intel_plane_state(primary->state); 15437 to_intel_plane_state(primary->state);
15164 15438
15165 plane_state->visible = 15439 plane_state->visible = crtc->active &&
15166 primary_get_hw_state(to_intel_plane(primary)); 15440 primary_get_hw_state(to_intel_plane(primary));
15167 15441
15168 if (plane_state->visible) 15442 if (plane_state->visible)
@@ -15419,8 +15693,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
15419 mutex_lock(&dev->struct_mutex); 15693 mutex_lock(&dev->struct_mutex);
15420 ret = intel_pin_and_fence_fb_obj(c->primary, 15694 ret = intel_pin_and_fence_fb_obj(c->primary,
15421 c->primary->fb, 15695 c->primary->fb,
15422 c->primary->state, 15696 c->primary->state);
15423 NULL, NULL);
15424 mutex_unlock(&dev->struct_mutex); 15697 mutex_unlock(&dev->struct_mutex);
15425 if (ret) { 15698 if (ret) {
15426 DRM_ERROR("failed to pin boot fb on pipe %d\n", 15699 DRM_ERROR("failed to pin boot fb on pipe %d\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09bdd94ca3ba..bec443a629da 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
277 * See vlv_power_sequencer_reset() why we need 277 * See vlv_power_sequencer_reset() why we need
278 * a power domain reference here. 278 * a power domain reference here.
279 */ 279 */
280 power_domain = intel_display_port_power_domain(encoder); 280 power_domain = intel_display_port_aux_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain); 281 intel_display_power_get(dev_priv, power_domain);
282 282
283 mutex_lock(&dev_priv->pps_mutex); 283 mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
293 293
294 mutex_unlock(&dev_priv->pps_mutex); 294 mutex_unlock(&dev_priv->pps_mutex);
295 295
296 power_domain = intel_display_port_power_domain(encoder); 296 power_domain = intel_display_port_aux_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain); 297 intel_display_power_put(dev_priv, power_domain);
298} 298}
299 299
@@ -541,7 +541,8 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
541 } 541 }
542} 542}
543 543
544static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 544static i915_reg_t
545_pp_ctrl_reg(struct intel_dp *intel_dp)
545{ 546{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp); 547 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547 548
@@ -553,7 +554,8 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); 554 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554} 555}
555 556
556static u32 _pp_stat_reg(struct intel_dp *intel_dp) 557static i915_reg_t
558_pp_stat_reg(struct intel_dp *intel_dp)
557{ 559{
558 struct drm_device *dev = intel_dp_to_dev(intel_dp); 560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
559 561
@@ -582,7 +584,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
582 584
583 if (IS_VALLEYVIEW(dev)) { 585 if (IS_VALLEYVIEW(dev)) {
584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 586 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585 u32 pp_ctrl_reg, pp_div_reg; 587 i915_reg_t pp_ctrl_reg, pp_div_reg;
586 u32 pp_div; 588 u32 pp_div;
587 589
588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 590 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
@@ -652,7 +654,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 654 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653 struct drm_device *dev = intel_dig_port->base.base.dev; 655 struct drm_device *dev = intel_dig_port->base.base.dev;
654 struct drm_i915_private *dev_priv = dev->dev_private; 656 struct drm_i915_private *dev_priv = dev->dev_private;
655 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 657 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
656 uint32_t status; 658 uint32_t status;
657 bool done; 659 bool done;
658 660
@@ -750,7 +752,7 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
750 else 752 else
751 precharge = 5; 753 precharge = 5;
752 754
753 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) 755 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
754 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 756 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755 else 757 else
756 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 758 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -789,8 +791,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790 struct drm_device *dev = intel_dig_port->base.base.dev; 792 struct drm_device *dev = intel_dig_port->base.base.dev;
791 struct drm_i915_private *dev_priv = dev->dev_private; 793 struct drm_i915_private *dev_priv = dev->dev_private;
792 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 794 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
793 uint32_t ch_data = ch_ctl + 4;
794 uint32_t aux_clock_divider; 795 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes; 796 int i, ret, recv_bytes;
796 uint32_t status; 797 uint32_t status;
@@ -816,8 +817,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
816 817
817 intel_dp_check_edp(intel_dp); 818 intel_dp_check_edp(intel_dp);
818 819
819 intel_aux_display_runtime_get(dev_priv);
820
821 /* Try to wait for any previous AUX channel activity */ 820 /* Try to wait for any previous AUX channel activity */
822 for (try = 0; try < 3; try++) { 821 for (try = 0; try < 3; try++) {
823 status = I915_READ_NOTRACE(ch_ctl); 822 status = I915_READ_NOTRACE(ch_ctl);
@@ -856,7 +855,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
856 for (try = 0; try < 5; try++) { 855 for (try = 0; try < 5; try++) {
857 /* Load the send data into the aux channel data registers */ 856 /* Load the send data into the aux channel data registers */
858 for (i = 0; i < send_bytes; i += 4) 857 for (i = 0; i < send_bytes; i += 4)
859 I915_WRITE(ch_data + i, 858 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
860 intel_dp_pack_aux(send + i, 859 intel_dp_pack_aux(send + i,
861 send_bytes - i)); 860 send_bytes - i));
862 861
@@ -920,13 +919,12 @@ done:
920 recv_bytes = recv_size; 919 recv_bytes = recv_size;
921 920
922 for (i = 0; i < recv_bytes; i += 4) 921 for (i = 0; i < recv_bytes; i += 4)
923 intel_dp_unpack_aux(I915_READ(ch_data + i), 922 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
924 recv + i, recv_bytes - i); 923 recv + i, recv_bytes - i);
925 924
926 ret = recv_bytes; 925 ret = recv_bytes;
927out: 926out:
928 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 927 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
929 intel_aux_display_runtime_put(dev_priv);
930 928
931 if (vdd) 929 if (vdd)
932 edp_panel_vdd_off(intel_dp, false); 930 edp_panel_vdd_off(intel_dp, false);
@@ -1008,96 +1006,206 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1008 return ret; 1006 return ret;
1009} 1007}
1010 1008
1011static void 1009static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1012intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) 1010 enum port port)
1013{ 1011{
1014 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1012 switch (port) {
1015 struct drm_i915_private *dev_priv = dev->dev_private; 1013 case PORT_B:
1016 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1014 case PORT_C:
1017 enum port port = intel_dig_port->port; 1015 case PORT_D:
1018 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; 1016 return DP_AUX_CH_CTL(port);
1019 const char *name = NULL; 1017 default:
1020 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL; 1018 MISSING_CASE(port);
1021 int ret; 1019 return DP_AUX_CH_CTL(PORT_B);
1020 }
1021}
1022 1022
1023 /* On SKL we don't have Aux for port E so we rely on VBT to set 1023static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1024 * a proper alternate aux channel. 1024 enum port port, int index)
1025 */ 1025{
1026 if (IS_SKYLAKE(dev) && port == PORT_E) { 1026 switch (port) {
1027 switch (info->alternate_aux_channel) { 1027 case PORT_B:
1028 case DP_AUX_B: 1028 case PORT_C:
1029 porte_aux_ctl_reg = DPB_AUX_CH_CTL; 1029 case PORT_D:
1030 break; 1030 return DP_AUX_CH_DATA(port, index);
1031 case DP_AUX_C: 1031 default:
1032 porte_aux_ctl_reg = DPC_AUX_CH_CTL; 1032 MISSING_CASE(port);
1033 break; 1033 return DP_AUX_CH_DATA(PORT_B, index);
1034 case DP_AUX_D:
1035 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1036 break;
1037 case DP_AUX_A:
1038 default:
1039 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1040 }
1041 } 1034 }
1035}
1042 1036
1037static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1038 enum port port)
1039{
1043 switch (port) { 1040 switch (port) {
1044 case PORT_A: 1041 case PORT_A:
1045 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 1042 return DP_AUX_CH_CTL(port);
1046 name = "DPDDC-A";
1047 break;
1048 case PORT_B: 1043 case PORT_B:
1049 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1050 name = "DPDDC-B";
1051 break;
1052 case PORT_C: 1044 case PORT_C:
1053 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1054 name = "DPDDC-C";
1055 break;
1056 case PORT_D: 1045 case PORT_D:
1057 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; 1046 return PCH_DP_AUX_CH_CTL(port);
1058 name = "DPDDC-D";
1059 break;
1060 case PORT_E:
1061 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1062 name = "DPDDC-E";
1063 break;
1064 default: 1047 default:
1065 BUG(); 1048 MISSING_CASE(port);
1049 return DP_AUX_CH_CTL(PORT_A);
1066 } 1050 }
1051}
1067 1052
1068 /* 1053static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1069 * The AUX_CTL register is usually DP_CTL + 0x10. 1054 enum port port, int index)
1070 * 1055{
1071 * On Haswell and Broadwell though: 1056 switch (port) {
1072 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU 1057 case PORT_A:
1073 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU 1058 return DP_AUX_CH_DATA(port, index);
1074 * 1059 case PORT_B:
1075 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU. 1060 case PORT_C:
1076 */ 1061 case PORT_D:
1077 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E) 1062 return PCH_DP_AUX_CH_DATA(port, index);
1078 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 1063 default:
1064 MISSING_CASE(port);
1065 return DP_AUX_CH_DATA(PORT_A, index);
1066 }
1067}
1068
1069/*
1070 * On SKL we don't have Aux for port E so we rely
1071 * on VBT to set a proper alternate aux channel.
1072 */
1073static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1074{
1075 const struct ddi_vbt_port_info *info =
1076 &dev_priv->vbt.ddi_port_info[PORT_E];
1077
1078 switch (info->alternate_aux_channel) {
1079 case DP_AUX_A:
1080 return PORT_A;
1081 case DP_AUX_B:
1082 return PORT_B;
1083 case DP_AUX_C:
1084 return PORT_C;
1085 case DP_AUX_D:
1086 return PORT_D;
1087 default:
1088 MISSING_CASE(info->alternate_aux_channel);
1089 return PORT_A;
1090 }
1091}
1092
1093static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1094 enum port port)
1095{
1096 if (port == PORT_E)
1097 port = skl_porte_aux_port(dev_priv);
1098
1099 switch (port) {
1100 case PORT_A:
1101 case PORT_B:
1102 case PORT_C:
1103 case PORT_D:
1104 return DP_AUX_CH_CTL(port);
1105 default:
1106 MISSING_CASE(port);
1107 return DP_AUX_CH_CTL(PORT_A);
1108 }
1109}
1110
1111static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1112 enum port port, int index)
1113{
1114 if (port == PORT_E)
1115 port = skl_porte_aux_port(dev_priv);
1116
1117 switch (port) {
1118 case PORT_A:
1119 case PORT_B:
1120 case PORT_C:
1121 case PORT_D:
1122 return DP_AUX_CH_DATA(port, index);
1123 default:
1124 MISSING_CASE(port);
1125 return DP_AUX_CH_DATA(PORT_A, index);
1126 }
1127}
1128
1129static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1130 enum port port)
1131{
1132 if (INTEL_INFO(dev_priv)->gen >= 9)
1133 return skl_aux_ctl_reg(dev_priv, port);
1134 else if (HAS_PCH_SPLIT(dev_priv))
1135 return ilk_aux_ctl_reg(dev_priv, port);
1136 else
1137 return g4x_aux_ctl_reg(dev_priv, port);
1138}
1139
1140static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1141 enum port port, int index)
1142{
1143 if (INTEL_INFO(dev_priv)->gen >= 9)
1144 return skl_aux_data_reg(dev_priv, port, index);
1145 else if (HAS_PCH_SPLIT(dev_priv))
1146 return ilk_aux_data_reg(dev_priv, port, index);
1147 else
1148 return g4x_aux_data_reg(dev_priv, port, index);
1149}
1150
1151static void intel_aux_reg_init(struct intel_dp *intel_dp)
1152{
1153 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1154 enum port port = dp_to_dig_port(intel_dp)->port;
1155 int i;
1156
1157 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1158 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1159 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1160}
1161
1162static void
1163intel_dp_aux_fini(struct intel_dp *intel_dp)
1164{
1165 drm_dp_aux_unregister(&intel_dp->aux);
1166 kfree(intel_dp->aux.name);
1167}
1168
1169static int
1170intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1171{
1172 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1173 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1174 enum port port = intel_dig_port->port;
1175 int ret;
1176
1177 intel_aux_reg_init(intel_dp);
1178
1179 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1180 if (!intel_dp->aux.name)
1181 return -ENOMEM;
1079 1182
1080 intel_dp->aux.name = name;
1081 intel_dp->aux.dev = dev->dev; 1183 intel_dp->aux.dev = dev->dev;
1082 intel_dp->aux.transfer = intel_dp_aux_transfer; 1184 intel_dp->aux.transfer = intel_dp_aux_transfer;
1083 1185
1084 DRM_DEBUG_KMS("registering %s bus for %s\n", name, 1186 DRM_DEBUG_KMS("registering %s bus for %s\n",
1187 intel_dp->aux.name,
1085 connector->base.kdev->kobj.name); 1188 connector->base.kdev->kobj.name);
1086 1189
1087 ret = drm_dp_aux_register(&intel_dp->aux); 1190 ret = drm_dp_aux_register(&intel_dp->aux);
1088 if (ret < 0) { 1191 if (ret < 0) {
1089 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n", 1192 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1090 name, ret); 1193 intel_dp->aux.name, ret);
1091 return; 1194 kfree(intel_dp->aux.name);
1195 return ret;
1092 } 1196 }
1093 1197
1094 ret = sysfs_create_link(&connector->base.kdev->kobj, 1198 ret = sysfs_create_link(&connector->base.kdev->kobj,
1095 &intel_dp->aux.ddc.dev.kobj, 1199 &intel_dp->aux.ddc.dev.kobj,
1096 intel_dp->aux.ddc.dev.kobj.name); 1200 intel_dp->aux.ddc.dev.kobj.name);
1097 if (ret < 0) { 1201 if (ret < 0) {
1098 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); 1202 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1099 drm_dp_aux_unregister(&intel_dp->aux); 1203 intel_dp->aux.name, ret);
1204 intel_dp_aux_fini(intel_dp);
1205 return ret;
1100 } 1206 }
1207
1208 return 0;
1101} 1209}
1102 1210
1103static void 1211static void
@@ -1189,10 +1297,13 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1189 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; 1297 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1190} 1298}
1191 1299
1192static bool intel_dp_source_supports_hbr2(struct drm_device *dev) 1300bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1193{ 1301{
1302 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1303 struct drm_device *dev = dig_port->base.base.dev;
1304
1194 /* WaDisableHBR2:skl */ 1305 /* WaDisableHBR2:skl */
1195 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) 1306 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1196 return false; 1307 return false;
1197 1308
1198 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) || 1309 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
@@ -1203,14 +1314,16 @@ static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1203} 1314}
1204 1315
1205static int 1316static int
1206intel_dp_source_rates(struct drm_device *dev, const int **source_rates) 1317intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1207{ 1318{
1319 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1320 struct drm_device *dev = dig_port->base.base.dev;
1208 int size; 1321 int size;
1209 1322
1210 if (IS_BROXTON(dev)) { 1323 if (IS_BROXTON(dev)) {
1211 *source_rates = bxt_rates; 1324 *source_rates = bxt_rates;
1212 size = ARRAY_SIZE(bxt_rates); 1325 size = ARRAY_SIZE(bxt_rates);
1213 } else if (IS_SKYLAKE(dev)) { 1326 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1214 *source_rates = skl_rates; 1327 *source_rates = skl_rates;
1215 size = ARRAY_SIZE(skl_rates); 1328 size = ARRAY_SIZE(skl_rates);
1216 } else { 1329 } else {
@@ -1219,7 +1332,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1219 } 1332 }
1220 1333
1221 /* This depends on the fact that 5.4 is last value in the array */ 1334 /* This depends on the fact that 5.4 is last value in the array */
1222 if (!intel_dp_source_supports_hbr2(dev)) 1335 if (!intel_dp_source_supports_hbr2(intel_dp))
1223 size--; 1336 size--;
1224 1337
1225 return size; 1338 return size;
@@ -1284,12 +1397,11 @@ static int intersect_rates(const int *source_rates, int source_len,
1284static int intel_dp_common_rates(struct intel_dp *intel_dp, 1397static int intel_dp_common_rates(struct intel_dp *intel_dp,
1285 int *common_rates) 1398 int *common_rates)
1286{ 1399{
1287 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1288 const int *source_rates, *sink_rates; 1400 const int *source_rates, *sink_rates;
1289 int source_len, sink_len; 1401 int source_len, sink_len;
1290 1402
1291 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); 1403 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1292 source_len = intel_dp_source_rates(dev, &source_rates); 1404 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1293 1405
1294 return intersect_rates(source_rates, source_len, 1406 return intersect_rates(source_rates, source_len,
1295 sink_rates, sink_len, 1407 sink_rates, sink_len,
@@ -1314,7 +1426,6 @@ static void snprintf_int_array(char *str, size_t len,
1314 1426
1315static void intel_dp_print_rates(struct intel_dp *intel_dp) 1427static void intel_dp_print_rates(struct intel_dp *intel_dp)
1316{ 1428{
1317 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1318 const int *source_rates, *sink_rates; 1429 const int *source_rates, *sink_rates;
1319 int source_len, sink_len, common_len; 1430 int source_len, sink_len, common_len;
1320 int common_rates[DP_MAX_SUPPORTED_RATES]; 1431 int common_rates[DP_MAX_SUPPORTED_RATES];
@@ -1323,7 +1434,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
1323 if ((drm_debug & DRM_UT_KMS) == 0) 1434 if ((drm_debug & DRM_UT_KMS) == 0)
1324 return; 1435 return;
1325 1436
1326 source_len = intel_dp_source_rates(dev, &source_rates); 1437 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1327 snprintf_int_array(str, sizeof(str), source_rates, source_len); 1438 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1328 DRM_DEBUG_KMS("source rates: %s\n", str); 1439 DRM_DEBUG_KMS("source rates: %s\n", str);
1329 1440
@@ -1365,8 +1476,8 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1365 return rate_to_index(rate, intel_dp->sink_rates); 1476 return rate_to_index(rate, intel_dp->sink_rates);
1366} 1477}
1367 1478
1368static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1479void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1369 uint8_t *link_bw, uint8_t *rate_select) 1480 uint8_t *link_bw, uint8_t *rate_select)
1370{ 1481{
1371 if (intel_dp->num_sink_rates) { 1482 if (intel_dp->num_sink_rates) {
1372 *link_bw = 0; 1483 *link_bw = 0;
@@ -1426,7 +1537,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1426 return ret; 1537 return ret;
1427 } 1538 }
1428 1539
1429 if (!HAS_PCH_SPLIT(dev)) 1540 if (HAS_GMCH_DISPLAY(dev))
1430 intel_gmch_panel_fitting(intel_crtc, pipe_config, 1541 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1431 intel_connector->panel.fitting_mode); 1542 intel_connector->panel.fitting_mode);
1432 else 1543 else
@@ -1530,7 +1641,7 @@ found:
1530 &pipe_config->dp_m2_n2); 1641 &pipe_config->dp_m2_n2);
1531 } 1642 }
1532 1643
1533 if (IS_SKYLAKE(dev) && is_edp(intel_dp)) 1644 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1534 skl_edp_set_pll_config(pipe_config); 1645 skl_edp_set_pll_config(pipe_config);
1535 else if (IS_BROXTON(dev)) 1646 else if (IS_BROXTON(dev))
1536 /* handled in ddi */; 1647 /* handled in ddi */;
@@ -1542,37 +1653,6 @@ found:
1542 return true; 1653 return true;
1543} 1654}
1544 1655
1545static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1546{
1547 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1548 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1549 struct drm_device *dev = crtc->base.dev;
1550 struct drm_i915_private *dev_priv = dev->dev_private;
1551 u32 dpa_ctl;
1552
1553 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1554 crtc->config->port_clock);
1555 dpa_ctl = I915_READ(DP_A);
1556 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1557
1558 if (crtc->config->port_clock == 162000) {
1559 /* For a long time we've carried around a ILK-DevA w/a for the
1560 * 160MHz clock. If we're really unlucky, it's still required.
1561 */
1562 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1563 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1564 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1565 } else {
1566 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1567 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1568 }
1569
1570 I915_WRITE(DP_A, dpa_ctl);
1571
1572 POSTING_READ(DP_A);
1573 udelay(500);
1574}
1575
1576void intel_dp_set_link_params(struct intel_dp *intel_dp, 1656void intel_dp_set_link_params(struct intel_dp *intel_dp,
1577 const struct intel_crtc_state *pipe_config) 1657 const struct intel_crtc_state *pipe_config)
1578{ 1658{
@@ -1617,9 +1697,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1617 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 1697 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1618 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count); 1698 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1619 1699
1620 if (crtc->config->has_audio)
1621 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1622
1623 /* Split out the IBX/CPU vs CPT settings */ 1700 /* Split out the IBX/CPU vs CPT settings */
1624 1701
1625 if (IS_GEN7(dev) && port == PORT_A) { 1702 if (IS_GEN7(dev) && port == PORT_A) {
@@ -1680,7 +1757,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
1680{ 1757{
1681 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1758 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1682 struct drm_i915_private *dev_priv = dev->dev_private; 1759 struct drm_i915_private *dev_priv = dev->dev_private;
1683 u32 pp_stat_reg, pp_ctrl_reg; 1760 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1684 1761
1685 lockdep_assert_held(&dev_priv->pps_mutex); 1762 lockdep_assert_held(&dev_priv->pps_mutex);
1686 1763
@@ -1770,7 +1847,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1770 struct drm_i915_private *dev_priv = dev->dev_private; 1847 struct drm_i915_private *dev_priv = dev->dev_private;
1771 enum intel_display_power_domain power_domain; 1848 enum intel_display_power_domain power_domain;
1772 u32 pp; 1849 u32 pp;
1773 u32 pp_stat_reg, pp_ctrl_reg; 1850 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1774 bool need_to_disable = !intel_dp->want_panel_vdd; 1851 bool need_to_disable = !intel_dp->want_panel_vdd;
1775 1852
1776 lockdep_assert_held(&dev_priv->pps_mutex); 1853 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1784,7 +1861,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1784 if (edp_have_panel_vdd(intel_dp)) 1861 if (edp_have_panel_vdd(intel_dp))
1785 return need_to_disable; 1862 return need_to_disable;
1786 1863
1787 power_domain = intel_display_port_power_domain(intel_encoder); 1864 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1788 intel_display_power_get(dev_priv, power_domain); 1865 intel_display_power_get(dev_priv, power_domain);
1789 1866
1790 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", 1867 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1846,7 +1923,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1846 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1923 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1847 enum intel_display_power_domain power_domain; 1924 enum intel_display_power_domain power_domain;
1848 u32 pp; 1925 u32 pp;
1849 u32 pp_stat_reg, pp_ctrl_reg; 1926 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1850 1927
1851 lockdep_assert_held(&dev_priv->pps_mutex); 1928 lockdep_assert_held(&dev_priv->pps_mutex);
1852 1929
@@ -1874,7 +1951,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1874 if ((pp & POWER_TARGET_ON) == 0) 1951 if ((pp & POWER_TARGET_ON) == 0)
1875 intel_dp->last_power_cycle = jiffies; 1952 intel_dp->last_power_cycle = jiffies;
1876 1953
1877 power_domain = intel_display_port_power_domain(intel_encoder); 1954 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1878 intel_display_power_put(dev_priv, power_domain); 1955 intel_display_power_put(dev_priv, power_domain);
1879} 1956}
1880 1957
@@ -1933,7 +2010,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
1933 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1934 struct drm_i915_private *dev_priv = dev->dev_private; 2011 struct drm_i915_private *dev_priv = dev->dev_private;
1935 u32 pp; 2012 u32 pp;
1936 u32 pp_ctrl_reg; 2013 i915_reg_t pp_ctrl_reg;
1937 2014
1938 lockdep_assert_held(&dev_priv->pps_mutex); 2015 lockdep_assert_held(&dev_priv->pps_mutex);
1939 2016
@@ -1995,7 +2072,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
1995 struct drm_i915_private *dev_priv = dev->dev_private; 2072 struct drm_i915_private *dev_priv = dev->dev_private;
1996 enum intel_display_power_domain power_domain; 2073 enum intel_display_power_domain power_domain;
1997 u32 pp; 2074 u32 pp;
1998 u32 pp_ctrl_reg; 2075 i915_reg_t pp_ctrl_reg;
1999 2076
2000 lockdep_assert_held(&dev_priv->pps_mutex); 2077 lockdep_assert_held(&dev_priv->pps_mutex);
2001 2078
@@ -2025,7 +2102,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2025 wait_panel_off(intel_dp); 2102 wait_panel_off(intel_dp);
2026 2103
2027 /* We got a reference when we enabled the VDD. */ 2104 /* We got a reference when we enabled the VDD. */
2028 power_domain = intel_display_port_power_domain(intel_encoder); 2105 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2029 intel_display_power_put(dev_priv, power_domain); 2106 intel_display_power_put(dev_priv, power_domain);
2030} 2107}
2031 2108
@@ -2046,7 +2123,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2046 struct drm_device *dev = intel_dig_port->base.base.dev; 2123 struct drm_device *dev = intel_dig_port->base.base.dev;
2047 struct drm_i915_private *dev_priv = dev->dev_private; 2124 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 pp; 2125 u32 pp;
2049 u32 pp_ctrl_reg; 2126 i915_reg_t pp_ctrl_reg;
2050 2127
2051 /* 2128 /*
2052 * If we enable the backlight right away following a panel power 2129 * If we enable the backlight right away following a panel power
@@ -2087,7 +2164,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2087 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2164 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2088 struct drm_i915_private *dev_priv = dev->dev_private; 2165 struct drm_i915_private *dev_priv = dev->dev_private;
2089 u32 pp; 2166 u32 pp;
2090 u32 pp_ctrl_reg; 2167 i915_reg_t pp_ctrl_reg;
2091 2168
2092 if (!is_edp(intel_dp)) 2169 if (!is_edp(intel_dp))
2093 return; 2170 return;
@@ -2146,27 +2223,61 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
2146 _intel_edp_backlight_off(intel_dp); 2223 _intel_edp_backlight_off(intel_dp);
2147} 2224}
2148 2225
2226static const char *state_string(bool enabled)
2227{
2228 return enabled ? "on" : "off";
2229}
2230
2231static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2232{
2233 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2234 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2235 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2236
2237 I915_STATE_WARN(cur_state != state,
2238 "DP port %c state assertion failure (expected %s, current %s)\n",
2239 port_name(dig_port->port),
2240 state_string(state), state_string(cur_state));
2241}
2242#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2243
2244static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2245{
2246 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2247
2248 I915_STATE_WARN(cur_state != state,
2249 "eDP PLL state assertion failure (expected %s, current %s)\n",
2250 state_string(state), state_string(cur_state));
2251}
2252#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2253#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2254
2149static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 2255static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2150{ 2256{
2151 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2257 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2152 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 2258 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2153 struct drm_device *dev = crtc->dev; 2259 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2154 struct drm_i915_private *dev_priv = dev->dev_private;
2155 u32 dpa_ctl;
2156 2260
2157 assert_pipe_disabled(dev_priv, 2261 assert_pipe_disabled(dev_priv, crtc->pipe);
2158 to_intel_crtc(crtc)->pipe); 2262 assert_dp_port_disabled(intel_dp);
2263 assert_edp_pll_disabled(dev_priv);
2264
2265 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2266 crtc->config->port_clock);
2267
2268 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2269
2270 if (crtc->config->port_clock == 162000)
2271 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2272 else
2273 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2274
2275 I915_WRITE(DP_A, intel_dp->DP);
2276 POSTING_READ(DP_A);
2277 udelay(500);
2159 2278
2160 DRM_DEBUG_KMS("\n");
2161 dpa_ctl = I915_READ(DP_A);
2162 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2163 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2164
2165 /* We don't adjust intel_dp->DP while tearing down the link, to
2166 * facilitate link retraining (e.g. after hotplug). Hence clear all
2167 * enable bits here to ensure that we don't enable too much. */
2168 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2169 intel_dp->DP |= DP_PLL_ENABLE; 2279 intel_dp->DP |= DP_PLL_ENABLE;
2280
2170 I915_WRITE(DP_A, intel_dp->DP); 2281 I915_WRITE(DP_A, intel_dp->DP);
2171 POSTING_READ(DP_A); 2282 POSTING_READ(DP_A);
2172 udelay(200); 2283 udelay(200);
@@ -2175,24 +2286,18 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2175static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 2286static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2176{ 2287{
2177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2178 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 2289 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2179 struct drm_device *dev = crtc->dev; 2290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 dpa_ctl;
2182 2291
2183 assert_pipe_disabled(dev_priv, 2292 assert_pipe_disabled(dev_priv, crtc->pipe);
2184 to_intel_crtc(crtc)->pipe); 2293 assert_dp_port_disabled(intel_dp);
2294 assert_edp_pll_enabled(dev_priv);
2185 2295
2186 dpa_ctl = I915_READ(DP_A); 2296 DRM_DEBUG_KMS("disabling eDP PLL\n");
2187 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2188 "dp pll off, should be on\n");
2189 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2190 2297
2191 /* We can't rely on the value tracked for the DP register in 2298 intel_dp->DP &= ~DP_PLL_ENABLE;
2192 * intel_dp->DP because link_down must not change that (otherwise link 2299
2193 * re-training will fail. */ 2300 I915_WRITE(DP_A, intel_dp->DP);
2194 dpa_ctl &= ~DP_PLL_ENABLE;
2195 I915_WRITE(DP_A, dpa_ctl);
2196 POSTING_READ(DP_A); 2301 POSTING_READ(DP_A);
2197 udelay(200); 2302 udelay(200);
2198} 2303}
@@ -2261,7 +2366,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2261 } 2366 }
2262 2367
2263 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 2368 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2264 intel_dp->output_reg); 2369 i915_mmio_reg_offset(intel_dp->output_reg));
2265 } else if (IS_CHERRYVIEW(dev)) { 2370 } else if (IS_CHERRYVIEW(dev)) {
2266 *pipe = DP_PORT_TO_PIPE_CHV(tmp); 2371 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2267 } else { 2372 } else {
@@ -2324,7 +2429,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2324 intel_dp_get_m_n(crtc, pipe_config); 2429 intel_dp_get_m_n(crtc, pipe_config);
2325 2430
2326 if (port == PORT_A) { 2431 if (port == PORT_A) {
2327 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) 2432 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2328 pipe_config->port_clock = 162000; 2433 pipe_config->port_clock = 162000;
2329 else 2434 else
2330 pipe_config->port_clock = 270000; 2435 pipe_config->port_clock = 270000;
@@ -2389,6 +2494,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
2389 enum port port = dp_to_dig_port(intel_dp)->port; 2494 enum port port = dp_to_dig_port(intel_dp)->port;
2390 2495
2391 intel_dp_link_down(intel_dp); 2496 intel_dp_link_down(intel_dp);
2497
2498 /* Only ilk+ has port A */
2392 if (port == PORT_A) 2499 if (port == PORT_A)
2393 ironlake_edp_pll_off(intel_dp); 2500 ironlake_edp_pll_off(intel_dp);
2394} 2501}
@@ -2548,6 +2655,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
2548{ 2655{
2549 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2656 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2550 struct drm_i915_private *dev_priv = dev->dev_private; 2657 struct drm_i915_private *dev_priv = dev->dev_private;
2658 struct intel_crtc *crtc =
2659 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2551 2660
2552 /* enable with pattern 1 (as per spec) */ 2661 /* enable with pattern 1 (as per spec) */
2553 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, 2662 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
@@ -2563,6 +2672,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
2563 * fail when the power sequencer is freshly used for this port. 2672 * fail when the power sequencer is freshly used for this port.
2564 */ 2673 */
2565 intel_dp->DP |= DP_PORT_EN; 2674 intel_dp->DP |= DP_PORT_EN;
2675 if (crtc->config->has_audio)
2676 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2566 2677
2567 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 2678 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2568 POSTING_READ(intel_dp->output_reg); 2679 POSTING_READ(intel_dp->output_reg);
@@ -2575,6 +2686,8 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2575 struct drm_i915_private *dev_priv = dev->dev_private; 2686 struct drm_i915_private *dev_priv = dev->dev_private;
2576 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2687 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2577 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 2688 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2689 enum port port = dp_to_dig_port(intel_dp)->port;
2690 enum pipe pipe = crtc->pipe;
2578 2691
2579 if (WARN_ON(dp_reg & DP_PORT_EN)) 2692 if (WARN_ON(dp_reg & DP_PORT_EN))
2580 return; 2693 return;
@@ -2586,6 +2699,17 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2586 2699
2587 intel_dp_enable_port(intel_dp); 2700 intel_dp_enable_port(intel_dp);
2588 2701
2702 if (port == PORT_A && IS_GEN5(dev_priv)) {
2703 /*
2704 * Underrun reporting for the other pipe was disabled in
2705 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2706 * enabled, so it's now safe to re-enable underrun reporting.
2707 */
2708 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2709 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2710 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2711 }
2712
2589 edp_panel_vdd_on(intel_dp); 2713 edp_panel_vdd_on(intel_dp);
2590 edp_panel_on(intel_dp); 2714 edp_panel_on(intel_dp);
2591 edp_panel_vdd_off(intel_dp, true); 2715 edp_panel_vdd_off(intel_dp, true);
@@ -2608,7 +2732,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2608 2732
2609 if (crtc->config->has_audio) { 2733 if (crtc->config->has_audio) {
2610 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 2734 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2611 pipe_name(crtc->pipe)); 2735 pipe_name(pipe));
2612 intel_audio_codec_enable(encoder); 2736 intel_audio_codec_enable(encoder);
2613 } 2737 }
2614} 2738}
@@ -2631,16 +2755,29 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
2631 2755
2632static void g4x_pre_enable_dp(struct intel_encoder *encoder) 2756static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2633{ 2757{
2758 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2634 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2759 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2635 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2760 enum port port = dp_to_dig_port(intel_dp)->port;
2761 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2636 2762
2637 intel_dp_prepare(encoder); 2763 intel_dp_prepare(encoder);
2638 2764
2765 if (port == PORT_A && IS_GEN5(dev_priv)) {
2766 /*
2767 * We get FIFO underruns on the other pipe when
2768 * enabling the CPU eDP PLL, and when enabling CPU
2769 * eDP port. We could potentially avoid the PLL
2770 * underrun with a vblank wait just prior to enabling
2771 * the PLL, but that doesn't appear to help the port
2772 * enable case. Just sweep it all under the rug.
2773 */
2774 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2775 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2776 }
2777
2639 /* Only ilk+ has port A */ 2778 /* Only ilk+ has port A */
2640 if (dport->port == PORT_A) { 2779 if (port == PORT_A)
2641 ironlake_set_pll_cpu_edp(intel_dp);
2642 ironlake_edp_pll_on(intel_dp); 2780 ironlake_edp_pll_on(intel_dp);
2643 }
2644} 2781}
2645 2782
2646static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 2783static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2648,7 +2785,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2785 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2649 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; 2786 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2650 enum pipe pipe = intel_dp->pps_pipe; 2787 enum pipe pipe = intel_dp->pps_pipe;
2651 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 2788 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2652 2789
2653 edp_panel_vdd_off_sync(intel_dp); 2790 edp_panel_vdd_off_sync(intel_dp);
2654 2791
@@ -3046,7 +3183,7 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3046 * Fetch AUX CH registers 0x202 - 0x207 which contain 3183 * Fetch AUX CH registers 0x202 - 0x207 which contain
3047 * link status information 3184 * link status information
3048 */ 3185 */
3049static bool 3186bool
3050intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 3187intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3051{ 3188{
3052 return intel_dp_dpcd_read_wake(&intel_dp->aux, 3189 return intel_dp_dpcd_read_wake(&intel_dp->aux,
@@ -3056,7 +3193,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
3056} 3193}
3057 3194
3058/* These are source-specific values. */ 3195/* These are source-specific values. */
3059static uint8_t 3196uint8_t
3060intel_dp_voltage_max(struct intel_dp *intel_dp) 3197intel_dp_voltage_max(struct intel_dp *intel_dp)
3061{ 3198{
3062 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3199 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3079,7 +3216,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
3079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3216 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3080} 3217}
3081 3218
3082static uint8_t 3219uint8_t
3083intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 3220intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3084{ 3221{
3085 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3222 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3421,38 +3558,6 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3421 return 0; 3558 return 0;
3422} 3559}
3423 3560
3424static void
3425intel_get_adjust_train(struct intel_dp *intel_dp,
3426 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3427{
3428 uint8_t v = 0;
3429 uint8_t p = 0;
3430 int lane;
3431 uint8_t voltage_max;
3432 uint8_t preemph_max;
3433
3434 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3435 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3436 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3437
3438 if (this_v > v)
3439 v = this_v;
3440 if (this_p > p)
3441 p = this_p;
3442 }
3443
3444 voltage_max = intel_dp_voltage_max(intel_dp);
3445 if (v >= voltage_max)
3446 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3447
3448 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3449 if (p >= preemph_max)
3450 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3451
3452 for (lane = 0; lane < 4; lane++)
3453 intel_dp->train_set[lane] = v | p;
3454}
3455
3456static uint32_t 3561static uint32_t
3457gen4_signal_levels(uint8_t train_set) 3562gen4_signal_levels(uint8_t train_set)
3458{ 3563{
@@ -3550,13 +3655,13 @@ gen7_edp_signal_levels(uint8_t train_set)
3550 } 3655 }
3551} 3656}
3552 3657
3553/* Properly updates "DP" with the correct signal levels. */ 3658void
3554static void 3659intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3555intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3556{ 3660{
3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3558 enum port port = intel_dig_port->port; 3662 enum port port = intel_dig_port->port;
3559 struct drm_device *dev = intel_dig_port->base.base.dev; 3663 struct drm_device *dev = intel_dig_port->base.base.dev;
3664 struct drm_i915_private *dev_priv = to_i915(dev);
3560 uint32_t signal_levels, mask = 0; 3665 uint32_t signal_levels, mask = 0;
3561 uint8_t train_set = intel_dp->train_set[0]; 3666 uint8_t train_set = intel_dp->train_set[0];
3562 3667
@@ -3591,74 +3696,27 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3591 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 3696 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3592 DP_TRAIN_PRE_EMPHASIS_SHIFT); 3697 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3593 3698
3594 *DP = (*DP & ~mask) | signal_levels; 3699 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3595}
3596
3597static bool
3598intel_dp_set_link_train(struct intel_dp *intel_dp,
3599 uint32_t *DP,
3600 uint8_t dp_train_pat)
3601{
3602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3603 struct drm_i915_private *dev_priv =
3604 to_i915(intel_dig_port->base.base.dev);
3605 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3606 int ret, len;
3607
3608 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3609 3700
3610 I915_WRITE(intel_dp->output_reg, *DP); 3701 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3611 POSTING_READ(intel_dp->output_reg); 3702 POSTING_READ(intel_dp->output_reg);
3612
3613 buf[0] = dp_train_pat;
3614 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3615 DP_TRAINING_PATTERN_DISABLE) {
3616 /* don't write DP_TRAINING_LANEx_SET on disable */
3617 len = 1;
3618 } else {
3619 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3620 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3621 len = intel_dp->lane_count + 1;
3622 }
3623
3624 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3625 buf, len);
3626
3627 return ret == len;
3628} 3703}
3629 3704
3630static bool 3705void
3631intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, 3706intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3632 uint8_t dp_train_pat) 3707 uint8_t dp_train_pat)
3633{
3634 if (!intel_dp->train_set_valid)
3635 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3636 intel_dp_set_signal_levels(intel_dp, DP);
3637 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3638}
3639
3640static bool
3641intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3642 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3643{ 3708{
3644 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3709 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3645 struct drm_i915_private *dev_priv = 3710 struct drm_i915_private *dev_priv =
3646 to_i915(intel_dig_port->base.base.dev); 3711 to_i915(intel_dig_port->base.base.dev);
3647 int ret;
3648 3712
3649 intel_get_adjust_train(intel_dp, link_status); 3713 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3650 intel_dp_set_signal_levels(intel_dp, DP);
3651 3714
3652 I915_WRITE(intel_dp->output_reg, *DP); 3715 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3653 POSTING_READ(intel_dp->output_reg); 3716 POSTING_READ(intel_dp->output_reg);
3654
3655 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3656 intel_dp->train_set, intel_dp->lane_count);
3657
3658 return ret == intel_dp->lane_count;
3659} 3717}
3660 3718
3661static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 3719void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3662{ 3720{
3663 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3721 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3664 struct drm_device *dev = intel_dig_port->base.base.dev; 3722 struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -3689,232 +3747,6 @@ static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3689 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 3747 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3690} 3748}
3691 3749
3692/* Enable corresponding port and start training pattern 1 */
3693static void
3694intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3695{
3696 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3697 struct drm_device *dev = encoder->dev;
3698 int i;
3699 uint8_t voltage;
3700 int voltage_tries, loop_tries;
3701 uint32_t DP = intel_dp->DP;
3702 uint8_t link_config[2];
3703 uint8_t link_bw, rate_select;
3704
3705 if (HAS_DDI(dev))
3706 intel_ddi_prepare_link_retrain(encoder);
3707
3708 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3709 &link_bw, &rate_select);
3710
3711 /* Write the link configuration data */
3712 link_config[0] = link_bw;
3713 link_config[1] = intel_dp->lane_count;
3714 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3715 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3716 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3717 if (intel_dp->num_sink_rates)
3718 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3719 &rate_select, 1);
3720
3721 link_config[0] = 0;
3722 link_config[1] = DP_SET_ANSI_8B10B;
3723 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3724
3725 DP |= DP_PORT_EN;
3726
3727 /* clock recovery */
3728 if (!intel_dp_reset_link_train(intel_dp, &DP,
3729 DP_TRAINING_PATTERN_1 |
3730 DP_LINK_SCRAMBLING_DISABLE)) {
3731 DRM_ERROR("failed to enable link training\n");
3732 return;
3733 }
3734
3735 voltage = 0xff;
3736 voltage_tries = 0;
3737 loop_tries = 0;
3738 for (;;) {
3739 uint8_t link_status[DP_LINK_STATUS_SIZE];
3740
3741 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3742 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3743 DRM_ERROR("failed to get link status\n");
3744 break;
3745 }
3746
3747 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3748 DRM_DEBUG_KMS("clock recovery OK\n");
3749 break;
3750 }
3751
3752 /*
3753 * if we used previously trained voltage and pre-emphasis values
3754 * and we don't get clock recovery, reset link training values
3755 */
3756 if (intel_dp->train_set_valid) {
3757 DRM_DEBUG_KMS("clock recovery not ok, reset");
3758 /* clear the flag as we are not reusing train set */
3759 intel_dp->train_set_valid = false;
3760 if (!intel_dp_reset_link_train(intel_dp, &DP,
3761 DP_TRAINING_PATTERN_1 |
3762 DP_LINK_SCRAMBLING_DISABLE)) {
3763 DRM_ERROR("failed to enable link training\n");
3764 return;
3765 }
3766 continue;
3767 }
3768
3769 /* Check to see if we've tried the max voltage */
3770 for (i = 0; i < intel_dp->lane_count; i++)
3771 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3772 break;
3773 if (i == intel_dp->lane_count) {
3774 ++loop_tries;
3775 if (loop_tries == 5) {
3776 DRM_ERROR("too many full retries, give up\n");
3777 break;
3778 }
3779 intel_dp_reset_link_train(intel_dp, &DP,
3780 DP_TRAINING_PATTERN_1 |
3781 DP_LINK_SCRAMBLING_DISABLE);
3782 voltage_tries = 0;
3783 continue;
3784 }
3785
3786 /* Check to see if we've tried the same voltage 5 times */
3787 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3788 ++voltage_tries;
3789 if (voltage_tries == 5) {
3790 DRM_ERROR("too many voltage retries, give up\n");
3791 break;
3792 }
3793 } else
3794 voltage_tries = 0;
3795 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3796
3797 /* Update training set as requested by target */
3798 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3799 DRM_ERROR("failed to update link training\n");
3800 break;
3801 }
3802 }
3803
3804 intel_dp->DP = DP;
3805}
3806
3807static void
3808intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3809{
3810 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3811 struct drm_device *dev = dig_port->base.base.dev;
3812 bool channel_eq = false;
3813 int tries, cr_tries;
3814 uint32_t DP = intel_dp->DP;
3815 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3816
3817 /*
3818 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
3819 *
3820 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3821 * also mandatory for downstream devices that support HBR2.
3822 *
3823 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
3824 * supported but still not enabled.
3825 */
3826 if (intel_dp_source_supports_hbr2(dev) &&
3827 drm_dp_tps3_supported(intel_dp->dpcd))
3828 training_pattern = DP_TRAINING_PATTERN_3;
3829 else if (intel_dp->link_rate == 540000)
3830 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3831
3832 /* channel equalization */
3833 if (!intel_dp_set_link_train(intel_dp, &DP,
3834 training_pattern |
3835 DP_LINK_SCRAMBLING_DISABLE)) {
3836 DRM_ERROR("failed to start channel equalization\n");
3837 return;
3838 }
3839
3840 tries = 0;
3841 cr_tries = 0;
3842 channel_eq = false;
3843 for (;;) {
3844 uint8_t link_status[DP_LINK_STATUS_SIZE];
3845
3846 if (cr_tries > 5) {
3847 DRM_ERROR("failed to train DP, aborting\n");
3848 break;
3849 }
3850
3851 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3852 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3853 DRM_ERROR("failed to get link status\n");
3854 break;
3855 }
3856
3857 /* Make sure clock is still ok */
3858 if (!drm_dp_clock_recovery_ok(link_status,
3859 intel_dp->lane_count)) {
3860 intel_dp->train_set_valid = false;
3861 intel_dp_link_training_clock_recovery(intel_dp);
3862 intel_dp_set_link_train(intel_dp, &DP,
3863 training_pattern |
3864 DP_LINK_SCRAMBLING_DISABLE);
3865 cr_tries++;
3866 continue;
3867 }
3868
3869 if (drm_dp_channel_eq_ok(link_status,
3870 intel_dp->lane_count)) {
3871 channel_eq = true;
3872 break;
3873 }
3874
3875 /* Try 5 times, then try clock recovery if that fails */
3876 if (tries > 5) {
3877 intel_dp->train_set_valid = false;
3878 intel_dp_link_training_clock_recovery(intel_dp);
3879 intel_dp_set_link_train(intel_dp, &DP,
3880 training_pattern |
3881 DP_LINK_SCRAMBLING_DISABLE);
3882 tries = 0;
3883 cr_tries++;
3884 continue;
3885 }
3886
3887 /* Update training set as requested by target */
3888 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3889 DRM_ERROR("failed to update link training\n");
3890 break;
3891 }
3892 ++tries;
3893 }
3894
3895 intel_dp_set_idle_link_train(intel_dp);
3896
3897 intel_dp->DP = DP;
3898
3899 if (channel_eq) {
3900 intel_dp->train_set_valid = true;
3901 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3902 }
3903}
3904
3905void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3906{
3907 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3908 DP_TRAINING_PATTERN_DISABLE);
3909}
3910
3911void
3912intel_dp_start_link_train(struct intel_dp *intel_dp)
3913{
3914 intel_dp_link_training_clock_recovery(intel_dp);
3915 intel_dp_link_training_channel_equalization(intel_dp);
3916}
3917
3918static void 3750static void
3919intel_dp_link_down(struct intel_dp *intel_dp) 3751intel_dp_link_down(struct intel_dp *intel_dp)
3920{ 3752{
@@ -3957,6 +3789,13 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3957 * matching HDMI port to be enabled on transcoder A. 3789 * matching HDMI port to be enabled on transcoder A.
3958 */ 3790 */
3959 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) { 3791 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3792 /*
3793 * We get CPU/PCH FIFO underruns on the other pipe when
3794 * doing the workaround. Sweep them under the rug.
3795 */
3796 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3797 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3798
3960 /* always enable with pattern 1 (as per spec) */ 3799 /* always enable with pattern 1 (as per spec) */
3961 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK); 3800 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3962 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1; 3801 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
@@ -3966,9 +3805,15 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3966 DP &= ~DP_PORT_EN; 3805 DP &= ~DP_PORT_EN;
3967 I915_WRITE(intel_dp->output_reg, DP); 3806 I915_WRITE(intel_dp->output_reg, DP);
3968 POSTING_READ(intel_dp->output_reg); 3807 POSTING_READ(intel_dp->output_reg);
3808
3809 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3810 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3811 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3969 } 3812 }
3970 3813
3971 msleep(intel_dp->panel_power_down_delay); 3814 msleep(intel_dp->panel_power_down_delay);
3815
3816 intel_dp->DP = DP;
3972} 3817}
3973 3818
3974static bool 3819static bool
@@ -4016,7 +3861,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
4016 } 3861 }
4017 3862
4018 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", 3863 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4019 yesno(intel_dp_source_supports_hbr2(dev)), 3864 yesno(intel_dp_source_supports_hbr2(intel_dp)),
4020 yesno(drm_dp_tps3_supported(intel_dp->dpcd))); 3865 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4021 3866
4022 /* Intermediate frequency support */ 3867 /* Intermediate frequency support */
@@ -4106,9 +3951,12 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
4106static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) 3951static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4107{ 3952{
4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3953 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3954 struct drm_device *dev = dig_port->base.base.dev;
4109 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 3955 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4110 u8 buf; 3956 u8 buf;
4111 int ret = 0; 3957 int ret = 0;
3958 int count = 0;
3959 int attempts = 10;
4112 3960
4113 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { 3961 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4114 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); 3962 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
@@ -4123,7 +3971,22 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4123 goto out; 3971 goto out;
4124 } 3972 }
4125 3973
4126 intel_dp->sink_crc.started = false; 3974 do {
3975 intel_wait_for_vblank(dev, intel_crtc->pipe);
3976
3977 if (drm_dp_dpcd_readb(&intel_dp->aux,
3978 DP_TEST_SINK_MISC, &buf) < 0) {
3979 ret = -EIO;
3980 goto out;
3981 }
3982 count = buf & DP_TEST_COUNT_MASK;
3983 } while (--attempts && count);
3984
3985 if (attempts == 0) {
3986 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
3987 ret = -ETIMEDOUT;
3988 }
3989
4127 out: 3990 out:
4128 hsw_enable_ips(intel_crtc); 3991 hsw_enable_ips(intel_crtc);
4129 return ret; 3992 return ret;
@@ -4132,27 +3995,26 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4132static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) 3995static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4133{ 3996{
4134 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3997 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3998 struct drm_device *dev = dig_port->base.base.dev;
4135 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 3999 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4136 u8 buf; 4000 u8 buf;
4137 int ret; 4001 int ret;
4138 4002
4139 if (intel_dp->sink_crc.started) {
4140 ret = intel_dp_sink_crc_stop(intel_dp);
4141 if (ret)
4142 return ret;
4143 }
4144
4145 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) 4003 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4146 return -EIO; 4004 return -EIO;
4147 4005
4148 if (!(buf & DP_TEST_CRC_SUPPORTED)) 4006 if (!(buf & DP_TEST_CRC_SUPPORTED))
4149 return -ENOTTY; 4007 return -ENOTTY;
4150 4008
4151 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4152
4153 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) 4009 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4154 return -EIO; 4010 return -EIO;
4155 4011
4012 if (buf & DP_TEST_SINK_START) {
4013 ret = intel_dp_sink_crc_stop(intel_dp);
4014 if (ret)
4015 return ret;
4016 }
4017
4156 hsw_disable_ips(intel_crtc); 4018 hsw_disable_ips(intel_crtc);
4157 4019
4158 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 4020 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
@@ -4161,7 +4023,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4161 return -EIO; 4023 return -EIO;
4162 } 4024 }
4163 4025
4164 intel_dp->sink_crc.started = true; 4026 intel_wait_for_vblank(dev, intel_crtc->pipe);
4165 return 0; 4027 return 0;
4166} 4028}
4167 4029
@@ -4173,7 +4035,6 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4173 u8 buf; 4035 u8 buf;
4174 int count, ret; 4036 int count, ret;
4175 int attempts = 6; 4037 int attempts = 6;
4176 bool old_equal_new;
4177 4038
4178 ret = intel_dp_sink_crc_start(intel_dp); 4039 ret = intel_dp_sink_crc_start(intel_dp);
4179 if (ret) 4040 if (ret)
@@ -4189,35 +4050,17 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4189 } 4050 }
4190 count = buf & DP_TEST_COUNT_MASK; 4051 count = buf & DP_TEST_COUNT_MASK;
4191 4052
4192 /* 4053 } while (--attempts && count == 0);
4193 * Count might be reset during the loop. In this case
4194 * last known count needs to be reset as well.
4195 */
4196 if (count == 0)
4197 intel_dp->sink_crc.last_count = 0;
4198
4199 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4200 ret = -EIO;
4201 goto stop;
4202 }
4203
4204 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4205 !memcmp(intel_dp->sink_crc.last_crc, crc,
4206 6 * sizeof(u8)));
4207
4208 } while (--attempts && (count == 0 || old_equal_new));
4209
4210 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4211 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4212 4054
4213 if (attempts == 0) { 4055 if (attempts == 0) {
4214 if (old_equal_new) { 4056 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4215 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n"); 4057 ret = -ETIMEDOUT;
4216 } else { 4058 goto stop;
4217 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); 4059 }
4218 ret = -ETIMEDOUT; 4060
4219 goto stop; 4061 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4220 } 4062 ret = -EIO;
4063 goto stop;
4221 } 4064 }
4222 4065
4223stop: 4066stop:
@@ -4317,13 +4160,6 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4317 uint8_t rxdata = 0; 4160 uint8_t rxdata = 0;
4318 int status = 0; 4161 int status = 0;
4319 4162
4320 intel_dp->compliance_test_active = 0;
4321 intel_dp->compliance_test_type = 0;
4322 intel_dp->compliance_test_data = 0;
4323
4324 intel_dp->aux.i2c_nack_count = 0;
4325 intel_dp->aux.i2c_defer_count = 0;
4326
4327 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1); 4163 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4328 if (status <= 0) { 4164 if (status <= 0) {
4329 DRM_DEBUG_KMS("Could not read test request from sink\n"); 4165 DRM_DEBUG_KMS("Could not read test request from sink\n");
@@ -4439,6 +4275,14 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
4439 4275
4440 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 4276 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4441 4277
4278 /*
4279 * Clearing compliance test variables to allow capturing
4280 * of values for next automated test request.
4281 */
4282 intel_dp->compliance_test_active = 0;
4283 intel_dp->compliance_test_type = 0;
4284 intel_dp->compliance_test_data = 0;
4285
4442 if (!intel_encoder->base.crtc) 4286 if (!intel_encoder->base.crtc)
4443 return; 4287 return;
4444 4288
@@ -4469,7 +4313,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
4469 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 4313 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4470 } 4314 }
4471 4315
4472 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 4316 /* if link training is requested we should perform it always */
4317 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4318 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4473 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 4319 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4474 intel_encoder->base.name); 4320 intel_encoder->base.name);
4475 intel_dp_start_link_train(intel_dp); 4321 intel_dp_start_link_train(intel_dp);
@@ -4687,41 +4533,6 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4687 return g4x_digital_port_connected(dev_priv, port); 4533 return g4x_digital_port_connected(dev_priv, port);
4688} 4534}
4689 4535
4690static enum drm_connector_status
4691ironlake_dp_detect(struct intel_dp *intel_dp)
4692{
4693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4694 struct drm_i915_private *dev_priv = dev->dev_private;
4695 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4696
4697 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4698 return connector_status_disconnected;
4699
4700 return intel_dp_detect_dpcd(intel_dp);
4701}
4702
4703static enum drm_connector_status
4704g4x_dp_detect(struct intel_dp *intel_dp)
4705{
4706 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4708
4709 /* Can't disconnect eDP, but you can close the lid... */
4710 if (is_edp(intel_dp)) {
4711 enum drm_connector_status status;
4712
4713 status = intel_panel_detect(dev);
4714 if (status == connector_status_unknown)
4715 status = connector_status_connected;
4716 return status;
4717 }
4718
4719 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4720 return connector_status_disconnected;
4721
4722 return intel_dp_detect_dpcd(intel_dp);
4723}
4724
4725static struct edid * 4536static struct edid *
4726intel_dp_get_edid(struct intel_dp *intel_dp) 4537intel_dp_get_edid(struct intel_dp *intel_dp)
4727{ 4538{
@@ -4765,26 +4576,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
4765 intel_dp->has_audio = false; 4576 intel_dp->has_audio = false;
4766} 4577}
4767 4578
4768static enum intel_display_power_domain
4769intel_dp_power_get(struct intel_dp *dp)
4770{
4771 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4772 enum intel_display_power_domain power_domain;
4773
4774 power_domain = intel_display_port_power_domain(encoder);
4775 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4776
4777 return power_domain;
4778}
4779
4780static void
4781intel_dp_power_put(struct intel_dp *dp,
4782 enum intel_display_power_domain power_domain)
4783{
4784 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4785 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4786}
4787
4788static enum drm_connector_status 4579static enum drm_connector_status
4789intel_dp_detect(struct drm_connector *connector, bool force) 4580intel_dp_detect(struct drm_connector *connector, bool force)
4790{ 4581{
@@ -4808,17 +4599,25 @@ intel_dp_detect(struct drm_connector *connector, bool force)
4808 return connector_status_disconnected; 4599 return connector_status_disconnected;
4809 } 4600 }
4810 4601
4811 power_domain = intel_dp_power_get(intel_dp); 4602 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4603 intel_display_power_get(to_i915(dev), power_domain);
4812 4604
4813 /* Can't disconnect eDP, but you can close the lid... */ 4605 /* Can't disconnect eDP, but you can close the lid... */
4814 if (is_edp(intel_dp)) 4606 if (is_edp(intel_dp))
4815 status = edp_detect(intel_dp); 4607 status = edp_detect(intel_dp);
4816 else if (HAS_PCH_SPLIT(dev)) 4608 else if (intel_digital_port_connected(to_i915(dev),
4817 status = ironlake_dp_detect(intel_dp); 4609 dp_to_dig_port(intel_dp)))
4610 status = intel_dp_detect_dpcd(intel_dp);
4818 else 4611 else
4819 status = g4x_dp_detect(intel_dp); 4612 status = connector_status_disconnected;
4820 if (status != connector_status_connected) 4613
4614 if (status != connector_status_connected) {
4615 intel_dp->compliance_test_active = 0;
4616 intel_dp->compliance_test_type = 0;
4617 intel_dp->compliance_test_data = 0;
4618
4821 goto out; 4619 goto out;
4620 }
4822 4621
4823 intel_dp_probe_oui(intel_dp); 4622 intel_dp_probe_oui(intel_dp);
4824 4623
@@ -4832,6 +4631,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
4832 goto out; 4631 goto out;
4833 } 4632 }
4834 4633
4634 /*
4635 * Clearing NACK and defer counts to get their exact values
4636 * while reading EDID which are required by Compliance tests
4637 * 4.2.2.4 and 4.2.2.5
4638 */
4639 intel_dp->aux.i2c_nack_count = 0;
4640 intel_dp->aux.i2c_defer_count = 0;
4641
4835 intel_dp_set_edid(intel_dp); 4642 intel_dp_set_edid(intel_dp);
4836 4643
4837 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4644 if (intel_encoder->type != INTEL_OUTPUT_EDP)
@@ -4853,7 +4660,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
4853 } 4660 }
4854 4661
4855out: 4662out:
4856 intel_dp_power_put(intel_dp, power_domain); 4663 intel_display_power_put(to_i915(dev), power_domain);
4857 return status; 4664 return status;
4858} 4665}
4859 4666
@@ -4862,6 +4669,7 @@ intel_dp_force(struct drm_connector *connector)
4862{ 4669{
4863 struct intel_dp *intel_dp = intel_attached_dp(connector); 4670 struct intel_dp *intel_dp = intel_attached_dp(connector);
4864 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 4671 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4672 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4865 enum intel_display_power_domain power_domain; 4673 enum intel_display_power_domain power_domain;
4866 4674
4867 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4675 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4679,12 @@ intel_dp_force(struct drm_connector *connector)
4871 if (connector->status != connector_status_connected) 4679 if (connector->status != connector_status_connected)
4872 return; 4680 return;
4873 4681
4874 power_domain = intel_dp_power_get(intel_dp); 4682 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4683 intel_display_power_get(dev_priv, power_domain);
4875 4684
4876 intel_dp_set_edid(intel_dp); 4685 intel_dp_set_edid(intel_dp);
4877 4686
4878 intel_dp_power_put(intel_dp, power_domain); 4687 intel_display_power_put(dev_priv, power_domain);
4879 4688
4880 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4689 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4881 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4690 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5034,7 +4843,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5034 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4843 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5035 struct intel_dp *intel_dp = &intel_dig_port->dp; 4844 struct intel_dp *intel_dp = &intel_dig_port->dp;
5036 4845
5037 drm_dp_aux_unregister(&intel_dp->aux); 4846 intel_dp_aux_fini(intel_dp);
5038 intel_dp_mst_encoder_cleanup(intel_dig_port); 4847 intel_dp_mst_encoder_cleanup(intel_dig_port);
5039 if (is_edp(intel_dp)) { 4848 if (is_edp(intel_dp)) {
5040 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4849 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -5091,7 +4900,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5091 * indefinitely. 4900 * indefinitely.
5092 */ 4901 */
5093 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); 4902 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5094 power_domain = intel_display_port_power_domain(&intel_dig_port->base); 4903 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5095 intel_display_power_get(dev_priv, power_domain); 4904 intel_display_power_get(dev_priv, power_domain);
5096 4905
5097 edp_panel_vdd_schedule_off(intel_dp); 4906 edp_panel_vdd_schedule_off(intel_dp);
@@ -5172,7 +4981,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5172 port_name(intel_dig_port->port), 4981 port_name(intel_dig_port->port),
5173 long_hpd ? "long" : "short"); 4982 long_hpd ? "long" : "short");
5174 4983
5175 power_domain = intel_display_port_power_domain(intel_encoder); 4984 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5176 intel_display_power_get(dev_priv, power_domain); 4985 intel_display_power_get(dev_priv, power_domain);
5177 4986
5178 if (long_hpd) { 4987 if (long_hpd) {
@@ -5223,25 +5032,6 @@ put_power:
5223 return ret; 5032 return ret;
5224} 5033}
5225 5034
5226/* Return which DP Port should be selected for Transcoder DP control */
5227int
5228intel_trans_dp_port_sel(struct drm_crtc *crtc)
5229{
5230 struct drm_device *dev = crtc->dev;
5231 struct intel_encoder *intel_encoder;
5232 struct intel_dp *intel_dp;
5233
5234 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5235 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5236
5237 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5238 intel_encoder->type == INTEL_OUTPUT_EDP)
5239 return intel_dp->output_reg;
5240 }
5241
5242 return -1;
5243}
5244
5245/* check the VBT to see whether the eDP is on another port */ 5035/* check the VBT to see whether the eDP is on another port */
5246bool intel_dp_is_edp(struct drm_device *dev, enum port port) 5036bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5247{ 5037{
@@ -5313,7 +5103,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5313 struct edp_power_seq cur, vbt, spec, 5103 struct edp_power_seq cur, vbt, spec,
5314 *final = &intel_dp->pps_delays; 5104 *final = &intel_dp->pps_delays;
5315 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; 5105 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5316 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0; 5106 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5317 5107
5318 lockdep_assert_held(&dev_priv->pps_mutex); 5108 lockdep_assert_held(&dev_priv->pps_mutex);
5319 5109
@@ -5435,7 +5225,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5435 struct drm_i915_private *dev_priv = dev->dev_private; 5225 struct drm_i915_private *dev_priv = dev->dev_private;
5436 u32 pp_on, pp_off, pp_div, port_sel = 0; 5226 u32 pp_on, pp_off, pp_div, port_sel = 0;
5437 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); 5227 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5438 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg; 5228 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5439 enum port port = dp_to_dig_port(intel_dp)->port; 5229 enum port port = dp_to_dig_port(intel_dp)->port;
5440 const struct edp_power_seq *seq = &intel_dp->pps_delays; 5230 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5441 5231
@@ -5597,7 +5387,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5597 DRM_ERROR("Unsupported refreshrate type\n"); 5387 DRM_ERROR("Unsupported refreshrate type\n");
5598 } 5388 }
5599 } else if (INTEL_INFO(dev)->gen > 6) { 5389 } else if (INTEL_INFO(dev)->gen > 6) {
5600 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder); 5390 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5601 u32 val; 5391 u32 val;
5602 5392
5603 val = I915_READ(reg); 5393 val = I915_READ(reg);
@@ -6015,7 +5805,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6015 struct drm_device *dev = intel_encoder->base.dev; 5805 struct drm_device *dev = intel_encoder->base.dev;
6016 struct drm_i915_private *dev_priv = dev->dev_private; 5806 struct drm_i915_private *dev_priv = dev->dev_private;
6017 enum port port = intel_dig_port->port; 5807 enum port port = intel_dig_port->port;
6018 int type; 5808 int type, ret;
6019 5809
6020 intel_dp->pps_pipe = INVALID_PIPE; 5810 intel_dp->pps_pipe = INVALID_PIPE;
6021 5811
@@ -6036,6 +5826,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6036 else 5826 else
6037 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; 5827 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6038 5828
5829 if (HAS_DDI(dev))
5830 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5831
6039 /* Preserve the current hw state. */ 5832 /* Preserve the current hw state. */
6040 intel_dp->DP = I915_READ(intel_dp->output_reg); 5833 intel_dp->DP = I915_READ(intel_dp->output_reg);
6041 intel_dp->attached_connector = intel_connector; 5834 intel_dp->attached_connector = intel_connector;
@@ -6087,7 +5880,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6087 break; 5880 break;
6088 case PORT_B: 5881 case PORT_B:
6089 intel_encoder->hpd_pin = HPD_PORT_B; 5882 intel_encoder->hpd_pin = HPD_PORT_B;
6090 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) 5883 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
6091 intel_encoder->hpd_pin = HPD_PORT_A; 5884 intel_encoder->hpd_pin = HPD_PORT_A;
6092 break; 5885 break;
6093 case PORT_C: 5886 case PORT_C:
@@ -6113,7 +5906,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6113 pps_unlock(intel_dp); 5906 pps_unlock(intel_dp);
6114 } 5907 }
6115 5908
6116 intel_dp_aux_init(intel_dp, intel_connector); 5909 ret = intel_dp_aux_init(intel_dp, intel_connector);
5910 if (ret)
5911 goto fail;
6117 5912
6118 /* init MST on ports that can support it */ 5913 /* init MST on ports that can support it */
6119 if (HAS_DP_MST(dev) && 5914 if (HAS_DP_MST(dev) &&
@@ -6122,20 +5917,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6122 intel_connector->base.base.id); 5917 intel_connector->base.base.id);
6123 5918
6124 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 5919 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6125 drm_dp_aux_unregister(&intel_dp->aux); 5920 intel_dp_aux_fini(intel_dp);
6126 if (is_edp(intel_dp)) { 5921 intel_dp_mst_encoder_cleanup(intel_dig_port);
6127 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5922 goto fail;
6128 /*
 6129 * vdd might still be enabled due to the delayed vdd off.
6130 * Make sure vdd is actually turned off here.
6131 */
6132 pps_lock(intel_dp);
6133 edp_panel_vdd_off_sync(intel_dp);
6134 pps_unlock(intel_dp);
6135 }
6136 drm_connector_unregister(connector);
6137 drm_connector_cleanup(connector);
6138 return false;
6139 } 5923 }
6140 5924
6141 intel_dp_add_properties(intel_dp, connector); 5925 intel_dp_add_properties(intel_dp, connector);
@@ -6152,10 +5936,27 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6152 i915_debugfs_connector_add(connector); 5936 i915_debugfs_connector_add(connector);
6153 5937
6154 return true; 5938 return true;
5939
5940fail:
5941 if (is_edp(intel_dp)) {
5942 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5943 /*
 5944 * vdd might still be enabled due to the delayed vdd off.
5945 * Make sure vdd is actually turned off here.
5946 */
5947 pps_lock(intel_dp);
5948 edp_panel_vdd_off_sync(intel_dp);
5949 pps_unlock(intel_dp);
5950 }
5951 drm_connector_unregister(connector);
5952 drm_connector_cleanup(connector);
5953
5954 return false;
6155} 5955}
6156 5956
6157void 5957void
6158intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 5958intel_dp_init(struct drm_device *dev,
5959 i915_reg_t output_reg, enum port port)
6159{ 5960{
6160 struct drm_i915_private *dev_priv = dev->dev_private; 5961 struct drm_i915_private *dev_priv = dev->dev_private;
6161 struct intel_digital_port *intel_dig_port; 5962 struct intel_digital_port *intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
new file mode 100644
index 000000000000..88887938e0bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25
26static void
27intel_get_adjust_train(struct intel_dp *intel_dp,
28 const uint8_t link_status[DP_LINK_STATUS_SIZE])
29{
30 uint8_t v = 0;
31 uint8_t p = 0;
32 int lane;
33 uint8_t voltage_max;
34 uint8_t preemph_max;
35
36 for (lane = 0; lane < intel_dp->lane_count; lane++) {
37 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
38 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
39
40 if (this_v > v)
41 v = this_v;
42 if (this_p > p)
43 p = this_p;
44 }
45
46 voltage_max = intel_dp_voltage_max(intel_dp);
47 if (v >= voltage_max)
48 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
49
50 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
51 if (p >= preemph_max)
52 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
53
54 for (lane = 0; lane < 4; lane++)
55 intel_dp->train_set[lane] = v | p;
56}
57
58static bool
59intel_dp_set_link_train(struct intel_dp *intel_dp,
60 uint8_t dp_train_pat)
61{
62 uint8_t buf[sizeof(intel_dp->train_set) + 1];
63 int ret, len;
64
65 intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
66
67 buf[0] = dp_train_pat;
68 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
69 DP_TRAINING_PATTERN_DISABLE) {
70 /* don't write DP_TRAINING_LANEx_SET on disable */
71 len = 1;
72 } else {
73 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
74 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
75 len = intel_dp->lane_count + 1;
76 }
77
78 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
79 buf, len);
80
81 return ret == len;
82}
83
84static bool
85intel_dp_reset_link_train(struct intel_dp *intel_dp,
86 uint8_t dp_train_pat)
87{
88 if (!intel_dp->train_set_valid)
89 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
90 intel_dp_set_signal_levels(intel_dp);
91 return intel_dp_set_link_train(intel_dp, dp_train_pat);
92}
93
94static bool
95intel_dp_update_link_train(struct intel_dp *intel_dp)
96{
97 int ret;
98
99 intel_dp_set_signal_levels(intel_dp);
100
101 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
102 intel_dp->train_set, intel_dp->lane_count);
103
104 return ret == intel_dp->lane_count;
105}
106
107/* Enable corresponding port and start training pattern 1 */
108static void
109intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
110{
111 int i;
112 uint8_t voltage;
113 int voltage_tries, loop_tries;
114 uint8_t link_config[2];
115 uint8_t link_bw, rate_select;
116
117 if (intel_dp->prepare_link_retrain)
118 intel_dp->prepare_link_retrain(intel_dp);
119
120 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
121 &link_bw, &rate_select);
122
123 /* Write the link configuration data */
124 link_config[0] = link_bw;
125 link_config[1] = intel_dp->lane_count;
126 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
127 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
128 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
129 if (intel_dp->num_sink_rates)
130 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
131 &rate_select, 1);
132
133 link_config[0] = 0;
134 link_config[1] = DP_SET_ANSI_8B10B;
135 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
136
137 intel_dp->DP |= DP_PORT_EN;
138
139 /* clock recovery */
140 if (!intel_dp_reset_link_train(intel_dp,
141 DP_TRAINING_PATTERN_1 |
142 DP_LINK_SCRAMBLING_DISABLE)) {
143 DRM_ERROR("failed to enable link training\n");
144 return;
145 }
146
147 voltage = 0xff;
148 voltage_tries = 0;
149 loop_tries = 0;
150 for (;;) {
151 uint8_t link_status[DP_LINK_STATUS_SIZE];
152
153 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
154 if (!intel_dp_get_link_status(intel_dp, link_status)) {
155 DRM_ERROR("failed to get link status\n");
156 break;
157 }
158
159 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
160 DRM_DEBUG_KMS("clock recovery OK\n");
161 break;
162 }
163
164 /*
165 * if we used previously trained voltage and pre-emphasis values
166 * and we don't get clock recovery, reset link training values
167 */
168 if (intel_dp->train_set_valid) {
169 DRM_DEBUG_KMS("clock recovery not ok, reset");
170 /* clear the flag as we are not reusing train set */
171 intel_dp->train_set_valid = false;
172 if (!intel_dp_reset_link_train(intel_dp,
173 DP_TRAINING_PATTERN_1 |
174 DP_LINK_SCRAMBLING_DISABLE)) {
175 DRM_ERROR("failed to enable link training\n");
176 return;
177 }
178 continue;
179 }
180
181 /* Check to see if we've tried the max voltage */
182 for (i = 0; i < intel_dp->lane_count; i++)
183 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
184 break;
185 if (i == intel_dp->lane_count) {
186 ++loop_tries;
187 if (loop_tries == 5) {
188 DRM_ERROR("too many full retries, give up\n");
189 break;
190 }
191 intel_dp_reset_link_train(intel_dp,
192 DP_TRAINING_PATTERN_1 |
193 DP_LINK_SCRAMBLING_DISABLE);
194 voltage_tries = 0;
195 continue;
196 }
197
198 /* Check to see if we've tried the same voltage 5 times */
199 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
200 ++voltage_tries;
201 if (voltage_tries == 5) {
202 DRM_ERROR("too many voltage retries, give up\n");
203 break;
204 }
205 } else
206 voltage_tries = 0;
207 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
208
209 /* Update training set as requested by target */
210 intel_get_adjust_train(intel_dp, link_status);
211 if (!intel_dp_update_link_train(intel_dp)) {
212 DRM_ERROR("failed to update link training\n");
213 break;
214 }
215 }
216}
217
/*
 * Channel equalization phase of DP link training: after clock recovery has
 * locked, loop on TPS2 (or TPS3 when supported) adjusting drive settings per
 * the sink's requests until drm_dp_channel_eq_ok() passes.  Falls back into
 * clock recovery whenever the CR lock is lost or after repeated EQ failures;
 * at most 5 such fallbacks (cr_tries) before aborting.  On success the
 * current train_set is remembered for reuse (train_set_valid).
 */
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/*
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else if (intel_dp->link_rate == 540000)
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	/* tries: EQ adjust attempts since last CR fallback; cr_tries: fallbacks */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		if (!intel_dp_update_link_train(intel_dp)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	/* Leave the port sending idle patterns while training is stopped. */
	intel_dp_set_idle_link_train(intel_dp);

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
311
312void intel_dp_stop_link_train(struct intel_dp *intel_dp)
313{
314 intel_dp_set_link_train(intel_dp,
315 DP_TRAINING_PATTERN_DISABLE);
316}
317
/*
 * Run full DP link training: clock recovery (TPS1) followed by channel
 * equalization (TPS2/TPS3).
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 0639275fc471..8c4e7dfe304c 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
173 intel_mst->port = found->port; 173 intel_mst->port = found->port;
174 174
175 if (intel_dp->active_mst_links == 0) { 175 if (intel_dp->active_mst_links == 0) {
176 enum port port = intel_ddi_get_encoder_port(encoder); 176 intel_ddi_clk_select(encoder, intel_crtc->config);
177 177
178 intel_dp_set_link_params(intel_dp, intel_crtc->config); 178 intel_dp_set_link_params(intel_dp, intel_crtc->config);
179 179
180 /* FIXME: add support for SKL */
181 if (INTEL_INFO(dev)->gen < 9)
182 I915_WRITE(PORT_CLK_SEL(port),
183 intel_crtc->config->ddi_pll_sel);
184
185 intel_ddi_init_dp_buf_reg(&intel_dig_port->base); 180 intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
186 181
187 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 182 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
188 183
189
190 intel_dp_start_link_train(intel_dp); 184 intel_dp_start_link_train(intel_dp);
191 intel_dp_stop_link_train(intel_dp); 185 intel_dp_stop_link_train(intel_dp);
192 } 186 }
@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
414{ 408{
415#ifdef CONFIG_DRM_FBDEV_EMULATION 409#ifdef CONFIG_DRM_FBDEV_EMULATION
416 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 410 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
417 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); 411
412 if (dev_priv->fbdev)
413 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
414 &connector->base);
418#endif 415#endif
419} 416}
420 417
@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
422{ 419{
423#ifdef CONFIG_DRM_FBDEV_EMULATION 420#ifdef CONFIG_DRM_FBDEV_EMULATION
424 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 421 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
425 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); 422
423 if (dev_priv->fbdev)
424 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
425 &connector->base);
426#endif 426#endif
427} 427}
428 428
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d7926658b93d..ab5c147fa9e9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -248,6 +248,7 @@ struct intel_atomic_state {
248 unsigned int cdclk; 248 unsigned int cdclk;
249 bool dpll_set; 249 bool dpll_set;
250 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; 250 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
251 struct intel_wm_config wm_config;
251}; 252};
252 253
253struct intel_plane_state { 254struct intel_plane_state {
@@ -278,6 +279,9 @@ struct intel_plane_state {
278 int scaler_id; 279 int scaler_id;
279 280
280 struct drm_intel_sprite_colorkey ckey; 281 struct drm_intel_sprite_colorkey ckey;
282
283 /* async flip related structures */
284 struct drm_i915_gem_request *wait_req;
281}; 285};
282 286
283struct intel_initial_plane_config { 287struct intel_initial_plane_config {
@@ -332,6 +336,21 @@ struct intel_crtc_scaler_state {
332/* drm_mode->private_flags */ 336/* drm_mode->private_flags */
333#define I915_MODE_FLAG_INHERITED 1 337#define I915_MODE_FLAG_INHERITED 1
334 338
339struct intel_pipe_wm {
340 struct intel_wm_level wm[5];
341 uint32_t linetime;
342 bool fbc_wm_enabled;
343 bool pipe_enabled;
344 bool sprites_enabled;
345 bool sprites_scaled;
346};
347
348struct skl_pipe_wm {
349 struct skl_wm_level wm[8];
350 struct skl_wm_level trans_wm;
351 uint32_t linetime;
352};
353
335struct intel_crtc_state { 354struct intel_crtc_state {
336 struct drm_crtc_state base; 355 struct drm_crtc_state base;
337 356
@@ -466,6 +485,20 @@ struct intel_crtc_state {
466 485
467 /* w/a for waiting 2 vblanks during crtc enable */ 486 /* w/a for waiting 2 vblanks during crtc enable */
468 enum pipe hsw_workaround_pipe; 487 enum pipe hsw_workaround_pipe;
488
489 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
490 bool disable_lp_wm;
491
492 struct {
493 /*
494 * optimal watermarks, programmed post-vblank when this state
495 * is committed
496 */
497 union {
498 struct intel_pipe_wm ilk;
499 struct skl_pipe_wm skl;
500 } optimal;
501 } wm;
469}; 502};
470 503
471struct vlv_wm_state { 504struct vlv_wm_state {
@@ -477,26 +510,12 @@ struct vlv_wm_state {
477 bool cxsr; 510 bool cxsr;
478}; 511};
479 512
480struct intel_pipe_wm {
481 struct intel_wm_level wm[5];
482 uint32_t linetime;
483 bool fbc_wm_enabled;
484 bool pipe_enabled;
485 bool sprites_enabled;
486 bool sprites_scaled;
487};
488
489struct intel_mmio_flip { 513struct intel_mmio_flip {
490 struct work_struct work; 514 struct work_struct work;
491 struct drm_i915_private *i915; 515 struct drm_i915_private *i915;
492 struct drm_i915_gem_request *req; 516 struct drm_i915_gem_request *req;
493 struct intel_crtc *crtc; 517 struct intel_crtc *crtc;
494}; 518 unsigned int rotation;
495
496struct skl_pipe_wm {
497 struct skl_wm_level wm[8];
498 struct skl_wm_level trans_wm;
499 uint32_t linetime;
500}; 519};
501 520
502/* 521/*
@@ -507,13 +526,11 @@ struct skl_pipe_wm {
507 */ 526 */
508struct intel_crtc_atomic_commit { 527struct intel_crtc_atomic_commit {
509 /* Sleepable operations to perform before commit */ 528 /* Sleepable operations to perform before commit */
510 bool wait_for_flips;
511 bool disable_fbc; 529 bool disable_fbc;
512 bool disable_ips; 530 bool disable_ips;
513 bool disable_cxsr; 531 bool disable_cxsr;
514 bool pre_disable_primary; 532 bool pre_disable_primary;
515 bool update_wm_pre, update_wm_post; 533 bool update_wm_pre, update_wm_post;
516 unsigned disabled_planes;
517 534
518 /* Sleepable operations to perform after commit */ 535 /* Sleepable operations to perform after commit */
519 unsigned fb_bits; 536 unsigned fb_bits;
@@ -566,9 +583,10 @@ struct intel_crtc {
566 /* per-pipe watermark state */ 583 /* per-pipe watermark state */
567 struct { 584 struct {
568 /* watermarks currently being used */ 585 /* watermarks currently being used */
569 struct intel_pipe_wm active; 586 union {
570 /* SKL wm values currently in use */ 587 struct intel_pipe_wm ilk;
571 struct skl_pipe_wm skl_active; 588 struct skl_pipe_wm skl;
589 } active;
572 /* allow CxSR on this pipe */ 590 /* allow CxSR on this pipe */
573 bool cxsr_allowed; 591 bool cxsr_allowed;
574 } wm; 592 } wm;
@@ -676,7 +694,7 @@ struct cxsr_latency {
676#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) 694#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
677 695
678struct intel_hdmi { 696struct intel_hdmi {
679 u32 hdmi_reg; 697 i915_reg_t hdmi_reg;
680 int ddc_bus; 698 int ddc_bus;
681 bool limited_color_range; 699 bool limited_color_range;
682 bool color_range_auto; 700 bool color_range_auto;
@@ -718,15 +736,10 @@ enum link_m_n_set {
718 M2_N2 736 M2_N2
719}; 737};
720 738
721struct sink_crc {
722 bool started;
723 u8 last_crc[6];
724 int last_count;
725};
726
727struct intel_dp { 739struct intel_dp {
728 uint32_t output_reg; 740 i915_reg_t output_reg;
729 uint32_t aux_ch_ctl_reg; 741 i915_reg_t aux_ch_ctl_reg;
742 i915_reg_t aux_ch_data_reg[5];
730 uint32_t DP; 743 uint32_t DP;
731 int link_rate; 744 int link_rate;
732 uint8_t lane_count; 745 uint8_t lane_count;
@@ -740,7 +753,6 @@ struct intel_dp {
740 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ 753 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
741 uint8_t num_sink_rates; 754 uint8_t num_sink_rates;
742 int sink_rates[DP_MAX_SUPPORTED_RATES]; 755 int sink_rates[DP_MAX_SUPPORTED_RATES];
743 struct sink_crc sink_crc;
744 struct drm_dp_aux aux; 756 struct drm_dp_aux aux;
745 uint8_t train_set[4]; 757 uint8_t train_set[4];
746 int panel_power_up_delay; 758 int panel_power_up_delay;
@@ -782,6 +794,10 @@ struct intel_dp {
782 bool has_aux_irq, 794 bool has_aux_irq,
783 int send_bytes, 795 int send_bytes,
784 uint32_t aux_clock_divider); 796 uint32_t aux_clock_divider);
797
 798 /* This is called before a link training is started */
799 void (*prepare_link_retrain)(struct intel_dp *intel_dp);
800
785 bool train_set_valid; 801 bool train_set_valid;
786 802
787 /* Displayport compliance testing */ 803 /* Displayport compliance testing */
@@ -941,7 +957,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
941 enum pipe pipe); 957 enum pipe pipe);
942void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 958void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
943 enum transcoder pch_transcoder); 959 enum transcoder pch_transcoder);
944void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv); 960void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
961void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
945 962
946/* i915_irq.c */ 963/* i915_irq.c */
947void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 964void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
@@ -972,6 +989,8 @@ void intel_crt_init(struct drm_device *dev);
972 989
973 990
974/* intel_ddi.c */ 991/* intel_ddi.c */
992void intel_ddi_clk_select(struct intel_encoder *encoder,
993 const struct intel_crtc_state *pipe_config);
975void intel_prepare_ddi(struct drm_device *dev); 994void intel_prepare_ddi(struct drm_device *dev);
976void hsw_fdi_link_train(struct drm_crtc *crtc); 995void hsw_fdi_link_train(struct drm_crtc *crtc);
977void intel_ddi_init(struct drm_device *dev, enum port port); 996void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -986,7 +1005,7 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
986bool intel_ddi_pll_select(struct intel_crtc *crtc, 1005bool intel_ddi_pll_select(struct intel_crtc *crtc,
987 struct intel_crtc_state *crtc_state); 1006 struct intel_crtc_state *crtc_state);
988void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); 1007void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
989void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); 1008void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
990bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 1009bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
991void intel_ddi_fdi_disable(struct drm_crtc *crtc); 1010void intel_ddi_fdi_disable(struct drm_crtc *crtc);
992void intel_ddi_get_config(struct intel_encoder *encoder, 1011void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1054,6 +1073,15 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
1054{ 1073{
1055 drm_wait_one_vblank(dev, pipe); 1074 drm_wait_one_vblank(dev, pipe);
1056} 1075}
1076static inline void
1077intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
1078{
1079 const struct intel_crtc *crtc =
1080 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1081
1082 if (crtc->active)
1083 intel_wait_for_vblank(dev, pipe);
1084}
1057int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 1085int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
1058void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1086void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1059 struct intel_digital_port *dport, 1087 struct intel_digital_port *dport,
@@ -1067,9 +1095,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1067 struct drm_modeset_acquire_ctx *ctx); 1095 struct drm_modeset_acquire_ctx *ctx);
1068int intel_pin_and_fence_fb_obj(struct drm_plane *plane, 1096int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
1069 struct drm_framebuffer *fb, 1097 struct drm_framebuffer *fb,
1070 const struct drm_plane_state *plane_state, 1098 const struct drm_plane_state *plane_state);
1071 struct intel_engine_cs *pipelined,
1072 struct drm_i915_gem_request **pipelined_request);
1073struct drm_framebuffer * 1099struct drm_framebuffer *
1074__intel_framebuffer_create(struct drm_device *dev, 1100__intel_framebuffer_create(struct drm_device *dev,
1075 struct drm_mode_fb_cmd2 *mode_cmd, 1101 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1150,7 +1176,10 @@ void broxton_ddi_phy_uninit(struct drm_device *dev);
1150void bxt_enable_dc9(struct drm_i915_private *dev_priv); 1176void bxt_enable_dc9(struct drm_i915_private *dev_priv);
1151void bxt_disable_dc9(struct drm_i915_private *dev_priv); 1177void bxt_disable_dc9(struct drm_i915_private *dev_priv);
1152void skl_init_cdclk(struct drm_i915_private *dev_priv); 1178void skl_init_cdclk(struct drm_i915_private *dev_priv);
1179int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
1153void skl_uninit_cdclk(struct drm_i915_private *dev_priv); 1180void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
1181void skl_enable_dc6(struct drm_i915_private *dev_priv);
1182void skl_disable_dc6(struct drm_i915_private *dev_priv);
1154void intel_dp_get_m_n(struct intel_crtc *crtc, 1183void intel_dp_get_m_n(struct intel_crtc *crtc,
1155 struct intel_crtc_state *pipe_config); 1184 struct intel_crtc_state *pipe_config);
1156void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); 1185void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1167,33 +1196,30 @@ void hsw_enable_ips(struct intel_crtc *crtc);
1167void hsw_disable_ips(struct intel_crtc *crtc); 1196void hsw_disable_ips(struct intel_crtc *crtc);
1168enum intel_display_power_domain 1197enum intel_display_power_domain
1169intel_display_port_power_domain(struct intel_encoder *intel_encoder); 1198intel_display_port_power_domain(struct intel_encoder *intel_encoder);
1199enum intel_display_power_domain
1200intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
1170void intel_mode_from_pipe_config(struct drm_display_mode *mode, 1201void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1171 struct intel_crtc_state *pipe_config); 1202 struct intel_crtc_state *pipe_config);
1172void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
1173void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); 1203void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
1174 1204
1175int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1205int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1176int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); 1206int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1177 1207
1178unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 1208u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
1179 struct drm_i915_gem_object *obj, 1209 struct drm_i915_gem_object *obj,
1180 unsigned int plane); 1210 unsigned int plane);
1181 1211
1182u32 skl_plane_ctl_format(uint32_t pixel_format); 1212u32 skl_plane_ctl_format(uint32_t pixel_format);
1183u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1213u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
1184u32 skl_plane_ctl_rotation(unsigned int rotation); 1214u32 skl_plane_ctl_rotation(unsigned int rotation);
1185 1215
1186/* intel_csr.c */ 1216/* intel_csr.c */
1187void intel_csr_ucode_init(struct drm_device *dev); 1217void intel_csr_ucode_init(struct drm_i915_private *);
1188enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv); 1218void intel_csr_load_program(struct drm_i915_private *);
1189void intel_csr_load_status_set(struct drm_i915_private *dev_priv, 1219void intel_csr_ucode_fini(struct drm_i915_private *);
1190 enum csr_state state);
1191void intel_csr_load_program(struct drm_device *dev);
1192void intel_csr_ucode_fini(struct drm_device *dev);
1193void assert_csr_loaded(struct drm_i915_private *dev_priv);
1194 1220
1195/* intel_dp.c */ 1221/* intel_dp.c */
1196void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 1222void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
1197bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1223bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1198 struct intel_connector *intel_connector); 1224 struct intel_connector *intel_connector);
1199void intel_dp_set_link_params(struct intel_dp *intel_dp, 1225void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1231,6 +1257,22 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1231 struct intel_digital_port *port); 1257 struct intel_digital_port *port);
1232void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); 1258void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
1233 1259
1260void
1261intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
1262 uint8_t dp_train_pat);
1263void
1264intel_dp_set_signal_levels(struct intel_dp *intel_dp);
1265void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
1266uint8_t
1267intel_dp_voltage_max(struct intel_dp *intel_dp);
1268uint8_t
1269intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
1270void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1271 uint8_t *link_bw, uint8_t *rate_select);
1272bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
1273bool
1274intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
1275
1234/* intel_dp_mst.c */ 1276/* intel_dp_mst.c */
1235int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1277int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
1236void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 1278void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1245,7 +1287,7 @@ void intel_dvo_init(struct drm_device *dev);
1245/* legacy fbdev emulation in intel_fbdev.c */ 1287/* legacy fbdev emulation in intel_fbdev.c */
1246#ifdef CONFIG_DRM_FBDEV_EMULATION 1288#ifdef CONFIG_DRM_FBDEV_EMULATION
1247extern int intel_fbdev_init(struct drm_device *dev); 1289extern int intel_fbdev_init(struct drm_device *dev);
1248extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie); 1290extern void intel_fbdev_initial_config_async(struct drm_device *dev);
1249extern void intel_fbdev_fini(struct drm_device *dev); 1291extern void intel_fbdev_fini(struct drm_device *dev);
1250extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); 1292extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
1251extern void intel_fbdev_output_poll_changed(struct drm_device *dev); 1293extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
@@ -1256,7 +1298,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
1256 return 0; 1298 return 0;
1257} 1299}
1258 1300
1259static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie) 1301static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
1260{ 1302{
1261} 1303}
1262 1304
@@ -1284,11 +1326,10 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
1284 enum fb_op_origin origin); 1326 enum fb_op_origin origin);
1285void intel_fbc_flush(struct drm_i915_private *dev_priv, 1327void intel_fbc_flush(struct drm_i915_private *dev_priv,
1286 unsigned int frontbuffer_bits, enum fb_op_origin origin); 1328 unsigned int frontbuffer_bits, enum fb_op_origin origin);
1287const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
1288void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); 1329void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
1289 1330
1290/* intel_hdmi.c */ 1331/* intel_hdmi.c */
1291void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); 1332void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
1292void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 1333void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1293 struct intel_connector *intel_connector); 1334 struct intel_connector *intel_connector);
1294struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 1335struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1364,7 +1405,10 @@ void intel_psr_single_frame_update(struct drm_device *dev,
1364/* intel_runtime_pm.c */ 1405/* intel_runtime_pm.c */
1365int intel_power_domains_init(struct drm_i915_private *); 1406int intel_power_domains_init(struct drm_i915_private *);
1366void intel_power_domains_fini(struct drm_i915_private *); 1407void intel_power_domains_fini(struct drm_i915_private *);
1367void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); 1408void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
1409void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
1410void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
1411void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
1368void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); 1412void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
1369 1413
1370bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 1414bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
@@ -1375,8 +1419,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
1375 enum intel_display_power_domain domain); 1419 enum intel_display_power_domain domain);
1376void intel_display_power_put(struct drm_i915_private *dev_priv, 1420void intel_display_power_put(struct drm_i915_private *dev_priv,
1377 enum intel_display_power_domain domain); 1421 enum intel_display_power_domain domain);
1378void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1379void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1380void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 1422void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1381void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); 1423void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1382void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 1424void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
@@ -1394,12 +1436,6 @@ void intel_init_clock_gating(struct drm_device *dev);
1394void intel_suspend_hw(struct drm_device *dev); 1436void intel_suspend_hw(struct drm_device *dev);
1395int ilk_wm_max_level(const struct drm_device *dev); 1437int ilk_wm_max_level(const struct drm_device *dev);
1396void intel_update_watermarks(struct drm_crtc *crtc); 1438void intel_update_watermarks(struct drm_crtc *crtc);
1397void intel_update_sprite_watermarks(struct drm_plane *plane,
1398 struct drm_crtc *crtc,
1399 uint32_t sprite_width,
1400 uint32_t sprite_height,
1401 int pixel_size,
1402 bool enabled, bool scaled);
1403void intel_init_pm(struct drm_device *dev); 1439void intel_init_pm(struct drm_device *dev);
1404void intel_pm_setup(struct drm_device *dev); 1440void intel_pm_setup(struct drm_device *dev);
1405void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 1441void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1427,7 +1463,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1427uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); 1463uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
1428 1464
1429/* intel_sdvo.c */ 1465/* intel_sdvo.c */
1430bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); 1466bool intel_sdvo_init(struct drm_device *dev,
1467 i915_reg_t reg, enum port port);
1431 1468
1432 1469
1433/* intel_sprite.c */ 1470/* intel_sprite.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 170ae6f4866e..efb5a27dd49c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
60 DRM_ERROR("DPI FIFOs are not empty\n"); 60 DRM_ERROR("DPI FIFOs are not empty\n");
61} 61}
62 62
63static void write_data(struct drm_i915_private *dev_priv, u32 reg, 63static void write_data(struct drm_i915_private *dev_priv,
64 i915_reg_t reg,
64 const u8 *data, u32 len) 65 const u8 *data, u32 len)
65{ 66{
66 u32 i, j; 67 u32 i, j;
@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg,
75 } 76 }
76} 77}
77 78
78static void read_data(struct drm_i915_private *dev_priv, u32 reg, 79static void read_data(struct drm_i915_private *dev_priv,
80 i915_reg_t reg,
79 u8 *data, u32 len) 81 u8 *data, u32 len)
80{ 82{
81 u32 i, j; 83 u32 i, j;
@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
98 struct mipi_dsi_packet packet; 100 struct mipi_dsi_packet packet;
99 ssize_t ret; 101 ssize_t ret;
100 const u8 *header, *data; 102 const u8 *header, *data;
101 u32 data_reg, data_mask, ctrl_reg, ctrl_mask; 103 i915_reg_t data_reg, ctrl_reg;
104 u32 data_mask, ctrl_mask;
102 105
103 ret = mipi_dsi_create_packet(&packet, msg); 106 ret = mipi_dsi_create_packet(&packet, msg);
104 if (ret < 0) 107 if (ret < 0)
@@ -377,10 +380,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
377 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 380 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
378 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 381 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
379 enum port port; 382 enum port port;
380 u32 temp;
381 u32 port_ctrl;
382 383
383 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 384 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
385 u32 temp;
386
384 temp = I915_READ(VLV_CHICKEN_3); 387 temp = I915_READ(VLV_CHICKEN_3);
385 temp &= ~PIXEL_OVERLAP_CNT_MASK | 388 temp &= ~PIXEL_OVERLAP_CNT_MASK |
386 intel_dsi->pixel_overlap << 389 intel_dsi->pixel_overlap <<
@@ -389,8 +392,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
389 } 392 }
390 393
391 for_each_dsi_port(port, intel_dsi->ports) { 394 for_each_dsi_port(port, intel_dsi->ports) {
392 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : 395 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
393 MIPI_PORT_CTRL(port); 396 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
397 u32 temp;
394 398
395 temp = I915_READ(port_ctrl); 399 temp = I915_READ(port_ctrl);
396 400
@@ -416,13 +420,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
416 struct drm_i915_private *dev_priv = dev->dev_private; 420 struct drm_i915_private *dev_priv = dev->dev_private;
417 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 421 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
418 enum port port; 422 enum port port;
419 u32 temp;
420 u32 port_ctrl;
421 423
422 for_each_dsi_port(port, intel_dsi->ports) { 424 for_each_dsi_port(port, intel_dsi->ports) {
425 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
426 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
427 u32 temp;
428
423 /* de-assert ip_tg_enable signal */ 429 /* de-assert ip_tg_enable signal */
424 port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
425 MIPI_PORT_CTRL(port);
426 temp = I915_READ(port_ctrl); 430 temp = I915_READ(port_ctrl);
427 I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); 431 I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
428 POSTING_READ(port_ctrl); 432 POSTING_READ(port_ctrl);
@@ -580,11 +584,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
580 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 584 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
581 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 585 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
582 enum port port; 586 enum port port;
583 u32 val;
584 u32 port_ctrl = 0;
585 587
586 DRM_DEBUG_KMS("\n"); 588 DRM_DEBUG_KMS("\n");
587 for_each_dsi_port(port, intel_dsi->ports) { 589 for_each_dsi_port(port, intel_dsi->ports) {
590 /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
591 i915_reg_t port_ctrl = IS_BROXTON(dev) ?
592 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
593 u32 val;
588 594
589 I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | 595 I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
590 ULPS_STATE_ENTER); 596 ULPS_STATE_ENTER);
@@ -598,12 +604,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
598 ULPS_STATE_ENTER); 604 ULPS_STATE_ENTER);
599 usleep_range(2000, 2500); 605 usleep_range(2000, 2500);
600 606
601 if (IS_BROXTON(dev))
602 port_ctrl = BXT_MIPI_PORT_CTRL(port);
603 else if (IS_VALLEYVIEW(dev))
604 /* Common bit for both MIPI Port A & MIPI Port C */
605 port_ctrl = MIPI_PORT_CTRL(PORT_A);
606
607 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 607 /* Wait till Clock lanes are in LP-00 state for MIPI Port A
608 * only. MIPI Port C has no similar bit for checking 608 * only. MIPI Port C has no similar bit for checking
609 */ 609 */
@@ -656,7 +656,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
656 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 656 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
657 struct drm_device *dev = encoder->base.dev; 657 struct drm_device *dev = encoder->base.dev;
658 enum intel_display_power_domain power_domain; 658 enum intel_display_power_domain power_domain;
659 u32 dpi_enabled, func, ctrl_reg;
660 enum port port; 659 enum port port;
661 660
662 DRM_DEBUG_KMS("\n"); 661 DRM_DEBUG_KMS("\n");
@@ -667,9 +666,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
667 666
668 /* XXX: this only works for one DSI output */ 667 /* XXX: this only works for one DSI output */
669 for_each_dsi_port(port, intel_dsi->ports) { 668 for_each_dsi_port(port, intel_dsi->ports) {
669 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
670 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
671 u32 dpi_enabled, func;
672
670 func = I915_READ(MIPI_DSI_FUNC_PRG(port)); 673 func = I915_READ(MIPI_DSI_FUNC_PRG(port));
671 ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
672 MIPI_PORT_CTRL(port);
673 dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; 674 dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
674 675
675 /* Due to some hardware limitations on BYT, MIPI Port C DPI 676 /* Due to some hardware limitations on BYT, MIPI Port C DPI
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8492053e0ff0..7161deb2aed8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -44,6 +44,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
44 .type = INTEL_DVO_CHIP_TMDS, 44 .type = INTEL_DVO_CHIP_TMDS,
45 .name = "sil164", 45 .name = "sil164",
46 .dvo_reg = DVOC, 46 .dvo_reg = DVOC,
47 .dvo_srcdim_reg = DVOC_SRCDIM,
47 .slave_addr = SIL164_ADDR, 48 .slave_addr = SIL164_ADDR,
48 .dev_ops = &sil164_ops, 49 .dev_ops = &sil164_ops,
49 }, 50 },
@@ -51,6 +52,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
51 .type = INTEL_DVO_CHIP_TMDS, 52 .type = INTEL_DVO_CHIP_TMDS,
52 .name = "ch7xxx", 53 .name = "ch7xxx",
53 .dvo_reg = DVOC, 54 .dvo_reg = DVOC,
55 .dvo_srcdim_reg = DVOC_SRCDIM,
54 .slave_addr = CH7xxx_ADDR, 56 .slave_addr = CH7xxx_ADDR,
55 .dev_ops = &ch7xxx_ops, 57 .dev_ops = &ch7xxx_ops,
56 }, 58 },
@@ -58,6 +60,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
58 .type = INTEL_DVO_CHIP_TMDS, 60 .type = INTEL_DVO_CHIP_TMDS,
59 .name = "ch7xxx", 61 .name = "ch7xxx",
60 .dvo_reg = DVOC, 62 .dvo_reg = DVOC,
63 .dvo_srcdim_reg = DVOC_SRCDIM,
61 .slave_addr = 0x75, /* For some ch7010 */ 64 .slave_addr = 0x75, /* For some ch7010 */
62 .dev_ops = &ch7xxx_ops, 65 .dev_ops = &ch7xxx_ops,
63 }, 66 },
@@ -65,6 +68,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
65 .type = INTEL_DVO_CHIP_LVDS, 68 .type = INTEL_DVO_CHIP_LVDS,
66 .name = "ivch", 69 .name = "ivch",
67 .dvo_reg = DVOA, 70 .dvo_reg = DVOA,
71 .dvo_srcdim_reg = DVOA_SRCDIM,
68 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ 72 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
69 .dev_ops = &ivch_ops, 73 .dev_ops = &ivch_ops,
70 }, 74 },
@@ -72,6 +76,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
72 .type = INTEL_DVO_CHIP_TMDS, 76 .type = INTEL_DVO_CHIP_TMDS,
73 .name = "tfp410", 77 .name = "tfp410",
74 .dvo_reg = DVOC, 78 .dvo_reg = DVOC,
79 .dvo_srcdim_reg = DVOC_SRCDIM,
75 .slave_addr = TFP410_ADDR, 80 .slave_addr = TFP410_ADDR,
76 .dev_ops = &tfp410_ops, 81 .dev_ops = &tfp410_ops,
77 }, 82 },
@@ -79,6 +84,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
79 .type = INTEL_DVO_CHIP_LVDS, 84 .type = INTEL_DVO_CHIP_LVDS,
80 .name = "ch7017", 85 .name = "ch7017",
81 .dvo_reg = DVOC, 86 .dvo_reg = DVOC,
87 .dvo_srcdim_reg = DVOC_SRCDIM,
82 .slave_addr = 0x75, 88 .slave_addr = 0x75,
83 .gpio = GMBUS_PIN_DPB, 89 .gpio = GMBUS_PIN_DPB,
84 .dev_ops = &ch7017_ops, 90 .dev_ops = &ch7017_ops,
@@ -87,6 +93,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
87 .type = INTEL_DVO_CHIP_TMDS, 93 .type = INTEL_DVO_CHIP_TMDS,
88 .name = "ns2501", 94 .name = "ns2501",
89 .dvo_reg = DVOB, 95 .dvo_reg = DVOB,
96 .dvo_srcdim_reg = DVOB_SRCDIM,
90 .slave_addr = NS2501_ADDR, 97 .slave_addr = NS2501_ADDR,
91 .dev_ops = &ns2501_ops, 98 .dev_ops = &ns2501_ops,
92 } 99 }
@@ -171,7 +178,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
171{ 178{
172 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 179 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
173 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
174 u32 dvo_reg = intel_dvo->dev.dvo_reg; 181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
175 u32 temp = I915_READ(dvo_reg); 182 u32 temp = I915_READ(dvo_reg);
176 183
177 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 184 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -184,7 +191,7 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
184 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 191 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
185 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
186 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
187 u32 dvo_reg = intel_dvo->dev.dvo_reg; 194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
188 u32 temp = I915_READ(dvo_reg); 195 u32 temp = I915_READ(dvo_reg);
189 196
190 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, 197 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
@@ -255,20 +262,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
255 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
256 int pipe = crtc->pipe; 263 int pipe = crtc->pipe;
257 u32 dvo_val; 264 u32 dvo_val;
258 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; 265 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
259 266 i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
260 switch (dvo_reg) {
261 case DVOA:
262 default:
263 dvo_srcdim_reg = DVOA_SRCDIM;
264 break;
265 case DVOB:
266 dvo_srcdim_reg = DVOB_SRCDIM;
267 break;
268 case DVOC:
269 dvo_srcdim_reg = DVOC_SRCDIM;
270 break;
271 }
272 267
273 /* Save the data order, since I don't know what it should be set to. */ 268 /* Save the data order, since I don't know what it should be set to. */
274 dvo_val = I915_READ(dvo_reg) & 269 dvo_val = I915_READ(dvo_reg) &
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index cf47352b7b8e..11fc5281e8ef 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -46,6 +46,11 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
46 return dev_priv->fbc.enable_fbc != NULL; 46 return dev_priv->fbc.enable_fbc != NULL;
47} 47}
48 48
49static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
50{
51 return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
52}
53
49/* 54/*
50 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the 55 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
51 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's 56 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -182,7 +187,8 @@ static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
182 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 187 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
183} 188}
184 189
185static void intel_fbc_nuke(struct drm_i915_private *dev_priv) 190/* This function forces a CFB recompression through the nuke operation. */
191static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
186{ 192{
187 I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); 193 I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
188 POSTING_READ(MSG_FBC_REND_STATE); 194 POSTING_READ(MSG_FBC_REND_STATE);
@@ -231,7 +237,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
231 I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset); 237 I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
232 } 238 }
233 239
234 intel_fbc_nuke(dev_priv); 240 intel_fbc_recompress(dev_priv);
235 241
236 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); 242 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
237} 243}
@@ -310,7 +316,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
310 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 316 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
311 I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc)); 317 I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
312 318
313 intel_fbc_nuke(dev_priv); 319 intel_fbc_recompress(dev_priv);
314 320
315 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane)); 321 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
316} 322}
@@ -370,8 +376,6 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
370 if (dev_priv->fbc.fbc_work == NULL) 376 if (dev_priv->fbc.fbc_work == NULL)
371 return; 377 return;
372 378
373 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
374
375 /* Synchronisation is provided by struct_mutex and checking of 379 /* Synchronisation is provided by struct_mutex and checking of
376 * dev_priv->fbc.fbc_work, so we can perform the cancellation 380 * dev_priv->fbc.fbc_work, so we can perform the cancellation
377 * entirely asynchronously. 381 * entirely asynchronously.
@@ -432,7 +436,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
432 436
433 intel_fbc_cancel_work(dev_priv); 437 intel_fbc_cancel_work(dev_priv);
434 438
435 dev_priv->fbc.disable_fbc(dev_priv); 439 if (dev_priv->fbc.enabled)
440 dev_priv->fbc.disable_fbc(dev_priv);
436 dev_priv->fbc.crtc = NULL; 441 dev_priv->fbc.crtc = NULL;
437} 442}
438 443
@@ -471,78 +476,45 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
471 mutex_unlock(&dev_priv->fbc.lock); 476 mutex_unlock(&dev_priv->fbc.lock);
472} 477}
473 478
474const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
475{
476 switch (reason) {
477 case FBC_OK:
478 return "FBC enabled but currently disabled in hardware";
479 case FBC_UNSUPPORTED:
480 return "unsupported by this chipset";
481 case FBC_NO_OUTPUT:
482 return "no output";
483 case FBC_STOLEN_TOO_SMALL:
484 return "not enough stolen memory";
485 case FBC_UNSUPPORTED_MODE:
486 return "mode incompatible with compression";
487 case FBC_MODE_TOO_LARGE:
488 return "mode too large for compression";
489 case FBC_BAD_PLANE:
490 return "FBC unsupported on plane";
491 case FBC_NOT_TILED:
492 return "framebuffer not tiled or fenced";
493 case FBC_MULTIPLE_PIPES:
494 return "more than one pipe active";
495 case FBC_MODULE_PARAM:
496 return "disabled per module param";
497 case FBC_CHIP_DEFAULT:
498 return "disabled per chip default";
499 case FBC_ROTATION:
500 return "rotation unsupported";
501 case FBC_IN_DBG_MASTER:
502 return "Kernel debugger is active";
503 case FBC_BAD_STRIDE:
504 return "framebuffer stride not supported";
505 case FBC_PIXEL_RATE:
506 return "pixel rate is too big";
507 case FBC_PIXEL_FORMAT:
508 return "pixel format is invalid";
509 default:
510 MISSING_CASE(reason);
511 return "unknown reason";
512 }
513}
514
515static void set_no_fbc_reason(struct drm_i915_private *dev_priv, 479static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
516 enum no_fbc_reason reason) 480 const char *reason)
517{ 481{
518 if (dev_priv->fbc.no_fbc_reason == reason) 482 if (dev_priv->fbc.no_fbc_reason == reason)
519 return; 483 return;
520 484
521 dev_priv->fbc.no_fbc_reason = reason; 485 dev_priv->fbc.no_fbc_reason = reason;
522 DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason)); 486 DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
487}
488
489static bool crtc_is_valid(struct intel_crtc *crtc)
490{
491 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
492
493 if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
494 return false;
495
496 if (!intel_crtc_active(&crtc->base))
497 return false;
498
499 if (!to_intel_plane_state(crtc->base.primary->state)->visible)
500 return false;
501
502 return true;
523} 503}
524 504
525static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) 505static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
526{ 506{
527 struct drm_crtc *crtc = NULL, *tmp_crtc; 507 struct drm_crtc *crtc = NULL, *tmp_crtc;
528 enum pipe pipe; 508 enum pipe pipe;
529 bool pipe_a_only = false;
530
531 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
532 pipe_a_only = true;
533 509
534 for_each_pipe(dev_priv, pipe) { 510 for_each_pipe(dev_priv, pipe) {
535 tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 511 tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
536 512
537 if (intel_crtc_active(tmp_crtc) && 513 if (crtc_is_valid(to_intel_crtc(tmp_crtc)))
538 to_intel_plane_state(tmp_crtc->primary->state)->visible)
539 crtc = tmp_crtc; 514 crtc = tmp_crtc;
540
541 if (pipe_a_only)
542 break;
543 } 515 }
544 516
545 if (!crtc || crtc->primary->fb == NULL) 517 if (!crtc)
546 return NULL; 518 return NULL;
547 519
548 return crtc; 520 return crtc;
@@ -581,7 +553,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
581 * reserved range size, so it always assumes the maximum (8mb) is used. 553 * reserved range size, so it always assumes the maximum (8mb) is used.
582 * If we enable FBC using a CFB on that memory range we'll get FIFO 554 * If we enable FBC using a CFB on that memory range we'll get FIFO
583 * underruns, even if that range is not reserved by the BIOS. */ 555 * underruns, even if that range is not reserved by the BIOS. */
584 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) 556 if (IS_BROADWELL(dev_priv) ||
557 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
585 end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024; 558 end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
586 else 559 else
587 end = dev_priv->gtt.stolen_usable_size; 560 end = dev_priv->gtt.stolen_usable_size;
@@ -734,6 +707,7 @@ static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
734 if (INTEL_INFO(dev_priv)->gen >= 7) 707 if (INTEL_INFO(dev_priv)->gen >= 7)
735 lines = min(lines, 2048); 708 lines = min(lines, 2048);
736 709
710 /* Hardware needs the full buffer stride, not just the active area. */
737 return lines * fb->pitches[0]; 711 return lines * fb->pitches[0];
738} 712}
739 713
@@ -832,84 +806,62 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
832 * __intel_fbc_update - enable/disable FBC as needed, unlocked 806 * __intel_fbc_update - enable/disable FBC as needed, unlocked
833 * @dev_priv: i915 device instance 807 * @dev_priv: i915 device instance
834 * 808 *
835 * Set up the framebuffer compression hardware at mode set time. We 809 * This function completely reevaluates the status of FBC, then enables,
836 * enable it if possible: 810 * disables or maintains it on the same state.
837 * - plane A only (on pre-965)
838 * - no pixel mulitply/line duplication
839 * - no alpha buffer discard
840 * - no dual wide
841 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
842 *
843 * We can't assume that any compression will take place (worst case),
844 * so the compressed buffer has to be the same size as the uncompressed
845 * one. It also must reside (along with the line length buffer) in
846 * stolen memory.
847 *
848 * We need to enable/disable FBC on a global basis.
849 */ 811 */
850static void __intel_fbc_update(struct drm_i915_private *dev_priv) 812static void __intel_fbc_update(struct drm_i915_private *dev_priv)
851{ 813{
852 struct drm_crtc *crtc = NULL; 814 struct drm_crtc *drm_crtc = NULL;
853 struct intel_crtc *intel_crtc; 815 struct intel_crtc *crtc;
854 struct drm_framebuffer *fb; 816 struct drm_framebuffer *fb;
855 struct drm_i915_gem_object *obj; 817 struct drm_i915_gem_object *obj;
856 const struct drm_display_mode *adjusted_mode; 818 const struct drm_display_mode *adjusted_mode;
857 819
858 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); 820 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
859 821
860 /* disable framebuffer compression in vGPU */
861 if (intel_vgpu_active(dev_priv->dev)) 822 if (intel_vgpu_active(dev_priv->dev))
862 i915.enable_fbc = 0; 823 i915.enable_fbc = 0;
863 824
864 if (i915.enable_fbc < 0) { 825 if (i915.enable_fbc < 0) {
865 set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT); 826 set_no_fbc_reason(dev_priv, "disabled per chip default");
866 goto out_disable; 827 goto out_disable;
867 } 828 }
868 829
869 if (!i915.enable_fbc) { 830 if (!i915.enable_fbc) {
870 set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM); 831 set_no_fbc_reason(dev_priv, "disabled per module param");
871 goto out_disable; 832 goto out_disable;
872 } 833 }
873 834
874 /* 835 drm_crtc = intel_fbc_find_crtc(dev_priv);
875 * If FBC is already on, we just have to verify that we can 836 if (!drm_crtc) {
876 * keep it that way... 837 set_no_fbc_reason(dev_priv, "no output");
877 * Need to disable if:
878 * - more than one pipe is active
879 * - changing FBC params (stride, fence, mode)
880 * - new fb is too large to fit in compressed buffer
881 * - going to an unsupported config (interlace, pixel multiply, etc.)
882 */
883 crtc = intel_fbc_find_crtc(dev_priv);
884 if (!crtc) {
885 set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
886 goto out_disable; 838 goto out_disable;
887 } 839 }
888 840
889 if (!multiple_pipes_ok(dev_priv)) { 841 if (!multiple_pipes_ok(dev_priv)) {
890 set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES); 842 set_no_fbc_reason(dev_priv, "more than one pipe active");
891 goto out_disable; 843 goto out_disable;
892 } 844 }
893 845
894 intel_crtc = to_intel_crtc(crtc); 846 crtc = to_intel_crtc(drm_crtc);
895 fb = crtc->primary->fb; 847 fb = crtc->base.primary->fb;
896 obj = intel_fb_obj(fb); 848 obj = intel_fb_obj(fb);
897 adjusted_mode = &intel_crtc->config->base.adjusted_mode; 849 adjusted_mode = &crtc->config->base.adjusted_mode;
898 850
899 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || 851 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
900 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 852 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
901 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE); 853 set_no_fbc_reason(dev_priv, "incompatible mode");
902 goto out_disable; 854 goto out_disable;
903 } 855 }
904 856
905 if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) { 857 if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
906 set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE); 858 set_no_fbc_reason(dev_priv, "mode too large for compression");
907 goto out_disable; 859 goto out_disable;
908 } 860 }
909 861
910 if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && 862 if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
911 intel_crtc->plane != PLANE_A) { 863 crtc->plane != PLANE_A) {
912 set_no_fbc_reason(dev_priv, FBC_BAD_PLANE); 864 set_no_fbc_reason(dev_priv, "FBC unsupported on plane");
913 goto out_disable; 865 goto out_disable;
914 } 866 }
915 867
@@ -918,41 +870,35 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
918 */ 870 */
919 if (obj->tiling_mode != I915_TILING_X || 871 if (obj->tiling_mode != I915_TILING_X ||
920 obj->fence_reg == I915_FENCE_REG_NONE) { 872 obj->fence_reg == I915_FENCE_REG_NONE) {
921 set_no_fbc_reason(dev_priv, FBC_NOT_TILED); 873 set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
922 goto out_disable; 874 goto out_disable;
923 } 875 }
924 if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && 876 if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
925 crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) { 877 crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
926 set_no_fbc_reason(dev_priv, FBC_ROTATION); 878 set_no_fbc_reason(dev_priv, "rotation unsupported");
927 goto out_disable; 879 goto out_disable;
928 } 880 }
929 881
930 if (!stride_is_valid(dev_priv, fb->pitches[0])) { 882 if (!stride_is_valid(dev_priv, fb->pitches[0])) {
931 set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE); 883 set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
932 goto out_disable; 884 goto out_disable;
933 } 885 }
934 886
935 if (!pixel_format_is_valid(fb)) { 887 if (!pixel_format_is_valid(fb)) {
936 set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT); 888 set_no_fbc_reason(dev_priv, "pixel format is invalid");
937 goto out_disable;
938 }
939
940 /* If the kernel debugger is active, always disable compression */
941 if (in_dbg_master()) {
942 set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
943 goto out_disable; 889 goto out_disable;
944 } 890 }
945 891
946 /* WaFbcExceedCdClockThreshold:hsw,bdw */ 892 /* WaFbcExceedCdClockThreshold:hsw,bdw */
947 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && 893 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
948 ilk_pipe_pixel_rate(intel_crtc->config) >= 894 ilk_pipe_pixel_rate(crtc->config) >=
949 dev_priv->cdclk_freq * 95 / 100) { 895 dev_priv->cdclk_freq * 95 / 100) {
950 set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE); 896 set_no_fbc_reason(dev_priv, "pixel rate is too big");
951 goto out_disable; 897 goto out_disable;
952 } 898 }
953 899
954 if (intel_fbc_setup_cfb(intel_crtc)) { 900 if (intel_fbc_setup_cfb(crtc)) {
955 set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); 901 set_no_fbc_reason(dev_priv, "not enough stolen memory");
956 goto out_disable; 902 goto out_disable;
957 } 903 }
958 904
@@ -961,9 +907,9 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
961 * cannot be unpinned (and have its GTT offset and fence revoked) 907 * cannot be unpinned (and have its GTT offset and fence revoked)
962 * without first being decoupled from the scanout and FBC disabled. 908 * without first being decoupled from the scanout and FBC disabled.
963 */ 909 */
964 if (dev_priv->fbc.crtc == intel_crtc && 910 if (dev_priv->fbc.crtc == crtc &&
965 dev_priv->fbc.fb_id == fb->base.id && 911 dev_priv->fbc.fb_id == fb->base.id &&
966 dev_priv->fbc.y == crtc->y) 912 dev_priv->fbc.y == crtc->base.y)
967 return; 913 return;
968 914
969 if (intel_fbc_enabled(dev_priv)) { 915 if (intel_fbc_enabled(dev_priv)) {
@@ -994,8 +940,8 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
994 __intel_fbc_disable(dev_priv); 940 __intel_fbc_disable(dev_priv);
995 } 941 }
996 942
997 intel_fbc_schedule_enable(intel_crtc); 943 intel_fbc_schedule_enable(crtc);
998 dev_priv->fbc.no_fbc_reason = FBC_OK; 944 dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
999 return; 945 return;
1000 946
1001out_disable: 947out_disable:
@@ -1085,10 +1031,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1085 enum pipe pipe; 1031 enum pipe pipe;
1086 1032
1087 mutex_init(&dev_priv->fbc.lock); 1033 mutex_init(&dev_priv->fbc.lock);
1034 dev_priv->fbc.enabled = false;
1088 1035
1089 if (!HAS_FBC(dev_priv)) { 1036 if (!HAS_FBC(dev_priv)) {
1090 dev_priv->fbc.enabled = false; 1037 dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
1091 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
1092 return; 1038 return;
1093 } 1039 }
1094 1040
@@ -1096,7 +1042,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1096 dev_priv->fbc.possible_framebuffer_bits |= 1042 dev_priv->fbc.possible_framebuffer_bits |=
1097 INTEL_FRONTBUFFER_PRIMARY(pipe); 1043 INTEL_FRONTBUFFER_PRIMARY(pipe);
1098 1044
1099 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) 1045 if (fbc_on_pipe_a_only(dev_priv))
1100 break; 1046 break;
1101 } 1047 }
1102 1048
@@ -1121,5 +1067,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1121 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); 1067 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
1122 } 1068 }
1123 1069
1124 dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv); 1070 /* We still don't have any sort of hardware state readout for FBC, so
1071 * disable it in case the BIOS enabled it to make sure software matches
1072 * the hardware state. */
1073 if (dev_priv->fbc.fbc_enabled(dev_priv))
1074 dev_priv->fbc.disable_fbc(dev_priv);
1125} 1075}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4fd5fdfef6bd..7ccde58f8c98 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
119{ 119{
120 struct intel_fbdev *ifbdev = 120 struct intel_fbdev *ifbdev =
121 container_of(helper, struct intel_fbdev, helper); 121 container_of(helper, struct intel_fbdev, helper);
122 struct drm_framebuffer *fb; 122 struct drm_framebuffer *fb = NULL;
123 struct drm_device *dev = helper->dev; 123 struct drm_device *dev = helper->dev;
124 struct drm_i915_private *dev_priv = to_i915(dev); 124 struct drm_i915_private *dev_priv = to_i915(dev);
125 struct drm_mode_fb_cmd2 mode_cmd = {}; 125 struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -138,6 +138,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
138 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 138 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
139 sizes->surface_depth); 139 sizes->surface_depth);
140 140
141 mutex_lock(&dev->struct_mutex);
142
141 size = mode_cmd.pitches[0] * mode_cmd.height; 143 size = mode_cmd.pitches[0] * mode_cmd.height;
142 size = PAGE_ALIGN(size); 144 size = PAGE_ALIGN(size);
143 145
@@ -156,26 +158,28 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
156 158
157 fb = __intel_framebuffer_create(dev, &mode_cmd, obj); 159 fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
158 if (IS_ERR(fb)) { 160 if (IS_ERR(fb)) {
161 drm_gem_object_unreference(&obj->base);
159 ret = PTR_ERR(fb); 162 ret = PTR_ERR(fb);
160 goto out_unref; 163 goto out;
161 } 164 }
162 165
163 /* Flush everything out, we'll be doing GTT only from now on */ 166 /* Flush everything out, we'll be doing GTT only from now on */
164 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL); 167 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
165 if (ret) { 168 if (ret) {
166 DRM_ERROR("failed to pin obj: %d\n", ret); 169 DRM_ERROR("failed to pin obj: %d\n", ret);
167 goto out_fb; 170 goto out;
168 } 171 }
169 172
173 mutex_unlock(&dev->struct_mutex);
174
170 ifbdev->fb = to_intel_framebuffer(fb); 175 ifbdev->fb = to_intel_framebuffer(fb);
171 176
172 return 0; 177 return 0;
173 178
174out_fb:
175 drm_framebuffer_remove(fb);
176out_unref:
177 drm_gem_object_unreference(&obj->base);
178out: 179out:
180 mutex_unlock(&dev->struct_mutex);
181 if (!IS_ERR_OR_NULL(fb))
182 drm_framebuffer_unreference(fb);
179 return ret; 183 return ret;
180} 184}
181 185
@@ -193,8 +197,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
193 int size, ret; 197 int size, ret;
194 bool prealloc = false; 198 bool prealloc = false;
195 199
196 mutex_lock(&dev->struct_mutex);
197
198 if (intel_fb && 200 if (intel_fb &&
199 (sizes->fb_width > intel_fb->base.width || 201 (sizes->fb_width > intel_fb->base.width ||
200 sizes->fb_height > intel_fb->base.height)) { 202 sizes->fb_height > intel_fb->base.height)) {
@@ -209,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
209 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 211 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
210 ret = intelfb_alloc(helper, sizes); 212 ret = intelfb_alloc(helper, sizes);
211 if (ret) 213 if (ret)
212 goto out_unlock; 214 return ret;
213 intel_fb = ifbdev->fb; 215 intel_fb = ifbdev->fb;
214 } else { 216 } else {
215 DRM_DEBUG_KMS("re-using BIOS fb\n"); 217 DRM_DEBUG_KMS("re-using BIOS fb\n");
@@ -221,8 +223,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
221 obj = intel_fb->obj; 223 obj = intel_fb->obj;
222 size = obj->base.size; 224 size = obj->base.size;
223 225
226 mutex_lock(&dev->struct_mutex);
227
224 info = drm_fb_helper_alloc_fbi(helper); 228 info = drm_fb_helper_alloc_fbi(helper);
225 if (IS_ERR(info)) { 229 if (IS_ERR(info)) {
230 DRM_ERROR("Failed to allocate fb_info\n");
226 ret = PTR_ERR(info); 231 ret = PTR_ERR(info);
227 goto out_unpin; 232 goto out_unpin;
228 } 233 }
@@ -249,6 +254,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
249 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), 254 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
250 size); 255 size);
251 if (!info->screen_base) { 256 if (!info->screen_base) {
257 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
252 ret = -ENOSPC; 258 ret = -ENOSPC;
253 goto out_destroy_fbi; 259 goto out_destroy_fbi;
254 } 260 }
@@ -281,8 +287,6 @@ out_destroy_fbi:
281 drm_fb_helper_release_fbi(helper); 287 drm_fb_helper_release_fbi(helper);
282out_unpin: 288out_unpin:
283 i915_gem_object_ggtt_unpin(obj); 289 i915_gem_object_ggtt_unpin(obj);
284 drm_gem_object_unreference(&obj->base);
285out_unlock:
286 mutex_unlock(&dev->struct_mutex); 290 mutex_unlock(&dev->struct_mutex);
287 return ret; 291 return ret;
288} 292}
@@ -526,8 +530,10 @@ static void intel_fbdev_destroy(struct drm_device *dev,
526 530
527 drm_fb_helper_fini(&ifbdev->helper); 531 drm_fb_helper_fini(&ifbdev->helper);
528 532
529 drm_framebuffer_unregister_private(&ifbdev->fb->base); 533 if (ifbdev->fb) {
530 drm_framebuffer_remove(&ifbdev->fb->base); 534 drm_framebuffer_unregister_private(&ifbdev->fb->base);
535 drm_framebuffer_remove(&ifbdev->fb->base);
536 }
531} 537}
532 538
533/* 539/*
@@ -702,13 +708,20 @@ int intel_fbdev_init(struct drm_device *dev)
702 return 0; 708 return 0;
703} 709}
704 710
705void intel_fbdev_initial_config(void *data, async_cookie_t cookie) 711static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
706{ 712{
707 struct drm_i915_private *dev_priv = data; 713 struct drm_i915_private *dev_priv = data;
708 struct intel_fbdev *ifbdev = dev_priv->fbdev; 714 struct intel_fbdev *ifbdev = dev_priv->fbdev;
709 715
710 /* Due to peculiar init order wrt to hpd handling this is separate. */ 716 /* Due to peculiar init order wrt to hpd handling this is separate. */
711 drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); 717 if (drm_fb_helper_initial_config(&ifbdev->helper,
718 ifbdev->preferred_bpp))
719 intel_fbdev_fini(dev_priv->dev);
720}
721
722void intel_fbdev_initial_config_async(struct drm_device *dev)
723{
724 async_schedule(intel_fbdev_initial_config, to_i915(dev));
712} 725}
713 726
714void intel_fbdev_fini(struct drm_device *dev) 727void intel_fbdev_fini(struct drm_device *dev)
@@ -719,7 +732,8 @@ void intel_fbdev_fini(struct drm_device *dev)
719 732
720 flush_work(&dev_priv->fbdev_suspend_work); 733 flush_work(&dev_priv->fbdev_suspend_work);
721 734
722 async_synchronize_full(); 735 if (!current_is_async())
736 async_synchronize_full();
723 intel_fbdev_destroy(dev, dev_priv->fbdev); 737 intel_fbdev_destroy(dev, dev_priv->fbdev);
724 kfree(dev_priv->fbdev); 738 kfree(dev_priv->fbdev);
725 dev_priv->fbdev = NULL; 739 dev_priv->fbdev = NULL;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 54daa66c6970..7ae182d0594b 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -84,38 +84,21 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
84 return true; 84 return true;
85} 85}
86 86
87/** 87static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
88 * i9xx_check_fifo_underruns - check for fifo underruns
89 * @dev_priv: i915 device instance
90 *
91 * This function checks for fifo underruns on GMCH platforms. This needs to be
92 * done manually on modeset to make sure that we catch all underruns since they
93 * do not generate an interrupt by themselves on these platforms.
94 */
95void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
96{ 88{
97 struct intel_crtc *crtc; 89 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
98 90 i915_reg_t reg = PIPESTAT(crtc->pipe);
99 spin_lock_irq(&dev_priv->irq_lock); 91 u32 pipestat = I915_READ(reg) & 0xffff0000;
100
101 for_each_intel_crtc(dev_priv->dev, crtc) {
102 u32 reg = PIPESTAT(crtc->pipe);
103 u32 pipestat;
104
105 if (crtc->cpu_fifo_underrun_disabled)
106 continue;
107 92
108 pipestat = I915_READ(reg) & 0xffff0000; 93 assert_spin_locked(&dev_priv->irq_lock);
109 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
110 continue;
111 94
112 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 95 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
113 POSTING_READ(reg); 96 return;
114 97
115 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 98 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
116 } 99 POSTING_READ(reg);
117 100
118 spin_unlock_irq(&dev_priv->irq_lock); 101 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
119} 102}
120 103
121static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 104static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -123,7 +106,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
123 bool enable, bool old) 106 bool enable, bool old)
124{ 107{
125 struct drm_i915_private *dev_priv = dev->dev_private; 108 struct drm_i915_private *dev_priv = dev->dev_private;
126 u32 reg = PIPESTAT(pipe); 109 i915_reg_t reg = PIPESTAT(pipe);
127 u32 pipestat = I915_READ(reg) & 0xffff0000; 110 u32 pipestat = I915_READ(reg) & 0xffff0000;
128 111
129 assert_spin_locked(&dev_priv->irq_lock); 112 assert_spin_locked(&dev_priv->irq_lock);
@@ -150,6 +133,23 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
150 ironlake_disable_display_irq(dev_priv, bit); 133 ironlake_disable_display_irq(dev_priv, bit);
151} 134}
152 135
136static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
137{
138 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
139 enum pipe pipe = crtc->pipe;
140 uint32_t err_int = I915_READ(GEN7_ERR_INT);
141
142 assert_spin_locked(&dev_priv->irq_lock);
143
144 if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
145 return;
146
147 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
148 POSTING_READ(GEN7_ERR_INT);
149
150 DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
151}
152
153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
154 enum pipe pipe, 154 enum pipe pipe,
155 bool enable, bool old) 155 bool enable, bool old)
@@ -202,6 +202,24 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
202 ibx_disable_display_interrupt(dev_priv, bit); 202 ibx_disable_display_interrupt(dev_priv, bit);
203} 203}
204 204
205static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
206{
207 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
208 enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
209 uint32_t serr_int = I915_READ(SERR_INT);
210
211 assert_spin_locked(&dev_priv->irq_lock);
212
213 if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
214 return;
215
216 I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
217 POSTING_READ(SERR_INT);
218
219 DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
220 transcoder_name(pch_transcoder));
221}
222
205static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 223static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
206 enum transcoder pch_transcoder, 224 enum transcoder pch_transcoder,
207 bool enable, bool old) 225 bool enable, bool old)
@@ -375,3 +393,56 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
375 DRM_ERROR("PCH transcoder %c FIFO underrun\n", 393 DRM_ERROR("PCH transcoder %c FIFO underrun\n",
376 transcoder_name(pch_transcoder)); 394 transcoder_name(pch_transcoder));
377} 395}
396
397/**
398 * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
399 * @dev_priv: i915 device instance
400 *
401 * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
402 * error interrupt may have been disabled, and so CPU fifo underruns won't
403 * necessarily raise an interrupt, and on GMCH platforms where underruns never
404 * raise an interrupt.
405 */
406void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
407{
408 struct intel_crtc *crtc;
409
410 spin_lock_irq(&dev_priv->irq_lock);
411
412 for_each_intel_crtc(dev_priv->dev, crtc) {
413 if (crtc->cpu_fifo_underrun_disabled)
414 continue;
415
416 if (HAS_GMCH_DISPLAY(dev_priv))
417 i9xx_check_fifo_underruns(crtc);
418 else if (IS_GEN7(dev_priv))
419 ivybridge_check_fifo_underruns(crtc);
420 }
421
422 spin_unlock_irq(&dev_priv->irq_lock);
423}
424
425/**
426 * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
427 * @dev_priv: i915 device instance
428 *
429 * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
430 * error interrupt may have been disabled, and so PCH fifo underruns won't
431 * necessarily raise an interrupt.
432 */
433void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
434{
435 struct intel_crtc *crtc;
436
437 spin_lock_irq(&dev_priv->irq_lock);
438
439 for_each_intel_crtc(dev_priv->dev, crtc) {
440 if (crtc->pch_fifo_underrun_disabled)
441 continue;
442
443 if (HAS_PCH_CPT(dev_priv))
444 cpt_check_pch_fifo_underruns(crtc);
445 }
446
447 spin_unlock_irq(&dev_priv->irq_lock);
448}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 081d5f648d26..5ba586683c87 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -76,11 +76,17 @@ struct intel_guc_fw {
76 uint16_t guc_fw_minor_wanted; 76 uint16_t guc_fw_minor_wanted;
77 uint16_t guc_fw_major_found; 77 uint16_t guc_fw_major_found;
78 uint16_t guc_fw_minor_found; 78 uint16_t guc_fw_minor_found;
79
80 uint32_t header_size;
81 uint32_t header_offset;
82 uint32_t rsa_size;
83 uint32_t rsa_offset;
84 uint32_t ucode_size;
85 uint32_t ucode_offset;
79}; 86};
80 87
81struct intel_guc { 88struct intel_guc {
82 struct intel_guc_fw guc_fw; 89 struct intel_guc_fw guc_fw;
83
84 uint32_t log_flags; 90 uint32_t log_flags;
85 struct drm_i915_gem_object *log_obj; 91 struct drm_i915_gem_object *log_obj;
86 92
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 593d2f585978..40b2ea572e16 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -122,6 +122,78 @@
122 122
123#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) 123#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
124 124
125/**
126 * DOC: GuC Firmware Layout
127 *
128 * The GuC firmware layout looks like this:
129 *
130 * +-------------------------------+
131 * | guc_css_header |
132 * | contains major/minor version |
133 * +-------------------------------+
134 * | uCode |
135 * +-------------------------------+
136 * | RSA signature |
137 * +-------------------------------+
138 * | modulus key |
139 * +-------------------------------+
140 * | exponent val |
141 * +-------------------------------+
142 *
143 * The firmware may or may not have modulus key and exponent data. The header,
144 * uCode and RSA signature are must-have components that will be used by driver.
145 * Length of each components, which is all in dwords, can be found in header.
146 * In the case that modulus and exponent are not present in fw, a.k.a truncated
147 * image, the length value still appears in header.
148 *
149 * Driver will do some basic fw size validation based on the following rules:
150 *
151 * 1. Header, uCode and RSA are must-have components.
152 * 2. All firmware components, if they present, are in the sequence illustrated
153 * in the layout table above.
154 * 3. Length info of each component can be found in header, in dwords.
155 * 4. Modulus and exponent key are not required by driver. They may not appear
156 * in fw. So driver will load a truncated firmware in this case.
157 */
158
159struct guc_css_header {
160 uint32_t module_type;
161 /* header_size includes all non-uCode bits, including css_header, rsa
162 * key, modulus key and exponent data. */
163 uint32_t header_size_dw;
164 uint32_t header_version;
165 uint32_t module_id;
166 uint32_t module_vendor;
167 union {
168 struct {
169 uint8_t day;
170 uint8_t month;
171 uint16_t year;
172 };
173 uint32_t date;
174 };
175 uint32_t size_dw; /* uCode plus header_size_dw */
176 uint32_t key_size_dw;
177 uint32_t modulus_size_dw;
178 uint32_t exponent_size_dw;
179 union {
180 struct {
181 uint8_t hour;
182 uint8_t min;
183 uint16_t sec;
184 };
185 uint32_t time;
186 };
187
188 char username[8];
189 char buildnumber[12];
190 uint32_t device_id;
191 uint32_t guc_sw_version;
192 uint32_t prod_preprod_fw;
193 uint32_t reserved[12];
194 uint32_t header_info;
195} __packed;
196
125struct guc_doorbell_info { 197struct guc_doorbell_info {
126 u32 db_status; 198 u32 db_status;
127 u32 cookie; 199 u32 cookie;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 3541f76c65a7..550921f2ef7d 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -31,7 +31,7 @@
31#include "intel_guc.h" 31#include "intel_guc.h"
32 32
33/** 33/**
34 * DOC: GuC 34 * DOC: GuC-specific firmware loader
35 * 35 *
36 * intel_guc: 36 * intel_guc:
37 * Top level structure of guc. It handles firmware loading and manages client 37 * Top level structure of guc. It handles firmware loading and manages client
@@ -208,16 +208,6 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
208/* 208/*
209 * Transfer the firmware image to RAM for execution by the microcontroller. 209 * Transfer the firmware image to RAM for execution by the microcontroller.
210 * 210 *
211 * GuC Firmware layout:
212 * +-------------------------------+ ----
213 * | CSS header | 128B
214 * | contains major/minor version |
215 * +-------------------------------+ ----
216 * | uCode |
217 * +-------------------------------+ ----
218 * | RSA signature | 256B
219 * +-------------------------------+ ----
220 *
221 * Architecturally, the DMA engine is bidirectional, and can potentially even 211 * Architecturally, the DMA engine is bidirectional, and can potentially even
222 * transfer between GTT locations. This functionality is left out of the API 212 * transfer between GTT locations. This functionality is left out of the API
223 * for now as there is no need for it. 213 * for now as there is no need for it.
@@ -225,33 +215,29 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
225 * Note that GuC needs the CSS header plus uKernel code to be copied by the 215 * Note that GuC needs the CSS header plus uKernel code to be copied by the
226 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. 216 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
227 */ 217 */
228
229#define UOS_CSS_HEADER_OFFSET 0
230#define UOS_VER_MINOR_OFFSET 0x44
231#define UOS_VER_MAJOR_OFFSET 0x46
232#define UOS_CSS_HEADER_SIZE 0x80
233#define UOS_RSA_SIG_SIZE 0x100
234
235static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) 218static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
236{ 219{
237 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 220 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
238 struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; 221 struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
239 unsigned long offset; 222 unsigned long offset;
240 struct sg_table *sg = fw_obj->pages; 223 struct sg_table *sg = fw_obj->pages;
241 u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)]; 224 u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
242 int i, ret = 0; 225 int i, ret = 0;
243 226
244 /* uCode size, also is where RSA signature starts */ 227 /* where RSA signature starts */
245 offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE; 228 offset = guc_fw->rsa_offset;
246 I915_WRITE(DMA_COPY_SIZE, ucode_size);
247 229
248 /* Copy RSA signature from the fw image to HW for verification */ 230 /* Copy RSA signature from the fw image to HW for verification */
249 sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); 231 sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
250 for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) 232 for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
251 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); 233 I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
252 234
235 /* The header plus uCode will be copied to WOPCM via DMA, excluding any
236 * other components */
237 I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
238
253 /* Set the source address for the new blob */ 239 /* Set the source address for the new blob */
254 offset = i915_gem_obj_ggtt_offset(fw_obj); 240 offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
255 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); 241 I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
256 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); 242 I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
257 243
@@ -322,8 +308,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
322 I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); 308 I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
323 309
324 /* WaDisableMinuteIaClockGating:skl,bxt */ 310 /* WaDisableMinuteIaClockGating:skl,bxt */
325 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || 311 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
326 (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) { 312 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
327 I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) & 313 I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
328 ~GUC_ENABLE_MIA_CLOCK_GATING)); 314 ~GUC_ENABLE_MIA_CLOCK_GATING));
329 } 315 }
@@ -378,6 +364,9 @@ int intel_guc_ucode_load(struct drm_device *dev)
378 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 364 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
379 int err = 0; 365 int err = 0;
380 366
367 if (!i915.enable_guc_submission)
368 return 0;
369
381 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", 370 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
382 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), 371 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
383 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 372 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
@@ -457,10 +446,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
457{ 446{
458 struct drm_i915_gem_object *obj; 447 struct drm_i915_gem_object *obj;
459 const struct firmware *fw; 448 const struct firmware *fw;
460 const u8 *css_header; 449 struct guc_css_header *css;
461 const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE; 450 size_t size;
462 const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
463 - 0x8000; /* 32k reserved (8K stack + 24k context) */
464 int err; 451 int err;
465 452
466 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n", 453 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
@@ -474,12 +461,52 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
474 461
475 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n", 462 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
476 guc_fw->guc_fw_path, fw); 463 guc_fw->guc_fw_path, fw);
477 DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
478 fw->size, minsize, maxsize);
479 464
480 /* Check the size of the blob befoe examining buffer contents */ 465 /* Check the size of the blob before examining buffer contents */
481 if (fw->size < minsize || fw->size > maxsize) 466 if (fw->size < sizeof(struct guc_css_header)) {
467 DRM_ERROR("Firmware header is missing\n");
482 goto fail; 468 goto fail;
469 }
470
471 css = (struct guc_css_header *)fw->data;
472
473 /* Firmware bits always start from header */
474 guc_fw->header_offset = 0;
475 guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
476 css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
477
478 if (guc_fw->header_size != sizeof(struct guc_css_header)) {
479 DRM_ERROR("CSS header definition mismatch\n");
480 goto fail;
481 }
482
483 /* then, uCode */
484 guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
485 guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
486
487 /* now RSA */
488 if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
489 DRM_ERROR("RSA key size is bad\n");
490 goto fail;
491 }
492 guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
493 guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
494
495 /* At least, it should have header, uCode and RSA. Size of all three. */
496 size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
497 if (fw->size < size) {
498 DRM_ERROR("Missing firmware components\n");
499 goto fail;
500 }
501
502 /* Header and uCode will be loaded to WOPCM. Size of the two. */
503 size = guc_fw->header_size + guc_fw->ucode_size;
504
505 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
506 if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
507 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
508 goto fail;
509 }
483 510
484 /* 511 /*
485 * The GuC firmware image has the version number embedded at a well-known 512 * The GuC firmware image has the version number embedded at a well-known
@@ -487,9 +514,8 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
487 * TWO bytes each (i.e. u16), although all pointers and offsets are defined 514 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
488 * in terms of bytes (u8). 515 * in terms of bytes (u8).
489 */ 516 */
490 css_header = fw->data + UOS_CSS_HEADER_OFFSET; 517 guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
491 guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET); 518 guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
492 guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
493 519
494 if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted || 520 if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
495 guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) { 521 guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
@@ -566,6 +592,9 @@ void intel_guc_ucode_init(struct drm_device *dev)
566 fw_path = ""; /* unknown device */ 592 fw_path = ""; /* unknown device */
567 } 593 }
568 594
595 if (!i915.enable_guc_submission)
596 return;
597
569 guc_fw->guc_dev = dev; 598 guc_fw->guc_dev = dev;
570 guc_fw->guc_fw_path = fw_path; 599 guc_fw->guc_fw_path = fw_path;
571 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; 600 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9eafa191cee2..bdd462e7c690 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,10 +113,11 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
113 } 113 }
114} 114}
115 115
116static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, 116static i915_reg_t
117 enum transcoder cpu_transcoder, 117hsw_dip_data_reg(struct drm_i915_private *dev_priv,
118 enum hdmi_infoframe_type type, 118 enum transcoder cpu_transcoder,
119 int i) 119 enum hdmi_infoframe_type type,
120 int i)
120{ 121{
121 switch (type) { 122 switch (type) {
122 case HDMI_INFOFRAME_TYPE_AVI: 123 case HDMI_INFOFRAME_TYPE_AVI:
@@ -127,7 +128,7 @@ static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
127 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); 128 return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
128 default: 129 default:
129 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); 130 DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
130 return 0; 131 return INVALID_MMIO_REG;
131 } 132 }
132} 133}
133 134
@@ -193,8 +194,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
193 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
194 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = dev->dev_private;
195 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
196 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
197 u32 val = I915_READ(reg); 198 u32 val = I915_READ(reg);
199 int i;
198 200
199 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 201 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
200 202
@@ -229,7 +231,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
229 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct drm_i915_private *dev_priv = dev->dev_private;
230 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 232 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
231 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 233 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
232 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 234 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
233 u32 val = I915_READ(reg); 235 u32 val = I915_READ(reg);
234 236
235 if ((val & VIDEO_DIP_ENABLE) == 0) 237 if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -251,8 +253,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
251 struct drm_device *dev = encoder->dev; 253 struct drm_device *dev = encoder->dev;
252 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_i915_private *dev_priv = dev->dev_private;
253 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
254 int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
255 u32 val = I915_READ(reg); 257 u32 val = I915_READ(reg);
258 int i;
256 259
257 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 260 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
258 261
@@ -289,8 +292,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
289 struct drm_device *dev = encoder->dev; 292 struct drm_device *dev = encoder->dev;
290 struct drm_i915_private *dev_priv = dev->dev_private; 293 struct drm_i915_private *dev_priv = dev->dev_private;
291 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 294 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
292 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 295 u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe));
293 u32 val = I915_READ(reg);
294 296
295 if ((val & VIDEO_DIP_ENABLE) == 0) 297 if ((val & VIDEO_DIP_ENABLE) == 0)
296 return false; 298 return false;
@@ -308,8 +310,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
308 struct drm_device *dev = encoder->dev; 310 struct drm_device *dev = encoder->dev;
309 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
310 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
311 int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
312 u32 val = I915_READ(reg); 314 u32 val = I915_READ(reg);
315 int i;
313 316
314 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 317 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
315 318
@@ -344,8 +347,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
344 struct drm_i915_private *dev_priv = dev->dev_private; 347 struct drm_i915_private *dev_priv = dev->dev_private;
345 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 348 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
346 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 349 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
347 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 350 u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe));
348 u32 val = I915_READ(reg);
349 351
350 if ((val & VIDEO_DIP_ENABLE) == 0) 352 if ((val & VIDEO_DIP_ENABLE) == 0)
351 return false; 353 return false;
@@ -367,13 +369,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
367 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = dev->dev_private;
368 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
369 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
370 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
371 u32 data_reg; 373 i915_reg_t data_reg;
372 int i; 374 int i;
373 u32 val = I915_READ(ctl_reg); 375 u32 val = I915_READ(ctl_reg);
374 376
375 data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); 377 data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
376 if (data_reg == 0) 378 if (i915_mmio_reg_valid(data_reg))
377 return; 379 return;
378 380
379 val &= ~hsw_infoframe_enable(type); 381 val &= ~hsw_infoframe_enable(type);
@@ -401,8 +403,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
401 struct drm_device *dev = encoder->dev; 403 struct drm_device *dev = encoder->dev;
402 struct drm_i915_private *dev_priv = dev->dev_private; 404 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 405 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
404 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 406 u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder));
405 u32 val = I915_READ(ctl_reg);
406 407
407 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | 408 return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
408 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | 409 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -513,7 +514,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
513 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 514 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
514 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 515 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
515 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 516 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
516 u32 reg = VIDEO_DIP_CTL; 517 i915_reg_t reg = VIDEO_DIP_CTL;
517 u32 val = I915_READ(reg); 518 u32 val = I915_READ(reg);
518 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 519 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
519 520
@@ -633,7 +634,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
633{ 634{
634 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 635 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
635 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 636 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
636 u32 reg, val = 0; 637 i915_reg_t reg;
638 u32 val = 0;
637 639
638 if (HAS_DDI(dev_priv)) 640 if (HAS_DDI(dev_priv))
639 reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); 641 reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
@@ -666,7 +668,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
666 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 668 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
667 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 669 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
668 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 670 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
669 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 671 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
670 u32 val = I915_READ(reg); 672 u32 val = I915_READ(reg);
671 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 673 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
672 674
@@ -717,7 +719,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
717 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 719 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
718 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 720 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
719 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 721 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
720 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 722 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
721 u32 val = I915_READ(reg); 723 u32 val = I915_READ(reg);
722 724
723 assert_hdmi_port_disabled(intel_hdmi); 725 assert_hdmi_port_disabled(intel_hdmi);
@@ -760,7 +762,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
760 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 762 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
761 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 763 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
762 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 764 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
763 u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 765 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
764 u32 val = I915_READ(reg); 766 u32 val = I915_READ(reg);
765 u32 port = VIDEO_DIP_PORT(intel_dig_port->port); 767 u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
766 768
@@ -811,7 +813,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
811 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 813 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
812 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 814 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
813 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 815 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
814 u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 816 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
815 u32 val = I915_READ(reg); 817 u32 val = I915_READ(reg);
816 818
817 assert_hdmi_port_disabled(intel_hdmi); 819 assert_hdmi_port_disabled(intel_hdmi);
@@ -1108,6 +1110,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1108 * matching DP port to be enabled on transcoder A. 1110 * matching DP port to be enabled on transcoder A.
1109 */ 1111 */
1110 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) { 1112 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
1113 /*
1114 * We get CPU/PCH FIFO underruns on the other pipe when
1115 * doing the workaround. Sweep them under the rug.
1116 */
1117 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1118 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1119
1111 temp &= ~SDVO_PIPE_B_SELECT; 1120 temp &= ~SDVO_PIPE_B_SELECT;
1112 temp |= SDVO_ENABLE; 1121 temp |= SDVO_ENABLE;
1113 /* 1122 /*
@@ -1122,6 +1131,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1122 temp &= ~SDVO_ENABLE; 1131 temp &= ~SDVO_ENABLE;
1123 I915_WRITE(intel_hdmi->hdmi_reg, temp); 1132 I915_WRITE(intel_hdmi->hdmi_reg, temp);
1124 POSTING_READ(intel_hdmi->hdmi_reg); 1133 POSTING_READ(intel_hdmi->hdmi_reg);
1134
1135 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
1136 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1137 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1125 } 1138 }
1126 1139
1127 intel_hdmi->set_infoframes(&encoder->base, false, NULL); 1140 intel_hdmi->set_infoframes(&encoder->base, false, NULL);
@@ -1335,21 +1348,18 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1335{ 1348{
1336 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1349 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1337 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1350 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1338 struct intel_encoder *intel_encoder =
1339 &hdmi_to_dig_port(intel_hdmi)->base;
1340 enum intel_display_power_domain power_domain;
1341 struct edid *edid = NULL; 1351 struct edid *edid = NULL;
1342 bool connected = false; 1352 bool connected = false;
1343 1353
1344 power_domain = intel_display_port_power_domain(intel_encoder); 1354 if (force) {
1345 intel_display_power_get(dev_priv, power_domain); 1355 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1346 1356
1347 if (force)
1348 edid = drm_get_edid(connector, 1357 edid = drm_get_edid(connector,
1349 intel_gmbus_get_adapter(dev_priv, 1358 intel_gmbus_get_adapter(dev_priv,
1350 intel_hdmi->ddc_bus)); 1359 intel_hdmi->ddc_bus));
1351 1360
1352 intel_display_power_put(dev_priv, power_domain); 1361 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1362 }
1353 1363
1354 to_intel_connector(connector)->detect_edid = edid; 1364 to_intel_connector(connector)->detect_edid = edid;
1355 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 1365 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1383,6 +1393,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1383 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1393 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1384 connector->base.id, connector->name); 1394 connector->base.id, connector->name);
1385 1395
1396 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1397
1386 while (!live_status && --retry) { 1398 while (!live_status && --retry) {
1387 live_status = intel_digital_port_connected(dev_priv, 1399 live_status = intel_digital_port_connected(dev_priv,
1388 hdmi_to_dig_port(intel_hdmi)); 1400 hdmi_to_dig_port(intel_hdmi));
@@ -1402,6 +1414,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1402 } else 1414 } else
1403 status = connector_status_disconnected; 1415 status = connector_status_disconnected;
1404 1416
1417 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1418
1405 return status; 1419 return status;
1406} 1420}
1407 1421
@@ -2039,7 +2053,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2039 * On BXT A0/A1, sw needs to activate DDIA HPD logic and 2053 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
2040 * interrupts to check the external panel connection. 2054 * interrupts to check the external panel connection.
2041 */ 2055 */
2042 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)) 2056 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
2043 intel_encoder->hpd_pin = HPD_PORT_A; 2057 intel_encoder->hpd_pin = HPD_PORT_A;
2044 else 2058 else
2045 intel_encoder->hpd_pin = HPD_PORT_B; 2059 intel_encoder->hpd_pin = HPD_PORT_B;
@@ -2131,7 +2145,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2131 } 2145 }
2132} 2146}
2133 2147
2134void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) 2148void intel_hdmi_init(struct drm_device *dev,
2149 i915_reg_t hdmi_reg, enum port port)
2135{ 2150{
2136 struct intel_digital_port *intel_dig_port; 2151 struct intel_digital_port *intel_dig_port;
2137 struct intel_encoder *intel_encoder; 2152 struct intel_encoder *intel_encoder;
@@ -2202,7 +2217,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
2202 2217
2203 intel_dig_port->port = port; 2218 intel_dig_port->port = port;
2204 intel_dig_port->hdmi.hdmi_reg = hdmi_reg; 2219 intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
2205 intel_dig_port->dp.output_reg = 0; 2220 intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
2206 2221
2207 intel_hdmi_init_connector(intel_dig_port, intel_connector); 2222 intel_hdmi_init_connector(intel_dig_port, intel_connector);
2208} 2223}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1369fc41d039..1110c83953cf 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -36,7 +36,7 @@
36 36
37struct gmbus_pin { 37struct gmbus_pin {
38 const char *name; 38 const char *name;
39 int reg; 39 i915_reg_t reg;
40}; 40};
41 41
42/* Map gmbus pin pairs to names and registers. */ 42/* Map gmbus pin pairs to names and registers. */
@@ -63,9 +63,9 @@ static const struct gmbus_pin gmbus_pins_skl[] = {
63}; 63};
64 64
65static const struct gmbus_pin gmbus_pins_bxt[] = { 65static const struct gmbus_pin gmbus_pins_bxt[] = {
66 [GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB }, 66 [GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
67 [GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC }, 67 [GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
68 [GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD }, 68 [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
69}; 69};
70 70
71/* pin is expected to be valid */ 71/* pin is expected to be valid */
@@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
74{ 74{
75 if (IS_BROXTON(dev_priv)) 75 if (IS_BROXTON(dev_priv))
76 return &gmbus_pins_bxt[pin]; 76 return &gmbus_pins_bxt[pin];
77 else if (IS_SKYLAKE(dev_priv)) 77 else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
78 return &gmbus_pins_skl[pin]; 78 return &gmbus_pins_skl[pin];
79 else if (IS_BROADWELL(dev_priv)) 79 else if (IS_BROADWELL(dev_priv))
80 return &gmbus_pins_bdw[pin]; 80 return &gmbus_pins_bdw[pin];
@@ -89,14 +89,15 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
89 89
90 if (IS_BROXTON(dev_priv)) 90 if (IS_BROXTON(dev_priv))
91 size = ARRAY_SIZE(gmbus_pins_bxt); 91 size = ARRAY_SIZE(gmbus_pins_bxt);
92 else if (IS_SKYLAKE(dev_priv)) 92 else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
93 size = ARRAY_SIZE(gmbus_pins_skl); 93 size = ARRAY_SIZE(gmbus_pins_skl);
94 else if (IS_BROADWELL(dev_priv)) 94 else if (IS_BROADWELL(dev_priv))
95 size = ARRAY_SIZE(gmbus_pins_bdw); 95 size = ARRAY_SIZE(gmbus_pins_bdw);
96 else 96 else
97 size = ARRAY_SIZE(gmbus_pins); 97 size = ARRAY_SIZE(gmbus_pins);
98 98
99 return pin < size && get_gmbus_pin(dev_priv, pin)->reg; 99 return pin < size &&
100 i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
100} 101}
101 102
102/* Intel GPIO access functions */ 103/* Intel GPIO access functions */
@@ -240,9 +241,8 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
240 241
241 algo = &bus->bit_algo; 242 algo = &bus->bit_algo;
242 243
243 bus->gpio_reg = dev_priv->gpio_mmio_base + 244 bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
244 get_gmbus_pin(dev_priv, pin)->reg; 245 i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
245
246 bus->adapter.algo_data = algo; 246 bus->adapter.algo_data = algo;
247 algo->setsda = set_data; 247 algo->setsda = set_data;
248 algo->setscl = set_clock; 248 algo->setscl = set_clock;
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
483 int i = 0, inc, try = 0; 483 int i = 0, inc, try = 0;
484 int ret = 0; 484 int ret = 0;
485 485
486 intel_aux_display_runtime_get(dev_priv); 486 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
487 mutex_lock(&dev_priv->gmbus_mutex); 487 mutex_lock(&dev_priv->gmbus_mutex);
488 488
489 if (bus->force_bit) { 489 if (bus->force_bit) {
@@ -595,7 +595,9 @@ timeout:
595 595
596out: 596out:
597 mutex_unlock(&dev_priv->gmbus_mutex); 597 mutex_unlock(&dev_priv->gmbus_mutex);
598 intel_aux_display_runtime_put(dev_priv); 598
599 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
600
599 return ret; 601 return ret;
600} 602}
601 603
@@ -626,12 +628,13 @@ int intel_setup_gmbus(struct drm_device *dev)
626 628
627 if (HAS_PCH_NOP(dev)) 629 if (HAS_PCH_NOP(dev))
628 return 0; 630 return 0;
629 else if (HAS_PCH_SPLIT(dev)) 631
630 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; 632 if (IS_VALLEYVIEW(dev))
631 else if (IS_VALLEYVIEW(dev))
632 dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; 633 dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
633 else 634 else if (!HAS_GMCH_DISPLAY(dev_priv))
634 dev_priv->gpio_mmio_base = 0; 635 dev_priv->gpio_mmio_base =
636 i915_mmio_reg_offset(PCH_GPIOA) -
637 i915_mmio_reg_offset(GPIOA);
635 638
636 mutex_init(&dev_priv->gmbus_mutex); 639 mutex_init(&dev_priv->gmbus_mutex);
637 init_waitqueue_head(&dev_priv->gmbus_wait_queue); 640 init_waitqueue_head(&dev_priv->gmbus_wait_queue);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88e12bdf79e2..4ebafab53f30 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -190,16 +190,21 @@
190#define GEN8_CTX_L3LLC_COHERENT (1<<5) 190#define GEN8_CTX_L3LLC_COHERENT (1<<5)
191#define GEN8_CTX_PRIVILEGE (1<<8) 191#define GEN8_CTX_PRIVILEGE (1<<8)
192 192
193#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \ 193#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
194 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
195 (reg_state)[(pos)+1] = (val); \
196} while (0)
197
198#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
194 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \ 199 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
195 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \ 200 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
196 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \ 201 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
197} 202} while (0)
198 203
199#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \ 204#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
200 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \ 205 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
201 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \ 206 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
202} 207} while (0)
203 208
204enum { 209enum {
205 ADVANCED_CONTEXT = 0, 210 ADVANCED_CONTEXT = 0,
@@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
284{ 289{
285 struct drm_device *dev = ring->dev; 290 struct drm_device *dev = ring->dev;
286 291
287 return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || 292 return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
288 (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) && 293 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
289 (ring->id == VCS || ring->id == VCS2); 294 (ring->id == VCS || ring->id == VCS2);
290} 295}
291 296
@@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
921 926
922 intel_logical_ring_emit(ringbuf, MI_NOOP); 927 intel_logical_ring_emit(ringbuf, MI_NOOP);
923 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); 928 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
924 intel_logical_ring_emit(ringbuf, INSTPM); 929 intel_logical_ring_emit_reg(ringbuf, INSTPM);
925 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); 930 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
926 intel_logical_ring_advance(ringbuf); 931 intel_logical_ring_advance(ringbuf);
927 932
@@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1096 1101
1097 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); 1102 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1098 for (i = 0; i < w->count; i++) { 1103 for (i = 0; i < w->count; i++) {
1099 intel_logical_ring_emit(ringbuf, w->reg[i].addr); 1104 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
1100 intel_logical_ring_emit(ringbuf, w->reg[i].value); 1105 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1101 } 1106 }
1102 intel_logical_ring_emit(ringbuf, MI_NOOP); 1107 intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1120 batch[__index] = (cmd); \ 1125 batch[__index] = (cmd); \
1121 } while (0) 1126 } while (0)
1122 1127
1128#define wa_ctx_emit_reg(batch, index, reg) \
1129 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
1123 1130
1124/* 1131/*
1125 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after 1132 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
@@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1149 * this batch updates GEN8_L3SQCREG4 with default value we need to 1156 * this batch updates GEN8_L3SQCREG4 with default value we need to
1150 * set this bit here to retain the WA during flush. 1157 * set this bit here to retain the WA during flush.
1151 */ 1158 */
1152 if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0) 1159 if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
1153 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1160 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1154 1161
1155 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1162 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1156 MI_SRM_LRM_GLOBAL_GTT)); 1163 MI_SRM_LRM_GLOBAL_GTT));
1157 wa_ctx_emit(batch, index, GEN8_L3SQCREG4); 1164 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1158 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1165 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
1159 wa_ctx_emit(batch, index, 0); 1166 wa_ctx_emit(batch, index, 0);
1160 1167
1161 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1168 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1162 wa_ctx_emit(batch, index, GEN8_L3SQCREG4); 1169 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1163 wa_ctx_emit(batch, index, l3sqc4_flush); 1170 wa_ctx_emit(batch, index, l3sqc4_flush);
1164 1171
1165 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1172 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
@@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1172 1179
1173 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 1180 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1174 MI_SRM_LRM_GLOBAL_GTT)); 1181 MI_SRM_LRM_GLOBAL_GTT));
1175 wa_ctx_emit(batch, index, GEN8_L3SQCREG4); 1182 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1176 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1183 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
1177 wa_ctx_emit(batch, index, 0); 1184 wa_ctx_emit(batch, index, 0);
1178 1185
@@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1314 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1321 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1315 1322
1316 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1323 /* WaDisableCtxRestoreArbitration:skl,bxt */
1317 if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || 1324 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1318 (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) 1325 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1319 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1326 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1320 1327
1321 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1328 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
1340 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1347 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1341 1348
1342 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1349 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1343 if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) || 1350 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
1344 (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) { 1351 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1345 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1352 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1346 wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1353 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1347 wa_ctx_emit(batch, index, 1354 wa_ctx_emit(batch, index,
1348 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING)); 1355 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1349 wa_ctx_emit(batch, index, MI_NOOP); 1356 wa_ctx_emit(batch, index, MI_NOOP);
1350 } 1357 }
1351 1358
1352 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1359 /* WaDisableCtxRestoreArbitration:skl,bxt */
1353 if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) || 1360 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1354 (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) 1361 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1355 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1362 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1356 1363
1357 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1364 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1472 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1479 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1473 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1480 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1474 1481
1475 if (ring->status_page.obj) {
1476 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
1477 (u32)ring->status_page.gfx_addr);
1478 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1479 }
1480
1481 I915_WRITE(RING_MODE_GEN7(ring), 1482 I915_WRITE(RING_MODE_GEN7(ring),
1482 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1483 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1483 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1484 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1562 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { 1563 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1563 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1564 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1564 1565
1565 intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i)); 1566 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
1566 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); 1567 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
1567 intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i)); 1568 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
1568 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); 1569 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1569 } 1570 }
1570 1571
@@ -1923,6 +1924,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
1923 i915_gem_batch_pool_init(dev, &ring->batch_pool); 1924 i915_gem_batch_pool_init(dev, &ring->batch_pool);
1924 init_waitqueue_head(&ring->irq_queue); 1925 init_waitqueue_head(&ring->irq_queue);
1925 1926
1927 INIT_LIST_HEAD(&ring->buffers);
1926 INIT_LIST_HEAD(&ring->execlist_queue); 1928 INIT_LIST_HEAD(&ring->execlist_queue);
1927 INIT_LIST_HEAD(&ring->execlist_retired_req_list); 1929 INIT_LIST_HEAD(&ring->execlist_retired_req_list);
1928 spin_lock_init(&ring->execlist_lock); 1930 spin_lock_init(&ring->execlist_lock);
@@ -1972,7 +1974,7 @@ static int logical_render_ring_init(struct drm_device *dev)
1972 ring->init_hw = gen8_init_render_ring; 1974 ring->init_hw = gen8_init_render_ring;
1973 ring->init_context = gen8_init_rcs_context; 1975 ring->init_context = gen8_init_rcs_context;
1974 ring->cleanup = intel_fini_pipe_control; 1976 ring->cleanup = intel_fini_pipe_control;
1975 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { 1977 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1976 ring->get_seqno = bxt_a_get_seqno; 1978 ring->get_seqno = bxt_a_get_seqno;
1977 ring->set_seqno = bxt_a_set_seqno; 1979 ring->set_seqno = bxt_a_set_seqno;
1978 } else { 1980 } else {
@@ -2024,7 +2026,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
2024 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2026 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2025 2027
2026 ring->init_hw = gen8_init_common_ring; 2028 ring->init_hw = gen8_init_common_ring;
2027 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { 2029 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2028 ring->get_seqno = bxt_a_get_seqno; 2030 ring->get_seqno = bxt_a_get_seqno;
2029 ring->set_seqno = bxt_a_set_seqno; 2031 ring->set_seqno = bxt_a_set_seqno;
2030 } else { 2032 } else {
@@ -2079,7 +2081,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
2079 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2081 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2080 2082
2081 ring->init_hw = gen8_init_common_ring; 2083 ring->init_hw = gen8_init_common_ring;
2082 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { 2084 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2083 ring->get_seqno = bxt_a_get_seqno; 2085 ring->get_seqno = bxt_a_get_seqno;
2084 ring->set_seqno = bxt_a_set_seqno; 2086 ring->set_seqno = bxt_a_set_seqno;
2085 } else { 2087 } else {
@@ -2109,7 +2111,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
2109 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 2111 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2110 2112
2111 ring->init_hw = gen8_init_common_ring; 2113 ring->init_hw = gen8_init_common_ring;
2112 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) { 2114 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2113 ring->get_seqno = bxt_a_get_seqno; 2115 ring->get_seqno = bxt_a_get_seqno;
2114 ring->set_seqno = bxt_a_set_seqno; 2116 ring->set_seqno = bxt_a_set_seqno;
2115 } else { 2117 } else {
@@ -2263,46 +2265,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2263 * only for the first context restore: on a subsequent save, the GPU will 2265 * only for the first context restore: on a subsequent save, the GPU will
2264 * recreate this batchbuffer with new values (including all the missing 2266 * recreate this batchbuffer with new values (including all the missing
2265 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 2267 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
2266 if (ring->id == RCS) 2268 reg_state[CTX_LRI_HEADER_0] =
2267 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14); 2269 MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2268 else 2270 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
2269 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11); 2271 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2270 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED; 2272 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2271 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); 2273 CTX_CTRL_RS_CTX_ENABLE));
2272 reg_state[CTX_CONTEXT_CONTROL+1] = 2274 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
2273 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2275 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
2274 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2275 CTX_CTRL_RS_CTX_ENABLE);
2276 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
2277 reg_state[CTX_RING_HEAD+1] = 0;
2278 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
2279 reg_state[CTX_RING_TAIL+1] = 0;
2280 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
2281 /* Ring buffer start address is not known until the buffer is pinned. 2276 /* Ring buffer start address is not known until the buffer is pinned.
2282 * It is written to the context image in execlists_update_context() 2277 * It is written to the context image in execlists_update_context()
2283 */ 2278 */
2284 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); 2279 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
2285 reg_state[CTX_RING_BUFFER_CONTROL+1] = 2280 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
2286 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; 2281 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2287 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; 2282 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
2288 reg_state[CTX_BB_HEAD_U+1] = 0; 2283 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
2289 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; 2284 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
2290 reg_state[CTX_BB_HEAD_L+1] = 0; 2285 RING_BB_PPGTT);
2291 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; 2286 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
2292 reg_state[CTX_BB_STATE+1] = (1<<5); 2287 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
2293 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; 2288 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
2294 reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
2295 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
2296 reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
2297 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
2298 reg_state[CTX_SECOND_BB_STATE+1] = 0;
2299 if (ring->id == RCS) { 2289 if (ring->id == RCS) {
2300 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0; 2290 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
2301 reg_state[CTX_BB_PER_CTX_PTR+1] = 0; 2291 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
2302 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4; 2292 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
2303 reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
2304 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
2305 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
2306 if (ring->wa_ctx.obj) { 2293 if (ring->wa_ctx.obj) {
2307 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; 2294 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
2308 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); 2295 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
@@ -2319,18 +2306,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2319 0x01; 2306 0x01;
2320 } 2307 }
2321 } 2308 }
2322 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9); 2309 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2323 reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED; 2310 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
2324 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8; 2311 /* PDP values well be assigned later if needed */
2325 reg_state[CTX_CTX_TIMESTAMP+1] = 0; 2312 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
2326 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3); 2313 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
2327 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3); 2314 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
2328 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2); 2315 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
2329 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2); 2316 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
2330 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1); 2317 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
2331 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); 2318 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
2332 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); 2319 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
2333 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
2334 2320
2335 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 2321 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2336 /* 64b PPGTT (48bit canonical) 2322 /* 64b PPGTT (48bit canonical)
@@ -2352,8 +2338,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2352 2338
2353 if (ring->id == RCS) { 2339 if (ring->id == RCS) {
2354 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2340 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2355 reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE; 2341 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2356 reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev); 2342 make_rpcs(dev));
2357 } 2343 }
2358 2344
2359 kunmap_atomic(reg_state); 2345 kunmap_atomic(reg_state);
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4e60d54ba66d..0b821b91723a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -29,16 +29,16 @@
29#define GEN8_CSB_PTR_MASK 0x07 29#define GEN8_CSB_PTR_MASK 0x07
30 30
31/* Execlists regs */ 31/* Execlists regs */
32#define RING_ELSP(ring) ((ring)->mmio_base+0x230) 32#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
33#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234) 33#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
34#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4) 34#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
35#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) 35#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
36#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) 36#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
37#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) 37#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
38#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) 38#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
39#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8) 39#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
40#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4) 40#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
41#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) 41#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
42 42
43/* Logical Rings */ 43/* Logical Rings */
44int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); 44int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
@@ -70,6 +70,11 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
70 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); 70 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
71 ringbuf->tail += 4; 71 ringbuf->tail += 4;
72} 72}
73static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
74 i915_reg_t reg)
75{
76 intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
77}
73 78
74/* Logical Ring Contexts */ 79/* Logical Ring Contexts */
75 80
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 7f39b8ad88ae..61f1145f6579 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,7 @@ struct intel_lvds_encoder {
51 struct intel_encoder base; 51 struct intel_encoder base;
52 52
53 bool is_dual_link; 53 bool is_dual_link;
54 u32 reg; 54 i915_reg_t reg;
55 u32 a3_power; 55 u32 a3_power;
56 56
57 struct intel_lvds_connector *attached_connector; 57 struct intel_lvds_connector *attached_connector;
@@ -210,7 +210,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
210 struct intel_connector *intel_connector = 210 struct intel_connector *intel_connector =
211 &lvds_encoder->attached_connector->base; 211 &lvds_encoder->attached_connector->base;
212 struct drm_i915_private *dev_priv = dev->dev_private; 212 struct drm_i915_private *dev_priv = dev->dev_private;
213 u32 ctl_reg, stat_reg; 213 i915_reg_t ctl_reg, stat_reg;
214 214
215 if (HAS_PCH_SPLIT(dev)) { 215 if (HAS_PCH_SPLIT(dev)) {
216 ctl_reg = PCH_PP_CONTROL; 216 ctl_reg = PCH_PP_CONTROL;
@@ -235,7 +235,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
235 struct drm_device *dev = encoder->base.dev; 235 struct drm_device *dev = encoder->base.dev;
236 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 236 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
237 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
238 u32 ctl_reg, stat_reg; 238 i915_reg_t ctl_reg, stat_reg;
239 239
240 if (HAS_PCH_SPLIT(dev)) { 240 if (HAS_PCH_SPLIT(dev)) {
241 ctl_reg = PCH_PP_CONTROL; 241 ctl_reg = PCH_PP_CONTROL;
@@ -939,7 +939,7 @@ void intel_lvds_init(struct drm_device *dev)
939 struct drm_display_mode *downclock_mode = NULL; 939 struct drm_display_mode *downclock_mode = NULL;
940 struct edid *edid; 940 struct edid *edid;
941 struct drm_crtc *crtc; 941 struct drm_crtc *crtc;
942 u32 lvds_reg; 942 i915_reg_t lvds_reg;
943 u32 lvds; 943 u32 lvds;
944 int pipe; 944 int pipe;
945 u8 pin; 945 u8 pin;
@@ -1164,8 +1164,7 @@ out:
1164 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1164 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1165 lvds_encoder->is_dual_link ? "dual" : "single"); 1165 lvds_encoder->is_dual_link ? "dual" : "single");
1166 1166
1167 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & 1167 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
1168 LVDS_A3_POWER_MASK;
1169 1168
1170 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1169 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1171 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1170 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6d3c6c0a5c62..fed7bea19cc9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -143,7 +143,7 @@ static bool get_mocs_settings(struct drm_device *dev,
143{ 143{
144 bool result = false; 144 bool result = false;
145 145
146 if (IS_SKYLAKE(dev)) { 146 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
147 table->size = ARRAY_SIZE(skylake_mocs_table); 147 table->size = ARRAY_SIZE(skylake_mocs_table);
148 table->table = skylake_mocs_table; 148 table->table = skylake_mocs_table;
149 result = true; 149 result = true;
@@ -159,11 +159,30 @@ static bool get_mocs_settings(struct drm_device *dev,
159 return result; 159 return result;
160} 160}
161 161
162static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
163{
164 switch (ring) {
165 case RCS:
166 return GEN9_GFX_MOCS(index);
167 case VCS:
168 return GEN9_MFX0_MOCS(index);
169 case BCS:
170 return GEN9_BLT_MOCS(index);
171 case VECS:
172 return GEN9_VEBOX_MOCS(index);
173 case VCS2:
174 return GEN9_MFX1_MOCS(index);
175 default:
176 MISSING_CASE(ring);
177 return INVALID_MMIO_REG;
178 }
179}
180
162/** 181/**
163 * emit_mocs_control_table() - emit the mocs control table 182 * emit_mocs_control_table() - emit the mocs control table
164 * @req: Request to set up the MOCS table for. 183 * @req: Request to set up the MOCS table for.
165 * @table: The values to program into the control regs. 184 * @table: The values to program into the control regs.
166 * @reg_base: The base for the engine that needs to be programmed. 185 * @ring: The engine for whom to emit the registers.
167 * 186 *
168 * This function simply emits a MI_LOAD_REGISTER_IMM command for the 187 * This function simply emits a MI_LOAD_REGISTER_IMM command for the
169 * given table starting at the given address. 188 * given table starting at the given address.
@@ -172,7 +191,7 @@ static bool get_mocs_settings(struct drm_device *dev,
172 */ 191 */
173static int emit_mocs_control_table(struct drm_i915_gem_request *req, 192static int emit_mocs_control_table(struct drm_i915_gem_request *req,
174 const struct drm_i915_mocs_table *table, 193 const struct drm_i915_mocs_table *table,
175 u32 reg_base) 194 enum intel_ring_id ring)
176{ 195{
177 struct intel_ringbuffer *ringbuf = req->ringbuf; 196 struct intel_ringbuffer *ringbuf = req->ringbuf;
178 unsigned int index; 197 unsigned int index;
@@ -191,7 +210,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
191 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); 210 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
192 211
193 for (index = 0; index < table->size; index++) { 212 for (index = 0; index < table->size; index++) {
194 intel_logical_ring_emit(ringbuf, reg_base + index * 4); 213 intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
195 intel_logical_ring_emit(ringbuf, 214 intel_logical_ring_emit(ringbuf,
196 table->table[index].control_value); 215 table->table[index].control_value);
197 } 216 }
@@ -205,7 +224,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
205 * that value to all the used entries. 224 * that value to all the used entries.
206 */ 225 */
207 for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { 226 for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
208 intel_logical_ring_emit(ringbuf, reg_base + index * 4); 227 intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
209 intel_logical_ring_emit(ringbuf, table->table[0].control_value); 228 intel_logical_ring_emit(ringbuf, table->table[0].control_value);
210 } 229 }
211 230
@@ -253,7 +272,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
253 value = (table->table[count].l3cc_value & 0xffff) | 272 value = (table->table[count].l3cc_value & 0xffff) |
254 ((table->table[count + 1].l3cc_value & 0xffff) << 16); 273 ((table->table[count + 1].l3cc_value & 0xffff) << 16);
255 274
256 intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); 275 intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
257 intel_logical_ring_emit(ringbuf, value); 276 intel_logical_ring_emit(ringbuf, value);
258 } 277 }
259 278
@@ -270,7 +289,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
270 * they are reserved by the hardware. 289 * they are reserved by the hardware.
271 */ 290 */
272 for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { 291 for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
273 intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4); 292 intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
274 intel_logical_ring_emit(ringbuf, value); 293 intel_logical_ring_emit(ringbuf, value);
275 294
276 value = filler; 295 value = filler;
@@ -304,26 +323,16 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
304 int ret; 323 int ret;
305 324
306 if (get_mocs_settings(req->ring->dev, &t)) { 325 if (get_mocs_settings(req->ring->dev, &t)) {
307 /* Program the control registers */ 326 struct drm_i915_private *dev_priv = req->i915;
308 ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0); 327 struct intel_engine_cs *ring;
309 if (ret) 328 enum intel_ring_id ring_id;
310 return ret;
311
312 ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
313 if (ret)
314 return ret;
315 329
316 ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0); 330 /* Program the control registers */
317 if (ret) 331 for_each_ring(ring, dev_priv, ring_id) {
318 return ret; 332 ret = emit_mocs_control_table(req, &t, ring_id);
319 333 if (ret)
320 ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0); 334 return ret;
321 if (ret) 335 }
322 return ret;
323
324 ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
325 if (ret)
326 return ret;
327 336
328 /* Now program the l3cc registers */ 337 /* Now program the l3cc registers */
329 ret = emit_mocs_l3cc_table(req, &t); 338 ret = emit_mocs_l3cc_table(req, &t);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6dc13c02c28e..e362a30776fa 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
682 } 682 }
683 683
684 if (!acpi_video_bus) { 684 if (!acpi_video_bus) {
685 DRM_ERROR("No ACPI video bus found\n"); 685 DRM_DEBUG_KMS("No ACPI video bus found\n");
686 return; 686 return;
687 } 687 }
688 688
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 444542696a2c..76f1980a7541 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -749,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
749 if (ret != 0) 749 if (ret != 0)
750 return ret; 750 return ret;
751 751
752 ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL, 752 ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
753 &i915_ggtt_view_normal); 753 &i915_ggtt_view_normal);
754 if (ret != 0) 754 if (ret != 0)
755 return ret; 755 return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 071a76b9ac52..96f45d7b3e4b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1708,13 +1708,6 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1708 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1708 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1709} 1709}
1710 1710
1711struct skl_pipe_wm_parameters {
1712 bool active;
1713 uint32_t pipe_htotal;
1714 uint32_t pixel_rate; /* in KHz */
1715 struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
1716};
1717
1718struct ilk_wm_maximums { 1711struct ilk_wm_maximums {
1719 uint16_t pri; 1712 uint16_t pri;
1720 uint16_t spr; 1713 uint16_t spr;
@@ -1722,13 +1715,6 @@ struct ilk_wm_maximums {
1722 uint16_t fbc; 1715 uint16_t fbc;
1723}; 1716};
1724 1717
1725/* used in computing the new watermarks state */
1726struct intel_wm_config {
1727 unsigned int num_pipes_active;
1728 bool sprites_enabled;
1729 bool sprites_scaled;
1730};
1731
1732/* 1718/*
1733 * For both WM_PIPE and WM_LP. 1719 * For both WM_PIPE and WM_LP.
1734 * mem_value must be in 0.1us units. 1720 * mem_value must be in 0.1us units.
@@ -1979,9 +1965,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1979 const struct intel_crtc *intel_crtc, 1965 const struct intel_crtc *intel_crtc,
1980 int level, 1966 int level,
1981 struct intel_crtc_state *cstate, 1967 struct intel_crtc_state *cstate,
1968 struct intel_plane_state *pristate,
1969 struct intel_plane_state *sprstate,
1970 struct intel_plane_state *curstate,
1982 struct intel_wm_level *result) 1971 struct intel_wm_level *result)
1983{ 1972{
1984 struct intel_plane *intel_plane;
1985 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 1973 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
1986 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 1974 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
1987 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; 1975 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
@@ -1993,29 +1981,11 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1993 cur_latency *= 5; 1981 cur_latency *= 5;
1994 } 1982 }
1995 1983
1996 for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) { 1984 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
1997 struct intel_plane_state *pstate = 1985 pri_latency, level);
1998 to_intel_plane_state(intel_plane->base.state); 1986 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
1999 1987 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2000 switch (intel_plane->base.type) { 1988 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2001 case DRM_PLANE_TYPE_PRIMARY:
2002 result->pri_val = ilk_compute_pri_wm(cstate, pstate,
2003 pri_latency,
2004 level);
2005 result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
2006 result->pri_val);
2007 break;
2008 case DRM_PLANE_TYPE_OVERLAY:
2009 result->spr_val = ilk_compute_spr_wm(cstate, pstate,
2010 spr_latency);
2011 break;
2012 case DRM_PLANE_TYPE_CURSOR:
2013 result->cur_val = ilk_compute_cur_wm(cstate, pstate,
2014 cur_latency);
2015 break;
2016 }
2017 }
2018
2019 result->enable = true; 1989 result->enable = true;
2020} 1990}
2021 1991
@@ -2274,34 +2244,19 @@ static void skl_setup_wm_latency(struct drm_device *dev)
2274 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2244 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2275} 2245}
2276 2246
2277static void ilk_compute_wm_config(struct drm_device *dev,
2278 struct intel_wm_config *config)
2279{
2280 struct intel_crtc *intel_crtc;
2281
2282 /* Compute the currently _active_ config */
2283 for_each_intel_crtc(dev, intel_crtc) {
2284 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2285
2286 if (!wm->pipe_enabled)
2287 continue;
2288
2289 config->sprites_enabled |= wm->sprites_enabled;
2290 config->sprites_scaled |= wm->sprites_scaled;
2291 config->num_pipes_active++;
2292 }
2293}
2294
2295/* Compute new watermarks for the pipe */ 2247/* Compute new watermarks for the pipe */
2296static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, 2248static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
2297 struct intel_pipe_wm *pipe_wm) 2249 struct drm_atomic_state *state)
2298{ 2250{
2299 struct drm_crtc *crtc = cstate->base.crtc; 2251 struct intel_pipe_wm *pipe_wm;
2300 struct drm_device *dev = crtc->dev; 2252 struct drm_device *dev = intel_crtc->base.dev;
2301 const struct drm_i915_private *dev_priv = dev->dev_private; 2253 const struct drm_i915_private *dev_priv = dev->dev_private;
2302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2254 struct intel_crtc_state *cstate = NULL;
2303 struct intel_plane *intel_plane; 2255 struct intel_plane *intel_plane;
2256 struct drm_plane_state *ps;
2257 struct intel_plane_state *pristate = NULL;
2304 struct intel_plane_state *sprstate = NULL; 2258 struct intel_plane_state *sprstate = NULL;
2259 struct intel_plane_state *curstate = NULL;
2305 int level, max_level = ilk_wm_max_level(dev); 2260 int level, max_level = ilk_wm_max_level(dev);
2306 /* LP0 watermark maximums depend on this pipe alone */ 2261 /* LP0 watermark maximums depend on this pipe alone */
2307 struct intel_wm_config config = { 2262 struct intel_wm_config config = {
@@ -2309,11 +2264,24 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2309 }; 2264 };
2310 struct ilk_wm_maximums max; 2265 struct ilk_wm_maximums max;
2311 2266
2267 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
2268 if (IS_ERR(cstate))
2269 return PTR_ERR(cstate);
2270
2271 pipe_wm = &cstate->wm.optimal.ilk;
2272
2312 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2273 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2313 if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) { 2274 ps = drm_atomic_get_plane_state(state,
2314 sprstate = to_intel_plane_state(intel_plane->base.state); 2275 &intel_plane->base);
2315 break; 2276 if (IS_ERR(ps))
2316 } 2277 return PTR_ERR(ps);
2278
2279 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2280 pristate = to_intel_plane_state(ps);
2281 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2282 sprstate = to_intel_plane_state(ps);
2283 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2284 curstate = to_intel_plane_state(ps);
2317 } 2285 }
2318 2286
2319 config.sprites_enabled = sprstate->visible; 2287 config.sprites_enabled = sprstate->visible;
@@ -2322,7 +2290,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2322 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); 2290 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2323 2291
2324 pipe_wm->pipe_enabled = cstate->base.active; 2292 pipe_wm->pipe_enabled = cstate->base.active;
2325 pipe_wm->sprites_enabled = sprstate->visible; 2293 pipe_wm->sprites_enabled = config.sprites_enabled;
2326 pipe_wm->sprites_scaled = config.sprites_scaled; 2294 pipe_wm->sprites_scaled = config.sprites_scaled;
2327 2295
2328 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2296 /* ILK/SNB: LP2+ watermarks only w/o sprites */
@@ -2333,24 +2301,27 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2333 if (config.sprites_scaled) 2301 if (config.sprites_scaled)
2334 max_level = 0; 2302 max_level = 0;
2335 2303
2336 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]); 2304 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2305 pristate, sprstate, curstate, &pipe_wm->wm[0]);
2337 2306
2338 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2307 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2339 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2308 pipe_wm->linetime = hsw_compute_linetime_wm(dev,
2309 &intel_crtc->base);
2340 2310
2341 /* LP0 watermarks always use 1/2 DDB partitioning */ 2311 /* LP0 watermarks always use 1/2 DDB partitioning */
2342 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2312 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2343 2313
2344 /* At least LP0 must be valid */ 2314 /* At least LP0 must be valid */
2345 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) 2315 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2346 return false; 2316 return -EINVAL;
2347 2317
2348 ilk_compute_wm_reg_maximums(dev, 1, &max); 2318 ilk_compute_wm_reg_maximums(dev, 1, &max);
2349 2319
2350 for (level = 1; level <= max_level; level++) { 2320 for (level = 1; level <= max_level; level++) {
2351 struct intel_wm_level wm = {}; 2321 struct intel_wm_level wm = {};
2352 2322
2353 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm); 2323 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2324 pristate, sprstate, curstate, &wm);
2354 2325
2355 /* 2326 /*
2356 * Disable any watermark level that exceeds the 2327 * Disable any watermark level that exceeds the
@@ -2363,7 +2334,7 @@ static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
2363 pipe_wm->wm[level] = wm; 2334 pipe_wm->wm[level] = wm;
2364 } 2335 }
2365 2336
2366 return true; 2337 return 0;
2367} 2338}
2368 2339
2369/* 2340/*
@@ -2378,7 +2349,9 @@ static void ilk_merge_wm_level(struct drm_device *dev,
2378 ret_wm->enable = true; 2349 ret_wm->enable = true;
2379 2350
2380 for_each_intel_crtc(dev, intel_crtc) { 2351 for_each_intel_crtc(dev, intel_crtc) {
2381 const struct intel_pipe_wm *active = &intel_crtc->wm.active; 2352 const struct intel_crtc_state *cstate =
2353 to_intel_crtc_state(intel_crtc->base.state);
2354 const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
2382 const struct intel_wm_level *wm = &active->wm[level]; 2355 const struct intel_wm_level *wm = &active->wm[level];
2383 2356
2384 if (!active->pipe_enabled) 2357 if (!active->pipe_enabled)
@@ -2526,14 +2499,15 @@ static void ilk_compute_wm_results(struct drm_device *dev,
2526 2499
2527 /* LP0 register values */ 2500 /* LP0 register values */
2528 for_each_intel_crtc(dev, intel_crtc) { 2501 for_each_intel_crtc(dev, intel_crtc) {
2502 const struct intel_crtc_state *cstate =
2503 to_intel_crtc_state(intel_crtc->base.state);
2529 enum pipe pipe = intel_crtc->pipe; 2504 enum pipe pipe = intel_crtc->pipe;
2530 const struct intel_wm_level *r = 2505 const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
2531 &intel_crtc->wm.active.wm[0];
2532 2506
2533 if (WARN_ON(!r->enable)) 2507 if (WARN_ON(!r->enable))
2534 continue; 2508 continue;
2535 2509
2536 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; 2510 results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
2537 2511
2538 results->wm_pipe[pipe] = 2512 results->wm_pipe[pipe] =
2539 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 2513 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@@ -2755,18 +2729,40 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
2755#define SKL_DDB_SIZE 896 /* in blocks */ 2729#define SKL_DDB_SIZE 896 /* in blocks */
2756#define BXT_DDB_SIZE 512 2730#define BXT_DDB_SIZE 512
2757 2731
2732/*
2733 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2734 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2735 * other universal planes are in indices 1..n. Note that this may leave unused
2736 * indices between the top "sprite" plane and the cursor.
2737 */
2738static int
2739skl_wm_plane_id(const struct intel_plane *plane)
2740{
2741 switch (plane->base.type) {
2742 case DRM_PLANE_TYPE_PRIMARY:
2743 return 0;
2744 case DRM_PLANE_TYPE_CURSOR:
2745 return PLANE_CURSOR;
2746 case DRM_PLANE_TYPE_OVERLAY:
2747 return plane->plane + 1;
2748 default:
2749 MISSING_CASE(plane->base.type);
2750 return plane->plane;
2751 }
2752}
2753
2758static void 2754static void
2759skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2755skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2760 struct drm_crtc *for_crtc, 2756 const struct intel_crtc_state *cstate,
2761 const struct intel_wm_config *config, 2757 const struct intel_wm_config *config,
2762 const struct skl_pipe_wm_parameters *params,
2763 struct skl_ddb_entry *alloc /* out */) 2758 struct skl_ddb_entry *alloc /* out */)
2764{ 2759{
2760 struct drm_crtc *for_crtc = cstate->base.crtc;
2765 struct drm_crtc *crtc; 2761 struct drm_crtc *crtc;
2766 unsigned int pipe_size, ddb_size; 2762 unsigned int pipe_size, ddb_size;
2767 int nth_active_pipe; 2763 int nth_active_pipe;
2768 2764
2769 if (!params->active) { 2765 if (!cstate->base.active) {
2770 alloc->start = 0; 2766 alloc->start = 0;
2771 alloc->end = 0; 2767 alloc->end = 0;
2772 return; 2768 return;
@@ -2837,19 +2833,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2837} 2833}
2838 2834
2839static unsigned int 2835static unsigned int
2840skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) 2836skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2837 const struct drm_plane_state *pstate,
2838 int y)
2841{ 2839{
2840 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2841 struct drm_framebuffer *fb = pstate->fb;
2842 2842
2843 /* for planar format */ 2843 /* for planar format */
2844 if (p->y_bytes_per_pixel) { 2844 if (fb->pixel_format == DRM_FORMAT_NV12) {
2845 if (y) /* y-plane data rate */ 2845 if (y) /* y-plane data rate */
2846 return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; 2846 return intel_crtc->config->pipe_src_w *
2847 intel_crtc->config->pipe_src_h *
2848 drm_format_plane_cpp(fb->pixel_format, 0);
2847 else /* uv-plane data rate */ 2849 else /* uv-plane data rate */
2848 return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; 2850 return (intel_crtc->config->pipe_src_w/2) *
2851 (intel_crtc->config->pipe_src_h/2) *
2852 drm_format_plane_cpp(fb->pixel_format, 1);
2849 } 2853 }
2850 2854
2851 /* for packed formats */ 2855 /* for packed formats */
2852 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; 2856 return intel_crtc->config->pipe_src_w *
2857 intel_crtc->config->pipe_src_h *
2858 drm_format_plane_cpp(fb->pixel_format, 0);
2853} 2859}
2854 2860
2855/* 2861/*
@@ -2858,46 +2864,55 @@ skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
2858 * 3 * 4096 * 8192 * 4 < 2^32 2864 * 3 * 4096 * 8192 * 4 < 2^32
2859 */ 2865 */
2860static unsigned int 2866static unsigned int
2861skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, 2867skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
2862 const struct skl_pipe_wm_parameters *params)
2863{ 2868{
2869 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2870 struct drm_device *dev = intel_crtc->base.dev;
2871 const struct intel_plane *intel_plane;
2864 unsigned int total_data_rate = 0; 2872 unsigned int total_data_rate = 0;
2865 int plane;
2866 2873
2867 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 2874 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2868 const struct intel_plane_wm_parameters *p; 2875 const struct drm_plane_state *pstate = intel_plane->base.state;
2869 2876
2870 p = &params->plane[plane]; 2877 if (pstate->fb == NULL)
2871 if (!p->enabled)
2872 continue; 2878 continue;
2873 2879
2874 total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ 2880 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2875 if (p->y_bytes_per_pixel) { 2881 continue;
2876 total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */ 2882
2877 } 2883 /* packed/uv */
2884 total_data_rate += skl_plane_relative_data_rate(cstate,
2885 pstate,
2886 0);
2887
2888 if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
2889 /* y-plane */
2890 total_data_rate += skl_plane_relative_data_rate(cstate,
2891 pstate,
2892 1);
2878 } 2893 }
2879 2894
2880 return total_data_rate; 2895 return total_data_rate;
2881} 2896}
2882 2897
2883static void 2898static void
2884skl_allocate_pipe_ddb(struct drm_crtc *crtc, 2899skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
2885 const struct intel_wm_config *config,
2886 const struct skl_pipe_wm_parameters *params,
2887 struct skl_ddb_allocation *ddb /* out */) 2900 struct skl_ddb_allocation *ddb /* out */)
2888{ 2901{
2902 struct drm_crtc *crtc = cstate->base.crtc;
2889 struct drm_device *dev = crtc->dev; 2903 struct drm_device *dev = crtc->dev;
2890 struct drm_i915_private *dev_priv = dev->dev_private; 2904 struct drm_i915_private *dev_priv = to_i915(dev);
2905 struct intel_wm_config *config = &dev_priv->wm.config;
2891 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2906 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2907 struct intel_plane *intel_plane;
2892 enum pipe pipe = intel_crtc->pipe; 2908 enum pipe pipe = intel_crtc->pipe;
2893 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2909 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2894 uint16_t alloc_size, start, cursor_blocks; 2910 uint16_t alloc_size, start, cursor_blocks;
2895 uint16_t minimum[I915_MAX_PLANES]; 2911 uint16_t minimum[I915_MAX_PLANES];
2896 uint16_t y_minimum[I915_MAX_PLANES]; 2912 uint16_t y_minimum[I915_MAX_PLANES];
2897 unsigned int total_data_rate; 2913 unsigned int total_data_rate;
2898 int plane;
2899 2914
2900 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); 2915 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
2901 alloc_size = skl_ddb_entry_size(alloc); 2916 alloc_size = skl_ddb_entry_size(alloc);
2902 if (alloc_size == 0) { 2917 if (alloc_size == 0) {
2903 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 2918 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
@@ -2914,17 +2929,20 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2914 alloc->end -= cursor_blocks; 2929 alloc->end -= cursor_blocks;
2915 2930
2916 /* 1. Allocate the mininum required blocks for each active plane */ 2931 /* 1. Allocate the mininum required blocks for each active plane */
2917 for_each_plane(dev_priv, pipe, plane) { 2932 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2918 const struct intel_plane_wm_parameters *p; 2933 struct drm_plane *plane = &intel_plane->base;
2934 struct drm_framebuffer *fb = plane->state->fb;
2935 int id = skl_wm_plane_id(intel_plane);
2919 2936
2920 p = &params->plane[plane]; 2937 if (fb == NULL)
2921 if (!p->enabled) 2938 continue;
2939 if (plane->type == DRM_PLANE_TYPE_CURSOR)
2922 continue; 2940 continue;
2923 2941
2924 minimum[plane] = 8; 2942 minimum[id] = 8;
2925 alloc_size -= minimum[plane]; 2943 alloc_size -= minimum[id];
2926 y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; 2944 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
2927 alloc_size -= y_minimum[plane]; 2945 alloc_size -= y_minimum[id];
2928 } 2946 }
2929 2947
2930 /* 2948 /*
@@ -2933,45 +2951,50 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2933 * 2951 *
2934 * FIXME: we may not allocate every single block here. 2952 * FIXME: we may not allocate every single block here.
2935 */ 2953 */
2936 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); 2954 total_data_rate = skl_get_total_relative_data_rate(cstate);
2937 2955
2938 start = alloc->start; 2956 start = alloc->start;
2939 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 2957 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2940 const struct intel_plane_wm_parameters *p; 2958 struct drm_plane *plane = &intel_plane->base;
2959 struct drm_plane_state *pstate = intel_plane->base.state;
2941 unsigned int data_rate, y_data_rate; 2960 unsigned int data_rate, y_data_rate;
2942 uint16_t plane_blocks, y_plane_blocks = 0; 2961 uint16_t plane_blocks, y_plane_blocks = 0;
2962 int id = skl_wm_plane_id(intel_plane);
2943 2963
2944 p = &params->plane[plane]; 2964 if (pstate->fb == NULL)
2945 if (!p->enabled) 2965 continue;
2966 if (plane->type == DRM_PLANE_TYPE_CURSOR)
2946 continue; 2967 continue;
2947 2968
2948 data_rate = skl_plane_relative_data_rate(p, 0); 2969 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
2949 2970
2950 /* 2971 /*
2951 * allocation for (packed formats) or (uv-plane part of planar format): 2972 * allocation for (packed formats) or (uv-plane part of planar format):
2952 * promote the expression to 64 bits to avoid overflowing, the 2973 * promote the expression to 64 bits to avoid overflowing, the
2953 * result is < available as data_rate / total_data_rate < 1 2974 * result is < available as data_rate / total_data_rate < 1
2954 */ 2975 */
2955 plane_blocks = minimum[plane]; 2976 plane_blocks = minimum[id];
2956 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 2977 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
2957 total_data_rate); 2978 total_data_rate);
2958 2979
2959 ddb->plane[pipe][plane].start = start; 2980 ddb->plane[pipe][id].start = start;
2960 ddb->plane[pipe][plane].end = start + plane_blocks; 2981 ddb->plane[pipe][id].end = start + plane_blocks;
2961 2982
2962 start += plane_blocks; 2983 start += plane_blocks;
2963 2984
2964 /* 2985 /*
2965 * allocation for y_plane part of planar format: 2986 * allocation for y_plane part of planar format:
2966 */ 2987 */
2967 if (p->y_bytes_per_pixel) { 2988 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
2968 y_data_rate = skl_plane_relative_data_rate(p, 1); 2989 y_data_rate = skl_plane_relative_data_rate(cstate,
2969 y_plane_blocks = y_minimum[plane]; 2990 pstate,
2991 1);
2992 y_plane_blocks = y_minimum[id];
2970 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, 2993 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
2971 total_data_rate); 2994 total_data_rate);
2972 2995
2973 ddb->y_plane[pipe][plane].start = start; 2996 ddb->y_plane[pipe][id].start = start;
2974 ddb->y_plane[pipe][plane].end = start + y_plane_blocks; 2997 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
2975 2998
2976 start += y_plane_blocks; 2999 start += y_plane_blocks;
2977 } 3000 }
@@ -3041,104 +3064,27 @@ static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3041 struct drm_device *dev = intel_crtc->base.dev; 3064 struct drm_device *dev = intel_crtc->base.dev;
3042 struct drm_i915_private *dev_priv = dev->dev_private; 3065 struct drm_i915_private *dev_priv = dev->dev_private;
3043 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3066 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3044 enum pipe pipe = intel_crtc->pipe;
3045
3046 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
3047 sizeof(new_ddb->plane[pipe])))
3048 return true;
3049 3067
3050 if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR], 3068 /*
3051 sizeof(new_ddb->plane[pipe][PLANE_CURSOR]))) 3069 * If ddb allocation of pipes changed, it may require recalculation of
3070 * watermarks
3071 */
3072 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
3052 return true; 3073 return true;
3053 3074
3054 return false; 3075 return false;
3055} 3076}
3056 3077
3057static void skl_compute_wm_global_parameters(struct drm_device *dev,
3058 struct intel_wm_config *config)
3059{
3060 struct drm_crtc *crtc;
3061 struct drm_plane *plane;
3062
3063 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3064 config->num_pipes_active += to_intel_crtc(crtc)->active;
3065
3066 /* FIXME: I don't think we need those two global parameters on SKL */
3067 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3068 struct intel_plane *intel_plane = to_intel_plane(plane);
3069
3070 config->sprites_enabled |= intel_plane->wm.enabled;
3071 config->sprites_scaled |= intel_plane->wm.scaled;
3072 }
3073}
3074
3075static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3076 struct skl_pipe_wm_parameters *p)
3077{
3078 struct drm_device *dev = crtc->dev;
3079 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3080 enum pipe pipe = intel_crtc->pipe;
3081 struct drm_plane *plane;
3082 struct drm_framebuffer *fb;
3083 int i = 1; /* Index for sprite planes start */
3084
3085 p->active = intel_crtc->active;
3086 if (p->active) {
3087 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
3088 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
3089
3090 fb = crtc->primary->state->fb;
3091 /* For planar: Bpp is for uv plane, y_Bpp is for y plane */
3092 if (fb) {
3093 p->plane[0].enabled = true;
3094 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
3095 drm_format_plane_cpp(fb->pixel_format, 1) :
3096 drm_format_plane_cpp(fb->pixel_format, 0);
3097 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
3098 drm_format_plane_cpp(fb->pixel_format, 0) : 0;
3099 p->plane[0].tiling = fb->modifier[0];
3100 } else {
3101 p->plane[0].enabled = false;
3102 p->plane[0].bytes_per_pixel = 0;
3103 p->plane[0].y_bytes_per_pixel = 0;
3104 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
3105 }
3106 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
3107 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
3108 p->plane[0].rotation = crtc->primary->state->rotation;
3109
3110 fb = crtc->cursor->state->fb;
3111 p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
3112 if (fb) {
3113 p->plane[PLANE_CURSOR].enabled = true;
3114 p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
3115 p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
3116 p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
3117 } else {
3118 p->plane[PLANE_CURSOR].enabled = false;
3119 p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
3120 p->plane[PLANE_CURSOR].horiz_pixels = 64;
3121 p->plane[PLANE_CURSOR].vert_pixels = 64;
3122 }
3123 }
3124
3125 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3126 struct intel_plane *intel_plane = to_intel_plane(plane);
3127
3128 if (intel_plane->pipe == pipe &&
3129 plane->type == DRM_PLANE_TYPE_OVERLAY)
3130 p->plane[i++] = intel_plane->wm;
3131 }
3132}
3133
3134static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3078static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3135 struct skl_pipe_wm_parameters *p, 3079 struct intel_crtc_state *cstate,
3136 struct intel_plane_wm_parameters *p_params, 3080 struct intel_plane *intel_plane,
3137 uint16_t ddb_allocation, 3081 uint16_t ddb_allocation,
3138 int level, 3082 int level,
3139 uint16_t *out_blocks, /* out */ 3083 uint16_t *out_blocks, /* out */
3140 uint8_t *out_lines /* out */) 3084 uint8_t *out_lines /* out */)
3141{ 3085{
3086 struct drm_plane *plane = &intel_plane->base;
3087 struct drm_framebuffer *fb = plane->state->fb;
3142 uint32_t latency = dev_priv->wm.skl_latency[level]; 3088 uint32_t latency = dev_priv->wm.skl_latency[level];
3143 uint32_t method1, method2; 3089 uint32_t method1, method2;
3144 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3090 uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3146,31 +3092,33 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3146 uint32_t selected_result; 3092 uint32_t selected_result;
3147 uint8_t bytes_per_pixel; 3093 uint8_t bytes_per_pixel;
3148 3094
3149 if (latency == 0 || !p->active || !p_params->enabled) 3095 if (latency == 0 || !cstate->base.active || !fb)
3150 return false; 3096 return false;
3151 3097
3152 bytes_per_pixel = p_params->y_bytes_per_pixel ? 3098 bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
3153 p_params->y_bytes_per_pixel : 3099 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
3154 p_params->bytes_per_pixel;
3155 method1 = skl_wm_method1(p->pixel_rate,
3156 bytes_per_pixel, 3100 bytes_per_pixel,
3157 latency); 3101 latency);
3158 method2 = skl_wm_method2(p->pixel_rate, 3102 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
3159 p->pipe_htotal, 3103 cstate->base.adjusted_mode.crtc_htotal,
3160 p_params->horiz_pixels, 3104 cstate->pipe_src_w,
3161 bytes_per_pixel, 3105 bytes_per_pixel,
3162 p_params->tiling, 3106 fb->modifier[0],
3163 latency); 3107 latency);
3164 3108
3165 plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; 3109 plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
3166 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3110 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3167 3111
3168 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 3112 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3169 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { 3113 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3170 uint32_t min_scanlines = 4; 3114 uint32_t min_scanlines = 4;
3171 uint32_t y_tile_minimum; 3115 uint32_t y_tile_minimum;
3172 if (intel_rotation_90_or_270(p_params->rotation)) { 3116 if (intel_rotation_90_or_270(plane->state->rotation)) {
3173 switch (p_params->bytes_per_pixel) { 3117 int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3118 drm_format_plane_cpp(fb->pixel_format, 1) :
3119 drm_format_plane_cpp(fb->pixel_format, 0);
3120
3121 switch (bpp) {
3174 case 1: 3122 case 1:
3175 min_scanlines = 16; 3123 min_scanlines = 16;
3176 break; 3124 break;
@@ -3194,8 +3142,8 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3194 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); 3142 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3195 3143
3196 if (level >= 1 && level <= 7) { 3144 if (level >= 1 && level <= 7) {
3197 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 3145 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3198 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) 3146 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
3199 res_lines += 4; 3147 res_lines += 4;
3200 else 3148 else
3201 res_blocks++; 3149 res_blocks++;
@@ -3212,84 +3160,80 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3212 3160
3213static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3161static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3214 struct skl_ddb_allocation *ddb, 3162 struct skl_ddb_allocation *ddb,
3215 struct skl_pipe_wm_parameters *p, 3163 struct intel_crtc_state *cstate,
3216 enum pipe pipe,
3217 int level, 3164 int level,
3218 int num_planes,
3219 struct skl_wm_level *result) 3165 struct skl_wm_level *result)
3220{ 3166{
3167 struct drm_device *dev = dev_priv->dev;
3168 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3169 struct intel_plane *intel_plane;
3221 uint16_t ddb_blocks; 3170 uint16_t ddb_blocks;
3222 int i; 3171 enum pipe pipe = intel_crtc->pipe;
3172
3173 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3174 int i = skl_wm_plane_id(intel_plane);
3223 3175
3224 for (i = 0; i < num_planes; i++) {
3225 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3176 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3226 3177
3227 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3178 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
3228 p, &p->plane[i], 3179 cstate,
3180 intel_plane,
3229 ddb_blocks, 3181 ddb_blocks,
3230 level, 3182 level,
3231 &result->plane_res_b[i], 3183 &result->plane_res_b[i],
3232 &result->plane_res_l[i]); 3184 &result->plane_res_l[i]);
3233 } 3185 }
3234
3235 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
3236 result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
3237 &p->plane[PLANE_CURSOR],
3238 ddb_blocks, level,
3239 &result->plane_res_b[PLANE_CURSOR],
3240 &result->plane_res_l[PLANE_CURSOR]);
3241} 3186}
3242 3187
3243static uint32_t 3188static uint32_t
3244skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) 3189skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3245{ 3190{
3246 if (!to_intel_crtc(crtc)->active) 3191 if (!cstate->base.active)
3247 return 0; 3192 return 0;
3248 3193
3249 if (WARN_ON(p->pixel_rate == 0)) 3194 if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
3250 return 0; 3195 return 0;
3251 3196
3252 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); 3197 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3198 skl_pipe_pixel_rate(cstate));
3253} 3199}
3254 3200
3255static void skl_compute_transition_wm(struct drm_crtc *crtc, 3201static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3256 struct skl_pipe_wm_parameters *params,
3257 struct skl_wm_level *trans_wm /* out */) 3202 struct skl_wm_level *trans_wm /* out */)
3258{ 3203{
3204 struct drm_crtc *crtc = cstate->base.crtc;
3259 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3260 int i; 3206 struct intel_plane *intel_plane;
3261 3207
3262 if (!params->active) 3208 if (!cstate->base.active)
3263 return; 3209 return;
3264 3210
3265 /* Until we know more, just disable transition WMs */ 3211 /* Until we know more, just disable transition WMs */
3266 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3212 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3213 int i = skl_wm_plane_id(intel_plane);
3214
3267 trans_wm->plane_en[i] = false; 3215 trans_wm->plane_en[i] = false;
3268 trans_wm->plane_en[PLANE_CURSOR] = false; 3216 }
3269} 3217}
3270 3218
3271static void skl_compute_pipe_wm(struct drm_crtc *crtc, 3219static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
3272 struct skl_ddb_allocation *ddb, 3220 struct skl_ddb_allocation *ddb,
3273 struct skl_pipe_wm_parameters *params,
3274 struct skl_pipe_wm *pipe_wm) 3221 struct skl_pipe_wm *pipe_wm)
3275{ 3222{
3276 struct drm_device *dev = crtc->dev; 3223 struct drm_device *dev = cstate->base.crtc->dev;
3277 const struct drm_i915_private *dev_priv = dev->dev_private; 3224 const struct drm_i915_private *dev_priv = dev->dev_private;
3278 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3279 int level, max_level = ilk_wm_max_level(dev); 3225 int level, max_level = ilk_wm_max_level(dev);
3280 3226
3281 for (level = 0; level <= max_level; level++) { 3227 for (level = 0; level <= max_level; level++) {
3282 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, 3228 skl_compute_wm_level(dev_priv, ddb, cstate,
3283 level, intel_num_planes(intel_crtc), 3229 level, &pipe_wm->wm[level]);
3284 &pipe_wm->wm[level]);
3285 } 3230 }
3286 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); 3231 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3287 3232
3288 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); 3233 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3289} 3234}
3290 3235
3291static void skl_compute_wm_results(struct drm_device *dev, 3236static void skl_compute_wm_results(struct drm_device *dev,
3292 struct skl_pipe_wm_parameters *p,
3293 struct skl_pipe_wm *p_wm, 3237 struct skl_pipe_wm *p_wm,
3294 struct skl_wm_values *r, 3238 struct skl_wm_values *r,
3295 struct intel_crtc *intel_crtc) 3239 struct intel_crtc *intel_crtc)
@@ -3346,7 +3290,8 @@ static void skl_compute_wm_results(struct drm_device *dev,
3346 r->wm_linetime[pipe] = p_wm->linetime; 3290 r->wm_linetime[pipe] = p_wm->linetime;
3347} 3291}
3348 3292
3349static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg, 3293static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3294 i915_reg_t reg,
3350 const struct skl_ddb_entry *entry) 3295 const struct skl_ddb_entry *entry)
3351{ 3296{
3352 if (entry->end) 3297 if (entry->end)
@@ -3533,28 +3478,25 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3533} 3478}
3534 3479
3535static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3480static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3536 struct skl_pipe_wm_parameters *params,
3537 struct intel_wm_config *config,
3538 struct skl_ddb_allocation *ddb, /* out */ 3481 struct skl_ddb_allocation *ddb, /* out */
3539 struct skl_pipe_wm *pipe_wm /* out */) 3482 struct skl_pipe_wm *pipe_wm /* out */)
3540{ 3483{
3541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3484 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3485 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3542 3486
3543 skl_compute_wm_pipe_parameters(crtc, params); 3487 skl_allocate_pipe_ddb(cstate, ddb);
3544 skl_allocate_pipe_ddb(crtc, config, params, ddb); 3488 skl_compute_pipe_wm(cstate, ddb, pipe_wm);
3545 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3546 3489
3547 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) 3490 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3548 return false; 3491 return false;
3549 3492
3550 intel_crtc->wm.skl_active = *pipe_wm; 3493 intel_crtc->wm.active.skl = *pipe_wm;
3551 3494
3552 return true; 3495 return true;
3553} 3496}
3554 3497
3555static void skl_update_other_pipe_wm(struct drm_device *dev, 3498static void skl_update_other_pipe_wm(struct drm_device *dev,
3556 struct drm_crtc *crtc, 3499 struct drm_crtc *crtc,
3557 struct intel_wm_config *config,
3558 struct skl_wm_values *r) 3500 struct skl_wm_values *r)
3559{ 3501{
3560 struct intel_crtc *intel_crtc; 3502 struct intel_crtc *intel_crtc;
@@ -3575,7 +3517,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
3575 */ 3517 */
3576 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 3518 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3577 base.head) { 3519 base.head) {
3578 struct skl_pipe_wm_parameters params = {};
3579 struct skl_pipe_wm pipe_wm = {}; 3520 struct skl_pipe_wm pipe_wm = {};
3580 bool wm_changed; 3521 bool wm_changed;
3581 3522
@@ -3586,7 +3527,6 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
3586 continue; 3527 continue;
3587 3528
3588 wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3529 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3589 &params, config,
3590 &r->ddb, &pipe_wm); 3530 &r->ddb, &pipe_wm);
3591 3531
3592 /* 3532 /*
@@ -3596,7 +3536,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
3596 */ 3536 */
3597 WARN_ON(!wm_changed); 3537 WARN_ON(!wm_changed);
3598 3538
3599 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc); 3539 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
3600 r->dirty[intel_crtc->pipe] = true; 3540 r->dirty[intel_crtc->pipe] = true;
3601 } 3541 }
3602} 3542}
@@ -3626,10 +3566,9 @@ static void skl_update_wm(struct drm_crtc *crtc)
3626 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3566 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3627 struct drm_device *dev = crtc->dev; 3567 struct drm_device *dev = crtc->dev;
3628 struct drm_i915_private *dev_priv = dev->dev_private; 3568 struct drm_i915_private *dev_priv = dev->dev_private;
3629 struct skl_pipe_wm_parameters params = {};
3630 struct skl_wm_values *results = &dev_priv->wm.skl_results; 3569 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3631 struct skl_pipe_wm pipe_wm = {}; 3570 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3632 struct intel_wm_config config = {}; 3571 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
3633 3572
3634 3573
3635 /* Clear all dirty flags */ 3574 /* Clear all dirty flags */
@@ -3637,16 +3576,13 @@ static void skl_update_wm(struct drm_crtc *crtc)
3637 3576
3638 skl_clear_wm(results, intel_crtc->pipe); 3577 skl_clear_wm(results, intel_crtc->pipe);
3639 3578
3640 skl_compute_wm_global_parameters(dev, &config); 3579 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
3641
3642 if (!skl_update_pipe_wm(crtc, &params, &config,
3643 &results->ddb, &pipe_wm))
3644 return; 3580 return;
3645 3581
3646 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc); 3582 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
3647 results->dirty[intel_crtc->pipe] = true; 3583 results->dirty[intel_crtc->pipe] = true;
3648 3584
3649 skl_update_other_pipe_wm(dev, crtc, &config, results); 3585 skl_update_other_pipe_wm(dev, crtc, results);
3650 skl_write_wm_values(dev_priv, results); 3586 skl_write_wm_values(dev_priv, results);
3651 skl_flush_wm_values(dev_priv, results); 3587 skl_flush_wm_values(dev_priv, results);
3652 3588
@@ -3654,71 +3590,23 @@ static void skl_update_wm(struct drm_crtc *crtc)
3654 dev_priv->wm.skl_hw = *results; 3590 dev_priv->wm.skl_hw = *results;
3655} 3591}
3656 3592
3657static void 3593static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3658skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3659 uint32_t sprite_width, uint32_t sprite_height,
3660 int pixel_size, bool enabled, bool scaled)
3661{
3662 struct intel_plane *intel_plane = to_intel_plane(plane);
3663 struct drm_framebuffer *fb = plane->state->fb;
3664
3665 intel_plane->wm.enabled = enabled;
3666 intel_plane->wm.scaled = scaled;
3667 intel_plane->wm.horiz_pixels = sprite_width;
3668 intel_plane->wm.vert_pixels = sprite_height;
3669 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
3670
3671 /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
3672 intel_plane->wm.bytes_per_pixel =
3673 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3674 drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
3675 intel_plane->wm.y_bytes_per_pixel =
3676 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3677 drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
3678
3679 /*
3680 * Framebuffer can be NULL on plane disable, but it does not
3681 * matter for watermarks if we assume no tiling in that case.
3682 */
3683 if (fb)
3684 intel_plane->wm.tiling = fb->modifier[0];
3685 intel_plane->wm.rotation = plane->state->rotation;
3686
3687 skl_update_wm(crtc);
3688}
3689
3690static void ilk_update_wm(struct drm_crtc *crtc)
3691{ 3594{
3692 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3595 struct drm_device *dev = dev_priv->dev;
3693 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3596 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3694 struct drm_device *dev = crtc->dev;
3695 struct drm_i915_private *dev_priv = dev->dev_private;
3696 struct ilk_wm_maximums max; 3597 struct ilk_wm_maximums max;
3598 struct intel_wm_config *config = &dev_priv->wm.config;
3697 struct ilk_wm_values results = {}; 3599 struct ilk_wm_values results = {};
3698 enum intel_ddb_partitioning partitioning; 3600 enum intel_ddb_partitioning partitioning;
3699 struct intel_pipe_wm pipe_wm = {};
3700 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3701 struct intel_wm_config config = {};
3702
3703 WARN_ON(cstate->base.active != intel_crtc->active);
3704
3705 intel_compute_pipe_wm(cstate, &pipe_wm);
3706 3601
3707 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) 3602 ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
3708 return; 3603 ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
3709
3710 intel_crtc->wm.active = pipe_wm;
3711
3712 ilk_compute_wm_config(dev, &config);
3713
3714 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3715 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3716 3604
3717 /* 5/6 split only in single pipe config on IVB+ */ 3605 /* 5/6 split only in single pipe config on IVB+ */
3718 if (INTEL_INFO(dev)->gen >= 7 && 3606 if (INTEL_INFO(dev)->gen >= 7 &&
3719 config.num_pipes_active == 1 && config.sprites_enabled) { 3607 config->num_pipes_active == 1 && config->sprites_enabled) {
3720 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 3608 ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
3721 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 3609 ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
3722 3610
3723 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 3611 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3724 } else { 3612 } else {
@@ -3733,14 +3621,13 @@ static void ilk_update_wm(struct drm_crtc *crtc)
3733 ilk_write_wm_values(dev_priv, &results); 3621 ilk_write_wm_values(dev_priv, &results);
3734} 3622}
3735 3623
3736static void 3624static void ilk_update_wm(struct drm_crtc *crtc)
3737ilk_update_sprite_wm(struct drm_plane *plane,
3738 struct drm_crtc *crtc,
3739 uint32_t sprite_width, uint32_t sprite_height,
3740 int pixel_size, bool enabled, bool scaled)
3741{ 3625{
3742 struct drm_device *dev = plane->dev; 3626 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3743 struct intel_plane *intel_plane = to_intel_plane(plane); 3627 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3628 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3629
3630 WARN_ON(cstate->base.active != intel_crtc->active);
3744 3631
3745 /* 3632 /*
3746 * IVB workaround: must disable low power watermarks for at least 3633 * IVB workaround: must disable low power watermarks for at least
@@ -3749,10 +3636,14 @@ ilk_update_sprite_wm(struct drm_plane *plane,
3749 * 3636 *
3750 * WaCxSRDisabledForSpriteScaling:ivb 3637 * WaCxSRDisabledForSpriteScaling:ivb
3751 */ 3638 */
3752 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) 3639 if (cstate->disable_lp_wm) {
3753 intel_wait_for_vblank(dev, intel_plane->pipe); 3640 ilk_disable_lp_wm(crtc->dev);
3641 intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
3642 }
3643
3644 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
3754 3645
3755 ilk_update_wm(crtc); 3646 ilk_program_watermarks(dev_priv);
3756} 3647}
3757 3648
3758static void skl_pipe_wm_active_state(uint32_t val, 3649static void skl_pipe_wm_active_state(uint32_t val,
@@ -3805,7 +3696,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3805 struct drm_i915_private *dev_priv = dev->dev_private; 3696 struct drm_i915_private *dev_priv = dev->dev_private;
3806 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 3697 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3807 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3698 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3808 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active; 3699 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3700 struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
3809 enum pipe pipe = intel_crtc->pipe; 3701 enum pipe pipe = intel_crtc->pipe;
3810 int level, i, max_level; 3702 int level, i, max_level;
3811 uint32_t temp; 3703 uint32_t temp;
@@ -3849,6 +3741,8 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3849 3741
3850 temp = hw->plane_trans[pipe][PLANE_CURSOR]; 3742 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3851 skl_pipe_wm_active_state(temp, active, true, true, i, 0); 3743 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3744
3745 intel_crtc->wm.active.skl = *active;
3852} 3746}
3853 3747
3854void skl_wm_get_hw_state(struct drm_device *dev) 3748void skl_wm_get_hw_state(struct drm_device *dev)
@@ -3868,9 +3762,10 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3868 struct drm_i915_private *dev_priv = dev->dev_private; 3762 struct drm_i915_private *dev_priv = dev->dev_private;
3869 struct ilk_wm_values *hw = &dev_priv->wm.hw; 3763 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3870 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3764 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3871 struct intel_pipe_wm *active = &intel_crtc->wm.active; 3765 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3766 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
3872 enum pipe pipe = intel_crtc->pipe; 3767 enum pipe pipe = intel_crtc->pipe;
3873 static const unsigned int wm0_pipe_reg[] = { 3768 static const i915_reg_t wm0_pipe_reg[] = {
3874 [PIPE_A] = WM0_PIPEA_ILK, 3769 [PIPE_A] = WM0_PIPEA_ILK,
3875 [PIPE_B] = WM0_PIPEB_ILK, 3770 [PIPE_B] = WM0_PIPEB_ILK,
3876 [PIPE_C] = WM0_PIPEC_IVB, 3771 [PIPE_C] = WM0_PIPEC_IVB,
@@ -3907,6 +3802,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3907 for (level = 0; level <= max_level; level++) 3802 for (level = 0; level <= max_level; level++)
3908 active->wm[level].enable = true; 3803 active->wm[level].enable = true;
3909 } 3804 }
3805
3806 intel_crtc->wm.active.ilk = *active;
3910} 3807}
3911 3808
3912#define _FW_WM(value, plane) \ 3809#define _FW_WM(value, plane) \
@@ -4132,21 +4029,6 @@ void intel_update_watermarks(struct drm_crtc *crtc)
4132 dev_priv->display.update_wm(crtc); 4029 dev_priv->display.update_wm(crtc);
4133} 4030}
4134 4031
4135void intel_update_sprite_watermarks(struct drm_plane *plane,
4136 struct drm_crtc *crtc,
4137 uint32_t sprite_width,
4138 uint32_t sprite_height,
4139 int pixel_size,
4140 bool enabled, bool scaled)
4141{
4142 struct drm_i915_private *dev_priv = plane->dev->dev_private;
4143
4144 if (dev_priv->display.update_sprite_wm)
4145 dev_priv->display.update_sprite_wm(plane, crtc,
4146 sprite_width, sprite_height,
4147 pixel_size, enabled, scaled);
4148}
4149
4150/** 4032/**
4151 * Lock protecting IPS related data structures 4033 * Lock protecting IPS related data structures
4152 */ 4034 */
@@ -4414,7 +4296,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4414 struct drm_i915_private *dev_priv = dev->dev_private; 4296 struct drm_i915_private *dev_priv = dev->dev_private;
4415 4297
4416 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4298 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4417 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) 4299 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
4418 return; 4300 return;
4419 4301
4420 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4302 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4689,7 +4571,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4689 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 4571 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4690 4572
4691 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 4573 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4692 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) { 4574 if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
4575 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
4693 ret = sandybridge_pcode_read(dev_priv, 4576 ret = sandybridge_pcode_read(dev_priv,
4694 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 4577 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4695 &ddcc_status); 4578 &ddcc_status);
@@ -4701,7 +4584,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4701 dev_priv->rps.max_freq); 4584 dev_priv->rps.max_freq);
4702 } 4585 }
4703 4586
4704 if (IS_SKYLAKE(dev)) { 4587 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
4705 /* Store the frequency values in 16.66 MHZ units, which is 4588 /* Store the frequency values in 16.66 MHZ units, which is
4706 the natural hardware unit for SKL */ 4589 the natural hardware unit for SKL */
4707 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 4590 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4738,7 +4621,7 @@ static void gen9_enable_rps(struct drm_device *dev)
4738 gen6_init_rps_frequencies(dev); 4621 gen6_init_rps_frequencies(dev);
4739 4622
4740 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4623 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4741 if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) { 4624 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
4742 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4625 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4743 return; 4626 return;
4744 } 4627 }
@@ -4783,7 +4666,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4783 4666
4784 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 4667 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4785 if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && 4668 if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
4786 (INTEL_REVID(dev) <= SKL_REVID_E0))) 4669 IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
4787 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 4670 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4788 else 4671 else
4789 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 4672 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4807,8 +4690,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
4807 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4690 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4808 "on" : "off"); 4691 "on" : "off");
4809 /* WaRsUseTimeoutMode */ 4692 /* WaRsUseTimeoutMode */
4810 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) || 4693 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
4811 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) { 4694 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
4812 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 4695 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4813 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4696 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4814 GEN7_RC_CTL_TO_MODE | 4697 GEN7_RC_CTL_TO_MODE |
@@ -4824,8 +4707,9 @@ static void gen9_enable_rc6(struct drm_device *dev)
4824 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 4707 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4825 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 4708 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4826 */ 4709 */
4827 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 4710 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
4828 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) 4711 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
4712 IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
4829 I915_WRITE(GEN9_PG_ENABLE, 0); 4713 I915_WRITE(GEN9_PG_ENABLE, 0);
4830 else 4714 else
4831 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4715 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -5056,7 +4940,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5056 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 4940 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5057 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 4941 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5058 4942
5059 if (IS_SKYLAKE(dev)) { 4943 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5060 /* Convert GT frequency to 50 HZ units */ 4944 /* Convert GT frequency to 50 HZ units */
5061 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 4945 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5062 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 4946 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5074,7 +4958,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5074 int diff = max_gpu_freq - gpu_freq; 4958 int diff = max_gpu_freq - gpu_freq;
5075 unsigned int ia_freq = 0, ring_freq = 0; 4959 unsigned int ia_freq = 0, ring_freq = 0;
5076 4960
5077 if (IS_SKYLAKE(dev)) { 4961 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5078 /* 4962 /*
5079 * ring_freq = 2 * GT. ring_freq is in 100MHz units 4963 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5080 * No floor required for ring frequency on SKL. 4964 * No floor required for ring frequency on SKL.
@@ -6202,7 +6086,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6202 } else if (INTEL_INFO(dev)->gen >= 9) { 6086 } else if (INTEL_INFO(dev)->gen >= 9) {
6203 gen9_enable_rc6(dev); 6087 gen9_enable_rc6(dev);
6204 gen9_enable_rps(dev); 6088 gen9_enable_rps(dev);
6205 if (IS_SKYLAKE(dev)) 6089 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
6206 __gen6_update_ring_freq(dev); 6090 __gen6_update_ring_freq(dev);
6207 } else if (IS_BROADWELL(dev)) { 6091 } else if (IS_BROADWELL(dev)) {
6208 gen8_enable_rps(dev); 6092 gen8_enable_rps(dev);
@@ -7058,7 +6942,6 @@ void intel_init_pm(struct drm_device *dev)
7058 dev_priv->display.init_clock_gating = 6942 dev_priv->display.init_clock_gating =
7059 bxt_init_clock_gating; 6943 bxt_init_clock_gating;
7060 dev_priv->display.update_wm = skl_update_wm; 6944 dev_priv->display.update_wm = skl_update_wm;
7061 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
7062 } else if (HAS_PCH_SPLIT(dev)) { 6945 } else if (HAS_PCH_SPLIT(dev)) {
7063 ilk_setup_wm_latency(dev); 6946 ilk_setup_wm_latency(dev);
7064 6947
@@ -7067,7 +6950,7 @@ void intel_init_pm(struct drm_device *dev)
7067 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && 6950 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7068 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 6951 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7069 dev_priv->display.update_wm = ilk_update_wm; 6952 dev_priv->display.update_wm = ilk_update_wm;
7070 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; 6953 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7071 } else { 6954 } else {
7072 DRM_DEBUG_KMS("Failed to read display plane latency. " 6955 DRM_DEBUG_KMS("Failed to read display plane latency. "
7073 "Disable CxSR\n"); 6956 "Disable CxSR\n");
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 213581c215b3..bc5ea2a6cf4c 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -80,7 +80,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
80 struct drm_i915_private *dev_priv = dev->dev_private; 80 struct drm_i915_private *dev_priv = dev->dev_private;
81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
83 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 83 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
84 uint32_t *data = (uint32_t *) vsc_psr; 84 uint32_t *data = (uint32_t *) vsc_psr;
85 unsigned int i; 85 unsigned int i;
86 86
@@ -151,13 +151,31 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
151 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); 151 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
152} 152}
153 153
154static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
155 enum port port)
156{
157 if (INTEL_INFO(dev_priv)->gen >= 9)
158 return DP_AUX_CH_CTL(port);
159 else
160 return EDP_PSR_AUX_CTL;
161}
162
163static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
164 enum port port, int index)
165{
166 if (INTEL_INFO(dev_priv)->gen >= 9)
167 return DP_AUX_CH_DATA(port, index);
168 else
169 return EDP_PSR_AUX_DATA(index);
170}
171
154static void hsw_psr_enable_sink(struct intel_dp *intel_dp) 172static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
155{ 173{
156 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 174 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
157 struct drm_device *dev = dig_port->base.base.dev; 175 struct drm_device *dev = dig_port->base.base.dev;
158 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = dev->dev_private;
159 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
160 uint32_t aux_data_reg, aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
161 int precharge = 0x3; 179 int precharge = 0x3;
162 static const uint8_t aux_msg[] = { 180 static const uint8_t aux_msg[] = {
163 [0] = DP_AUX_NATIVE_WRITE << 4, 181 [0] = DP_AUX_NATIVE_WRITE << 4,
@@ -166,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
166 [3] = 1 - 1, 184 [3] = 1 - 1,
167 [4] = DP_SET_POWER_D0, 185 [4] = DP_SET_POWER_D0,
168 }; 186 };
187 enum port port = dig_port->port;
169 int i; 188 int i;
170 189
171 BUILD_BUG_ON(sizeof(aux_msg) > 20); 190 BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -181,14 +200,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
181 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, 200 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
182 DP_AUX_FRAME_SYNC_ENABLE); 201 DP_AUX_FRAME_SYNC_ENABLE);
183 202
184 aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ? 203 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
185 DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
186 aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
187 DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
188 204
189 /* Setup AUX registers */ 205 /* Setup AUX registers */
190 for (i = 0; i < sizeof(aux_msg); i += 4) 206 for (i = 0; i < sizeof(aux_msg); i += 4)
191 I915_WRITE(aux_data_reg + i, 207 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
192 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 208 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
193 209
194 if (INTEL_INFO(dev)->gen >= 9) { 210 if (INTEL_INFO(dev)->gen >= 9) {
@@ -267,16 +283,11 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
267 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 283 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
268 284
269 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 285 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
270 /* It doesn't mean we shouldn't send TPS patters, so let's
271 send the minimal TP1 possible and skip TP2. */
272 val |= EDP_PSR_TP1_TIME_100us;
273 val |= EDP_PSR_TP2_TP3_TIME_0us;
274 val |= EDP_PSR_SKIP_AUX_EXIT;
275 /* Sink should be able to train with the 5 or 6 idle patterns */ 286 /* Sink should be able to train with the 5 or 6 idle patterns */
276 idle_frames += 4; 287 idle_frames += 4;
277 } 288 }
278 289
279 I915_WRITE(EDP_PSR_CTL(dev), val | 290 I915_WRITE(EDP_PSR_CTL, val |
280 (IS_BROADWELL(dev) ? 0 : link_entry_time) | 291 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
281 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 292 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
282 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 293 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -340,7 +351,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
340 struct drm_device *dev = intel_dig_port->base.base.dev; 351 struct drm_device *dev = intel_dig_port->base.base.dev;
341 struct drm_i915_private *dev_priv = dev->dev_private; 352 struct drm_i915_private *dev_priv = dev->dev_private;
342 353
343 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); 354 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
344 WARN_ON(dev_priv->psr.active); 355 WARN_ON(dev_priv->psr.active);
345 lockdep_assert_held(&dev_priv->psr.lock); 356 lockdep_assert_held(&dev_priv->psr.lock);
346 357
@@ -404,7 +415,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
404 } 415 }
405 416
406 /* Avoid continuous PSR exit by masking memup and hpd */ 417 /* Avoid continuous PSR exit by masking memup and hpd */
407 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | 418 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
408 EDP_PSR_DEBUG_MASK_HPD); 419 EDP_PSR_DEBUG_MASK_HPD);
409 420
410 /* Enable PSR on the panel */ 421 /* Enable PSR on the panel */
@@ -427,6 +438,19 @@ void intel_psr_enable(struct intel_dp *intel_dp)
427 vlv_psr_enable_source(intel_dp); 438 vlv_psr_enable_source(intel_dp);
428 } 439 }
429 440
441 /*
442 * FIXME: Activation should happen immediately since this function
443 * is just called after pipe is fully trained and enabled.
444 * However on every platform we face issues when first activation
445 * follows a modeset so quickly.
446 * - On VLV/CHV we get bank screen on first activation
447 * - On HSW/BDW we get a recoverable frozen screen until next
448 * exit-activate sequence.
449 */
450 if (INTEL_INFO(dev)->gen < 9)
451 schedule_delayed_work(&dev_priv->psr.work,
452 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
453
430 dev_priv->psr.enabled = intel_dp; 454 dev_priv->psr.enabled = intel_dp;
431unlock: 455unlock:
432 mutex_unlock(&dev_priv->psr.lock); 456 mutex_unlock(&dev_priv->psr.lock);
@@ -466,17 +490,17 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
466 struct drm_i915_private *dev_priv = dev->dev_private; 490 struct drm_i915_private *dev_priv = dev->dev_private;
467 491
468 if (dev_priv->psr.active) { 492 if (dev_priv->psr.active) {
469 I915_WRITE(EDP_PSR_CTL(dev), 493 I915_WRITE(EDP_PSR_CTL,
470 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); 494 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
471 495
472 /* Wait till PSR is idle */ 496 /* Wait till PSR is idle */
473 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & 497 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
474 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 498 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
475 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 499 DRM_ERROR("Timed out waiting for PSR Idle State\n");
476 500
477 dev_priv->psr.active = false; 501 dev_priv->psr.active = false;
478 } else { 502 } else {
479 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); 503 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
480 } 504 }
481} 505}
482 506
@@ -523,7 +547,7 @@ static void intel_psr_work(struct work_struct *work)
523 * and be ready for re-enable. 547 * and be ready for re-enable.
524 */ 548 */
525 if (HAS_DDI(dev_priv->dev)) { 549 if (HAS_DDI(dev_priv->dev)) {
526 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & 550 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
527 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { 551 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
528 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 552 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
529 return; 553 return;
@@ -566,11 +590,11 @@ static void intel_psr_exit(struct drm_device *dev)
566 return; 590 return;
567 591
568 if (HAS_DDI(dev)) { 592 if (HAS_DDI(dev)) {
569 val = I915_READ(EDP_PSR_CTL(dev)); 593 val = I915_READ(EDP_PSR_CTL);
570 594
571 WARN_ON(!(val & EDP_PSR_ENABLE)); 595 WARN_ON(!(val & EDP_PSR_ENABLE));
572 596
573 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); 597 I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
574 } else { 598 } else {
575 val = I915_READ(VLV_PSRCTL(pipe)); 599 val = I915_READ(VLV_PSRCTL(pipe));
576 600
@@ -700,7 +724,6 @@ void intel_psr_flush(struct drm_device *dev,
700 struct drm_i915_private *dev_priv = dev->dev_private; 724 struct drm_i915_private *dev_priv = dev->dev_private;
701 struct drm_crtc *crtc; 725 struct drm_crtc *crtc;
702 enum pipe pipe; 726 enum pipe pipe;
703 int delay_ms = HAS_DDI(dev) ? 100 : 500;
704 727
705 mutex_lock(&dev_priv->psr.lock); 728 mutex_lock(&dev_priv->psr.lock);
706 if (!dev_priv->psr.enabled) { 729 if (!dev_priv->psr.enabled) {
@@ -735,8 +758,9 @@ void intel_psr_flush(struct drm_device *dev,
735 } 758 }
736 759
737 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) 760 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
738 schedule_delayed_work(&dev_priv->psr.work, 761 if (!work_busy(&dev_priv->psr.work.work))
739 msecs_to_jiffies(delay_ms)); 762 schedule_delayed_work(&dev_priv->psr.work,
763 msecs_to_jiffies(100));
740 mutex_unlock(&dev_priv->psr.lock); 764 mutex_unlock(&dev_priv->psr.lock);
741} 765}
742 766
@@ -751,6 +775,9 @@ void intel_psr_init(struct drm_device *dev)
751{ 775{
752 struct drm_i915_private *dev_priv = dev->dev_private; 776 struct drm_i915_private *dev_priv = dev->dev_private;
753 777
778 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
779 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
780
754 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); 781 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
755 mutex_init(&dev_priv->psr.lock); 782 mutex_init(&dev_priv->psr.lock);
756} 783}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..57d78f264b53 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -481,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
481{ 481{
482 struct drm_device *dev = ring->dev; 482 struct drm_device *dev = ring->dev;
483 struct drm_i915_private *dev_priv = ring->dev->dev_private; 483 struct drm_i915_private *dev_priv = ring->dev->dev_private;
484 u32 mmio = 0; 484 i915_reg_t mmio;
485 485
486 /* The ring status page addresses are no longer next to the rest of 486 /* The ring status page addresses are no longer next to the rest of
487 * the ring registers as of gen7. 487 * the ring registers as of gen7.
@@ -524,7 +524,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
524 * invalidating the TLB? 524 * invalidating the TLB?
525 */ 525 */
526 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 526 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
527 u32 reg = RING_INSTPM(ring->mmio_base); 527 i915_reg_t reg = RING_INSTPM(ring->mmio_base);
528 528
529 /* ring should be idle before issuing a sync flush*/ 529 /* ring should be idle before issuing a sync flush*/
530 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 530 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
@@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
733 733
734 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); 734 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
735 for (i = 0; i < w->count; i++) { 735 for (i = 0; i < w->count; i++) {
736 intel_ring_emit(ring, w->reg[i].addr); 736 intel_ring_emit_reg(ring, w->reg[i].addr);
737 intel_ring_emit(ring, w->reg[i].value); 737 intel_ring_emit(ring, w->reg[i].value);
738 } 738 }
739 intel_ring_emit(ring, MI_NOOP); 739 intel_ring_emit(ring, MI_NOOP);
@@ -766,7 +766,8 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
766} 766}
767 767
768static int wa_add(struct drm_i915_private *dev_priv, 768static int wa_add(struct drm_i915_private *dev_priv,
769 const u32 addr, const u32 mask, const u32 val) 769 i915_reg_t addr,
770 const u32 mask, const u32 val)
770{ 771{
771 const u32 idx = dev_priv->workarounds.count; 772 const u32 idx = dev_priv->workarounds.count;
772 773
@@ -924,17 +925,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
924 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 925 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
925 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 926 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
926 927
927 if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 || 928 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
928 INTEL_REVID(dev) == SKL_REVID_B0)) || 929 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
929 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { 930 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
930 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
931 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 931 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
932 GEN9_DG_MIRROR_FIX_ENABLE); 932 GEN9_DG_MIRROR_FIX_ENABLE);
933 }
934 933
935 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || 934 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
936 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { 935 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
937 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 936 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
938 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 937 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
939 GEN9_RHWO_OPTIMIZATION_DISABLE); 938 GEN9_RHWO_OPTIMIZATION_DISABLE);
940 /* 939 /*
@@ -944,12 +943,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
944 */ 943 */
945 } 944 }
946 945
947 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || 946 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
948 IS_BROXTON(dev)) { 947 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
949 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
950 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 948 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
951 GEN9_ENABLE_YV12_BUGFIX); 949 GEN9_ENABLE_YV12_BUGFIX);
952 }
953 950
954 /* Wa4x4STCOptimizationDisable:skl,bxt */ 951 /* Wa4x4STCOptimizationDisable:skl,bxt */
955 /* WaDisablePartialResolveInVc:skl,bxt */ 952 /* WaDisablePartialResolveInVc:skl,bxt */
@@ -961,24 +958,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
961 GEN9_CCS_TLB_PREFETCH_ENABLE); 958 GEN9_CCS_TLB_PREFETCH_ENABLE);
962 959
963 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 960 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
964 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) || 961 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
965 (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) 962 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
966 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 963 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
967 PIXEL_MASK_CAMMING_DISABLE); 964 PIXEL_MASK_CAMMING_DISABLE);
968 965
969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 966 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 967 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
971 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) || 968 if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
972 (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0)) 969 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 970 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 971 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
975 972
976 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ 973 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
977 if (IS_SKYLAKE(dev) || 974 if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
978 (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
979 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 975 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
980 GEN8_SAMPLER_POWER_BYPASS_DIS); 976 GEN8_SAMPLER_POWER_BYPASS_DIS);
981 }
982 977
983 /* WaDisableSTUnitPowerOptimization:skl,bxt */ 978 /* WaDisableSTUnitPowerOptimization:skl,bxt */
984 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 979 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
@@ -1038,7 +1033,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1038 if (ret) 1033 if (ret)
1039 return ret; 1034 return ret;
1040 1035
1041 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 1036 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
1042 /* WaDisableHDCInvalidation:skl */ 1037 /* WaDisableHDCInvalidation:skl */
1043 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 1038 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
1044 BDW_DISABLE_HDC_INVALIDATION); 1039 BDW_DISABLE_HDC_INVALIDATION);
@@ -1051,23 +1046,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1051 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1046 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1052 * involving this register should also be added to WA batch as required. 1047 * involving this register should also be added to WA batch as required.
1053 */ 1048 */
1054 if (INTEL_REVID(dev) <= SKL_REVID_E0) 1049 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
1055 /* WaDisableLSQCROPERFforOCL:skl */ 1050 /* WaDisableLSQCROPERFforOCL:skl */
1056 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1051 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1057 GEN8_LQSC_RO_PERF_DIS); 1052 GEN8_LQSC_RO_PERF_DIS);
1058 1053
1059 /* WaEnableGapsTsvCreditFix:skl */ 1054 /* WaEnableGapsTsvCreditFix:skl */
1060 if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { 1055 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
1061 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1056 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1062 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1057 GEN9_GAPS_TSV_CREDIT_DISABLE));
1063 } 1058 }
1064 1059
1065 /* WaDisablePowerCompilerClockGating:skl */ 1060 /* WaDisablePowerCompilerClockGating:skl */
1066 if (INTEL_REVID(dev) == SKL_REVID_B0) 1061 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
1067 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1062 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1068 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1063 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1069 1064
1070 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 1065 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
1071 /* 1066 /*
1072 *Use Force Non-Coherent whenever executing a 3D context. This 1067 *Use Force Non-Coherent whenever executing a 3D context. This
1073 * is a workaround for a possible hang in the unlikely event 1068 * is a workaround for a possible hang in the unlikely event
@@ -1078,19 +1073,17 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1078 HDC_FORCE_NON_COHERENT); 1073 HDC_FORCE_NON_COHERENT);
1079 } 1074 }
1080 1075
1081 if (INTEL_REVID(dev) == SKL_REVID_C0 || 1076 /* WaBarrierPerformanceFixDisable:skl */
1082 INTEL_REVID(dev) == SKL_REVID_D0) 1077 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
1083 /* WaBarrierPerformanceFixDisable:skl */
1084 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1078 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1085 HDC_FENCE_DEST_SLM_DISABLE | 1079 HDC_FENCE_DEST_SLM_DISABLE |
1086 HDC_BARRIER_PERFORMANCE_DISABLE); 1080 HDC_BARRIER_PERFORMANCE_DISABLE);
1087 1081
1088 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1082 /* WaDisableSbeCacheDispatchPortSharing:skl */
1089 if (INTEL_REVID(dev) <= SKL_REVID_F0) { 1083 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
1090 WA_SET_BIT_MASKED( 1084 WA_SET_BIT_MASKED(
1091 GEN7_HALF_SLICE_CHICKEN1, 1085 GEN7_HALF_SLICE_CHICKEN1,
1092 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1086 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1093 }
1094 1087
1095 return skl_tune_iz_hashing(ring); 1088 return skl_tune_iz_hashing(ring);
1096} 1089}
@@ -1107,11 +1100,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1107 1100
1108 /* WaStoreMultiplePTEenable:bxt */ 1101 /* WaStoreMultiplePTEenable:bxt */
1109 /* This is a requirement according to Hardware specification */ 1102 /* This is a requirement according to Hardware specification */
1110 if (INTEL_REVID(dev) == BXT_REVID_A0) 1103 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1111 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1104 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1112 1105
1113 /* WaSetClckGatingDisableMedia:bxt */ 1106 /* WaSetClckGatingDisableMedia:bxt */
1114 if (INTEL_REVID(dev) == BXT_REVID_A0) { 1107 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1115 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1108 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1116 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1109 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1117 } 1110 }
@@ -1121,7 +1114,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1121 STALL_DOP_GATING_DISABLE); 1114 STALL_DOP_GATING_DISABLE);
1122 1115
1123 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1116 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1124 if (INTEL_REVID(dev) <= BXT_REVID_B0) { 1117 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
1125 WA_SET_BIT_MASKED( 1118 WA_SET_BIT_MASKED(
1126 GEN7_HALF_SLICE_CHICKEN1, 1119 GEN7_HALF_SLICE_CHICKEN1,
1127 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1120 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1319,11 +1312,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1319 return ret; 1312 return ret;
1320 1313
1321 for_each_ring(useless, dev_priv, i) { 1314 for_each_ring(useless, dev_priv, i) {
1322 u32 mbox_reg = signaller->semaphore.mbox.signal[i]; 1315 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
1323 if (mbox_reg != GEN6_NOSYNC) { 1316
1317 if (i915_mmio_reg_valid(mbox_reg)) {
1324 u32 seqno = i915_gem_request_get_seqno(signaller_req); 1318 u32 seqno = i915_gem_request_get_seqno(signaller_req);
1319
1325 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 1320 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
1326 intel_ring_emit(signaller, mbox_reg); 1321 intel_ring_emit_reg(signaller, mbox_reg);
1327 intel_ring_emit(signaller, seqno); 1322 intel_ring_emit(signaller, seqno);
1328 } 1323 }
1329 } 1324 }
@@ -2004,11 +1999,35 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
2004 1999
2005void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2000void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2006{ 2001{
2007 iounmap(ringbuf->virtual_start); 2002 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
2003 vunmap(ringbuf->virtual_start);
2004 else
2005 iounmap(ringbuf->virtual_start);
2008 ringbuf->virtual_start = NULL; 2006 ringbuf->virtual_start = NULL;
2009 i915_gem_object_ggtt_unpin(ringbuf->obj); 2007 i915_gem_object_ggtt_unpin(ringbuf->obj);
2010} 2008}
2011 2009
2010static u32 *vmap_obj(struct drm_i915_gem_object *obj)
2011{
2012 struct sg_page_iter sg_iter;
2013 struct page **pages;
2014 void *addr;
2015 int i;
2016
2017 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
2018 if (pages == NULL)
2019 return NULL;
2020
2021 i = 0;
2022 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
2023 pages[i++] = sg_page_iter_page(&sg_iter);
2024
2025 addr = vmap(pages, i, 0, PAGE_KERNEL);
2026 drm_free_large(pages);
2027
2028 return addr;
2029}
2030
2012int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 2031int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2013 struct intel_ringbuffer *ringbuf) 2032 struct intel_ringbuffer *ringbuf)
2014{ 2033{
@@ -2016,21 +2035,39 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2016 struct drm_i915_gem_object *obj = ringbuf->obj; 2035 struct drm_i915_gem_object *obj = ringbuf->obj;
2017 int ret; 2036 int ret;
2018 2037
2019 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2038 if (HAS_LLC(dev_priv) && !obj->stolen) {
2020 if (ret) 2039 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
2021 return ret; 2040 if (ret)
2041 return ret;
2022 2042
2023 ret = i915_gem_object_set_to_gtt_domain(obj, true); 2043 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2024 if (ret) { 2044 if (ret) {
2025 i915_gem_object_ggtt_unpin(obj); 2045 i915_gem_object_ggtt_unpin(obj);
2026 return ret; 2046 return ret;
2027 } 2047 }
2048
2049 ringbuf->virtual_start = vmap_obj(obj);
2050 if (ringbuf->virtual_start == NULL) {
2051 i915_gem_object_ggtt_unpin(obj);
2052 return -ENOMEM;
2053 }
2054 } else {
2055 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
2056 if (ret)
2057 return ret;
2028 2058
2029 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + 2059 ret = i915_gem_object_set_to_gtt_domain(obj, true);
2030 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 2060 if (ret) {
2031 if (ringbuf->virtual_start == NULL) { 2061 i915_gem_object_ggtt_unpin(obj);
2032 i915_gem_object_ggtt_unpin(obj); 2062 return ret;
2033 return -EINVAL; 2063 }
2064
2065 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
2066 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
2067 if (ringbuf->virtual_start == NULL) {
2068 i915_gem_object_ggtt_unpin(obj);
2069 return -EINVAL;
2070 }
2034 } 2071 }
2035 2072
2036 return 0; 2073 return 0;
@@ -2070,10 +2107,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2070 int ret; 2107 int ret;
2071 2108
2072 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2109 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2073 if (ring == NULL) 2110 if (ring == NULL) {
2111 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
2112 engine->name);
2074 return ERR_PTR(-ENOMEM); 2113 return ERR_PTR(-ENOMEM);
2114 }
2075 2115
2076 ring->ring = engine; 2116 ring->ring = engine;
2117 list_add(&ring->link, &engine->buffers);
2077 2118
2078 ring->size = size; 2119 ring->size = size;
2079 /* Workaround an erratum on the i830 which causes a hang if 2120 /* Workaround an erratum on the i830 which causes a hang if
@@ -2089,8 +2130,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2089 2130
2090 ret = intel_alloc_ringbuffer_obj(engine->dev, ring); 2131 ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
2091 if (ret) { 2132 if (ret) {
2092 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", 2133 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2093 engine->name, ret); 2134 engine->name, ret);
2135 list_del(&ring->link);
2094 kfree(ring); 2136 kfree(ring);
2095 return ERR_PTR(ret); 2137 return ERR_PTR(ret);
2096 } 2138 }
@@ -2102,6 +2144,7 @@ void
2102intel_ringbuffer_free(struct intel_ringbuffer *ring) 2144intel_ringbuffer_free(struct intel_ringbuffer *ring)
2103{ 2145{
2104 intel_destroy_ringbuffer_obj(ring); 2146 intel_destroy_ringbuffer_obj(ring);
2147 list_del(&ring->link);
2105 kfree(ring); 2148 kfree(ring);
2106} 2149}
2107 2150
@@ -2117,6 +2160,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2117 INIT_LIST_HEAD(&ring->active_list); 2160 INIT_LIST_HEAD(&ring->active_list);
2118 INIT_LIST_HEAD(&ring->request_list); 2161 INIT_LIST_HEAD(&ring->request_list);
2119 INIT_LIST_HEAD(&ring->execlist_queue); 2162 INIT_LIST_HEAD(&ring->execlist_queue);
2163 INIT_LIST_HEAD(&ring->buffers);
2120 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2164 i915_gem_batch_pool_init(dev, &ring->batch_pool);
2121 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 2165 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
2122 2166
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49fa41dc0eb6..5d1eb206151d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,6 +100,7 @@ struct intel_ringbuffer {
100 void __iomem *virtual_start; 100 void __iomem *virtual_start;
101 101
102 struct intel_engine_cs *ring; 102 struct intel_engine_cs *ring;
103 struct list_head link;
103 104
104 u32 head; 105 u32 head;
105 u32 tail; 106 u32 tail;
@@ -157,6 +158,7 @@ struct intel_engine_cs {
157 u32 mmio_base; 158 u32 mmio_base;
158 struct drm_device *dev; 159 struct drm_device *dev;
159 struct intel_ringbuffer *buffer; 160 struct intel_ringbuffer *buffer;
161 struct list_head buffers;
160 162
161 /* 163 /*
162 * A pool of objects to use as shadow copies of client batch buffers 164 * A pool of objects to use as shadow copies of client batch buffers
@@ -247,7 +249,7 @@ struct intel_engine_cs {
247 /* our mbox written by others */ 249 /* our mbox written by others */
248 u32 wait[I915_NUM_RINGS]; 250 u32 wait[I915_NUM_RINGS];
249 /* mboxes this ring signals to */ 251 /* mboxes this ring signals to */
250 u32 signal[I915_NUM_RINGS]; 252 i915_reg_t signal[I915_NUM_RINGS];
251 } mbox; 253 } mbox;
252 u64 signal_ggtt[I915_NUM_RINGS]; 254 u64 signal_ggtt[I915_NUM_RINGS];
253 }; 255 };
@@ -441,6 +443,11 @@ static inline void intel_ring_emit(struct intel_engine_cs *ring,
441 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); 443 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
442 ringbuf->tail += 4; 444 ringbuf->tail += 4;
443} 445}
446static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
447 i915_reg_t reg)
448{
449 intel_ring_emit(ring, i915_mmio_reg_offset(reg));
450}
444static inline void intel_ring_advance(struct intel_engine_cs *ring) 451static inline void intel_ring_advance(struct intel_engine_cs *ring)
445{ 452{
446 struct intel_ringbuffer *ringbuf = ring->buffer; 453 struct intel_ringbuffer *ringbuf = ring->buffer;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 13f14208d8aa..afca6c940b9a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,9 +49,6 @@
49 * present for a given platform. 49 * present for a given platform.
50 */ 50 */
51 51
52#define GEN9_ENABLE_DC5(dev) 0
53#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
54
55#define for_each_power_well(i, power_well, domain_mask, power_domains) \ 52#define for_each_power_well(i, power_well, domain_mask, power_domains) \
56 for (i = 0; \ 53 for (i = 0; \
57 i < (power_domains)->power_well_count && \ 54 i < (power_domains)->power_well_count && \
@@ -244,12 +241,6 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
244 gen8_irq_power_well_post_enable(dev_priv, 241 gen8_irq_power_well_post_enable(dev_priv,
245 1 << PIPE_C | 1 << PIPE_B); 242 1 << PIPE_C | 1 << PIPE_B);
246 } 243 }
247
248 if (power_well->data == SKL_DISP_PW_1) {
249 if (!dev_priv->power_domains.initializing)
250 intel_prepare_ddi(dev);
251 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
252 }
253} 244}
254 245
255static void hsw_set_power_well(struct drm_i915_private *dev_priv, 246static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -292,58 +283,38 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
292 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 283 BIT(POWER_DOMAIN_TRANSCODER_C) | \
293 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 284 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
294 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 285 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
295 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 286 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
296 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 287 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
297 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 288 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
298 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 289 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
299 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
300 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
301 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
302 BIT(POWER_DOMAIN_AUX_B) | \ 290 BIT(POWER_DOMAIN_AUX_B) | \
303 BIT(POWER_DOMAIN_AUX_C) | \ 291 BIT(POWER_DOMAIN_AUX_C) | \
304 BIT(POWER_DOMAIN_AUX_D) | \ 292 BIT(POWER_DOMAIN_AUX_D) | \
305 BIT(POWER_DOMAIN_AUDIO) | \ 293 BIT(POWER_DOMAIN_AUDIO) | \
306 BIT(POWER_DOMAIN_VGA) | \ 294 BIT(POWER_DOMAIN_VGA) | \
307 BIT(POWER_DOMAIN_INIT)) 295 BIT(POWER_DOMAIN_INIT))
308#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
309 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
310 BIT(POWER_DOMAIN_PLLS) | \
311 BIT(POWER_DOMAIN_PIPE_A) | \
312 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
313 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
314 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
315 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
316 BIT(POWER_DOMAIN_AUX_A) | \
317 BIT(POWER_DOMAIN_INIT))
318#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ 296#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
319 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 297 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
320 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 298 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
321 BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
322 BIT(POWER_DOMAIN_INIT)) 299 BIT(POWER_DOMAIN_INIT))
323#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ 300#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
324 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 301 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
325 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
326 BIT(POWER_DOMAIN_INIT)) 302 BIT(POWER_DOMAIN_INIT))
327#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \ 303#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
328 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 304 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
329 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
330 BIT(POWER_DOMAIN_INIT)) 305 BIT(POWER_DOMAIN_INIT))
331#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \ 306#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
332 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 307 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
333 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
334 BIT(POWER_DOMAIN_INIT)) 308 BIT(POWER_DOMAIN_INIT))
335#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \ 309#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
336 SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 310 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
337 BIT(POWER_DOMAIN_PLLS) | \ 311 BIT(POWER_DOMAIN_MODESET) | \
312 BIT(POWER_DOMAIN_AUX_A) | \
338 BIT(POWER_DOMAIN_INIT)) 313 BIT(POWER_DOMAIN_INIT))
339#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ 314#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
340 (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 315 (POWER_DOMAIN_MASK & ~( \
341 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 316 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
342 SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \ 317 SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
343 SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
344 SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
345 SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
346 SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
347 BIT(POWER_DOMAIN_INIT)) 318 BIT(POWER_DOMAIN_INIT))
348 319
349#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 320#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
@@ -354,25 +325,28 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
354 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 325 BIT(POWER_DOMAIN_TRANSCODER_C) | \
355 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 326 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
356 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 327 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
357 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 328 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
358 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 329 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
359 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
360 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
361 BIT(POWER_DOMAIN_AUX_B) | \ 330 BIT(POWER_DOMAIN_AUX_B) | \
362 BIT(POWER_DOMAIN_AUX_C) | \ 331 BIT(POWER_DOMAIN_AUX_C) | \
363 BIT(POWER_DOMAIN_AUDIO) | \ 332 BIT(POWER_DOMAIN_AUDIO) | \
364 BIT(POWER_DOMAIN_VGA) | \ 333 BIT(POWER_DOMAIN_VGA) | \
334 BIT(POWER_DOMAIN_GMBUS) | \
365 BIT(POWER_DOMAIN_INIT)) 335 BIT(POWER_DOMAIN_INIT))
366#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ 336#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
367 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 337 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
368 BIT(POWER_DOMAIN_PIPE_A) | \ 338 BIT(POWER_DOMAIN_PIPE_A) | \
369 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 339 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
370 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 340 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
371 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 341 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
372 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
373 BIT(POWER_DOMAIN_AUX_A) | \ 342 BIT(POWER_DOMAIN_AUX_A) | \
374 BIT(POWER_DOMAIN_PLLS) | \ 343 BIT(POWER_DOMAIN_PLLS) | \
375 BIT(POWER_DOMAIN_INIT)) 344 BIT(POWER_DOMAIN_INIT))
345#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
346 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
347 BIT(POWER_DOMAIN_MODESET) | \
348 BIT(POWER_DOMAIN_AUX_A) | \
349 BIT(POWER_DOMAIN_INIT))
376#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ 350#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
377 (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ 351 (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
378 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \ 352 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
@@ -416,46 +390,74 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
416 */ 390 */
417} 391}
418 392
419void bxt_enable_dc9(struct drm_i915_private *dev_priv) 393static void gen9_set_dc_state_debugmask_memory_up(
394 struct drm_i915_private *dev_priv)
420{ 395{
421 uint32_t val; 396 uint32_t val;
422 397
423 assert_can_enable_dc9(dev_priv); 398 /* The below bit doesn't need to be cleared ever afterwards */
399 val = I915_READ(DC_STATE_DEBUG);
400 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
401 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
402 I915_WRITE(DC_STATE_DEBUG, val);
403 POSTING_READ(DC_STATE_DEBUG);
404 }
405}
424 406
425 DRM_DEBUG_KMS("Enabling DC9\n"); 407static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
408{
409 uint32_t val;
410 uint32_t mask;
411
412 mask = DC_STATE_EN_UPTO_DC5;
413 if (IS_BROXTON(dev_priv))
414 mask |= DC_STATE_EN_DC9;
415 else
416 mask |= DC_STATE_EN_UPTO_DC6;
417
418 WARN_ON_ONCE(state & ~mask);
419
420 if (i915.enable_dc == 0)
421 state = DC_STATE_DISABLE;
422 else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
423 state = DC_STATE_EN_UPTO_DC5;
424
425 if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
426 gen9_set_dc_state_debugmask_memory_up(dev_priv);
426 427
427 val = I915_READ(DC_STATE_EN); 428 val = I915_READ(DC_STATE_EN);
428 val |= DC_STATE_EN_DC9; 429 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
430 val & mask, state);
431 val &= ~mask;
432 val |= state;
429 I915_WRITE(DC_STATE_EN, val); 433 I915_WRITE(DC_STATE_EN, val);
430 POSTING_READ(DC_STATE_EN); 434 POSTING_READ(DC_STATE_EN);
431} 435}
432 436
433void bxt_disable_dc9(struct drm_i915_private *dev_priv) 437void bxt_enable_dc9(struct drm_i915_private *dev_priv)
434{ 438{
435 uint32_t val; 439 assert_can_enable_dc9(dev_priv);
440
441 DRM_DEBUG_KMS("Enabling DC9\n");
442
443 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
444}
436 445
446void bxt_disable_dc9(struct drm_i915_private *dev_priv)
447{
437 assert_can_disable_dc9(dev_priv); 448 assert_can_disable_dc9(dev_priv);
438 449
439 DRM_DEBUG_KMS("Disabling DC9\n"); 450 DRM_DEBUG_KMS("Disabling DC9\n");
440 451
441 val = I915_READ(DC_STATE_EN); 452 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
442 val &= ~DC_STATE_EN_DC9;
443 I915_WRITE(DC_STATE_EN, val);
444 POSTING_READ(DC_STATE_EN);
445} 453}
446 454
447static void gen9_set_dc_state_debugmask_memory_up( 455static void assert_csr_loaded(struct drm_i915_private *dev_priv)
448 struct drm_i915_private *dev_priv)
449{ 456{
450 uint32_t val; 457 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
451 458 "CSR program storage start is NULL\n");
452 /* The below bit doesn't need to be cleared ever afterwards */ 459 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
453 val = I915_READ(DC_STATE_DEBUG); 460 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
454 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
455 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
456 I915_WRITE(DC_STATE_DEBUG, val);
457 POSTING_READ(DC_STATE_DEBUG);
458 }
459} 461}
460 462
461static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 463static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
@@ -478,8 +480,6 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
478 480
479static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) 481static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
480{ 482{
481 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
482 SKL_DISP_PW_2);
483 /* 483 /*
484 * During initialization, the firmware may not be loaded yet. 484 * During initialization, the firmware may not be loaded yet.
485 * We still want to make sure that the DC enabling flag is cleared. 485 * We still want to make sure that the DC enabling flag is cleared.
@@ -487,40 +487,17 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
487 if (dev_priv->power_domains.initializing) 487 if (dev_priv->power_domains.initializing)
488 return; 488 return;
489 489
490 WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
491 WARN_ONCE(dev_priv->pm.suspended, 490 WARN_ONCE(dev_priv->pm.suspended,
492 "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); 491 "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
493} 492}
494 493
495static void gen9_enable_dc5(struct drm_i915_private *dev_priv) 494static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
496{ 495{
497 uint32_t val;
498
499 assert_can_enable_dc5(dev_priv); 496 assert_can_enable_dc5(dev_priv);
500 497
501 DRM_DEBUG_KMS("Enabling DC5\n"); 498 DRM_DEBUG_KMS("Enabling DC5\n");
502 499
503 gen9_set_dc_state_debugmask_memory_up(dev_priv); 500 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
504
505 val = I915_READ(DC_STATE_EN);
506 val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
507 val |= DC_STATE_EN_UPTO_DC5;
508 I915_WRITE(DC_STATE_EN, val);
509 POSTING_READ(DC_STATE_EN);
510}
511
512static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
513{
514 uint32_t val;
515
516 assert_can_disable_dc5(dev_priv);
517
518 DRM_DEBUG_KMS("Disabling DC5\n");
519
520 val = I915_READ(DC_STATE_EN);
521 val &= ~DC_STATE_EN_UPTO_DC5;
522 I915_WRITE(DC_STATE_EN, val);
523 POSTING_READ(DC_STATE_EN);
524} 501}
525 502
526static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 503static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
@@ -546,40 +523,37 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
546 if (dev_priv->power_domains.initializing) 523 if (dev_priv->power_domains.initializing)
547 return; 524 return;
548 525
549 assert_csr_loaded(dev_priv);
550 WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 526 WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
551 "DC6 already programmed to be disabled.\n"); 527 "DC6 already programmed to be disabled.\n");
552} 528}
553 529
554static void skl_enable_dc6(struct drm_i915_private *dev_priv) 530static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
555{ 531{
556 uint32_t val; 532 assert_can_disable_dc5(dev_priv);
557 533
534 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
535 assert_can_disable_dc6(dev_priv);
536
537 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
538}
539
540void skl_enable_dc6(struct drm_i915_private *dev_priv)
541{
558 assert_can_enable_dc6(dev_priv); 542 assert_can_enable_dc6(dev_priv);
559 543
560 DRM_DEBUG_KMS("Enabling DC6\n"); 544 DRM_DEBUG_KMS("Enabling DC6\n");
561 545
562 gen9_set_dc_state_debugmask_memory_up(dev_priv); 546 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
563 547
564 val = I915_READ(DC_STATE_EN);
565 val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
566 val |= DC_STATE_EN_UPTO_DC6;
567 I915_WRITE(DC_STATE_EN, val);
568 POSTING_READ(DC_STATE_EN);
569} 548}
570 549
571static void skl_disable_dc6(struct drm_i915_private *dev_priv) 550void skl_disable_dc6(struct drm_i915_private *dev_priv)
572{ 551{
573 uint32_t val;
574
575 assert_can_disable_dc6(dev_priv); 552 assert_can_disable_dc6(dev_priv);
576 553
577 DRM_DEBUG_KMS("Disabling DC6\n"); 554 DRM_DEBUG_KMS("Disabling DC6\n");
578 555
579 val = I915_READ(DC_STATE_EN); 556 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
580 val &= ~DC_STATE_EN_UPTO_DC6;
581 I915_WRITE(DC_STATE_EN, val);
582 POSTING_READ(DC_STATE_EN);
583} 557}
584 558
585static void skl_set_power_well(struct drm_i915_private *dev_priv, 559static void skl_set_power_well(struct drm_i915_private *dev_priv,
@@ -629,20 +603,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
629 !I915_READ(HSW_PWR_WELL_BIOS), 603 !I915_READ(HSW_PWR_WELL_BIOS),
630 "Invalid for power well status to be enabled, unless done by the BIOS, \ 604 "Invalid for power well status to be enabled, unless done by the BIOS, \
631 when request is to disable!\n"); 605 when request is to disable!\n");
632 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && 606 if (power_well->data == SKL_DISP_PW_2) {
633 power_well->data == SKL_DISP_PW_2) { 607 /*
634 if (SKL_ENABLE_DC6(dev)) { 608 * DDI buffer programming unnecessary during
635 skl_disable_dc6(dev_priv); 609 * driver-load/resume as it's already done
636 /* 610 * during modeset initialization then. It's
637 * DDI buffer programming unnecessary during driver-load/resume 611 * also invalid here as encoder list is still
638 * as it's already done during modeset initialization then. 612 * uninitialized.
639 * It's also invalid here as encoder list is still uninitialized. 613 */
640 */ 614 if (!dev_priv->power_domains.initializing)
641 if (!dev_priv->power_domains.initializing) 615 intel_prepare_ddi(dev);
642 intel_prepare_ddi(dev);
643 } else {
644 gen9_disable_dc5(dev_priv);
645 }
646 } 616 }
647 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); 617 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
648 } 618 }
@@ -657,34 +627,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
657 } 627 }
658 } else { 628 } else {
659 if (enable_requested) { 629 if (enable_requested) {
660 if (IS_SKYLAKE(dev) && 630 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
661 (power_well->data == SKL_DISP_PW_1) && 631 POSTING_READ(HSW_PWR_WELL_DRIVER);
662 (intel_csr_load_status_get(dev_priv) == FW_LOADED)) 632 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
663 DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
664 else {
665 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
666 POSTING_READ(HSW_PWR_WELL_DRIVER);
667 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
668 }
669
670 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
671 power_well->data == SKL_DISP_PW_2) {
672 enum csr_state state;
673 /* TODO: wait for a completion event or
674 * similar here instead of busy
675 * waiting using wait_for function.
676 */
677 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
678 FW_UNINITIALIZED, 1000);
679 if (state != FW_LOADED)
680 DRM_DEBUG("CSR firmware not ready (%d)\n",
681 state);
682 else
683 if (SKL_ENABLE_DC6(dev))
684 skl_enable_dc6(dev_priv);
685 else
686 gen9_enable_dc5(dev_priv);
687 }
688 } 633 }
689 } 634 }
690 635
@@ -759,6 +704,41 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
759 skl_set_power_well(dev_priv, power_well, false); 704 skl_set_power_well(dev_priv, power_well, false);
760} 705}
761 706
707static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
708 struct i915_power_well *power_well)
709{
710 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
711}
712
713static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
714 struct i915_power_well *power_well)
715{
716 gen9_disable_dc5_dc6(dev_priv);
717}
718
719static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
720 struct i915_power_well *power_well)
721{
722 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
723 skl_enable_dc6(dev_priv);
724 else
725 gen9_enable_dc5(dev_priv);
726}
727
728static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
729 struct i915_power_well *power_well)
730{
731 if (power_well->count > 0) {
732 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
733 } else {
734 if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
735 i915.enable_dc != 1)
736 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
737 else
738 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
739 }
740}
741
762static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 742static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
763 struct i915_power_well *power_well) 743 struct i915_power_well *power_well)
764{ 744{
@@ -973,10 +953,12 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
973 int power_well_id) 953 int power_well_id)
974{ 954{
975 struct i915_power_domains *power_domains = &dev_priv->power_domains; 955 struct i915_power_domains *power_domains = &dev_priv->power_domains;
976 struct i915_power_well *power_well;
977 int i; 956 int i;
978 957
979 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { 958 for (i = 0; i < power_domains->power_well_count; i++) {
959 struct i915_power_well *power_well;
960
961 power_well = &power_domains->power_wells[i];
980 if (power_well->data == power_well_id) 962 if (power_well->data == power_well_id)
981 return power_well; 963 return power_well;
982 } 964 }
@@ -1457,7 +1439,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1457 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 1439 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1458 WARN_ON(!power_well->count); 1440 WARN_ON(!power_well->count);
1459 1441
1460 if (!--power_well->count && i915.disable_power_well) 1442 if (!--power_well->count)
1461 intel_power_well_disable(dev_priv, power_well); 1443 intel_power_well_disable(dev_priv, power_well);
1462 } 1444 }
1463 1445
@@ -1469,20 +1451,17 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1469#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 1451#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
1470 BIT(POWER_DOMAIN_PIPE_A) | \ 1452 BIT(POWER_DOMAIN_PIPE_A) | \
1471 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 1453 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
1472 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 1454 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1473 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 1455 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1474 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1456 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1475 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1457 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1476 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1477 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1478 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
1479 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
1480 BIT(POWER_DOMAIN_PORT_CRT) | \ 1458 BIT(POWER_DOMAIN_PORT_CRT) | \
1481 BIT(POWER_DOMAIN_PLLS) | \ 1459 BIT(POWER_DOMAIN_PLLS) | \
1482 BIT(POWER_DOMAIN_AUX_A) | \ 1460 BIT(POWER_DOMAIN_AUX_A) | \
1483 BIT(POWER_DOMAIN_AUX_B) | \ 1461 BIT(POWER_DOMAIN_AUX_B) | \
1484 BIT(POWER_DOMAIN_AUX_C) | \ 1462 BIT(POWER_DOMAIN_AUX_C) | \
1485 BIT(POWER_DOMAIN_AUX_D) | \ 1463 BIT(POWER_DOMAIN_AUX_D) | \
1464 BIT(POWER_DOMAIN_GMBUS) | \
1486 BIT(POWER_DOMAIN_INIT)) 1465 BIT(POWER_DOMAIN_INIT))
1487#define HSW_DISPLAY_POWER_DOMAINS ( \ 1466#define HSW_DISPLAY_POWER_DOMAINS ( \
1488 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 1467 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -1499,49 +1478,42 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1499#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK 1478#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
1500 1479
1501#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 1480#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1502 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1481 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1503 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1482 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1504 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1505 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1506 BIT(POWER_DOMAIN_PORT_CRT) | \ 1483 BIT(POWER_DOMAIN_PORT_CRT) | \
1507 BIT(POWER_DOMAIN_AUX_B) | \ 1484 BIT(POWER_DOMAIN_AUX_B) | \
1508 BIT(POWER_DOMAIN_AUX_C) | \ 1485 BIT(POWER_DOMAIN_AUX_C) | \
1509 BIT(POWER_DOMAIN_INIT)) 1486 BIT(POWER_DOMAIN_INIT))
1510 1487
1511#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 1488#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1512 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1489 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1513 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
1514 BIT(POWER_DOMAIN_AUX_B) | \ 1490 BIT(POWER_DOMAIN_AUX_B) | \
1515 BIT(POWER_DOMAIN_INIT)) 1491 BIT(POWER_DOMAIN_INIT))
1516 1492
1517#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 1493#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
1518 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1494 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1519 BIT(POWER_DOMAIN_AUX_B) | \ 1495 BIT(POWER_DOMAIN_AUX_B) | \
1520 BIT(POWER_DOMAIN_INIT)) 1496 BIT(POWER_DOMAIN_INIT))
1521 1497
1522#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 1498#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
1523 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 1499 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1524 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1525 BIT(POWER_DOMAIN_AUX_C) | \ 1500 BIT(POWER_DOMAIN_AUX_C) | \
1526 BIT(POWER_DOMAIN_INIT)) 1501 BIT(POWER_DOMAIN_INIT))
1527 1502
1528#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 1503#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
1529 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 1504 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1530 BIT(POWER_DOMAIN_AUX_C) | \ 1505 BIT(POWER_DOMAIN_AUX_C) | \
1531 BIT(POWER_DOMAIN_INIT)) 1506 BIT(POWER_DOMAIN_INIT))
1532 1507
1533#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 1508#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
1534 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1509 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1535 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1510 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1536 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
1537 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
1538 BIT(POWER_DOMAIN_AUX_B) | \ 1511 BIT(POWER_DOMAIN_AUX_B) | \
1539 BIT(POWER_DOMAIN_AUX_C) | \ 1512 BIT(POWER_DOMAIN_AUX_C) | \
1540 BIT(POWER_DOMAIN_INIT)) 1513 BIT(POWER_DOMAIN_INIT))
1541 1514
1542#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 1515#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1543 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 1516 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1544 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
1545 BIT(POWER_DOMAIN_AUX_D) | \ 1517 BIT(POWER_DOMAIN_AUX_D) | \
1546 BIT(POWER_DOMAIN_INIT)) 1518 BIT(POWER_DOMAIN_INIT))
1547 1519
@@ -1589,6 +1561,13 @@ static const struct i915_power_well_ops skl_power_well_ops = {
1589 .is_enabled = skl_power_well_enabled, 1561 .is_enabled = skl_power_well_enabled,
1590}; 1562};
1591 1563
1564static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1565 .sync_hw = gen9_dc_off_power_well_sync_hw,
1566 .enable = gen9_dc_off_power_well_enable,
1567 .disable = gen9_dc_off_power_well_disable,
1568 .is_enabled = gen9_dc_off_power_well_enabled,
1569};
1570
1592static struct i915_power_well hsw_power_wells[] = { 1571static struct i915_power_well hsw_power_wells[] = {
1593 { 1572 {
1594 .name = "always-on", 1573 .name = "always-on",
@@ -1644,6 +1623,7 @@ static struct i915_power_well vlv_power_wells[] = {
1644 .always_on = 1, 1623 .always_on = 1,
1645 .domains = VLV_ALWAYS_ON_POWER_DOMAINS, 1624 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1646 .ops = &i9xx_always_on_power_well_ops, 1625 .ops = &i9xx_always_on_power_well_ops,
1626 .data = PUNIT_POWER_WELL_ALWAYS_ON,
1647 }, 1627 },
1648 { 1628 {
1649 .name = "display", 1629 .name = "display",
@@ -1745,20 +1725,29 @@ static struct i915_power_well skl_power_wells[] = {
1745 .always_on = 1, 1725 .always_on = 1,
1746 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS, 1726 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1747 .ops = &i9xx_always_on_power_well_ops, 1727 .ops = &i9xx_always_on_power_well_ops,
1728 .data = SKL_DISP_PW_ALWAYS_ON,
1748 }, 1729 },
1749 { 1730 {
1750 .name = "power well 1", 1731 .name = "power well 1",
1751 .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS, 1732 /* Handled by the DMC firmware */
1733 .domains = 0,
1752 .ops = &skl_power_well_ops, 1734 .ops = &skl_power_well_ops,
1753 .data = SKL_DISP_PW_1, 1735 .data = SKL_DISP_PW_1,
1754 }, 1736 },
1755 { 1737 {
1756 .name = "MISC IO power well", 1738 .name = "MISC IO power well",
1757 .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS, 1739 /* Handled by the DMC firmware */
1740 .domains = 0,
1758 .ops = &skl_power_well_ops, 1741 .ops = &skl_power_well_ops,
1759 .data = SKL_DISP_PW_MISC_IO, 1742 .data = SKL_DISP_PW_MISC_IO,
1760 }, 1743 },
1761 { 1744 {
1745 .name = "DC off",
1746 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
1747 .ops = &gen9_dc_off_power_well_ops,
1748 .data = SKL_DISP_PW_DC_OFF,
1749 },
1750 {
1762 .name = "power well 2", 1751 .name = "power well 2",
1763 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 1752 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1764 .ops = &skl_power_well_ops, 1753 .ops = &skl_power_well_ops,
@@ -1790,6 +1779,34 @@ static struct i915_power_well skl_power_wells[] = {
1790 }, 1779 },
1791}; 1780};
1792 1781
1782void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
1783{
1784 struct i915_power_well *well;
1785
1786 if (!IS_SKYLAKE(dev_priv))
1787 return;
1788
1789 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1790 intel_power_well_enable(dev_priv, well);
1791
1792 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1793 intel_power_well_enable(dev_priv, well);
1794}
1795
1796void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
1797{
1798 struct i915_power_well *well;
1799
1800 if (!IS_SKYLAKE(dev_priv))
1801 return;
1802
1803 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1804 intel_power_well_disable(dev_priv, well);
1805
1806 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1807 intel_power_well_disable(dev_priv, well);
1808}
1809
1793static struct i915_power_well bxt_power_wells[] = { 1810static struct i915_power_well bxt_power_wells[] = {
1794 { 1811 {
1795 .name = "always-on", 1812 .name = "always-on",
@@ -1804,11 +1821,17 @@ static struct i915_power_well bxt_power_wells[] = {
1804 .data = SKL_DISP_PW_1, 1821 .data = SKL_DISP_PW_1,
1805 }, 1822 },
1806 { 1823 {
1824 .name = "DC off",
1825 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
1826 .ops = &gen9_dc_off_power_well_ops,
1827 .data = SKL_DISP_PW_DC_OFF,
1828 },
1829 {
1807 .name = "power well 2", 1830 .name = "power well 2",
1808 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 1831 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1809 .ops = &skl_power_well_ops, 1832 .ops = &skl_power_well_ops,
1810 .data = SKL_DISP_PW_2, 1833 .data = SKL_DISP_PW_2,
1811 } 1834 },
1812}; 1835};
1813 1836
1814static int 1837static int
@@ -1845,6 +1868,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1845 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, 1868 i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
1846 i915.disable_power_well); 1869 i915.disable_power_well);
1847 1870
1871 BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
1872
1848 mutex_init(&power_domains->lock); 1873 mutex_init(&power_domains->lock);
1849 1874
1850 /* 1875 /*
@@ -1855,7 +1880,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1855 set_power_wells(power_domains, hsw_power_wells); 1880 set_power_wells(power_domains, hsw_power_wells);
1856 } else if (IS_BROADWELL(dev_priv->dev)) { 1881 } else if (IS_BROADWELL(dev_priv->dev)) {
1857 set_power_wells(power_domains, bdw_power_wells); 1882 set_power_wells(power_domains, bdw_power_wells);
1858 } else if (IS_SKYLAKE(dev_priv->dev)) { 1883 } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
1859 set_power_wells(power_domains, skl_power_wells); 1884 set_power_wells(power_domains, skl_power_wells);
1860 } else if (IS_BROXTON(dev_priv->dev)) { 1885 } else if (IS_BROXTON(dev_priv->dev)) {
1861 set_power_wells(power_domains, bxt_power_wells); 1886 set_power_wells(power_domains, bxt_power_wells);
@@ -1870,21 +1895,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1870 return 0; 1895 return 0;
1871} 1896}
1872 1897
1873static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1874{
1875 struct drm_device *dev = dev_priv->dev;
1876 struct device *device = &dev->pdev->dev;
1877
1878 if (!HAS_RUNTIME_PM(dev))
1879 return;
1880
1881 if (!intel_enable_rc6(dev))
1882 return;
1883
1884 /* Make sure we're not suspended first. */
1885 pm_runtime_get_sync(device);
1886}
1887
1888/** 1898/**
1889 * intel_power_domains_fini - finalizes the power domain structures 1899 * intel_power_domains_fini - finalizes the power domain structures
1890 * @dev_priv: i915 device instance 1900 * @dev_priv: i915 device instance
@@ -1895,15 +1905,17 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1895 */ 1905 */
1896void intel_power_domains_fini(struct drm_i915_private *dev_priv) 1906void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1897{ 1907{
1898 intel_runtime_pm_disable(dev_priv);
1899
1900 /* The i915.ko module is still not prepared to be loaded when 1908 /* The i915.ko module is still not prepared to be loaded when
1901 * the power well is not enabled, so just enable it in case 1909 * the power well is not enabled, so just enable it in case
1902 * we're going to unload/reload. */ 1910 * we're going to unload/reload. */
1903 intel_display_set_init_power(dev_priv, true); 1911 intel_display_set_init_power(dev_priv, true);
1912
1913 /* Remove the refcount we took to keep power well support disabled. */
1914 if (!i915.disable_power_well)
1915 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1904} 1916}
1905 1917
1906static void intel_power_domains_resume(struct drm_i915_private *dev_priv) 1918static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
1907{ 1919{
1908 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1920 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1909 struct i915_power_well *power_well; 1921 struct i915_power_well *power_well;
@@ -1918,6 +1930,47 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1918 mutex_unlock(&power_domains->lock); 1930 mutex_unlock(&power_domains->lock);
1919} 1931}
1920 1932
1933static void skl_display_core_init(struct drm_i915_private *dev_priv,
1934 bool resume)
1935{
1936 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1937 uint32_t val;
1938
1939 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1940
1941 /* enable PCH reset handshake */
1942 val = I915_READ(HSW_NDE_RSTWRN_OPT);
1943 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
1944
1945 /* enable PG1 and Misc I/O */
1946 mutex_lock(&power_domains->lock);
1947 skl_pw1_misc_io_init(dev_priv);
1948 mutex_unlock(&power_domains->lock);
1949
1950 if (!resume)
1951 return;
1952
1953 skl_init_cdclk(dev_priv);
1954
1955 if (dev_priv->csr.dmc_payload)
1956 intel_csr_load_program(dev_priv);
1957}
1958
1959static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
1960{
1961 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1962
1963 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1964
1965 skl_uninit_cdclk(dev_priv);
1966
1967 /* The spec doesn't call for removing the reset handshake flag */
1968 /* disable PG1 and Misc I/O */
1969 mutex_lock(&power_domains->lock);
1970 skl_pw1_misc_io_fini(dev_priv);
1971 mutex_unlock(&power_domains->lock);
1972}
1973
1921static void chv_phy_control_init(struct drm_i915_private *dev_priv) 1974static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1922{ 1975{
1923 struct i915_power_well *cmn_bc = 1976 struct i915_power_well *cmn_bc =
@@ -2040,14 +2093,16 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2040 * This function initializes the hardware power domain state and enables all 2093 * This function initializes the hardware power domain state and enables all
2041 * power domains using intel_display_set_init_power(). 2094 * power domains using intel_display_set_init_power().
2042 */ 2095 */
2043void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) 2096void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2044{ 2097{
2045 struct drm_device *dev = dev_priv->dev; 2098 struct drm_device *dev = dev_priv->dev;
2046 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2099 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2047 2100
2048 power_domains->initializing = true; 2101 power_domains->initializing = true;
2049 2102
2050 if (IS_CHERRYVIEW(dev)) { 2103 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
2104 skl_display_core_init(dev_priv, resume);
2105 } else if (IS_CHERRYVIEW(dev)) {
2051 mutex_lock(&power_domains->lock); 2106 mutex_lock(&power_domains->lock);
2052 chv_phy_control_init(dev_priv); 2107 chv_phy_control_init(dev_priv);
2053 mutex_unlock(&power_domains->lock); 2108 mutex_unlock(&power_domains->lock);
@@ -2059,38 +2114,31 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
2059 2114
2060 /* For now, we need the power well to be always enabled. */ 2115 /* For now, we need the power well to be always enabled. */
2061 intel_display_set_init_power(dev_priv, true); 2116 intel_display_set_init_power(dev_priv, true);
2062 intel_power_domains_resume(dev_priv); 2117 /* Disable power support if the user asked so. */
2118 if (!i915.disable_power_well)
2119 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2120 intel_power_domains_sync_hw(dev_priv);
2063 power_domains->initializing = false; 2121 power_domains->initializing = false;
2064} 2122}
2065 2123
2066/** 2124/**
2067 * intel_aux_display_runtime_get - grab an auxiliary power domain reference 2125 * intel_power_domains_suspend - suspend power domain state
2068 * @dev_priv: i915 device instance 2126 * @dev_priv: i915 device instance
2069 * 2127 *
2070 * This function grabs a power domain reference for the auxiliary power domain 2128 * This function prepares the hardware power domain state before entering
2071 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its 2129 * system suspend. It must be paired with intel_power_domains_init_hw().
2072 * parents are powered up. Therefore users should only grab a reference to the
2073 * innermost power domain they need.
2074 *
2075 * Any power domain reference obtained by this function must have a symmetric
2076 * call to intel_aux_display_runtime_put() to release the reference again.
2077 */ 2130 */
2078void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) 2131void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2079{ 2132{
2080 intel_runtime_pm_get(dev_priv); 2133 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2081} 2134 skl_display_core_uninit(dev_priv);
2082 2135
2083/** 2136 /*
2084 * intel_aux_display_runtime_put - release an auxiliary power domain reference 2137 * Even if power well support was disabled we still want to disable
2085 * @dev_priv: i915 device instance 2138 * power wells while we are system suspended.
2086 * 2139 */
2087 * This function drops the auxiliary power domain reference obtained by 2140 if (!i915.disable_power_well)
2088 * intel_aux_display_runtime_get() and might power down the corresponding 2141 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2089 * hardware block right away if this is the last reference.
2090 */
2091void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
2092{
2093 intel_runtime_pm_put(dev_priv);
2094} 2142}
2095 2143
2096/** 2144/**
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c42b636c2087..06679f164b3e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -74,7 +74,7 @@ struct intel_sdvo {
74 struct i2c_adapter ddc; 74 struct i2c_adapter ddc;
75 75
76 /* Register for the SDVO device: SDVOB or SDVOC */ 76 /* Register for the SDVO device: SDVOB or SDVOC */
77 uint32_t sdvo_reg; 77 i915_reg_t sdvo_reg;
78 78
79 /* Active outputs controlled by this SDVO output */ 79 /* Active outputs controlled by this SDVO output */
80 uint16_t controlled_output; 80 uint16_t controlled_output;
@@ -120,8 +120,7 @@ struct intel_sdvo {
120 */ 120 */
121 bool is_tv; 121 bool is_tv;
122 122
123 /* On different gens SDVOB is at different places. */ 123 enum port port;
124 bool is_sdvob;
125 124
126 /* This is for current tv format name */ 125 /* This is for current tv format name */
127 int tv_format_index; 126 int tv_format_index;
@@ -245,7 +244,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
245 u32 bval = val, cval = val; 244 u32 bval = val, cval = val;
246 int i; 245 int i;
247 246
248 if (intel_sdvo->sdvo_reg == PCH_SDVOB) { 247 if (HAS_PCH_SPLIT(dev_priv)) {
249 I915_WRITE(intel_sdvo->sdvo_reg, val); 248 I915_WRITE(intel_sdvo->sdvo_reg, val);
250 POSTING_READ(intel_sdvo->sdvo_reg); 249 POSTING_READ(intel_sdvo->sdvo_reg);
251 /* 250 /*
@@ -259,7 +258,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
259 return; 258 return;
260 } 259 }
261 260
262 if (intel_sdvo->sdvo_reg == GEN3_SDVOB) 261 if (intel_sdvo->port == PORT_B)
263 cval = I915_READ(GEN3_SDVOC); 262 cval = I915_READ(GEN3_SDVOC);
264 else 263 else
265 bval = I915_READ(GEN3_SDVOB); 264 bval = I915_READ(GEN3_SDVOB);
@@ -422,7 +421,7 @@ static const struct _sdvo_cmd_name {
422 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 421 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
423}; 422};
424 423
425#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC") 424#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
426 425
427static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, 426static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
428 const void *args, int args_len) 427 const void *args, int args_len)
@@ -1282,14 +1281,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
1282 sdvox |= SDVO_BORDER_ENABLE; 1281 sdvox |= SDVO_BORDER_ENABLE;
1283 } else { 1282 } else {
1284 sdvox = I915_READ(intel_sdvo->sdvo_reg); 1283 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1285 switch (intel_sdvo->sdvo_reg) { 1284 if (intel_sdvo->port == PORT_B)
1286 case GEN3_SDVOB:
1287 sdvox &= SDVOB_PRESERVE_MASK; 1285 sdvox &= SDVOB_PRESERVE_MASK;
1288 break; 1286 else
1289 case GEN3_SDVOC:
1290 sdvox &= SDVOC_PRESERVE_MASK; 1287 sdvox &= SDVOC_PRESERVE_MASK;
1291 break;
1292 }
1293 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1288 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1294 } 1289 }
1295 1290
@@ -1464,12 +1459,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1464 * matching DP port to be enabled on transcoder A. 1459 * matching DP port to be enabled on transcoder A.
1465 */ 1460 */
1466 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) { 1461 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
1462 /*
1463 * We get CPU/PCH FIFO underruns on the other pipe when
1464 * doing the workaround. Sweep them under the rug.
1465 */
1466 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1467 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
1468
1467 temp &= ~SDVO_PIPE_B_SELECT; 1469 temp &= ~SDVO_PIPE_B_SELECT;
1468 temp |= SDVO_ENABLE; 1470 temp |= SDVO_ENABLE;
1469 intel_sdvo_write_sdvox(intel_sdvo, temp); 1471 intel_sdvo_write_sdvox(intel_sdvo, temp);
1470 1472
1471 temp &= ~SDVO_ENABLE; 1473 temp &= ~SDVO_ENABLE;
1472 intel_sdvo_write_sdvox(intel_sdvo, temp); 1474 intel_sdvo_write_sdvox(intel_sdvo, temp);
1475
1476 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
1477 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1478 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1473 } 1479 }
1474} 1480}
1475 1481
@@ -2251,7 +2257,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2251{ 2257{
2252 struct sdvo_device_mapping *mapping; 2258 struct sdvo_device_mapping *mapping;
2253 2259
2254 if (sdvo->is_sdvob) 2260 if (sdvo->port == PORT_B)
2255 mapping = &(dev_priv->sdvo_mappings[0]); 2261 mapping = &(dev_priv->sdvo_mappings[0]);
2256 else 2262 else
2257 mapping = &(dev_priv->sdvo_mappings[1]); 2263 mapping = &(dev_priv->sdvo_mappings[1]);
@@ -2269,7 +2275,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2269 struct sdvo_device_mapping *mapping; 2275 struct sdvo_device_mapping *mapping;
2270 u8 pin; 2276 u8 pin;
2271 2277
2272 if (sdvo->is_sdvob) 2278 if (sdvo->port == PORT_B)
2273 mapping = &dev_priv->sdvo_mappings[0]; 2279 mapping = &dev_priv->sdvo_mappings[0];
2274 else 2280 else
2275 mapping = &dev_priv->sdvo_mappings[1]; 2281 mapping = &dev_priv->sdvo_mappings[1];
@@ -2307,7 +2313,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2307 struct drm_i915_private *dev_priv = dev->dev_private; 2313 struct drm_i915_private *dev_priv = dev->dev_private;
2308 struct sdvo_device_mapping *my_mapping, *other_mapping; 2314 struct sdvo_device_mapping *my_mapping, *other_mapping;
2309 2315
2310 if (sdvo->is_sdvob) { 2316 if (sdvo->port == PORT_B) {
2311 my_mapping = &dev_priv->sdvo_mappings[0]; 2317 my_mapping = &dev_priv->sdvo_mappings[0];
2312 other_mapping = &dev_priv->sdvo_mappings[1]; 2318 other_mapping = &dev_priv->sdvo_mappings[1];
2313 } else { 2319 } else {
@@ -2332,7 +2338,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2332 /* No SDVO device info is found for another DVO port, 2338 /* No SDVO device info is found for another DVO port,
2333 * so use mapping assumption we had before BIOS parsing. 2339 * so use mapping assumption we had before BIOS parsing.
2334 */ 2340 */
2335 if (sdvo->is_sdvob) 2341 if (sdvo->port == PORT_B)
2336 return 0x70; 2342 return 0x70;
2337 else 2343 else
2338 return 0x72; 2344 return 0x72;
@@ -2939,18 +2945,31 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
2939 return i2c_add_adapter(&sdvo->ddc) == 0; 2945 return i2c_add_adapter(&sdvo->ddc) == 0;
2940} 2946}
2941 2947
2942bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) 2948static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
2949 enum port port)
2950{
2951 if (HAS_PCH_SPLIT(dev_priv))
2952 WARN_ON(port != PORT_B);
2953 else
2954 WARN_ON(port != PORT_B && port != PORT_C);
2955}
2956
2957bool intel_sdvo_init(struct drm_device *dev,
2958 i915_reg_t sdvo_reg, enum port port)
2943{ 2959{
2944 struct drm_i915_private *dev_priv = dev->dev_private; 2960 struct drm_i915_private *dev_priv = dev->dev_private;
2945 struct intel_encoder *intel_encoder; 2961 struct intel_encoder *intel_encoder;
2946 struct intel_sdvo *intel_sdvo; 2962 struct intel_sdvo *intel_sdvo;
2947 int i; 2963 int i;
2964
2965 assert_sdvo_port_valid(dev_priv, port);
2966
2948 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL); 2967 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
2949 if (!intel_sdvo) 2968 if (!intel_sdvo)
2950 return false; 2969 return false;
2951 2970
2952 intel_sdvo->sdvo_reg = sdvo_reg; 2971 intel_sdvo->sdvo_reg = sdvo_reg;
2953 intel_sdvo->is_sdvob = is_sdvob; 2972 intel_sdvo->port = port;
2954 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; 2973 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2955 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); 2974 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
2956 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) 2975 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
@@ -3000,8 +3019,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
3000 * hotplug lines. 3019 * hotplug lines.
3001 */ 3020 */
3002 if (intel_sdvo->hotplug_active) { 3021 if (intel_sdvo->hotplug_active) {
3003 intel_encoder->hpd_pin = 3022 if (intel_sdvo->port == PORT_B)
3004 intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C; 3023 intel_encoder->hpd_pin = HPD_SDVO_B;
3024 else
3025 intel_encoder->hpd_pin = HPD_SDVO_C;
3005 } 3026 }
3006 3027
3007 /* 3028 /*
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index c3b735971cec..2b96f336589e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -192,10 +192,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
192 const int pipe = intel_plane->pipe; 192 const int pipe = intel_plane->pipe;
193 const int plane = intel_plane->plane + 1; 193 const int plane = intel_plane->plane + 1;
194 u32 plane_ctl, stride_div, stride; 194 u32 plane_ctl, stride_div, stride;
195 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
196 const struct drm_intel_sprite_colorkey *key = 195 const struct drm_intel_sprite_colorkey *key =
197 &to_intel_plane_state(drm_plane->state)->ckey; 196 &to_intel_plane_state(drm_plane->state)->ckey;
198 unsigned long surf_addr; 197 u32 surf_addr;
199 u32 tile_height, plane_offset, plane_size; 198 u32 tile_height, plane_offset, plane_size;
200 unsigned int rotation; 199 unsigned int rotation;
201 int x_offset, y_offset; 200 int x_offset, y_offset;
@@ -212,10 +211,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
212 rotation = drm_plane->state->rotation; 211 rotation = drm_plane->state->rotation;
213 plane_ctl |= skl_plane_ctl_rotation(rotation); 212 plane_ctl |= skl_plane_ctl_rotation(rotation);
214 213
215 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
216 pixel_size, true,
217 src_w != crtc_w || src_h != crtc_h);
218
219 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], 214 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
220 fb->pixel_format); 215 fb->pixel_format);
221 216
@@ -297,8 +292,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
297 292
298 I915_WRITE(PLANE_SURF(pipe, plane), 0); 293 I915_WRITE(PLANE_SURF(pipe, plane), 0);
299 POSTING_READ(PLANE_SURF(pipe, plane)); 294 POSTING_READ(PLANE_SURF(pipe, plane));
300
301 intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
302} 295}
303 296
304static void 297static void
@@ -541,10 +534,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
541 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 534 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
542 sprctl |= SPRITE_PIPE_CSC_ENABLE; 535 sprctl |= SPRITE_PIPE_CSC_ENABLE;
543 536
544 intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
545 true,
546 src_w != crtc_w || src_h != crtc_h);
547
548 /* Sizes are 0 based */ 537 /* Sizes are 0 based */
549 src_w--; 538 src_w--;
550 src_h--; 539 src_h--;
@@ -678,10 +667,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
678 if (IS_GEN6(dev)) 667 if (IS_GEN6(dev))
679 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 668 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
680 669
681 intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
682 pixel_size, true,
683 src_w != crtc_w || src_h != crtc_h);
684
685 /* Sizes are 0 based */ 670 /* Sizes are 0 based */
686 src_w--; 671 src_w--;
687 src_h--; 672 src_h--;
@@ -938,9 +923,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
938 923
939 crtc = crtc ? crtc : plane->crtc; 924 crtc = crtc ? crtc : plane->crtc;
940 925
941 if (!crtc->state->active)
942 return;
943
944 if (state->visible) { 926 if (state->visible) {
945 intel_plane->update_plane(plane, crtc, fb, 927 intel_plane->update_plane(plane, crtc, fb,
946 state->dst.x1, state->dst.y1, 928 state->dst.x1, state->dst.y1,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 43cba129a0c0..c2358ba78b30 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -29,19 +29,7 @@
29 29
30#define FORCEWAKE_ACK_TIMEOUT_MS 50 30#define FORCEWAKE_ACK_TIMEOUT_MS 50
31 31
32#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) 32#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
33#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
34
35#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
36#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
37
38#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
39#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
40
41#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
42#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
43
44#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
45 33
46static const char * const forcewake_domain_names[] = { 34static const char * const forcewake_domain_names[] = {
47 "render", 35 "render",
@@ -72,7 +60,7 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
72static inline void 60static inline void
73fw_domain_reset(const struct intel_uncore_forcewake_domain *d) 61fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
74{ 62{
75 WARN_ON(d->reg_set == 0); 63 WARN_ON(!i915_mmio_reg_valid(d->reg_set));
76 __raw_i915_write32(d->i915, d->reg_set, d->val_reset); 64 __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
77} 65}
78 66
@@ -118,7 +106,7 @@ static inline void
118fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d) 106fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
119{ 107{
120 /* something from same cacheline, but not from the set register */ 108 /* something from same cacheline, but not from the set register */
121 if (d->reg_post) 109 if (i915_mmio_reg_valid(d->reg_post))
122 __raw_posting_read(d->i915, d->reg_post); 110 __raw_posting_read(d->i915, d->reg_post);
123} 111}
124 112
@@ -525,8 +513,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
525} 513}
526 514
527/* We give fast paths for the really cool registers */ 515/* We give fast paths for the really cool registers */
528#define NEEDS_FORCE_WAKE(reg) \ 516#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
529 ((reg) < 0x40000 && (reg) != FORCEWAKE)
530 517
531#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) 518#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
532 519
@@ -589,7 +576,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
589 REG_RANGE((reg), 0x9400, 0x9800) 576 REG_RANGE((reg), 0x9400, 0x9800)
590 577
591#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \ 578#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
592 ((reg) < 0x40000 &&\ 579 ((reg) < 0x40000 && \
593 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \ 580 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
594 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \ 581 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
595 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ 582 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
@@ -605,8 +592,8 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
605} 592}
606 593
607static void 594static void
608hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, 595hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
609 bool before) 596 i915_reg_t reg, bool read, bool before)
610{ 597{
611 const char *op = read ? "reading" : "writing to"; 598 const char *op = read ? "reading" : "writing to";
612 const char *when = before ? "before" : "after"; 599 const char *when = before ? "before" : "after";
@@ -616,7 +603,7 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
616 603
617 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 604 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
618 WARN(1, "Unclaimed register detected %s %s register 0x%x\n", 605 WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
619 when, op, reg); 606 when, op, i915_mmio_reg_offset(reg));
620 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 607 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
621 i915.mmio_debug--; /* Only report the first N failures */ 608 i915.mmio_debug--; /* Only report the first N failures */
622 } 609 }
@@ -649,7 +636,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
649 636
650#define __gen2_read(x) \ 637#define __gen2_read(x) \
651static u##x \ 638static u##x \
652gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 639gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
653 GEN2_READ_HEADER(x); \ 640 GEN2_READ_HEADER(x); \
654 val = __raw_i915_read##x(dev_priv, reg); \ 641 val = __raw_i915_read##x(dev_priv, reg); \
655 GEN2_READ_FOOTER; \ 642 GEN2_READ_FOOTER; \
@@ -657,7 +644,7 @@ gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
657 644
658#define __gen5_read(x) \ 645#define __gen5_read(x) \
659static u##x \ 646static u##x \
660gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 647gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
661 GEN2_READ_HEADER(x); \ 648 GEN2_READ_HEADER(x); \
662 ilk_dummy_write(dev_priv); \ 649 ilk_dummy_write(dev_priv); \
663 val = __raw_i915_read##x(dev_priv, reg); \ 650 val = __raw_i915_read##x(dev_priv, reg); \
@@ -680,6 +667,7 @@ __gen2_read(64)
680#undef GEN2_READ_HEADER 667#undef GEN2_READ_HEADER
681 668
682#define GEN6_READ_HEADER(x) \ 669#define GEN6_READ_HEADER(x) \
670 u32 offset = i915_mmio_reg_offset(reg); \
683 unsigned long irqflags; \ 671 unsigned long irqflags; \
684 u##x val = 0; \ 672 u##x val = 0; \
685 assert_device_not_suspended(dev_priv); \ 673 assert_device_not_suspended(dev_priv); \
@@ -714,20 +702,12 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
714 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 702 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
715} 703}
716 704
717#define __vgpu_read(x) \
718static u##x \
719vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
720 GEN6_READ_HEADER(x); \
721 val = __raw_i915_read##x(dev_priv, reg); \
722 GEN6_READ_FOOTER; \
723}
724
725#define __gen6_read(x) \ 705#define __gen6_read(x) \
726static u##x \ 706static u##x \
727gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 707gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
728 GEN6_READ_HEADER(x); \ 708 GEN6_READ_HEADER(x); \
729 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ 709 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
730 if (NEEDS_FORCE_WAKE(reg)) \ 710 if (NEEDS_FORCE_WAKE(offset)) \
731 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 711 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
732 val = __raw_i915_read##x(dev_priv, reg); \ 712 val = __raw_i915_read##x(dev_priv, reg); \
733 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ 713 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
@@ -736,47 +716,56 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
736 716
737#define __vlv_read(x) \ 717#define __vlv_read(x) \
738static u##x \ 718static u##x \
739vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 719vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
720 enum forcewake_domains fw_engine = 0; \
740 GEN6_READ_HEADER(x); \ 721 GEN6_READ_HEADER(x); \
741 if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \ 722 if (!NEEDS_FORCE_WAKE(offset)) \
742 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 723 fw_engine = 0; \
743 else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \ 724 else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
744 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ 725 fw_engine = FORCEWAKE_RENDER; \
726 else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
727 fw_engine = FORCEWAKE_MEDIA; \
728 if (fw_engine) \
729 __force_wake_get(dev_priv, fw_engine); \
745 val = __raw_i915_read##x(dev_priv, reg); \ 730 val = __raw_i915_read##x(dev_priv, reg); \
746 GEN6_READ_FOOTER; \ 731 GEN6_READ_FOOTER; \
747} 732}
748 733
749#define __chv_read(x) \ 734#define __chv_read(x) \
750static u##x \ 735static u##x \
751chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 736chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
737 enum forcewake_domains fw_engine = 0; \
752 GEN6_READ_HEADER(x); \ 738 GEN6_READ_HEADER(x); \
753 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \ 739 if (!NEEDS_FORCE_WAKE(offset)) \
754 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 740 fw_engine = 0; \
755 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \ 741 else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
756 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ 742 fw_engine = FORCEWAKE_RENDER; \
757 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \ 743 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
758 __force_wake_get(dev_priv, \ 744 fw_engine = FORCEWAKE_MEDIA; \
759 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \ 745 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
746 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
747 if (fw_engine) \
748 __force_wake_get(dev_priv, fw_engine); \
760 val = __raw_i915_read##x(dev_priv, reg); \ 749 val = __raw_i915_read##x(dev_priv, reg); \
761 GEN6_READ_FOOTER; \ 750 GEN6_READ_FOOTER; \
762} 751}
763 752
764#define SKL_NEEDS_FORCE_WAKE(reg) \ 753#define SKL_NEEDS_FORCE_WAKE(reg) \
765 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) 754 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
766 755
767#define __gen9_read(x) \ 756#define __gen9_read(x) \
768static u##x \ 757static u##x \
769gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 758gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
770 enum forcewake_domains fw_engine; \ 759 enum forcewake_domains fw_engine; \
771 GEN6_READ_HEADER(x); \ 760 GEN6_READ_HEADER(x); \
772 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ 761 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
773 if (!SKL_NEEDS_FORCE_WAKE(reg)) \ 762 if (!SKL_NEEDS_FORCE_WAKE(offset)) \
774 fw_engine = 0; \ 763 fw_engine = 0; \
775 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ 764 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
776 fw_engine = FORCEWAKE_RENDER; \ 765 fw_engine = FORCEWAKE_RENDER; \
777 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ 766 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
778 fw_engine = FORCEWAKE_MEDIA; \ 767 fw_engine = FORCEWAKE_MEDIA; \
779 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \ 768 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
780 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ 769 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
781 else \ 770 else \
782 fw_engine = FORCEWAKE_BLITTER; \ 771 fw_engine = FORCEWAKE_BLITTER; \
@@ -787,10 +776,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
787 GEN6_READ_FOOTER; \ 776 GEN6_READ_FOOTER; \
788} 777}
789 778
790__vgpu_read(8)
791__vgpu_read(16)
792__vgpu_read(32)
793__vgpu_read(64)
794__gen9_read(8) 779__gen9_read(8)
795__gen9_read(16) 780__gen9_read(16)
796__gen9_read(32) 781__gen9_read(32)
@@ -812,10 +797,37 @@ __gen6_read(64)
812#undef __chv_read 797#undef __chv_read
813#undef __vlv_read 798#undef __vlv_read
814#undef __gen6_read 799#undef __gen6_read
815#undef __vgpu_read
816#undef GEN6_READ_FOOTER 800#undef GEN6_READ_FOOTER
817#undef GEN6_READ_HEADER 801#undef GEN6_READ_HEADER
818 802
803#define VGPU_READ_HEADER(x) \
804 unsigned long irqflags; \
805 u##x val = 0; \
806 assert_device_not_suspended(dev_priv); \
807 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
808
809#define VGPU_READ_FOOTER \
810 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
811 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
812 return val
813
814#define __vgpu_read(x) \
815static u##x \
816vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
817 VGPU_READ_HEADER(x); \
818 val = __raw_i915_read##x(dev_priv, reg); \
819 VGPU_READ_FOOTER; \
820}
821
822__vgpu_read(8)
823__vgpu_read(16)
824__vgpu_read(32)
825__vgpu_read(64)
826
827#undef __vgpu_read
828#undef VGPU_READ_FOOTER
829#undef VGPU_READ_HEADER
830
819#define GEN2_WRITE_HEADER \ 831#define GEN2_WRITE_HEADER \
820 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 832 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
821 assert_device_not_suspended(dev_priv); \ 833 assert_device_not_suspended(dev_priv); \
@@ -824,7 +836,7 @@ __gen6_read(64)
824 836
825#define __gen2_write(x) \ 837#define __gen2_write(x) \
826static void \ 838static void \
827gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 839gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
828 GEN2_WRITE_HEADER; \ 840 GEN2_WRITE_HEADER; \
829 __raw_i915_write##x(dev_priv, reg, val); \ 841 __raw_i915_write##x(dev_priv, reg, val); \
830 GEN2_WRITE_FOOTER; \ 842 GEN2_WRITE_FOOTER; \
@@ -832,7 +844,7 @@ gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
832 844
833#define __gen5_write(x) \ 845#define __gen5_write(x) \
834static void \ 846static void \
835gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 847gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
836 GEN2_WRITE_HEADER; \ 848 GEN2_WRITE_HEADER; \
837 ilk_dummy_write(dev_priv); \ 849 ilk_dummy_write(dev_priv); \
838 __raw_i915_write##x(dev_priv, reg, val); \ 850 __raw_i915_write##x(dev_priv, reg, val); \
@@ -855,6 +867,7 @@ __gen2_write(64)
855#undef GEN2_WRITE_HEADER 867#undef GEN2_WRITE_HEADER
856 868
857#define GEN6_WRITE_HEADER \ 869#define GEN6_WRITE_HEADER \
870 u32 offset = i915_mmio_reg_offset(reg); \
858 unsigned long irqflags; \ 871 unsigned long irqflags; \
859 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ 872 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
860 assert_device_not_suspended(dev_priv); \ 873 assert_device_not_suspended(dev_priv); \
@@ -865,10 +878,10 @@ __gen2_write(64)
865 878
866#define __gen6_write(x) \ 879#define __gen6_write(x) \
867static void \ 880static void \
868gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 881gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
869 u32 __fifo_ret = 0; \ 882 u32 __fifo_ret = 0; \
870 GEN6_WRITE_HEADER; \ 883 GEN6_WRITE_HEADER; \
871 if (NEEDS_FORCE_WAKE(reg)) { \ 884 if (NEEDS_FORCE_WAKE(offset)) { \
872 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 885 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
873 } \ 886 } \
874 __raw_i915_write##x(dev_priv, reg, val); \ 887 __raw_i915_write##x(dev_priv, reg, val); \
@@ -880,10 +893,10 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
880 893
881#define __hsw_write(x) \ 894#define __hsw_write(x) \
882static void \ 895static void \
883hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 896hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
884 u32 __fifo_ret = 0; \ 897 u32 __fifo_ret = 0; \
885 GEN6_WRITE_HEADER; \ 898 GEN6_WRITE_HEADER; \
886 if (NEEDS_FORCE_WAKE(reg)) { \ 899 if (NEEDS_FORCE_WAKE(offset)) { \
887 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 900 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
888 } \ 901 } \
889 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 902 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
@@ -896,15 +909,7 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
896 GEN6_WRITE_FOOTER; \ 909 GEN6_WRITE_FOOTER; \
897} 910}
898 911
899#define __vgpu_write(x) \ 912static const i915_reg_t gen8_shadowed_regs[] = {
900static void vgpu_write##x(struct drm_i915_private *dev_priv, \
901 off_t reg, u##x val, bool trace) { \
902 GEN6_WRITE_HEADER; \
903 __raw_i915_write##x(dev_priv, reg, val); \
904 GEN6_WRITE_FOOTER; \
905}
906
907static const u32 gen8_shadowed_regs[] = {
908 FORCEWAKE_MT, 913 FORCEWAKE_MT,
909 GEN6_RPNSWREQ, 914 GEN6_RPNSWREQ,
910 GEN6_RC_VIDEO_FREQ, 915 GEN6_RC_VIDEO_FREQ,
@@ -915,11 +920,12 @@ static const u32 gen8_shadowed_regs[] = {
915 /* TODO: Other registers are not yet used */ 920 /* TODO: Other registers are not yet used */
916}; 921};
917 922
918static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) 923static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
924 i915_reg_t reg)
919{ 925{
920 int i; 926 int i;
921 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) 927 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
922 if (reg == gen8_shadowed_regs[i]) 928 if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
923 return true; 929 return true;
924 930
925 return false; 931 return false;
@@ -927,10 +933,10 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
927 933
928#define __gen8_write(x) \ 934#define __gen8_write(x) \
929static void \ 935static void \
930gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 936gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
931 GEN6_WRITE_HEADER; \ 937 GEN6_WRITE_HEADER; \
932 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 938 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
933 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \ 939 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
934 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 940 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
935 __raw_i915_write##x(dev_priv, reg, val); \ 941 __raw_i915_write##x(dev_priv, reg, val); \
936 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ 942 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
@@ -940,22 +946,25 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
940 946
941#define __chv_write(x) \ 947#define __chv_write(x) \
942static void \ 948static void \
943chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 949chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
944 bool shadowed = is_gen8_shadowed(dev_priv, reg); \ 950 enum forcewake_domains fw_engine = 0; \
945 GEN6_WRITE_HEADER; \ 951 GEN6_WRITE_HEADER; \
946 if (!shadowed) { \ 952 if (!NEEDS_FORCE_WAKE(offset) || \
947 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \ 953 is_gen8_shadowed(dev_priv, reg)) \
948 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ 954 fw_engine = 0; \
949 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \ 955 else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
950 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \ 956 fw_engine = FORCEWAKE_RENDER; \
951 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \ 957 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
952 __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \ 958 fw_engine = FORCEWAKE_MEDIA; \
953 } \ 959 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
960 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
961 if (fw_engine) \
962 __force_wake_get(dev_priv, fw_engine); \
954 __raw_i915_write##x(dev_priv, reg, val); \ 963 __raw_i915_write##x(dev_priv, reg, val); \
955 GEN6_WRITE_FOOTER; \ 964 GEN6_WRITE_FOOTER; \
956} 965}
957 966
958static const u32 gen9_shadowed_regs[] = { 967static const i915_reg_t gen9_shadowed_regs[] = {
959 RING_TAIL(RENDER_RING_BASE), 968 RING_TAIL(RENDER_RING_BASE),
960 RING_TAIL(GEN6_BSD_RING_BASE), 969 RING_TAIL(GEN6_BSD_RING_BASE),
961 RING_TAIL(VEBOX_RING_BASE), 970 RING_TAIL(VEBOX_RING_BASE),
@@ -968,11 +977,12 @@ static const u32 gen9_shadowed_regs[] = {
968 /* TODO: Other registers are not yet used */ 977 /* TODO: Other registers are not yet used */
969}; 978};
970 979
971static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg) 980static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
981 i915_reg_t reg)
972{ 982{
973 int i; 983 int i;
974 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) 984 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
975 if (reg == gen9_shadowed_regs[i]) 985 if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
976 return true; 986 return true;
977 987
978 return false; 988 return false;
@@ -980,19 +990,19 @@ static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
980 990
981#define __gen9_write(x) \ 991#define __gen9_write(x) \
982static void \ 992static void \
983gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ 993gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
984 bool trace) { \ 994 bool trace) { \
985 enum forcewake_domains fw_engine; \ 995 enum forcewake_domains fw_engine; \
986 GEN6_WRITE_HEADER; \ 996 GEN6_WRITE_HEADER; \
987 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ 997 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
988 if (!SKL_NEEDS_FORCE_WAKE(reg) || \ 998 if (!SKL_NEEDS_FORCE_WAKE(offset) || \
989 is_gen9_shadowed(dev_priv, reg)) \ 999 is_gen9_shadowed(dev_priv, reg)) \
990 fw_engine = 0; \ 1000 fw_engine = 0; \
991 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ 1001 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
992 fw_engine = FORCEWAKE_RENDER; \ 1002 fw_engine = FORCEWAKE_RENDER; \
993 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ 1003 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
994 fw_engine = FORCEWAKE_MEDIA; \ 1004 fw_engine = FORCEWAKE_MEDIA; \
995 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \ 1005 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
996 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ 1006 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
997 else \ 1007 else \
998 fw_engine = FORCEWAKE_BLITTER; \ 1008 fw_engine = FORCEWAKE_BLITTER; \
@@ -1024,20 +1034,41 @@ __gen6_write(8)
1024__gen6_write(16) 1034__gen6_write(16)
1025__gen6_write(32) 1035__gen6_write(32)
1026__gen6_write(64) 1036__gen6_write(64)
1027__vgpu_write(8)
1028__vgpu_write(16)
1029__vgpu_write(32)
1030__vgpu_write(64)
1031 1037
1032#undef __gen9_write 1038#undef __gen9_write
1033#undef __chv_write 1039#undef __chv_write
1034#undef __gen8_write 1040#undef __gen8_write
1035#undef __hsw_write 1041#undef __hsw_write
1036#undef __gen6_write 1042#undef __gen6_write
1037#undef __vgpu_write
1038#undef GEN6_WRITE_FOOTER 1043#undef GEN6_WRITE_FOOTER
1039#undef GEN6_WRITE_HEADER 1044#undef GEN6_WRITE_HEADER
1040 1045
1046#define VGPU_WRITE_HEADER \
1047 unsigned long irqflags; \
1048 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1049 assert_device_not_suspended(dev_priv); \
1050 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
1051
1052#define VGPU_WRITE_FOOTER \
1053 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1054
1055#define __vgpu_write(x) \
1056static void vgpu_write##x(struct drm_i915_private *dev_priv, \
1057 i915_reg_t reg, u##x val, bool trace) { \
1058 VGPU_WRITE_HEADER; \
1059 __raw_i915_write##x(dev_priv, reg, val); \
1060 VGPU_WRITE_FOOTER; \
1061}
1062
1063__vgpu_write(8)
1064__vgpu_write(16)
1065__vgpu_write(32)
1066__vgpu_write(64)
1067
1068#undef __vgpu_write
1069#undef VGPU_WRITE_FOOTER
1070#undef VGPU_WRITE_HEADER
1071
1041#define ASSIGN_WRITE_MMIO_VFUNCS(x) \ 1072#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1042do { \ 1073do { \
1043 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \ 1074 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
@@ -1057,7 +1088,8 @@ do { \
1057 1088
1058static void fw_domain_init(struct drm_i915_private *dev_priv, 1089static void fw_domain_init(struct drm_i915_private *dev_priv,
1059 enum forcewake_domain_id domain_id, 1090 enum forcewake_domain_id domain_id,
1060 u32 reg_set, u32 reg_ack) 1091 i915_reg_t reg_set,
1092 i915_reg_t reg_ack)
1061{ 1093{
1062 struct intel_uncore_forcewake_domain *d; 1094 struct intel_uncore_forcewake_domain *d;
1063 1095
@@ -1087,8 +1119,6 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1087 d->reg_post = FORCEWAKE_ACK_VLV; 1119 d->reg_post = FORCEWAKE_ACK_VLV;
1088 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) 1120 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1089 d->reg_post = ECOBUS; 1121 d->reg_post = ECOBUS;
1090 else
1091 d->reg_post = 0;
1092 1122
1093 d->i915 = dev_priv; 1123 d->i915 = dev_priv;
1094 d->id = domain_id; 1124 d->id = domain_id;
@@ -1262,12 +1292,14 @@ void intel_uncore_fini(struct drm_device *dev)
1262#define GEN_RANGE(l, h) GENMASK(h, l) 1292#define GEN_RANGE(l, h) GENMASK(h, l)
1263 1293
1264static const struct register_whitelist { 1294static const struct register_whitelist {
1265 uint64_t offset; 1295 i915_reg_t offset_ldw, offset_udw;
1266 uint32_t size; 1296 uint32_t size;
1267 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 1297 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1268 uint32_t gen_bitmask; 1298 uint32_t gen_bitmask;
1269} whitelist[] = { 1299} whitelist[] = {
1270 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) }, 1300 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1301 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1302 .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1271}; 1303};
1272 1304
1273int i915_reg_read_ioctl(struct drm_device *dev, 1305int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1277,11 +1309,11 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1277 struct drm_i915_reg_read *reg = data; 1309 struct drm_i915_reg_read *reg = data;
1278 struct register_whitelist const *entry = whitelist; 1310 struct register_whitelist const *entry = whitelist;
1279 unsigned size; 1311 unsigned size;
1280 u64 offset; 1312 i915_reg_t offset_ldw, offset_udw;
1281 int i, ret = 0; 1313 int i, ret = 0;
1282 1314
1283 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1315 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1284 if (entry->offset == (reg->offset & -entry->size) && 1316 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1285 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1317 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1286 break; 1318 break;
1287 } 1319 }
@@ -1293,27 +1325,28 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1293 * be naturally aligned (and those that are not so aligned merely 1325 * be naturally aligned (and those that are not so aligned merely
1294 * limit the available flags for that register). 1326 * limit the available flags for that register).
1295 */ 1327 */
1296 offset = entry->offset; 1328 offset_ldw = entry->offset_ldw;
1329 offset_udw = entry->offset_udw;
1297 size = entry->size; 1330 size = entry->size;
1298 size |= reg->offset ^ offset; 1331 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1299 1332
1300 intel_runtime_pm_get(dev_priv); 1333 intel_runtime_pm_get(dev_priv);
1301 1334
1302 switch (size) { 1335 switch (size) {
1303 case 8 | 1: 1336 case 8 | 1:
1304 reg->val = I915_READ64_2x32(offset, offset+4); 1337 reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1305 break; 1338 break;
1306 case 8: 1339 case 8:
1307 reg->val = I915_READ64(offset); 1340 reg->val = I915_READ64(offset_ldw);
1308 break; 1341 break;
1309 case 4: 1342 case 4:
1310 reg->val = I915_READ(offset); 1343 reg->val = I915_READ(offset_ldw);
1311 break; 1344 break;
1312 case 2: 1345 case 2:
1313 reg->val = I915_READ16(offset); 1346 reg->val = I915_READ16(offset_ldw);
1314 break; 1347 break;
1315 case 1: 1348 case 1:
1316 reg->val = I915_READ8(offset); 1349 reg->val = I915_READ8(offset_ldw);
1317 break; 1350 break;
1318 default: 1351 default:
1319 ret = -EINVAL; 1352 ret = -EINVAL;
@@ -1470,7 +1503,7 @@ static int gen6_do_reset(struct drm_device *dev)
1470} 1503}
1471 1504
1472static int wait_for_register(struct drm_i915_private *dev_priv, 1505static int wait_for_register(struct drm_i915_private *dev_priv,
1473 const u32 reg, 1506 i915_reg_t reg,
1474 const u32 mask, 1507 const u32 mask,
1475 const u32 value, 1508 const u32 value,
1476 const unsigned long timeout_ms) 1509 const unsigned long timeout_ms)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7e327309cf69..c2dd52ea4198 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3405,7 +3405,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3405 return 0; 3405 return 0;
3406} 3406}
3407 3407
3408#include "../gpu/drm/i915/i915_reg.h" 3408#define SOUTH_CHICKEN2 0xc2004
3409#define PCH_PP_STATUS 0xc7200
3410#define PCH_PP_CONTROL 0xc7204
3409#define MSG_CTL 0x45010 3411#define MSG_CTL 0x45010
3410#define NSDE_PWR_STATE 0xd0100 3412#define NSDE_PWR_STATE 0xd0100
3411#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */ 3413#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 30d89e0da2c6..fab13851f95a 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -31,47 +31,80 @@
31#define MAX_PORTS 5 31#define MAX_PORTS 5
32 32
33/** 33/**
34 * struct i915_audio_component_ops - callbacks defined in gfx driver 34 * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
35 * @owner: the module owner
36 * @get_power: get the POWER_DOMAIN_AUDIO power well
37 * @put_power: put the POWER_DOMAIN_AUDIO power well
38 * @codec_wake_override: Enable/Disable generating the codec wake signal
39 * @get_cdclk_freq: get the Core Display Clock in KHz
40 * @sync_audio_rate: set n/cts based on the sample rate
41 */ 35 */
42struct i915_audio_component_ops { 36struct i915_audio_component_ops {
37 /**
38 * @owner: i915 module
39 */
43 struct module *owner; 40 struct module *owner;
41 /**
42 * @get_power: get the POWER_DOMAIN_AUDIO power well
43 *
44 * Request the power well to be turned on.
45 */
44 void (*get_power)(struct device *); 46 void (*get_power)(struct device *);
47 /**
48 * @put_power: put the POWER_DOMAIN_AUDIO power well
49 *
50 * Allow the power well to be turned off.
51 */
45 void (*put_power)(struct device *); 52 void (*put_power)(struct device *);
53 /**
54 * @codec_wake_override: Enable/disable codec wake signal
55 */
46 void (*codec_wake_override)(struct device *, bool enable); 56 void (*codec_wake_override)(struct device *, bool enable);
57 /**
58 * @get_cdclk_freq: Get the Core Display Clock in kHz
59 */
47 int (*get_cdclk_freq)(struct device *); 60 int (*get_cdclk_freq)(struct device *);
61 /**
62 * @sync_audio_rate: set n/cts based on the sample rate
63 *
64 * Called from audio driver. After audio driver sets the
65 * sample rate, it will call this function to set n/cts
66 */
48 int (*sync_audio_rate)(struct device *, int port, int rate); 67 int (*sync_audio_rate)(struct device *, int port, int rate);
49}; 68};
50 69
70/**
71 * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
72 */
51struct i915_audio_component_audio_ops { 73struct i915_audio_component_audio_ops {
74 /**
75 * @audio_ptr: Pointer to be used in call to pin_eld_notify
76 */
52 void *audio_ptr; 77 void *audio_ptr;
53 /** 78 /**
54 * Call from i915 driver, notifying the HDA driver that 79 * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
55 * pin sense and/or ELD information has changed. 80 *
56 * @audio_ptr: HDA driver object 81 * Called when the i915 driver has set up audio pipeline or has just
57 * @port: Which port has changed (PORTA / PORTB / PORTC etc) 82 * begun to tear it down. This allows the HDA driver to update its
83 * status accordingly (even when the HDA controller is in power save
84 * mode).
58 */ 85 */
59 void (*pin_eld_notify)(void *audio_ptr, int port); 86 void (*pin_eld_notify)(void *audio_ptr, int port);
60}; 87};
61 88
62/** 89/**
63 * struct i915_audio_component - used for audio video interaction 90 * struct i915_audio_component - Used for direct communication between i915 and hda drivers
64 * @dev: the device from gfx driver
65 * @aud_sample_rate: the array of audio sample rate per port
66 * @ops: callback for audio driver calling
67 * @audio_ops: Call from i915 driver
68 */ 91 */
69struct i915_audio_component { 92struct i915_audio_component {
93 /**
94 * @dev: i915 device, used as parameter for ops
95 */
70 struct device *dev; 96 struct device *dev;
97 /**
98 * @aud_sample_rate: the array of audio sample rate per port
99 */
71 int aud_sample_rate[MAX_PORTS]; 100 int aud_sample_rate[MAX_PORTS];
72 101 /**
102 * @ops: Ops implemented by i915 driver, called by hda driver
103 */
73 const struct i915_audio_component_ops *ops; 104 const struct i915_audio_component_ops *ops;
74 105 /**
106 * @audio_ops: Ops implemented by hda driver, called by i915 driver
107 */
75 const struct i915_audio_component_audio_ops *audio_ops; 108 const struct i915_audio_component_audio_ops *audio_ops;
76}; 109};
77 110
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 17c445612e01..f1a113e35f98 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -291,4 +291,40 @@
291 INTEL_VGA_DEVICE(0x1A84, info), \ 291 INTEL_VGA_DEVICE(0x1A84, info), \
292 INTEL_VGA_DEVICE(0x5A84, info) 292 INTEL_VGA_DEVICE(0x5A84, info)
293 293
294#define INTEL_KBL_GT1_IDS(info) \
295 INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
296 INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
297 INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
298 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
299 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
300 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
301 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
302 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
303
304#define INTEL_KBL_GT2_IDS(info) \
305 INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
306 INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
307 INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
308 INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
309 INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
310 INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
311 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
312
313#define INTEL_KBL_GT3_IDS(info) \
314 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
315 INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
316 INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
317
318#define INTEL_KBL_GT4_IDS(info) \
319 INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
320 INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
321 INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
322 INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
323
324#define INTEL_KBL_IDS(info) \
325 INTEL_KBL_GT1_IDS(info), \
326 INTEL_KBL_GT2_IDS(info), \
327 INTEL_KBL_GT3_IDS(info), \
328 INTEL_KBL_GT4_IDS(info)
329
294#endif /* _I915_PCIIDS_H */ 330#endif /* _I915_PCIIDS_H */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 484a9fb20479..67ef73a5d6eb 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1079,6 +1079,12 @@ struct drm_i915_gem_context_destroy {
1079}; 1079};
1080 1080
1081struct drm_i915_reg_read { 1081struct drm_i915_reg_read {
1082 /*
1083 * Register offset.
1084 * For 64bit wide registers where the upper 32bits don't immediately
1085 * follow the lower 32bits, the offset of the lower 32bits must
1086 * be specified
1087 */
1082 __u64 offset; 1088 __u64 offset;
1083 __u64 val; /* Return value */ 1089 __u64 val; /* Return value */
1084}; 1090};
@@ -1125,8 +1131,9 @@ struct drm_i915_gem_context_param {
1125 __u32 ctx_id; 1131 __u32 ctx_id;
1126 __u32 size; 1132 __u32 size;
1127 __u64 param; 1133 __u64 param;
1128#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1134#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1129#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1135#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1136#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1130 __u64 value; 1137 __u64 value;
1131}; 1138};
1132 1139
diff --git a/kernel/async.c b/kernel/async.c
index 4c3773c0bf63..d2edd6efec56 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -326,3 +326,4 @@ bool current_is_async(void)
326 326
327 return worker && worker->current_func == async_run_entry_fn; 327 return worker && worker->current_func == async_run_entry_fn;
328} 328}
329EXPORT_SYMBOL_GPL(current_is_async);