author	Dave Airlie <airlied@redhat.com>	2014-12-02 17:25:59 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-12-02 17:25:59 -0500
commit	26045b53c96f23b75a48544349c3d936e8402418 (patch)
tree	ce550e556d6135aa45a376f067855b8aaa1b26ad
parent	1a92b7a241dcf06a92d84219b4124dcf420ae315 (diff)
parent	064ca1d250b14b785e662b0a13d8d20cb84574e1 (diff)

Merge tag 'drm-intel-next-2014-11-21-fixed' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2014-11-21:
- infoframe tracking (for fastboot) from Jesse
- start of the dri1/ums support removal
- vlv forcewake timeout fixes (Imre)
- bunch of patches to polish the rps code (Imre) and improve it on bdw (Tom O'Rourke)
- on-demand pinning for execlist contexts
- vlv/chv backlight improvements (Ville)
- gen8+ render ctx w/a work from various people
- skl edp programming (Satheeshakrishna et al.)
- psr docbook (Rodrigo)
- piles of little fixes and improvements all over, as usual

* tag 'drm-intel-next-2014-11-21-fixed' of git://anongit.freedesktop.org/drm-intel: (117 commits)
  drm/i915: Don't pin LRC in GGTT when dumping in debugfs
  drm/i915: Update DRIVER_DATE to 20141121
  drm/i915/g4x: fix g4x infoframe readout
  drm/i915: Only call mod_timer() if not already pending
  drm/i915: Don't rely upon encoder->type for infoframe hw state readout
  drm/i915: remove the IRQs enabled WARN from intel_disable_gt_powersave
  drm/i915: Use ggtt error obj capture helper for gen8 semaphores
  drm/i915: vlv: increase timeout when setting idle GPU freq
  drm/i915: vlv: fix cdclk setting during modeset while suspended
  drm/i915: Dump hdmi pipe_config state
  drm/i915: Gen9 shadowed registers
  drm/i915/skl: Gen9 multi-engine forcewake
  drm/i915: Read power well status before other registers for drpc info
  drm/i915: Pin tiled objects for L-shaped configs
  drm/i915: Update ring freq for full gpu freq range
  drm/i915: change initial rps frequency for gen8
  drm/i915: Keep min freq above floor on HSW/BDW
  drm/i915: Use efficient frequency for HSW/BDW
  drm/i915: Can i915_gem_init_ioctl
  drm/i915: Sanitize ->lastclose
  ...
-rw-r--r--  Documentation/DocBook/drm.tmpl               |   26
-rw-r--r--  drivers/gpu/drm/drm_edid.c                   |    7
-rw-r--r--  drivers/gpu/drm/i915/Makefile                |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c       |    6
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |   85
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              | 1029
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |   26
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |   95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |  397
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c      |    8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   49
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c          |   36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h          |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c       |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c        |   17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              |  223
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |  112
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c          |   49
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h            |  104
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c              |   14
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c           |   17
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c             |  604
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         |  384
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c              |  451
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |   40
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c     |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c            |   69
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c             |  302
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h             |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c            |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c           |   99
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c              |  317
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c             |  481
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      |  216
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |   12
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c      |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c          |    5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c          |  156
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c       |    3
-rw-r--r--  include/uapi/drm/i915_drm.h                  |    1
40 files changed, 3019 insertions, 2477 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 3789f2db3c21..56e2a9b65c68 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3921,6 +3921,11 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/intel_audio.c
       </sect2>
       <sect2>
+	<title>Panel Self Refresh (PSR/SRD)</title>
+!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
+!Idrivers/gpu/drm/i915/intel_psr.c
+      </sect2>
+      <sect2>
 	<title>DPIO</title>
 !Pdrivers/gpu/drm/i915/i915_reg.h DPIO
 	<table id="dpiox2">
@@ -4029,6 +4034,27 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/intel_lrc.c
       </sect2>
     </sect1>
+
+  <sect1>
+    <title> Tracing </title>
+    <para>
+    This section covers all things related to the tracepoints implemented in
+    the i915 driver.
+    </para>
+    <sect2>
+      <title> i915_ppgtt_create and i915_ppgtt_release </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
+    </sect2>
+    <sect2>
+      <title> i915_context_create and i915_context_free </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
+    </sect2>
+    <sect2>
+      <title> switch_mm </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
+    </sect2>
+  </sect1>
+
 </chapter>
 !Cdrivers/gpu/drm/i915/i915_irq.c
 </part>
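[editor's note] The new Tracing section documents tracepoints rather than functions, so the natural smoke test is to switch one on and watch the trace buffer. A minimal sketch in C, assuming the usual tracefs mount under /sys/kernel/debug/tracing and that the event directory is named after the i915_ppgtt_create tracepoint; a shell redirect of "1" into the same file does the identical thing:

/*
 * Hedged sketch: enable the i915_ppgtt_create tracepoint that the new
 * DocBook section documents. The paths are assumptions about the
 * tracefs layout, not something this patch defines.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/i915/i915_ppgtt_create/enable";
	int fd = open(enable, O_WRONLY);

	if (fd < 0) {
		perror("open");	/* tracefs not mounted here, or no i915 */
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* write "0" to disable again */
		perror("write");
	close(fd);
	puts("enabled; read trace_pipe to see ppgtt create events");
	return 0;
}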
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1a77a49d2695..a7b5a71856a7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3145,9 +3145,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 		}
 	}
 	eld[5] |= sad_count << 4;
-	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
-	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+	eld[DRM_ELD_BASELINE_ELD_LEN] =
+		DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
+
+	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
+		      drm_eld_size(eld), sad_count);
 }
 EXPORT_SYMBOL(drm_edid_to_eld);
 
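[editor's note] The removed open-coded expression and the new drm_eld_calc_baseline_block_size() helper agree on the arithmetic: the ELD baseline block is 20 fixed bytes plus the monitor name plus 3 bytes per Short Audio Descriptor, and eld[2] stores that length in 4-byte units. A standalone sketch of the same calculation; only the formula comes from the hunk above, everything else is illustrative:

/* Hedged sketch of the ELD baseline size math shown in this hunk. */
#include <stdio.h>

/* 20 fixed baseline bytes + monitor name length + 3 bytes per SAD */
static unsigned int eld_baseline_bytes(unsigned int mnl,
				       unsigned int sad_count)
{
	return 20 + mnl + sad_count * 3;
}

int main(void)
{
	unsigned int mnl = 13, sad_count = 1;	/* made-up monitor */
	unsigned int bytes = eld_baseline_bytes(mnl, sad_count);

	/* old code: (bytes + 3) / 4; new code: DIV_ROUND_UP(bytes, 4) */
	printf("eld[2] = %u dwords\n", (bytes + 3) / 4);
	return 0;
}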
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 891e584e97ea..e4083e41a600 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -51,6 +51,7 @@ i915-y += intel_audio.o \
 	  intel_frontbuffer.o \
 	  intel_modes.o \
 	  intel_overlay.o \
+	  intel_psr.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI)	+= intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 809bb957b452..22c992a78ac6 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -413,6 +413,8 @@ static const u32 gen7_render_regs[] = {
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
 	OACONTROL, /* Only allowed for LRI and SRM. See below. */
+	REG64(MI_PREDICATE_SRC0),
+	REG64(MI_PREDICATE_SRC1),
 	GEN7_3DPRIM_END_OFFSET,
 	GEN7_3DPRIM_START_VERTEX,
 	GEN7_3DPRIM_VERTEX_COUNT,
@@ -1072,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
 	 *
 	 * 1. Initial version. Checks batches and reports violations, but leaves
 	 *    hardware parsing enabled (so does not allow new use cases).
+	 * 2. Allow access to the MI_PREDICATE_SRC0 and
+	 *    MI_PREDICATE_SRC1 registers.
 	 */
-	return 1;
+	return 2;
 }
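[editor's note] Userspace picks up the bumped parser version through the existing GETPARAM ioctl, so no new ABI is required. A hedged libdrm sketch of that query; the device node path is an assumption:

/* Hedged sketch: query the command parser version from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <i915_drm.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd < 0)
		return 1;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		/* >= 2 means MI_PREDICATE_SRC0/1 access is allowed */
		printf("cmd parser version: %d\n", version);
	close(fd);
	return 0;
}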
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 319da61354b0..d4a0dddbfefb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1240,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rpmodectl1, rcctl1;
+	u32 rpmodectl1, rcctl1, pw_status;
 	unsigned fw_rendercount = 0, fw_mediacount = 0;
 
 	intel_runtime_pm_get(dev_priv);
 
+	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
@@ -1263,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
 					GEN6_RC_CTL_EI_MODE(1))));
 	seq_printf(m, "Render Power Well: %s\n",
-		   (I915_READ(VLV_GTLC_PW_STATUS) &
-			VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
 	seq_printf(m, "Media Power Well: %s\n",
-		   (I915_READ(VLV_GTLC_PW_STATUS) &
-			VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
 
 	seq_printf(m, "Render RC6 residency since boot: %u\n",
 		   I915_READ(VLV_GT_RENDER_RC6));
@@ -1773,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static void i915_dump_lrc_obj(struct seq_file *m,
+			      struct intel_engine_cs *ring,
+			      struct drm_i915_gem_object *ctx_obj)
+{
+	struct page *page;
+	uint32_t *reg_state;
+	int j;
+	unsigned long ggtt_offset = 0;
+
+	if (ctx_obj == NULL) {
+		seq_printf(m, "Context on %s with no gem object\n",
+			   ring->name);
+		return;
+	}
+
+	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+		   intel_execlists_ctx_id(ctx_obj));
+
+	if (!i915_gem_obj_ggtt_bound(ctx_obj))
+		seq_puts(m, "\tNot bound in GGTT\n");
+	else
+		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+	if (i915_gem_object_get_pages(ctx_obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n");
+		return;
+	}
+
+	page = i915_gem_object_get_page(ctx_obj, 1);
+	if (!WARN_ON(page == NULL)) {
+		reg_state = kmap_atomic(page);
+
+		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   ggtt_offset + 4096 + (j * 4),
+				   reg_state[j], reg_state[j + 1],
+				   reg_state[j + 2], reg_state[j + 3]);
+		}
+		kunmap_atomic(reg_state);
+	}
+
+	seq_putc(m, '\n');
+}
+
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1793,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i) {
-			struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-			if (ring->default_context == ctx)
-				continue;
-
-			if (ctx_obj) {
-				struct page *page = i915_gem_object_get_page(ctx_obj, 1);
-				uint32_t *reg_state = kmap_atomic(page);
-				int j;
-
-				seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-					   intel_execlists_ctx_id(ctx_obj));
-
-				for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-					seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-						   i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
-						   reg_state[j], reg_state[j + 1],
-						   reg_state[j + 2], reg_state[j + 3]);
-				}
-				kunmap_atomic(reg_state);
-
-				seq_putc(m, '\n');
-			}
+			if (ring->default_context != ctx)
+				i915_dump_lrc_obj(m, ring,
+						  ctx->engine[i].state);
 		}
 	}
 
@@ -1975,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
+		seq_printf(m, "DDC2 = 0x%08x\n",
+			   I915_READ(DCC2));
 		seq_printf(m, "C0DRB3 = 0x%04x\n",
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1997,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
+
+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		seq_puts(m, "L-shaped memory detected\n");
+
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
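[editor's note] Pulling the dump into i915_dump_lrc_obj() changes nothing about how the data is consumed; it is still a plain debugfs read. A usage sketch, assuming the node keeps its i915_dump_lrc name under the DRM debugfs root:

/* Hedged sketch: read the execlist context dump from debugfs. */
#include <stdio.h>

int main(void)
{
	/* assumed path; the minor number depends on the system */
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_dump_lrc", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "CONTEXT: <ring> <id>" blocks */
	fclose(f);
	return 0;
}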
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a966f3c9950..ecee3bcc8772 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,884 +50,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/oom.h>
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-	__intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->buffer->obj == NULL)		\
-		LOCK_TEST_WITH_RETURN(dev, file);			\
-} while (0)
-
-static inline u32
-intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
-{
-	if (I915_NEED_GFX_HWS(dev_priv->dev))
-		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
-	else
-		return intel_read_status_page(LP_RING(dev_priv), reg);
-}
-
-#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_BREADCRUMB_INDEX		0x21
-
-void i915_update_dri1_breadcrumb(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv;
-
-	/*
-	 * The dri breadcrumb update races against the drm master disappearing.
-	 * Instead of trying to fix this (this is by far not the only ums issue)
-	 * just don't do the update in kms mode.
-	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	if (dev->primary->master) {
-		master_priv = dev->primary->master->driver_priv;
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->last_dispatch =
-				READ_BREADCRUMB(dev_priv);
-	}
-}
-
-static void i915_write_hws_pga(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 addr;
-
-	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(dev)->gen >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-	I915_WRITE(HWS_PGA, addr);
-}
-
-/**
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
-static void i915_free_hws(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-	if (dev_priv->status_page_dmah) {
-		drm_pci_free(dev, dev_priv->status_page_dmah);
-		dev_priv->status_page_dmah = NULL;
-	}
-
-	if (ring->status_page.gfx_addr) {
-		ring->status_page.gfx_addr = 0;
-		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
-	}
-
-	/* Need to rewrite hardware status page */
-	I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-void i915_kernel_lost_context(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv;
-	struct intel_engine_cs *ring = LP_RING(dev_priv);
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-
-	/*
-	 * We should never lose context on the ring with modesetting
-	 * as we don't expose it to userspace
-	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
-	if (ringbuf->space < 0)
-		ringbuf->space += ringbuf->size;
-
-	if (!dev->primary->master)
-		return;
-
-	master_priv = dev->primary->master->driver_priv;
-	if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
-		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
-static int i915_dma_cleanup(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	/* Make sure interrupts are disabled here because the uninstall ioctl
-	 * may not have been called from userspace and after dev_private
-	 * is freed, it's too late.
-	 */
-	if (dev->irq_enabled)
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
-	mutex_unlock(&dev->struct_mutex);
-
-	/* Clear the HWS virtual address at teardown */
-	if (I915_NEED_GFX_HWS(dev))
-		i915_free_hws(dev);
-
-	return 0;
-}
-
-static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	int ret;
-
-	master_priv->sarea = drm_legacy_getsarea(dev);
-	if (master_priv->sarea) {
-		master_priv->sarea_priv = (drm_i915_sarea_t *)
-			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
-	} else {
-		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
-	}
-
-	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->buffer->obj != NULL) {
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Client tried to initialize ringbuffer in "
-				  "GEM mode\n");
-			return -EINVAL;
-		}
-
-		ret = intel_render_ring_init_dri(dev,
-						 init->ring_start,
-						 init->ring_size);
-		if (ret) {
-			i915_dma_cleanup(dev);
-			return ret;
-		}
-	}
-
-	dev_priv->dri1.cpp = init->cpp;
-	dev_priv->dri1.back_offset = init->back_offset;
-	dev_priv->dri1.front_offset = init->front_offset;
-	dev_priv->dri1.current_page = 0;
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->pf_current_page = 0;
-
-	/* Allow hardware batchbuffers unless told otherwise.
-	 */
-	dev_priv->dri1.allow_batchbuffer = 1;
-
-	return 0;
-}
-
-static int i915_dma_resume(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-	DRM_DEBUG_DRIVER("%s\n", __func__);
-
-	if (ring->buffer->virtual_start == NULL) {
-		DRM_ERROR("can not ioremap virtual address for"
-			  " ring buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Program Hardware Status Page */
-	if (!ring->status_page.page_addr) {
-		DRM_ERROR("Can not find hardware status page\n");
-		return -EINVAL;
-	}
-	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-			 ring->status_page.page_addr);
-	if (ring->status_page.gfx_addr != 0)
-		intel_ring_setup_status_page(ring);
-	else
-		i915_write_hws_pga(dev);
-
-	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
-	return 0;
-}
-
-static int i915_dma_init(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	drm_i915_init_t *init = data;
-	int retcode = 0;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	switch (init->func) {
-	case I915_INIT_DMA:
-		retcode = i915_initialize(dev, init);
-		break;
-	case I915_CLEANUP_DMA:
-		retcode = i915_dma_cleanup(dev);
-		break;
-	case I915_RESUME_DMA:
-		retcode = i915_dma_resume(dev);
-		break;
-	default:
-		retcode = -EINVAL;
-		break;
-	}
-
-	return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction. It's important to get the size right as
- * that tells us where the next instruction to check is. Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int validate_cmd(int cmd)
-{
-	switch (((cmd >> 29) & 0x7)) {
-	case 0x0:
-		switch ((cmd >> 23) & 0x3f) {
-		case 0x0:
-			return 1;	/* MI_NOOP */
-		case 0x4:
-			return 1;	/* MI_FLUSH */
-		default:
-			return 0;	/* disallow everything else */
-		}
-		break;
-	case 0x1:
-		return 0;	/* reserved */
-	case 0x2:
-		return (cmd & 0xff) + 2;	/* 2d commands */
-	case 0x3:
-		if (((cmd >> 24) & 0x1f) <= 0x18)
-			return 1;
-
-		switch ((cmd >> 24) & 0x1f) {
-		case 0x1c:
-			return 1;
-		case 0x1d:
-			switch ((cmd >> 16) & 0xff) {
-			case 0x3:
-				return (cmd & 0x1f) + 2;
-			case 0x4:
-				return (cmd & 0xf) + 2;
-			default:
-				return (cmd & 0xffff) + 2;
-			}
-		case 0x1e:
-			if (cmd & (1 << 23))
-				return (cmd & 0xffff) + 1;
-			else
-				return 1;
-		case 0x1f:
-			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
-				return (cmd & 0x1ffff) + 2;
-			else if (cmd & (1 << 17))	/* indirect random */
-				if ((cmd & 0xffff) == 0)
-					return 0;	/* unknown length, too hard */
-				else
-					return (((cmd & 0xffff) + 1) / 2) + 1;
-			else
-				return 2;	/* indirect sequential */
-		default:
-			return 0;
-		}
-	default:
-		return 0;
-	}
-
-	return 0;
-}
-
-static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i, ret;
-
-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
-		return -EINVAL;
-
-	for (i = 0; i < dwords;) {
-		int sz = validate_cmd(buffer[i]);
-
-		if (sz == 0 || i + sz > dwords)
-			return -EINVAL;
-		i += sz;
-	}
-
-	ret = BEGIN_LP_RING((dwords+1)&~1);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < dwords; i++)
-		OUT_RING(buffer[i]);
-	if (dwords & 1)
-		OUT_RING(0);
-
-	ADVANCE_LP_RING();
-
-	return 0;
-}
-
-int
-i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect *box,
-	      int DR1, int DR4)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
-	    box->y2 <= 0 || box->x2 <= 0) {
-		DRM_ERROR("Bad box %d,%d..%d,%d\n",
-			  box->x1, box->y1, box->x2, box->y2);
-		return -EINVAL;
-	}
-
-	if (INTEL_INFO(dev)->gen >= 4) {
-		ret = BEGIN_LP_RING(4);
-		if (ret)
-			return ret;
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-		OUT_RING(DR4);
-	} else {
-		ret = BEGIN_LP_RING(6);
-		if (ret)
-			return ret;
-
-		OUT_RING(GFX_OP_DRAWRECT_INFO);
-		OUT_RING(DR1);
-		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-		OUT_RING(DR4);
-		OUT_RING(0);
-	}
-	ADVANCE_LP_RING();
-
-	return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-static void i915_emit_breadcrumb(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
-	dev_priv->dri1.counter++;
-	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
-		dev_priv->dri1.counter = 0;
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
-	if (BEGIN_LP_RING(4) == 0) {
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->dri1.counter);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	}
-}
-
-static int i915_dispatch_cmdbuffer(struct drm_device *dev,
-				   drm_i915_cmdbuffer_t *cmd,
-				   struct drm_clip_rect *cliprects,
-				   void *cmdbuf)
-{
-	int nbox = cmd->num_cliprects;
-	int i = 0, count, ret;
-
-	if (cmd->sz & 0x3) {
-		DRM_ERROR("alignment");
-		return -EINVAL;
-	}
-
-	i915_kernel_lost_context(dev);
-
-	count = nbox ? nbox : 1;
-
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			ret = i915_emit_box(dev, &cliprects[i],
-					    cmd->DR1, cmd->DR4);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
-		if (ret)
-			return ret;
-	}
-
-	i915_emit_breadcrumb(dev);
-	return 0;
-}
-
-static int i915_dispatch_batchbuffer(struct drm_device *dev,
-				     drm_i915_batchbuffer_t *batch,
-				     struct drm_clip_rect *cliprects)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int nbox = batch->num_cliprects;
-	int i, count, ret;
-
-	if ((batch->start | batch->used) & 0x7) {
-		DRM_ERROR("alignment");
-		return -EINVAL;
-	}
-
-	i915_kernel_lost_context(dev);
-
-	count = nbox ? nbox : 1;
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			ret = i915_emit_box(dev, &cliprects[i],
-					    batch->DR1, batch->DR4);
-			if (ret)
-				return ret;
-		}
-
-		if (!IS_I830(dev) && !IS_845G(dev)) {
-			ret = BEGIN_LP_RING(2);
-			if (ret)
-				return ret;
-
-			if (INTEL_INFO(dev)->gen >= 4) {
-				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
-				OUT_RING(batch->start);
-			} else {
-				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
-				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-			}
-		} else {
-			ret = BEGIN_LP_RING(4);
-			if (ret)
-				return ret;
-
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-			OUT_RING(batch->start + batch->used - 4);
-			OUT_RING(0);
-		}
-		ADVANCE_LP_RING();
-	}
-
-
-	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		if (BEGIN_LP_RING(2) == 0) {
-			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-			OUT_RING(MI_NOOP);
-			ADVANCE_LP_RING();
-		}
-	}
-
-	i915_emit_breadcrumb(dev);
-	return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv =
-		dev->primary->master->driver_priv;
-	int ret;
-
-	if (!master_priv->sarea_priv)
-		return -EINVAL;
-
-	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
-			 __func__,
-			 dev_priv->dri1.current_page,
-			 master_priv->sarea_priv->pf_current_page);
-
-	i915_kernel_lost_context(dev);
-
-	ret = BEGIN_LP_RING(10);
-	if (ret)
-		return ret;
-
-	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
-	OUT_RING(0);
-
-	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
-	OUT_RING(0);
-	if (dev_priv->dri1.current_page == 0) {
-		OUT_RING(dev_priv->dri1.back_offset);
-		dev_priv->dri1.current_page = 1;
-	} else {
-		OUT_RING(dev_priv->dri1.front_offset);
-		dev_priv->dri1.current_page = 0;
-	}
-	OUT_RING(0);
-
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
-	OUT_RING(0);
-
-	ADVANCE_LP_RING();
-
-	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
-
-	if (BEGIN_LP_RING(4) == 0) {
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->dri1.counter);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	}
-
-	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
-	return 0;
-}
-
-static int i915_quiescent(struct drm_device *dev)
-{
-	i915_kernel_lost_context(dev);
-	return intel_ring_idle(LP_RING(dev->dev_private));
-}
-
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_quiescent(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-static int i915_batchbuffer(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv;
-	drm_i915_sarea_t *sarea_priv;
-	drm_i915_batchbuffer_t *batch = data;
-	int ret;
-	struct drm_clip_rect *cliprects = NULL;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	master_priv = dev->primary->master->driver_priv;
-	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
-	if (!dev_priv->dri1.allow_batchbuffer) {
-		DRM_ERROR("Batchbuffer ioctl disabled\n");
-		return -EINVAL;
-	}
-
-	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
-			 batch->start, batch->used, batch->num_cliprects);
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	if (batch->num_cliprects < 0)
-		return -EINVAL;
-
-	if (batch->num_cliprects) {
-		cliprects = kcalloc(batch->num_cliprects,
-				    sizeof(*cliprects),
-				    GFP_KERNEL);
-		if (cliprects == NULL)
-			return -ENOMEM;
-
-		ret = copy_from_user(cliprects, batch->cliprects,
-				     batch->num_cliprects *
-				     sizeof(struct drm_clip_rect));
-		if (ret != 0) {
-			ret = -EFAULT;
-			goto fail_free;
-		}
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (sarea_priv)
-		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_free:
-	kfree(cliprects);
-
-	return ret;
-}
-
-static int i915_cmdbuffer(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv;
-	drm_i915_sarea_t *sarea_priv;
-	drm_i915_cmdbuffer_t *cmdbuf = data;
-	struct drm_clip_rect *cliprects = NULL;
-	void *batch_data;
-	int ret;
-
-	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
-			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	master_priv = dev->primary->master->driver_priv;
-	sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	if (cmdbuf->num_cliprects < 0)
-		return -EINVAL;
-
-	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
-	if (batch_data == NULL)
-		return -ENOMEM;
-
-	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-	if (ret != 0) {
-		ret = -EFAULT;
-		goto fail_batch_free;
-	}
-
-	if (cmdbuf->num_cliprects) {
-		cliprects = kcalloc(cmdbuf->num_cliprects,
-				    sizeof(*cliprects), GFP_KERNEL);
-		if (cliprects == NULL) {
-			ret = -ENOMEM;
-			goto fail_batch_free;
-		}
-
-		ret = copy_from_user(cliprects, cmdbuf->cliprects,
-				     cmdbuf->num_cliprects *
-				     sizeof(struct drm_clip_rect));
-		if (ret != 0) {
-			ret = -EFAULT;
-			goto fail_clip_free;
-		}
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-		goto fail_clip_free;
-	}
-
-	if (sarea_priv)
-		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_clip_free:
-	kfree(cliprects);
-fail_batch_free:
-	kfree(batch_data);
-
-	return ret;
-}
-
-static int i915_emit_irq(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
-	i915_kernel_lost_context(dev);
-
-	DRM_DEBUG_DRIVER("\n");
-
-	dev_priv->dri1.counter++;
-	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
-		dev_priv->dri1.counter = 1;
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
-	if (BEGIN_LP_RING(4) == 0) {
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->dri1.counter);
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-
-	return dev_priv->dri1.counter;
-}
-
-static int i915_wait_irq(struct drm_device *dev, int irq_nr)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	int ret = 0;
-	struct intel_engine_cs *ring = LP_RING(dev_priv);
-
-	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
-		  READ_BREADCRUMB(dev_priv));
-
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-		return 0;
-	}
-
-	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
-	if (ring->irq_get(ring)) {
-		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
-			    READ_BREADCRUMB(dev_priv) >= irq_nr);
-		ring->irq_put(ring);
-	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
-		ret = -EBUSY;
-
-	if (ret == -EBUSY) {
-		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
-	}
-
-	return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-static int i915_irq_emit(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	drm_i915_irq_emit_t *emit = data;
-	int result;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	mutex_lock(&dev->struct_mutex);
-	result = i915_emit_irq(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
-		DRM_ERROR("copy_to_user\n");
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-static int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	drm_i915_irq_wait_t *irqwait = data;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
-static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	drm_i915_vblank_pipe_t *pipe = data;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
-	return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-static int i915_vblank_swap(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	/* The delayed swap mechanism was fundamentally racy, and has been
-	 * removed. The model was that the client requested a delayed flip/swap
-	 * from the kernel, then waited for vblank before continuing to perform
-	 * rendering. The problem was that the kernel might wake the client
-	 * up before it dispatched the vblank swap (since the lock has to be
-	 * held while touching the ringbuffer), in which case the client would
-	 * clear and start the next frame before the swap occurred, and
-	 * flicker would occur in addition to likely missing the vblank.
-	 *
-	 * In the absence of this ioctl, userland falls back to a correct path
-	 * of waiting for a vblank, then dispatching the swap on its own.
-	 * Context switching to userland and back is plenty fast enough for
-	 * meeting the requirements of vblank swapping.
-	 */
-	return -EINVAL;
-}
-
-static int i915_flip_bufs(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	DRM_DEBUG_DRIVER("%s\n", __func__);
-
-	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_flip(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
 
 static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	drm_i915_getparam_t *param = data;
 	int value;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
 	switch (param->param) {
 	case I915_PARAM_IRQ_ACTIVE:
-		value = dev->pdev->irq ? 1 : 0;
-		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
-		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
-		break;
 	case I915_PARAM_LAST_DISPATCH:
-		value = READ_BREADCRUMB(dev_priv);
-		break;
+		/* Reject all old ums/dri params. */
+		return -ENODEV;
 	case I915_PARAM_CHIPSET_ID:
 		value = dev->pdev->device;
 		break;
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version();
 		break;
+	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_i915_setparam_t *param = data;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
 	switch (param->param) {
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-		break;
 	case I915_SETPARAM_ALLOW_BATCHBUFFER:
-		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
-		break;
+		/* Reject all old ums/dri params. */
+		return -ENODEV;
+
 	case I915_SETPARAM_NUM_USED_FENCES:
 		if (param->value > dev_priv->num_fence_regs ||
 		    param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int i915_set_status_page(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	drm_i915_hws_addr_t *hws = data;
-	struct intel_engine_cs *ring;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (!I915_NEED_GFX_HWS(dev))
-		return -EINVAL;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		WARN(1, "tried to set status page when mode setting active\n");
-		return 0;
-	}
-
-	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
-	ring = LP_RING(dev_priv);
-	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
-	dev_priv->dri1.gfx_hws_cpu_addr =
-		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
-	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
-		i915_dma_cleanup(dev);
-		ring->status_page.gfx_addr = 0;
-		DRM_ERROR("can not ioremap virtual address for"
-				" G33 hw status page\n");
-		return -ENOMEM;
-	}
-
-	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
-	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-			 ring->status_page.gfx_addr);
-	DRM_DEBUG_DRIVER("load hws at %p\n",
-			 ring->status_page.page_addr);
-	return 0;
-}
-
 static int i915_get_bridge_dev(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1398,30 +460,6 @@ out:
 	return ret;
 }
 
-int i915_master_create(struct drm_device *dev, struct drm_master *master)
-{
-	struct drm_i915_master_private *master_priv;
-
-	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
-	if (!master_priv)
-		return -ENOMEM;
-
-	master->driver_priv = master_priv;
-	return 0;
-}
-
-void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
-{
-	struct drm_i915_master_private *master_priv = master->driver_priv;
-
-	if (!master_priv)
-		return;
-
-	kfree(master_priv);
-
-	master->driver_priv = NULL;
-}
-
 #if IS_ENABLED(CONFIG_FB)
 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
@@ -1777,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_ERROR("failed to init modeset\n");
 			goto out_power_well;
 		}
-	} else {
-		/* Start out suspended in ums mode. */
-		dev_priv->ums.mm_suspended = 1;
 	}
 
 	i915_setup_sysfs(dev);
@@ -1896,9 +931,6 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_gem_context_fini(dev);
 		mutex_unlock(&dev->struct_mutex);
 		i915_gem_cleanup_stolen(dev);
-
-		if (!I915_NEED_GFX_HWS(dev))
-			i915_free_hws(dev);
 	}
 
 	intel_teardown_gmbus(dev);
@@ -1948,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  */
 void i915_driver_lastclose(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* On gen6+ we refuse to init without kms enabled, but then the drm core
-	 * goes right around and calls lastclose. Check for this and don't clean
-	 * up anything. */
-	if (!dev_priv)
-		return;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_fbdev_restore_mode(dev);
-		vga_switcheroo_process_delayed_switch();
-		return;
-	}
-
-	i915_gem_lastclose(dev);
-
-	i915_dma_cleanup(dev);
+	intel_fbdev_restore_mode(dev);
+	vga_switcheroo_process_delayed_switch();
 }
 
 void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1988,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 const struct drm_ioctl_desc i915_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2014,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
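[editor's note] Most of what this file loses is the DRI1/UMS dispatch path, including validate_cmd(), whose header comment explains the scheme: each batch dword is decoded just far enough to learn its length so the checker knows where the next command starts, and a length of zero aborts the buffer. A toy restatement of that arithmetic on a made-up 2D command, to make the comment concrete:

/* Hedged sketch of the validate_cmd() length arithmetic removed above. */
#include <stdio.h>

static int cmd_len(unsigned int cmd)
{
	switch ((cmd >> 29) & 0x7) {
	case 0x0:	/* MI: only MI_NOOP (0x0) and MI_FLUSH (0x4), len 1 */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
		case 0x4:
			return 1;
		default:
			return 0;	/* zero = reject the whole buffer */
		}
	case 0x2:	/* 2d commands: dword count lives in bits 7:0 */
		return (cmd & 0xff) + 2;
	default:	/* the full validator also handles class 0x3; see above */
		return 0;
	}
}

int main(void)
{
	unsigned int cmd = 0x54000004;	/* hypothetical 2d blit header */

	/* client field is 0x2, so length = (0x04 & 0xff) + 2 = 6 dwords */
	printf("advance %d dwords\n", cmd_len(cmd));
	return 0;
}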
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c743908b0a7e..1e9c136a874c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -584,6 +584,8 @@ static int i915_drm_suspend(struct drm_device *dev)
 		return error;
 	}
 
+	intel_suspend_gt_powersave(dev);
+
 	/*
 	 * Disable CRTCs directly since we want to preserve sw state
 	 * for _thaw. Also, power gate the CRTC power wells.
@@ -595,15 +597,11 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_dp_mst_suspend(dev);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_hpd_cancel_work(dev_priv);
 
 	intel_suspend_encoders(dev_priv);
 
-	intel_suspend_gt_powersave(dev);
-
 	intel_suspend_hw(dev);
 }
 
@@ -703,12 +701,10 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_modeset_init_hw(dev);
 
-	{
-		spin_lock_irq(&dev_priv->irq_lock);
-		if (dev_priv->display.hpd_irq_setup)
-			dev_priv->display.hpd_irq_setup(dev);
-		spin_unlock_irq(&dev_priv->irq_lock);
-	}
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	intel_dp_mst_resume(dev);
 	drm_modeset_lock_all(dev);
@@ -856,10 +852,7 @@ int i915_reset(struct drm_device *dev)
 	 * was running at the time of the reset (i.e. we weren't VT
 	 * switched away).
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->ums.mm_suspended) {
-		dev_priv->ums.mm_suspended = 0;
-
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
 		dev_priv->gpu_error.reload_in_reset = true;
 
@@ -1395,9 +1388,8 @@ static int intel_runtime_suspend(struct device *device)
 	i915_gem_release_all_mmaps(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_suspend_gt_powersave(dev);
+	intel_runtime_pm_disable_interrupts(dev_priv);
 
 	ret = intel_suspend_complete(dev_priv);
 	if (ret) {
@@ -1578,8 +1570,6 @@ static struct drm_driver driver = {
 	.resume = i915_resume_legacy,
 
 	.device_is_agp = i915_driver_device_is_agp,
-	.master_create = i915_master_create,
-	.master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4ba1aca071da..bb1892d72efe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20141107"
+#define DRIVER_DATE		"20141121"
 
 #undef WARN_ON
 #define WARN_ON(x)		WARN(x, "WARN_ON(" #x ")")
@@ -213,10 +213,15 @@ enum intel_dpll_id {
 	/* real shared dpll ids must be >= 0 */
 	DPLL_ID_PCH_PLL_A = 0,
 	DPLL_ID_PCH_PLL_B = 1,
+	/* hsw/bdw */
 	DPLL_ID_WRPLL1 = 0,
 	DPLL_ID_WRPLL2 = 1,
+	/* skl */
+	DPLL_ID_SKL_DPLL1 = 0,
+	DPLL_ID_SKL_DPLL2 = 1,
+	DPLL_ID_SKL_DPLL3 = 2,
 };
-#define I915_NUM_PLLS 2
+#define I915_NUM_PLLS 3
 
 struct intel_dpll_hw_state {
 	/* i9xx, pch plls */
@@ -227,6 +232,17 @@ struct intel_dpll_hw_state {
 
 	/* hsw, bdw */
 	uint32_t wrpll;
+
+	/* skl */
+	/*
+	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+	 * lower part of ctrl1 and they get shifted into position when
+	 * writing the register. This allows us to easily compare the
+	 * state to share the DPLL.
+	 */
+	uint32_t ctrl1;
+	/* HDMI only, 0 when used for DP */
+	uint32_t cfgcr1, cfgcr2;
 };
 
 struct intel_shared_dpll_config {
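[editor's note] The new ctrl1 comment describes a packing convention: each SKL DPLL owns six bits of DPLL_CTRL1, kept unshifted in the saved state so two states can be compared directly, and shifted into place by 6 * id only when the register is written. A hedged sketch of that shift; the field value is illustrative, not the real DPLL_CTRL1 layout:

/* Hedged sketch of the per-DPLL ctrl1 bit packing described above. */
#include <stdio.h>

int main(void)
{
	unsigned int dpll_id = 2;	/* e.g. SKL_DPLL2 */
	unsigned int ctrl1 = 0x5;	/* saved, unshifted low 6 bits */
	unsigned int reg_bits = (ctrl1 & 0x3f) << (6 * dpll_id);

	/* comparing saved ctrl1 values directly decides DPLL sharing */
	printf("DPLL_CTRL1 contribution: 0x%08x\n", reg_bits);
	return 0;
}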
@@ -256,6 +272,11 @@ struct intel_shared_dpll {
 		     struct intel_dpll_hw_state *hw_state);
 };
 
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
 /* Used by dp and fdi links */
 struct intel_link_m_n {
 	uint32_t	tu;
@@ -306,12 +327,6 @@ struct intel_opregion {
306struct intel_overlay; 327struct intel_overlay;
307struct intel_overlay_error_state; 328struct intel_overlay_error_state;
308 329
309struct drm_local_map;
310
311struct drm_i915_master_private {
312 struct drm_local_map *sarea;
313 struct _drm_i915_sarea *sarea_priv;
314};
315#define I915_FENCE_REG_NONE -1 330#define I915_FENCE_REG_NONE -1
316#define I915_MAX_NUM_FENCES 32 331#define I915_MAX_NUM_FENCES 32
317/* 32 fences + sign bit for FENCE_REG_NONE */ 332/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -510,7 +525,7 @@ struct drm_i915_display_funcs {
510 /* display clock increase/decrease */ 525 /* display clock increase/decrease */
511 /* pll clock increase/decrease */ 526 /* pll clock increase/decrease */
512 527
513 int (*setup_backlight)(struct intel_connector *connector); 528 int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
514 uint32_t (*get_backlight)(struct intel_connector *connector); 529 uint32_t (*get_backlight)(struct intel_connector *connector);
515 void (*set_backlight)(struct intel_connector *connector, 530 void (*set_backlight)(struct intel_connector *connector,
516 uint32_t level); 531 uint32_t level);
@@ -664,6 +679,7 @@ struct intel_context {
664 struct { 679 struct {
665 struct drm_i915_gem_object *state; 680 struct drm_i915_gem_object *state;
666 struct intel_ringbuffer *ringbuf; 681 struct intel_ringbuffer *ringbuf;
682 int unpin_count;
667 } engine[I915_NUM_RINGS]; 683 } engine[I915_NUM_RINGS];
668 684
669 struct list_head link; 685 struct list_head link;
@@ -748,6 +764,7 @@ enum intel_sbi_destination {
748#define QUIRK_INVERT_BRIGHTNESS (1<<2) 764#define QUIRK_INVERT_BRIGHTNESS (1<<2)
749#define QUIRK_BACKLIGHT_PRESENT (1<<3) 765#define QUIRK_BACKLIGHT_PRESENT (1<<3)
750#define QUIRK_PIPEB_FORCE (1<<4) 766#define QUIRK_PIPEB_FORCE (1<<4)
767#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
751 768
752struct intel_fbdev; 769struct intel_fbdev;
753struct intel_fbc_work; 770struct intel_fbc_work;
@@ -799,7 +816,6 @@ struct i915_suspend_saved_registers {
799 u32 saveBLC_HIST_CTL; 816 u32 saveBLC_HIST_CTL;
800 u32 saveBLC_PWM_CTL; 817 u32 saveBLC_PWM_CTL;
801 u32 saveBLC_PWM_CTL2; 818 u32 saveBLC_PWM_CTL2;
802 u32 saveBLC_HIST_CTL_B;
803 u32 saveBLC_CPU_PWM_CTL; 819 u32 saveBLC_CPU_PWM_CTL;
804 u32 saveBLC_CPU_PWM_CTL2; 820 u32 saveBLC_CPU_PWM_CTL2;
805 u32 saveFPB0; 821 u32 saveFPB0;
@@ -978,8 +994,12 @@ struct intel_rps_ei {
978}; 994};
979 995
980struct intel_gen6_power_mgmt { 996struct intel_gen6_power_mgmt {
981 /* work and pm_iir are protected by dev_priv->irq_lock */ 997 /*
998 * work, interrupts_enabled and pm_iir are protected by
999 * dev_priv->irq_lock
1000 */
982 struct work_struct work; 1001 struct work_struct work;
1002 bool interrupts_enabled;
983 u32 pm_iir; 1003 u32 pm_iir;
984 1004
985 /* Frequencies are stored in potentially platform dependent multiples. 1005 /* Frequencies are stored in potentially platform dependent multiples.
@@ -1102,31 +1122,6 @@ struct i915_power_domains {
1102 struct i915_power_well *power_wells; 1122 struct i915_power_well *power_wells;
1103}; 1123};
1104 1124
1105struct i915_dri1_state {
1106 unsigned allow_batchbuffer : 1;
1107 u32 __iomem *gfx_hws_cpu_addr;
1108
1109 unsigned int cpp;
1110 int back_offset;
1111 int front_offset;
1112 int current_page;
1113 int page_flipping;
1114
1115 uint32_t counter;
1116};
1117
1118struct i915_ums_state {
1119 /**
1120 * Flag if the X Server, and thus DRM, is not currently in
1121 * control of the device.
1122 *
1123 * This is set between LeaveVT and EnterVT. It needs to be
1124 * replaced with a semaphore. It also needs to be
1125 * transitioned away from for kernel modesetting.
1126 */
1127 int mm_suspended;
1128};
1129
1130#define MAX_L3_SLICES 2 1125#define MAX_L3_SLICES 2
1131struct intel_l3_parity { 1126struct intel_l3_parity {
1132 u32 *remap_info[MAX_L3_SLICES]; 1127 u32 *remap_info[MAX_L3_SLICES];
@@ -1762,12 +1757,6 @@ struct drm_i915_private {
1762 1757
1763 uint32_t bios_vgacntr; 1758 uint32_t bios_vgacntr;
1764 1759
1765 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1766 * here! */
1767 struct i915_dri1_state dri1;
1768 /* Old ums support infrastructure, same warning applies. */
1769 struct i915_ums_state ums;
1770
1771 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1760 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1772 struct { 1761 struct {
1773 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, 1762 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1957,10 +1946,10 @@ struct drm_i915_gem_object {
1957 unsigned long user_pin_count; 1946 unsigned long user_pin_count;
1958 struct drm_file *pin_filp; 1947 struct drm_file *pin_filp;
1959 1948
1960 /** for phy allocated objects */
1961 struct drm_dma_handle *phys_handle;
1962
1963 union { 1949 union {
1950 /** for phy allocated objects */
1951 struct drm_dma_handle *phys_handle;
1952
1964 struct i915_gem_userptr { 1953 struct i915_gem_userptr {
1965 uintptr_t ptr; 1954 uintptr_t ptr;
1966 unsigned read_only :1; 1955 unsigned read_only :1;
@@ -2326,8 +2315,6 @@ struct i915_params {
2326extern struct i915_params i915 __read_mostly; 2315extern struct i915_params i915 __read_mostly;
2327 2316
2328 /* i915_dma.c */ 2317 /* i915_dma.c */
2329void i915_update_dri1_breadcrumb(struct drm_device *dev);
2330extern void i915_kernel_lost_context(struct drm_device * dev);
2331extern int i915_driver_load(struct drm_device *, unsigned long flags); 2318extern int i915_driver_load(struct drm_device *, unsigned long flags);
2332extern int i915_driver_unload(struct drm_device *); 2319extern int i915_driver_unload(struct drm_device *);
2333extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2320extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2341,9 +2328,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
2341extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2328extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2342 unsigned long arg); 2329 unsigned long arg);
2343#endif 2330#endif
2344extern int i915_emit_box(struct drm_device *dev,
2345 struct drm_clip_rect *box,
2346 int DR1, int DR4);
2347extern int intel_gpu_reset(struct drm_device *dev); 2331extern int intel_gpu_reset(struct drm_device *dev);
2348extern int i915_reset(struct drm_device *dev); 2332extern int i915_reset(struct drm_device *dev);
2349extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2333extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2395,8 +2379,6 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
2395 ibx_display_interrupt_update((dev_priv), (bits), 0) 2379 ibx_display_interrupt_update((dev_priv), (bits), 0)
2396 2380
2397/* i915_gem.c */ 2381/* i915_gem.c */
2398int i915_gem_init_ioctl(struct drm_device *dev, void *data,
2399 struct drm_file *file_priv);
2400int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2382int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2401 struct drm_file *file_priv); 2383 struct drm_file *file_priv);
2402int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2384int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2443,10 +2425,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2443 struct drm_file *file_priv); 2425 struct drm_file *file_priv);
2444int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2426int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2445 struct drm_file *file_priv); 2427 struct drm_file *file_priv);
2446int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2447 struct drm_file *file_priv);
2448int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2449 struct drm_file *file_priv);
2450int i915_gem_set_tiling(struct drm_device *dev, void *data, 2428int i915_gem_set_tiling(struct drm_device *dev, void *data,
2451 struct drm_file *file_priv); 2429 struct drm_file *file_priv);
2452int i915_gem_get_tiling(struct drm_device *dev, void *data, 2430int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2489,7 +2467,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
2489int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2467int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2490void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2468void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2491void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2469void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2492void i915_gem_lastclose(struct drm_device *dev);
2493 2470
2494int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2471int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
2495 int *needs_clflush); 2472 int *needs_clflush);
@@ -2956,8 +2933,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2956void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); 2933void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2957void assert_force_wake_inactive(struct drm_i915_private *dev_priv); 2934void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
2958 2935
2959int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2936int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
2960int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2937int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
2961 2938
2962/* intel_sideband.c */ 2939/* intel_sideband.c */
2963u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); 2940u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 50b842231c26..fd17ccabd8a4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
160} 160}
161 161
162int 162int
163i915_gem_init_ioctl(struct drm_device *dev, void *data,
164 struct drm_file *file)
165{
166 struct drm_i915_private *dev_priv = dev->dev_private;
167 struct drm_i915_gem_init *args = data;
168
169 if (drm_core_check_feature(dev, DRIVER_MODESET))
170 return -ENODEV;
171
172 if (args->gtt_start >= args->gtt_end ||
173 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
174 return -EINVAL;
175
176 /* GEM with user mode setting was never supported on ilk and later. */
177 if (INTEL_INFO(dev)->gen >= 5)
178 return -ENODEV;
179
180 mutex_lock(&dev->struct_mutex);
181 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
182 args->gtt_end);
183 dev_priv->gtt.mappable_end = args->gtt_end;
184 mutex_unlock(&dev->struct_mutex);
185
186 return 0;
187}
188
189int
190i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 163i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
191 struct drm_file *file) 164 struct drm_file *file)
192{ 165{
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
208 return 0; 181 return 0;
209} 182}
210 183
211static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) 184static int
185i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
212{ 186{
213 drm_dma_handle_t *phys = obj->phys_handle; 187 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
188 char *vaddr = obj->phys_handle->vaddr;
189 struct sg_table *st;
190 struct scatterlist *sg;
191 int i;
214 192
215 if (!phys) 193 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
216 return; 194 return -EINVAL;
195
196 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
197 struct page *page;
198 char *src;
199
200 page = shmem_read_mapping_page(mapping, i);
201 if (IS_ERR(page))
202 return PTR_ERR(page);
203
204 src = kmap_atomic(page);
205 memcpy(vaddr, src, PAGE_SIZE);
206 drm_clflush_virt_range(vaddr, PAGE_SIZE);
207 kunmap_atomic(src);
208
209 page_cache_release(page);
210 vaddr += PAGE_SIZE;
211 }
212
213 i915_gem_chipset_flush(obj->base.dev);
214
215 st = kmalloc(sizeof(*st), GFP_KERNEL);
216 if (st == NULL)
217 return -ENOMEM;
218
219 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
220 kfree(st);
221 return -ENOMEM;
222 }
223
224 sg = st->sgl;
225 sg->offset = 0;
226 sg->length = obj->base.size;
227
228 sg_dma_address(sg) = obj->phys_handle->busaddr;
229 sg_dma_len(sg) = obj->base.size;
230
231 obj->pages = st;
232 obj->has_dma_mapping = true;
233 return 0;
234}
235
236static void
237i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
238{
239 int ret;
240
241 BUG_ON(obj->madv == __I915_MADV_PURGED);
217 242
218 if (obj->madv == I915_MADV_WILLNEED) { 243 ret = i915_gem_object_set_to_cpu_domain(obj, true);
244 if (ret) {
245 /* In the event of a disaster, abandon all caches and
246 * hope for the best.
247 */
248 WARN_ON(ret != -EIO);
249 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
250 }
251
252 if (obj->madv == I915_MADV_DONTNEED)
253 obj->dirty = 0;
254
255 if (obj->dirty) {
219 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; 256 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
220 char *vaddr = phys->vaddr; 257 char *vaddr = obj->phys_handle->vaddr;
221 int i; 258 int i;
222 259
223 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 260 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
224 struct page *page = shmem_read_mapping_page(mapping, i); 261 struct page *page;
225 if (!IS_ERR(page)) { 262 char *dst;
226 char *dst = kmap_atomic(page); 263
227 memcpy(dst, vaddr, PAGE_SIZE); 264 page = shmem_read_mapping_page(mapping, i);
228 drm_clflush_virt_range(dst, PAGE_SIZE); 265 if (IS_ERR(page))
229 kunmap_atomic(dst); 266 continue;
230 267
231 set_page_dirty(page); 268 dst = kmap_atomic(page);
269 drm_clflush_virt_range(vaddr, PAGE_SIZE);
270 memcpy(dst, vaddr, PAGE_SIZE);
271 kunmap_atomic(dst);
272
273 set_page_dirty(page);
274 if (obj->madv == I915_MADV_WILLNEED)
232 mark_page_accessed(page); 275 mark_page_accessed(page);
233 page_cache_release(page); 276 page_cache_release(page);
234 }
235 vaddr += PAGE_SIZE; 277 vaddr += PAGE_SIZE;
236 } 278 }
237 i915_gem_chipset_flush(obj->base.dev); 279 obj->dirty = 0;
238 } 280 }
239 281
240#ifdef CONFIG_X86 282 sg_free_table(obj->pages);
241 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); 283 kfree(obj->pages);
242#endif 284
243 drm_pci_free(obj->base.dev, phys); 285 obj->has_dma_mapping = false;
244 obj->phys_handle = NULL; 286}
287
288static void
289i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
290{
291 drm_pci_free(obj->base.dev, obj->phys_handle);
292}
293
294static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
295 .get_pages = i915_gem_object_get_pages_phys,
296 .put_pages = i915_gem_object_put_pages_phys,
297 .release = i915_gem_object_release_phys,
298};
299
300static int
301drop_pages(struct drm_i915_gem_object *obj)
302{
303 struct i915_vma *vma, *next;
304 int ret;
305
306 drm_gem_object_reference(&obj->base);
307 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
308 if (i915_vma_unbind(vma))
309 break;
310
311 ret = i915_gem_object_put_pages(obj);
312 drm_gem_object_unreference(&obj->base);
313
314 return ret;
245} 315}
246 316
247int 317int
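The hunk above replaces the open-coded page copying with a dedicated drm_i915_gem_object_ops table, so a phys-backed object flows through the same get_pages/put_pages lifecycle as any other backing store. A simplified sketch of that pattern, with stand-in types rather than the driver's real structs:

#include <stddef.h>

struct obj;

struct obj_ops {
        int  (*get_pages)(struct obj *o);
        void (*put_pages)(struct obj *o);
        void (*release)(struct obj *o);
};

struct obj {
        const struct obj_ops *ops;
        void *phys_handle;
};

/* attach: swap in the phys backend, then populate through it */
static int attach_phys(struct obj *o, const struct obj_ops *phys_ops,
                       void *handle)
{
        o->phys_handle = handle;
        o->ops = phys_ops;
        return o->ops->get_pages(o);
}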
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
249 int align) 319 int align)
250{ 320{
251 drm_dma_handle_t *phys; 321 drm_dma_handle_t *phys;
252 struct address_space *mapping; 322 int ret;
253 char *vaddr;
254 int i;
255 323
256 if (obj->phys_handle) { 324 if (obj->phys_handle) {
257 if ((unsigned long)obj->phys_handle->vaddr & (align -1)) 325 if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
266 if (obj->base.filp == NULL) 334 if (obj->base.filp == NULL)
267 return -EINVAL; 335 return -EINVAL;
268 336
337 ret = drop_pages(obj);
338 if (ret)
339 return ret;
340
269 /* create a new object */ 341 /* create a new object */
270 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); 342 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
271 if (!phys) 343 if (!phys)
272 return -ENOMEM; 344 return -ENOMEM;
273 345
274 vaddr = phys->vaddr;
275#ifdef CONFIG_X86
276 set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
277#endif
278 mapping = file_inode(obj->base.filp)->i_mapping;
279 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
280 struct page *page;
281 char *src;
282
283 page = shmem_read_mapping_page(mapping, i);
284 if (IS_ERR(page)) {
285#ifdef CONFIG_X86
286 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
287#endif
288 drm_pci_free(obj->base.dev, phys);
289 return PTR_ERR(page);
290 }
291
292 src = kmap_atomic(page);
293 memcpy(vaddr, src, PAGE_SIZE);
294 kunmap_atomic(src);
295
296 mark_page_accessed(page);
297 page_cache_release(page);
298
299 vaddr += PAGE_SIZE;
300 }
301
302 obj->phys_handle = phys; 346 obj->phys_handle = phys;
303 return 0; 347 obj->ops = &i915_gem_phys_ops;
348
349 return i915_gem_object_get_pages(obj);
304} 350}
305 351
306static int 352static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
311 struct drm_device *dev = obj->base.dev; 357 struct drm_device *dev = obj->base.dev;
312 void *vaddr = obj->phys_handle->vaddr + args->offset; 358 void *vaddr = obj->phys_handle->vaddr + args->offset;
313 char __user *user_data = to_user_ptr(args->data_ptr); 359 char __user *user_data = to_user_ptr(args->data_ptr);
360 int ret;
361
362 /* We manually control the domain here and pretend that it
363 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
364 */
365 ret = i915_gem_object_wait_rendering(obj, false);
366 if (ret)
367 return ret;
314 368
315 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 369 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
316 unsigned long unwritten; 370 unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
326 return -EFAULT; 380 return -EFAULT;
327 } 381 }
328 382
383 drm_clflush_virt_range(vaddr, args->size);
329 i915_gem_chipset_flush(dev); 384 i915_gem_chipset_flush(dev);
330 return 0; 385 return 0;
331} 386}
@@ -1048,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1048 * pread/pwrite currently are reading and writing from the CPU 1103 * pread/pwrite currently are reading and writing from the CPU
1049 * perspective, requiring manual detiling by the client. 1104 * perspective, requiring manual detiling by the client.
1050 */ 1105 */
1051 if (obj->phys_handle) {
1052 ret = i915_gem_phys_pwrite(obj, args, file);
1053 goto out;
1054 }
1055
1056 if (obj->tiling_mode == I915_TILING_NONE && 1106 if (obj->tiling_mode == I915_TILING_NONE &&
1057 obj->base.write_domain != I915_GEM_DOMAIN_CPU && 1107 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1058 cpu_write_needs_clflush(obj)) { 1108 cpu_write_needs_clflush(obj)) {
@@ -1062,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 1062 * textures). Fall back to the shmem path in that case. */ 1112 * textures). Fall back to the shmem path in that case. */
1063 } 1113 }
1064 1114
1065 if (ret == -EFAULT || ret == -ENOSPC) 1115 if (ret == -EFAULT || ret == -ENOSPC) {
1066 ret = i915_gem_shmem_pwrite(dev, obj, args, file); 1116 if (obj->phys_handle)
1117 ret = i915_gem_phys_pwrite(obj, args, file);
1118 else
1119 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1120 }
1067 1121
1068out: 1122out:
1069 drm_gem_object_unreference(&obj->base); 1123 drm_gem_object_unreference(&obj->base);
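With phys objects routed through the ops table, pwrite no longer special-cases them up front: the GTT fast path runs first, and only an -EFAULT or -ENOSPC result falls through to the phys or shmem slow path. A sketch of that ordering, with stand-ins for the driver's write helpers:

#include <errno.h>

/* stand-ins for the driver's write paths */
static int gtt_pwrite(void)   { return -EFAULT; } /* e.g. fault on copy */
static int phys_pwrite(void)  { return 0; }
static int shmem_pwrite(void) { return 0; }

static int do_pwrite(int has_phys_handle)
{
        int ret = gtt_pwrite();                 /* fast path first */

        if (ret == -EFAULT || ret == -ENOSPC)   /* fall back only here */
                ret = has_phys_handle ? phys_pwrite() : shmem_pwrite();

        return ret;
}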
@@ -2140,6 +2194,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2140 if (i915_gem_object_needs_bit17_swizzle(obj)) 2194 if (i915_gem_object_needs_bit17_swizzle(obj))
2141 i915_gem_object_do_bit_17_swizzle(obj); 2195 i915_gem_object_do_bit_17_swizzle(obj);
2142 2196
2197 if (obj->tiling_mode != I915_TILING_NONE &&
2198 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2199 i915_gem_object_pin_pages(obj);
2200
2143 return 0; 2201 return 0;
2144 2202
2145err_pages: 2203err_pages:
@@ -2438,15 +2496,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
2438 ring->outstanding_lazy_seqno = 0; 2496 ring->outstanding_lazy_seqno = 0;
2439 ring->preallocated_lazy_request = NULL; 2497 ring->preallocated_lazy_request = NULL;
2440 2498
2441 if (!dev_priv->ums.mm_suspended) { 2499 i915_queue_hangcheck(ring->dev);
2442 i915_queue_hangcheck(ring->dev);
2443 2500
2444 cancel_delayed_work_sync(&dev_priv->mm.idle_work); 2501 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2445 queue_delayed_work(dev_priv->wq, 2502 queue_delayed_work(dev_priv->wq,
2446 &dev_priv->mm.retire_work, 2503 &dev_priv->mm.retire_work,
2447 round_jiffies_up_relative(HZ)); 2504 round_jiffies_up_relative(HZ));
2448 intel_mark_busy(dev_priv->dev); 2505 intel_mark_busy(dev_priv->dev);
2449 }
2450 2506
2451 if (out_seqno) 2507 if (out_seqno)
2452 *out_seqno = request->seqno; 2508 *out_seqno = request->seqno;
@@ -2513,12 +2569,18 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2513 2569
2514static void i915_gem_free_request(struct drm_i915_gem_request *request) 2570static void i915_gem_free_request(struct drm_i915_gem_request *request)
2515{ 2571{
2572 struct intel_context *ctx = request->ctx;
2573
2516 list_del(&request->list); 2574 list_del(&request->list);
2517 i915_gem_request_remove_from_client(request); 2575 i915_gem_request_remove_from_client(request);
2518 2576
2519 if (request->ctx) 2577 if (i915.enable_execlists && ctx) {
2520 i915_gem_context_unreference(request->ctx); 2578 struct intel_engine_cs *ring = request->ring;
2521 2579
2580 if (ctx != ring->default_context)
2581 intel_lr_context_unpin(ring, ctx);
2582 i915_gem_context_unreference(ctx);
2583 }
2522 kfree(request); 2584 kfree(request);
2523} 2585}
2524 2586
@@ -2573,6 +2635,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2573 } 2635 }
2574 2636
2575 /* 2637 /*
 2638 * Clear the execlists queue before freeing the requests, as those
2639 * are the ones that keep the context and ringbuffer backing objects
2640 * pinned in place.
2641 */
2642 while (!list_empty(&ring->execlist_queue)) {
2643 struct intel_ctx_submit_request *submit_req;
2644
2645 submit_req = list_first_entry(&ring->execlist_queue,
2646 struct intel_ctx_submit_request,
2647 execlist_link);
2648 list_del(&submit_req->execlist_link);
2649 intel_runtime_pm_put(dev_priv);
2650 i915_gem_context_unreference(submit_req->ctx);
2651 kfree(submit_req);
2652 }
2653
2654 /*
2576 * We must free the requests after all the corresponding objects have 2655 * We must free the requests after all the corresponding objects have
 2577 * been moved off active lists, which is the same order the normal 2656 * been moved off active lists, which is the same order the normal
 2578 * retire_requests function uses. This is important if objects hold 2657 * retire_requests function uses. This is important if objects hold
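A compact sketch of the drain loop added above: entries are popped front-first off the execlist queue, and the references each entry pins (context, runtime pm) are dropped before the request lists are torn down. The list API is the kernel one used in the hunk; the struct is a stand-in.

#include <linux/list.h>
#include <linux/slab.h>

struct submit_req {
        struct list_head link;
        /* pinned references (context, rpm) live here */
};

static void drain_execlist_queue(struct list_head *queue)
{
        while (!list_empty(queue)) {
                struct submit_req *req =
                        list_first_entry(queue, struct submit_req, link);

                list_del(&req->link);
                /* drop the references this entry held before freeing */
                kfree(req);
        }
}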
@@ -2589,18 +2668,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2589 i915_gem_free_request(request); 2668 i915_gem_free_request(request);
2590 } 2669 }
2591 2670
2592 while (!list_empty(&ring->execlist_queue)) {
2593 struct intel_ctx_submit_request *submit_req;
2594
2595 submit_req = list_first_entry(&ring->execlist_queue,
2596 struct intel_ctx_submit_request,
2597 execlist_link);
2598 list_del(&submit_req->execlist_link);
2599 intel_runtime_pm_put(dev_priv);
2600 i915_gem_context_unreference(submit_req->ctx);
2601 kfree(submit_req);
2602 }
2603
 2604 /* These may not have been flushed before the reset, do so now */ 2671 /* These may not have been flushed before the reset, do so now */
2605 kfree(ring->preallocated_lazy_request); 2672 kfree(ring->preallocated_lazy_request);
2606 ring->preallocated_lazy_request = NULL; 2673 ring->preallocated_lazy_request = NULL;
@@ -2737,6 +2804,15 @@ i915_gem_retire_requests(struct drm_device *dev)
2737 for_each_ring(ring, dev_priv, i) { 2804 for_each_ring(ring, dev_priv, i) {
2738 i915_gem_retire_requests_ring(ring); 2805 i915_gem_retire_requests_ring(ring);
2739 idle &= list_empty(&ring->request_list); 2806 idle &= list_empty(&ring->request_list);
2807 if (i915.enable_execlists) {
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&ring->execlist_lock, flags);
2811 idle &= list_empty(&ring->execlist_queue);
2812 spin_unlock_irqrestore(&ring->execlist_lock, flags);
2813
2814 intel_execlists_retire_requests(ring);
2815 }
2740 } 2816 }
2741 2817
2742 if (idle) 2818 if (idle)
@@ -3527,7 +3603,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3527 * Stolen memory is always coherent with the GPU as it is explicitly 3603 * Stolen memory is always coherent with the GPU as it is explicitly
3528 * marked as wc by the system, or the system is cache-coherent. 3604 * marked as wc by the system, or the system is cache-coherent.
3529 */ 3605 */
3530 if (obj->stolen) 3606 if (obj->stolen || obj->phys_handle)
3531 return false; 3607 return false;
3532 3608
3533 /* If the GPU is snooping the contents of the CPU cache, 3609 /* If the GPU is snooping the contents of the CPU cache,
@@ -4320,6 +4396,7 @@ int
4320i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4396i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4321 struct drm_file *file_priv) 4397 struct drm_file *file_priv)
4322{ 4398{
4399 struct drm_i915_private *dev_priv = dev->dev_private;
4323 struct drm_i915_gem_madvise *args = data; 4400 struct drm_i915_gem_madvise *args = data;
4324 struct drm_i915_gem_object *obj; 4401 struct drm_i915_gem_object *obj;
4325 int ret; 4402 int ret;
@@ -4347,6 +4424,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4347 goto out; 4424 goto out;
4348 } 4425 }
4349 4426
4427 if (obj->pages &&
4428 obj->tiling_mode != I915_TILING_NONE &&
4429 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4430 if (obj->madv == I915_MADV_WILLNEED)
4431 i915_gem_object_unpin_pages(obj);
4432 if (args->madv == I915_MADV_WILLNEED)
4433 i915_gem_object_pin_pages(obj);
4434 }
4435
4350 if (obj->madv != __I915_MADV_PURGED) 4436 if (obj->madv != __I915_MADV_PURGED)
4351 obj->madv = args->madv; 4437 obj->madv = args->madv;
4352 4438
@@ -4489,8 +4575,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4489 } 4575 }
4490 } 4576 }
4491 4577
4492 i915_gem_object_detach_phys(obj);
4493
4494 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4578 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4495 * before progressing. */ 4579 * before progressing. */
4496 if (obj->stolen) 4580 if (obj->stolen)
@@ -4498,6 +4582,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4498 4582
4499 WARN_ON(obj->frontbuffer_bits); 4583 WARN_ON(obj->frontbuffer_bits);
4500 4584
4585 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4586 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4587 obj->tiling_mode != I915_TILING_NONE)
4588 i915_gem_object_unpin_pages(obj);
4589
4501 if (WARN_ON(obj->pages_pin_count)) 4590 if (WARN_ON(obj->pages_pin_count))
4502 obj->pages_pin_count = 0; 4591 obj->pages_pin_count = 0;
4503 if (discard_backing_storage(obj)) 4592 if (discard_backing_storage(obj))
@@ -4570,9 +4659,6 @@ i915_gem_suspend(struct drm_device *dev)
4570 int ret = 0; 4659 int ret = 0;
4571 4660
4572 mutex_lock(&dev->struct_mutex); 4661 mutex_lock(&dev->struct_mutex);
4573 if (dev_priv->ums.mm_suspended)
4574 goto err;
4575
4576 ret = i915_gpu_idle(dev); 4662 ret = i915_gpu_idle(dev);
4577 if (ret) 4663 if (ret)
4578 goto err; 4664 goto err;
@@ -4583,15 +4669,7 @@ i915_gem_suspend(struct drm_device *dev)
4583 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4669 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4584 i915_gem_evict_everything(dev); 4670 i915_gem_evict_everything(dev);
4585 4671
4586 i915_kernel_lost_context(dev);
4587 i915_gem_stop_ringbuffers(dev); 4672 i915_gem_stop_ringbuffers(dev);
4588
4589 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4590 * We need to replace this with a semaphore, or something.
4591 * And not confound ums.mm_suspended!
4592 */
4593 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4594 DRIVER_MODESET);
4595 mutex_unlock(&dev->struct_mutex); 4673 mutex_unlock(&dev->struct_mutex);
4596 4674
4597 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 4675 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4882,9 +4960,6 @@ int i915_gem_init(struct drm_device *dev)
4882 } 4960 }
4883 mutex_unlock(&dev->struct_mutex); 4961 mutex_unlock(&dev->struct_mutex);
4884 4962
4885 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4886 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4887 dev_priv->dri1.allow_batchbuffer = 1;
4888 return ret; 4963 return ret;
4889} 4964}
4890 4965
@@ -4899,74 +4974,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4899 dev_priv->gt.cleanup_ring(ring); 4974 dev_priv->gt.cleanup_ring(ring);
4900} 4975}
4901 4976
4902int
4903i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4904 struct drm_file *file_priv)
4905{
4906 struct drm_i915_private *dev_priv = dev->dev_private;
4907 int ret;
4908
4909 if (drm_core_check_feature(dev, DRIVER_MODESET))
4910 return 0;
4911
4912 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4913 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4914 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4915 }
4916
4917 mutex_lock(&dev->struct_mutex);
4918 dev_priv->ums.mm_suspended = 0;
4919
4920 ret = i915_gem_init_hw(dev);
4921 if (ret != 0) {
4922 mutex_unlock(&dev->struct_mutex);
4923 return ret;
4924 }
4925
4926 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4927
4928 ret = drm_irq_install(dev, dev->pdev->irq);
4929 if (ret)
4930 goto cleanup_ringbuffer;
4931 mutex_unlock(&dev->struct_mutex);
4932
4933 return 0;
4934
4935cleanup_ringbuffer:
4936 i915_gem_cleanup_ringbuffer(dev);
4937 dev_priv->ums.mm_suspended = 1;
4938 mutex_unlock(&dev->struct_mutex);
4939
4940 return ret;
4941}
4942
4943int
4944i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4945 struct drm_file *file_priv)
4946{
4947 if (drm_core_check_feature(dev, DRIVER_MODESET))
4948 return 0;
4949
4950 mutex_lock(&dev->struct_mutex);
4951 drm_irq_uninstall(dev);
4952 mutex_unlock(&dev->struct_mutex);
4953
4954 return i915_gem_suspend(dev);
4955}
4956
4957void
4958i915_gem_lastclose(struct drm_device *dev)
4959{
4960 int ret;
4961
4962 if (drm_core_check_feature(dev, DRIVER_MODESET))
4963 return;
4964
4965 ret = i915_gem_suspend(dev);
4966 if (ret)
4967 DRM_ERROR("failed to idle hardware: %d\n", ret);
4968}
4969
4970static void 4977static void
4971init_ring_lists(struct intel_engine_cs *ring) 4978init_ring_lists(struct intel_engine_cs *ring)
4972{ 4979{
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 7d3257111737..d17ff435f276 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,6 +88,7 @@
88#include <drm/drmP.h> 88#include <drm/drmP.h>
89#include <drm/i915_drm.h> 89#include <drm/i915_drm.h>
90#include "i915_drv.h" 90#include "i915_drv.h"
91#include "i915_trace.h"
91 92
92/* This is a HW constraint. The value below is the largest known requirement 93/* This is a HW constraint. The value below is the largest known requirement
93 * I've seen in a spec to date, and that was a workaround for a non-shipping 94 * I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
137 struct intel_context *ctx = container_of(ctx_ref, 138 struct intel_context *ctx = container_of(ctx_ref,
138 typeof(*ctx), ref); 139 typeof(*ctx), ref);
139 140
141 trace_i915_context_free(ctx);
142
140 if (i915.enable_execlists) 143 if (i915.enable_execlists)
141 intel_lr_context_free(ctx); 144 intel_lr_context_free(ctx);
142 145
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
274 ctx->ppgtt = ppgtt; 277 ctx->ppgtt = ppgtt;
275 } 278 }
276 279
280 trace_i915_context_create(ctx);
281
277 return ctx; 282 return ctx;
278 283
279err_unpin: 284err_unpin:
@@ -549,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
549 from = ring->last_context; 554 from = ring->last_context;
550 555
551 if (to->ppgtt) { 556 if (to->ppgtt) {
557 trace_switch_mm(ring, to);
552 ret = to->ppgtt->switch_mm(to->ppgtt, ring); 558 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
553 if (ret) 559 if (ret)
554 goto unpin_out; 560 goto unpin_out;
@@ -629,7 +635,7 @@ done:
629 635
630 if (uninitialized) { 636 if (uninitialized) {
631 if (ring->init_context) { 637 if (ring->init_context) {
632 ret = ring->init_context(ring); 638 ret = ring->init_context(ring, to);
633 if (ret) 639 if (ret)
634 DRM_ERROR("ring init context: %d\n", ret); 640 DRM_ERROR("ring init context: %d\n", ret);
635 } 641 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2b02fcfae534..f06027ba3ee5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1023 return 0; 1023 return 0;
1024} 1024}
1025 1025
1026static int
1027i915_emit_box(struct intel_engine_cs *ring,
1028 struct drm_clip_rect *box,
1029 int DR1, int DR4)
1030{
1031 int ret;
1032
1033 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
1034 box->y2 <= 0 || box->x2 <= 0) {
1035 DRM_ERROR("Bad box %d,%d..%d,%d\n",
1036 box->x1, box->y1, box->x2, box->y2);
1037 return -EINVAL;
1038 }
1039
1040 if (INTEL_INFO(ring->dev)->gen >= 4) {
1041 ret = intel_ring_begin(ring, 4);
1042 if (ret)
1043 return ret;
1044
1045 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
1046 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1047 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1048 intel_ring_emit(ring, DR4);
1049 } else {
1050 ret = intel_ring_begin(ring, 6);
1051 if (ret)
1052 return ret;
1053
1054 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
1055 intel_ring_emit(ring, DR1);
1056 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1057 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1058 intel_ring_emit(ring, DR4);
1059 intel_ring_emit(ring, 0);
1060 }
1061 intel_ring_advance(ring);
1062
1063 return 0;
1064}
1065
1066
1026int 1067int
1027i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, 1068i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1028 struct intel_engine_cs *ring, 1069 struct intel_engine_cs *ring,
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1151 exec_len = args->batch_len; 1192 exec_len = args->batch_len;
1152 if (cliprects) { 1193 if (cliprects) {
1153 for (i = 0; i < args->num_cliprects; i++) { 1194 for (i = 0; i < args->num_cliprects; i++) {
1154 ret = i915_emit_box(dev, &cliprects[i], 1195 ret = i915_emit_box(ring, &cliprects[i],
1155 args->DR1, args->DR4); 1196 args->DR1, args->DR4);
1156 if (ret) 1197 if (ret)
1157 goto error; 1198 goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1300 if (ret) 1341 if (ret)
1301 goto pre_mutex_err; 1342 goto pre_mutex_err;
1302 1343
1303 if (dev_priv->ums.mm_suspended) {
1304 mutex_unlock(&dev->struct_mutex);
1305 ret = -EBUSY;
1306 goto pre_mutex_err;
1307 }
1308
1309 ctx = i915_gem_validate_context(dev, file, ring, ctx_id); 1344 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1310 if (IS_ERR(ctx)) { 1345 if (IS_ERR(ctx)) {
1311 mutex_unlock(&dev->struct_mutex); 1346 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c1cf3329108c..171f6eafdeee 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -43,7 +43,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
43 if (IS_GEN8(dev)) 43 if (IS_GEN8(dev))
44 has_full_ppgtt = false; /* XXX why? */ 44 has_full_ppgtt = false; /* XXX why? */
45 45
46 if (enable_ppgtt == 0 || !has_aliasing_ppgtt) 46 /*
47 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
48 * execlists, the sole mechanism available to submit work.
49 */
50 if (INTEL_INFO(dev)->gen < 9 &&
51 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
47 return 0; 52 return 0;
48 53
49 if (enable_ppgtt == 1) 54 if (enable_ppgtt == 1)
@@ -164,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
164 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 169 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
165 pte |= GEN6_PTE_ADDR_ENCODE(addr); 170 pte |= GEN6_PTE_ADDR_ENCODE(addr);
166 171
167 /* Mark the page as writeable. Other platforms don't have a
168 * setting for read-only/writable, so this matches that behavior.
169 */
170 if (!(flags & PTE_READ_ONLY)) 172 if (!(flags & PTE_READ_ONLY))
171 pte |= BYT_PTE_WRITEABLE; 173 pte |= BYT_PTE_WRITEABLE;
172 174
@@ -1174,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
1174 1176
1175 ppgtt->file_priv = fpriv; 1177 ppgtt->file_priv = fpriv;
1176 1178
1179 trace_i915_ppgtt_create(&ppgtt->base);
1180
1177 return ppgtt; 1181 return ppgtt;
1178} 1182}
1179 1183
@@ -1182,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
1182 struct i915_hw_ppgtt *ppgtt = 1186 struct i915_hw_ppgtt *ppgtt =
1183 container_of(kref, struct i915_hw_ppgtt, ref); 1187 container_of(kref, struct i915_hw_ppgtt, ref);
1184 1188
1189 trace_i915_ppgtt_release(&ppgtt->base);
1190
1185 /* vmas should already be unbound */ 1191 /* vmas should already be unbound */
1186 WARN_ON(!list_empty(&ppgtt->base.active_list)); 1192 WARN_ON(!list_empty(&ppgtt->base.active_list));
1187 WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 1193 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1658,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
1658 } 1664 }
1659} 1665}
1660 1666
1661int i915_gem_setup_global_gtt(struct drm_device *dev, 1667static int i915_gem_setup_global_gtt(struct drm_device *dev,
1662 unsigned long start, 1668 unsigned long start,
1663 unsigned long mappable_end, 1669 unsigned long mappable_end,
1664 unsigned long end) 1670 unsigned long end)
1665{ 1671{
1666 /* Let GEM Manage all of the aperture. 1672 /* Let GEM Manage all of the aperture.
1667 * 1673 *
@@ -1952,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
1952 * Only the snoop bit has meaning for CHV, the rest is 1958 * Only the snoop bit has meaning for CHV, the rest is
1953 * ignored. 1959 * ignored.
1954 * 1960 *
 1955 * Note that the hardware enforces snooping for all page 1961 * The hardware will never snoop for certain types of accesses:
1956 * table accesses. The snoop bit is actually ignored for 1962 * - CPU GTT (GMADR->GGTT->no snoop->memory)
1957 * PDEs. 1963 * - PPGTT page tables
1964 * - some other special cycles
1965 *
1966 * As with BDW, we also need to consider the following for GT accesses:
1967 * "For GGTT, there is NO pat_sel[2:0] from the entry,
1968 * so RTL will always use the value corresponding to
1969 * pat_sel = 000".
1970 * Which means we must set the snoop bit in PAT entry 0
1971 * in order to keep the global status page working.
1958 */ 1972 */
1959 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | 1973 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1960 GEN8_PPAT(1, 0) | 1974 GEN8_PPAT(1, 0) |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d0562d0ef6ec..beaf4bcfdac8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -274,8 +274,6 @@ struct i915_hw_ppgtt {
274 274
275int i915_gem_gtt_init(struct drm_device *dev); 275int i915_gem_gtt_init(struct drm_device *dev);
276void i915_gem_init_global_gtt(struct drm_device *dev); 276void i915_gem_init_global_gtt(struct drm_device *dev);
277int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
278 unsigned long mappable_end, unsigned long end);
279void i915_global_gtt_cleanup(struct drm_device *dev); 277void i915_global_gtt_cleanup(struct drm_device *dev);
280 278
281 279
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cd7f4734c9f8..4727a4e2c87c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -178,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
178 } 178 }
179 break; 179 break;
180 } 180 }
181
182 /* check for L-shaped memory aka modified enhanced addressing */
183 if (IS_GEN4(dev)) {
184 uint32_t ddc2 = I915_READ(DCC2);
185
186 if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
187 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
188 }
189
181 if (dcc == 0xffffffff) { 190 if (dcc == 0xffffffff) {
182 DRM_ERROR("Couldn't read from MCHBAR. " 191 DRM_ERROR("Couldn't read from MCHBAR. "
183 "Disabling tiling.\n"); 192 "Disabling tiling.\n");
@@ -380,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
380 ret = i915_gem_object_ggtt_unbind(obj); 389 ret = i915_gem_object_ggtt_unbind(obj);
381 390
382 if (ret == 0) { 391 if (ret == 0) {
392 if (obj->pages &&
393 obj->madv == I915_MADV_WILLNEED &&
394 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
395 if (args->tiling_mode == I915_TILING_NONE)
396 i915_gem_object_unpin_pages(obj);
397 if (obj->tiling_mode == I915_TILING_NONE)
398 i915_gem_object_pin_pages(obj);
399 }
400
383 obj->fence_dirty = 401 obj->fence_dirty =
384 obj->last_fenced_seqno || 402 obj->last_fenced_seqno ||
385 obj->fence_reg != I915_FENCE_REG_NONE; 403 obj->fence_reg != I915_FENCE_REG_NONE;
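The QUIRK_PIN_SWIZZLED_PAGES additions above keep the pages of tiled, WILLNEED objects pinned on L-shaped-memory machines, so the physical pages whose addresses feed bit-17 swizzling cannot be relocated underneath the object; the set_tiling and madvise hunks therefore pin or unpin on each transition. A minimal sketch of that transition, with simplified stand-ins for the driver's helpers:

#include <assert.h>

enum tiling { TILING_NONE, TILING_X, TILING_Y };

struct obj {
        int pages_pin_count;
        enum tiling tiling;
};

static void pin_pages(struct obj *o)   { o->pages_pin_count++; }
static void unpin_pages(struct obj *o)
{
        assert(o->pages_pin_count > 0);
        o->pages_pin_count--;
}

/* mirror the hunk: pinned while tiled, unpinned while linear */
static void set_tiling_quirked(struct obj *o, enum tiling new_tiling)
{
        if (new_tiling == TILING_NONE && o->tiling != TILING_NONE)
                unpin_pages(o);
        if (o->tiling == TILING_NONE && new_tiling != TILING_NONE)
                pin_pages(o);
        o->tiling = new_tiling;
}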
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89a2f3dbf956..cdaee6ce05f8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
242 242
243static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 243static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
244 struct drm_device *dev, 244 struct drm_device *dev,
245 struct drm_i915_error_ring *ring) 245 struct drm_i915_error_state *error,
246 int ring_idx)
246{ 247{
248 struct drm_i915_error_ring *ring = &error->ring[ring_idx];
249
247 if (!ring->valid) 250 if (!ring->valid)
248 return; 251 return;
249 252
253 err_printf(m, "%s command stream:\n", ring_str(ring_idx));
250 err_printf(m, " HEAD: 0x%08x\n", ring->head); 254 err_printf(m, " HEAD: 0x%08x\n", ring->head);
251 err_printf(m, " TAIL: 0x%08x\n", ring->tail); 255 err_printf(m, " TAIL: 0x%08x\n", ring->tail);
252 err_printf(m, " CTL: 0x%08x\n", ring->ctl); 256 err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
388 if (INTEL_INFO(dev)->gen == 7) 392 if (INTEL_INFO(dev)->gen == 7)
389 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 393 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
390 394
391 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 395 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
392 err_printf(m, "%s command stream:\n", ring_str(i)); 396 i915_ring_error_state(m, dev, error, i);
393 i915_ring_error_state(m, dev, &error->ring[i]);
394 }
395 397
396 for (i = 0; i < error->vm_count; i++) { 398 for (i = 0; i < error->vm_count; i++) {
397 err_printf(m, "vm[%d]\n", i); 399 err_printf(m, "vm[%d]\n", i);
@@ -807,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
807 809
808 if (!error->semaphore_obj) 810 if (!error->semaphore_obj)
809 error->semaphore_obj = 811 error->semaphore_obj =
810 i915_error_object_create(dev_priv, 812 i915_error_ggtt_object_create(dev_priv,
811 dev_priv->semaphore_obj, 813 dev_priv->semaphore_obj);
812 &dev_priv->gtt.base);
813 814
814 for_each_ring(to, dev_priv, i) { 815 for_each_ring(to, dev_priv, i) {
815 int idx; 816 int idx;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5fff2870a17b..5908580d7c15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -138,6 +138,8 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
138 POSTING_READ(type##IMR); \ 138 POSTING_READ(type##IMR); \
139} while (0) 139} while (0)
140 140
141static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
142
141/* For display hotplug interrupt */ 143/* For display hotplug interrupt */
142void 144void
143ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 145ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
@@ -200,6 +202,21 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
200 ilk_update_gt_irq(dev_priv, mask, 0); 202 ilk_update_gt_irq(dev_priv, mask, 0);
201} 203}
202 204
205static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
206{
207 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
208}
209
210static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
211{
212 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
213}
214
215static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
216{
217 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
218}
219
203/** 220/**
204 * snb_update_pm_irq - update GEN6_PMIMR 221 * snb_update_pm_irq - update GEN6_PMIMR
205 * @dev_priv: driver private 222 * @dev_priv: driver private
@@ -223,8 +240,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
223 240
224 if (new_val != dev_priv->pm_irq_mask) { 241 if (new_val != dev_priv->pm_irq_mask) {
225 dev_priv->pm_irq_mask = new_val; 242 dev_priv->pm_irq_mask = new_val;
226 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 243 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
227 POSTING_READ(GEN6_PMIMR); 244 POSTING_READ(gen6_pm_imr(dev_priv));
228 } 245 }
229} 246}
230 247
@@ -238,44 +255,50 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
238 snb_update_pm_irq(dev_priv, mask, 0); 255 snb_update_pm_irq(dev_priv, mask, 0);
239} 256}
240 257
241/** 258void gen6_reset_rps_interrupts(struct drm_device *dev)
242 * bdw_update_pm_irq - update GT interrupt 2
243 * @dev_priv: driver private
244 * @interrupt_mask: mask of interrupt bits to update
245 * @enabled_irq_mask: mask of interrupt bits to enable
246 *
247 * Copied from the snb function, updated with relevant register offsets
248 */
249static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
250 uint32_t interrupt_mask,
251 uint32_t enabled_irq_mask)
252{ 259{
253 uint32_t new_val; 260 struct drm_i915_private *dev_priv = dev->dev_private;
254 261 uint32_t reg = gen6_pm_iir(dev_priv);
255 assert_spin_locked(&dev_priv->irq_lock);
256
257 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
258 return;
259
260 new_val = dev_priv->pm_irq_mask;
261 new_val &= ~interrupt_mask;
262 new_val |= (~enabled_irq_mask & interrupt_mask);
263 262
264 if (new_val != dev_priv->pm_irq_mask) { 263 spin_lock_irq(&dev_priv->irq_lock);
265 dev_priv->pm_irq_mask = new_val; 264 I915_WRITE(reg, dev_priv->pm_rps_events);
266 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); 265 I915_WRITE(reg, dev_priv->pm_rps_events);
267 POSTING_READ(GEN8_GT_IMR(2)); 266 POSTING_READ(reg);
268 } 267 spin_unlock_irq(&dev_priv->irq_lock);
269} 268}
270 269
271void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 270void gen6_enable_rps_interrupts(struct drm_device *dev)
272{ 271{
273 bdw_update_pm_irq(dev_priv, mask, mask); 272 struct drm_i915_private *dev_priv = dev->dev_private;
273
274 spin_lock_irq(&dev_priv->irq_lock);
275 WARN_ON(dev_priv->rps.pm_iir);
276 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
277 dev_priv->rps.interrupts_enabled = true;
278 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
279 spin_unlock_irq(&dev_priv->irq_lock);
274} 280}
275 281
276void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 282void gen6_disable_rps_interrupts(struct drm_device *dev)
277{ 283{
278 bdw_update_pm_irq(dev_priv, mask, 0); 284 struct drm_i915_private *dev_priv = dev->dev_private;
285
286 spin_lock_irq(&dev_priv->irq_lock);
287 dev_priv->rps.interrupts_enabled = false;
288 spin_unlock_irq(&dev_priv->irq_lock);
289
290 cancel_work_sync(&dev_priv->rps.work);
291
292 I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
293 ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
294 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
295 ~dev_priv->pm_rps_events);
296
297 spin_lock_irq(&dev_priv->irq_lock);
298 dev_priv->rps.pm_iir = 0;
299 spin_unlock_irq(&dev_priv->irq_lock);
300
301 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
279} 302}
280 303
281/** 304/**
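The rework above makes disabling race-free by ordering three steps: clear rps.interrupts_enabled under irq_lock, cancel_work_sync() the worker (which now bails early when it sees the flag, per the gen6_pm_rps_work change below), and only then mask and ack the hardware. A sketch of the flag-then-cancel ordering using the same kernel primitives:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct rps_state {
        spinlock_t lock;
        bool interrupts_enabled;
        struct work_struct work;
};

static void rps_disable(struct rps_state *rps)
{
        spin_lock_irq(&rps->lock);
        rps->interrupts_enabled = false; /* worker checks this first */
        spin_unlock_irq(&rps->lock);

        /* waits for a running worker; it exits early via the flag,
         * so it cannot re-arm the interrupt behind our back */
        cancel_work_sync(&rps->work);
}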
@@ -980,7 +1003,6 @@ static void notify_ring(struct drm_device *dev,
980 trace_i915_gem_request_complete(ring); 1003 trace_i915_gem_request_complete(ring);
981 1004
982 wake_up_all(&ring->irq_queue); 1005 wake_up_all(&ring->irq_queue);
983 i915_queue_hangcheck(dev);
984} 1006}
985 1007
986static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, 1008static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1116,14 +1138,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
1116 int new_delay, adj; 1138 int new_delay, adj;
1117 1139
1118 spin_lock_irq(&dev_priv->irq_lock); 1140 spin_lock_irq(&dev_priv->irq_lock);
 1141 /* Speed up work cancellation when disabling rps interrupts. */
1142 if (!dev_priv->rps.interrupts_enabled) {
1143 spin_unlock_irq(&dev_priv->irq_lock);
1144 return;
1145 }
1119 pm_iir = dev_priv->rps.pm_iir; 1146 pm_iir = dev_priv->rps.pm_iir;
1120 dev_priv->rps.pm_iir = 0; 1147 dev_priv->rps.pm_iir = 0;
1121 if (INTEL_INFO(dev_priv->dev)->gen >= 8) 1148 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1122 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1149 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1123 else {
1124 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1125 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1126 }
1127 spin_unlock_irq(&dev_priv->irq_lock); 1150 spin_unlock_irq(&dev_priv->irq_lock);
1128 1151
1129 /* Make sure we didn't queue anything we're not going to process. */ 1152 /* Make sure we didn't queue anything we're not going to process. */
@@ -1325,19 +1348,6 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1325 ivybridge_parity_error_irq_handler(dev, gt_iir); 1348 ivybridge_parity_error_irq_handler(dev, gt_iir);
1326} 1349}
1327 1350
1328static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1329{
1330 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1331 return;
1332
1333 spin_lock(&dev_priv->irq_lock);
1334 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1335 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1336 spin_unlock(&dev_priv->irq_lock);
1337
1338 queue_work(dev_priv->wq, &dev_priv->rps.work);
1339}
1340
1341static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1351static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1342 struct drm_i915_private *dev_priv, 1352 struct drm_i915_private *dev_priv,
1343 u32 master_ctl) 1353 u32 master_ctl)
@@ -1399,7 +1409,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1399 I915_WRITE(GEN8_GT_IIR(2), 1409 I915_WRITE(GEN8_GT_IIR(2),
1400 tmp & dev_priv->pm_rps_events); 1410 tmp & dev_priv->pm_rps_events);
1401 ret = IRQ_HANDLED; 1411 ret = IRQ_HANDLED;
1402 gen8_rps_irq_handler(dev_priv, tmp); 1412 gen6_rps_irq_handler(dev_priv, tmp);
1403 } else 1413 } else
1404 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1414 DRM_ERROR("The master control interrupt lied (PM)!\n");
1405 } 1415 }
@@ -1699,15 +1709,24 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1699 * the work queue. */ 1709 * the work queue. */
1700static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1710static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1701{ 1711{
1712 /* TODO: RPS on GEN9+ is not supported yet. */
1713 if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
1714 "GEN9+: unexpected RPS IRQ\n"))
1715 return;
1716
1702 if (pm_iir & dev_priv->pm_rps_events) { 1717 if (pm_iir & dev_priv->pm_rps_events) {
1703 spin_lock(&dev_priv->irq_lock); 1718 spin_lock(&dev_priv->irq_lock);
1704 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1705 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1719 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1720 if (dev_priv->rps.interrupts_enabled) {
1721 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1722 queue_work(dev_priv->wq, &dev_priv->rps.work);
1723 }
1706 spin_unlock(&dev_priv->irq_lock); 1724 spin_unlock(&dev_priv->irq_lock);
1707
1708 queue_work(dev_priv->wq, &dev_priv->rps.work);
1709 } 1725 }
1710 1726
1727 if (INTEL_INFO(dev_priv)->gen >= 8)
1728 return;
1729
1711 if (HAS_VEBOX(dev_priv->dev)) { 1730 if (HAS_VEBOX(dev_priv->dev)) {
1712 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1731 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1713 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1732 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
@@ -2222,6 +2241,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2222 irqreturn_t ret = IRQ_NONE; 2241 irqreturn_t ret = IRQ_NONE;
2223 uint32_t tmp = 0; 2242 uint32_t tmp = 0;
2224 enum pipe pipe; 2243 enum pipe pipe;
2244 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2245
2246 if (IS_GEN9(dev))
2247 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2248 GEN9_AUX_CHANNEL_D;
2225 2249
2226 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2250 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2227 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2251 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2254,7 +2278,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2254 if (tmp) { 2278 if (tmp) {
2255 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2279 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2256 ret = IRQ_HANDLED; 2280 ret = IRQ_HANDLED;
2257 if (tmp & GEN8_AUX_CHANNEL_A) 2281
2282 if (tmp & aux_mask)
2258 dp_aux_irq_handler(dev); 2283 dp_aux_irq_handler(dev);
2259 else 2284 else
2260 DRM_ERROR("Unexpected DE Port interrupt\n"); 2285 DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -3036,10 +3061,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
3036void i915_queue_hangcheck(struct drm_device *dev) 3061void i915_queue_hangcheck(struct drm_device *dev)
3037{ 3062{
3038 struct drm_i915_private *dev_priv = dev->dev_private; 3063 struct drm_i915_private *dev_priv = dev->dev_private;
3064 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
3065
3039 if (!i915.enable_hangcheck) 3066 if (!i915.enable_hangcheck)
3040 return; 3067 return;
3041 3068
3042 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3069 /* Don't continually defer the hangcheck, but make sure it is active */
3070 if (timer_pending(timer))
3071 return;
3072 mod_timer(timer,
3043 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3073 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3044} 3074}
3045 3075
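The i915_queue_hangcheck() change pairs with the notify_ring() hunk above: hangcheck is no longer re-queued from every user interrupt, and mod_timer() is only issued when the timer is idle, so a busy GPU can no longer keep pushing the deadline out. A sketch of that guard with the same timer API:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void queue_hangcheck(struct timer_list *timer, unsigned long delay)
{
        /* keep an already-armed deadline instead of deferring it */
        if (timer_pending(timer))
                return;

        mod_timer(timer, round_jiffies_up(jiffies + delay));
}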
@@ -3488,11 +3518,14 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3488 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3518 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3489 uint32_t de_pipe_enables; 3519 uint32_t de_pipe_enables;
3490 int pipe; 3520 int pipe;
3521 u32 aux_en = GEN8_AUX_CHANNEL_A;
3491 3522
3492 if (IS_GEN9(dev_priv)) 3523 if (IS_GEN9(dev_priv)) {
3493 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3524 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3494 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3525 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3495 else 3526 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3527 GEN9_AUX_CHANNEL_D;
3528 } else
3496 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3529 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3497 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3530 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3498 3531
@@ -3510,7 +3543,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3510 3543 dev_priv->de_irq_mask[pipe],
3511 3544 de_pipe_enables);
3512 3545
3513 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3546 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3514 3547 }
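The (~aux_en, aux_en) argument pair reflects the driver's IMR/IER convention: bits cleared in the mask register are unmasked, bits set in the enable register are enabled. A rough sketch of what a GEN5_IRQ_INIT-style helper boils down to for this register block (an assumption from its usage here, not the macro's literal body):

static void de_port_irq_init(struct drm_i915_private *dev_priv,
			     u32 imr_val, u32 ier_val)
{
	I915_WRITE(GEN8_DE_PORT_IIR, 0xffffffff); /* clear stale events */
	I915_WRITE(GEN8_DE_PORT_IMR, imr_val);    /* 1 = masked */
	I915_WRITE(GEN8_DE_PORT_IER, ier_val);    /* 1 = enabled */
	POSTING_READ(GEN8_DE_PORT_IMR);           /* flush posted writes */
}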
3515 3548
3516 3549 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3533,34 +3566,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
3533 3566 static int cherryview_irq_postinstall(struct drm_device *dev)
3534 3567 {
3535 3568 struct drm_i915_private *dev_priv = dev->dev_private;
3536 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3537 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3538 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3539 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3540 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3541 PIPE_CRC_DONE_INTERRUPT_STATUS;
3542 int pipe;
3543
3544 /*
3545 * Leave vblank interrupts masked initially. enable/disable will
3546 * toggle them based on usage.
3547 */
3548 dev_priv->irq_mask = ~enable_mask;
3549 3569
3550 for_each_pipe(dev_priv, pipe)
3570 vlv_display_irq_postinstall(dev_priv);
3551 I915_WRITE(PIPESTAT(pipe), 0xffff);
3552
3553 spin_lock_irq(&dev_priv->irq_lock);
3554 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3555 for_each_pipe(dev_priv, pipe)
3556 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3557 spin_unlock_irq(&dev_priv->irq_lock);
3558
3559 I915_WRITE(VLV_IIR, 0xffffffff);
3560 I915_WRITE(VLV_IIR, 0xffffffff);
3561 I915_WRITE(VLV_IER, enable_mask);
3562 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3563 POSTING_READ(VLV_IMR);
3564 3571
3565 3572 gen8_gt_irq_postinstall(dev_priv);
3566 3573
@@ -3580,6 +3587,20 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3580 3587 gen8_irq_reset(dev);
3581 3588 }
3582 3589
3590static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3591{
3592 /* Interrupt setup is already guaranteed to be single-threaded, this is
3593 * just to make the assert_spin_locked check happy. */
3594 spin_lock_irq(&dev_priv->irq_lock);
3595 if (dev_priv->display_irqs_enabled)
3596 valleyview_display_irqs_uninstall(dev_priv);
3597 spin_unlock_irq(&dev_priv->irq_lock);
3598
3599 vlv_display_irq_reset(dev_priv);
3600
3601 dev_priv->irq_mask = 0;
3602}
3603
3583 3604 static void valleyview_irq_uninstall(struct drm_device *dev)
3584 3605 {
3585 3606 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3593,22 +3614,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3593 3614
3594 3615 I915_WRITE(HWSTAM, 0xffffffff);
3595 3616
3596 /* Interrupt setup is already guaranteed to be single-threaded, this is
3617 vlv_display_irq_uninstall(dev_priv);
3597 * just to make the assert_spin_locked check happy. */
3598 spin_lock_irq(&dev_priv->irq_lock);
3599 if (dev_priv->display_irqs_enabled)
3600 valleyview_display_irqs_uninstall(dev_priv);
3601 spin_unlock_irq(&dev_priv->irq_lock);
3602
3603 vlv_display_irq_reset(dev_priv);
3604
3605 dev_priv->irq_mask = 0;
3606 3618 }
3607 3619
3608 3620 static void cherryview_irq_uninstall(struct drm_device *dev)
3609 3621 {
3610 3622 struct drm_i915_private *dev_priv = dev->dev_private;
3611 int pipe;
3612 3623
3613 3624 if (!dev_priv)
3614 3625 return;
@@ -3620,13 +3631,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
3620 3631
3621 3632 GEN5_IRQ_RESET(GEN8_PCU_);
3622 3633
3623 I915_WRITE(PORT_HOTPLUG_EN, 0);
3634 vlv_display_irq_uninstall(dev_priv);
3624 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3625
3626 for_each_pipe(dev_priv, pipe)
3627 I915_WRITE(PIPESTAT(pipe), 0xffff);
3628
3629 GEN5_IRQ_RESET(VLV_);
3630 3635 }
3631 3636
3632 3637 static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3760,8 +3765,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3760 3765 I915_WRITE16(IIR, iir & ~flip_mask);
3761 3766 new_iir = I915_READ16(IIR); /* Flush posted writes */
3762 3767
3763 i915_update_dri1_breadcrumb(dev);
3764
3765 3768 if (iir & I915_USER_INTERRUPT)
3766 3769 notify_ring(dev, &dev_priv->ring[RCS]);
3767 3770
@@ -3998,8 +4001,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3998 4001 iir = new_iir;
3999 4002 } while (iir & ~flip_mask);
4000 4003
4001 i915_update_dri1_breadcrumb(dev);
4002
4003 4004 return ret;
4004 4005 }
4005 4006
@@ -4227,8 +4228,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4227 4228 iir = new_iir;
4228 4229 }
4229 4230
4230 i915_update_dri1_breadcrumb(dev);
4231
4232 4231 return ret;
4233 4232 }
4234 4233
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d43fa0e627f8..3102907a96a7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -248,6 +248,16 @@
248 248 #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
249 249 #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
250 250 #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
251/* SKL ones */
252#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
253#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
254#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
255#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
256#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
257#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
258#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
259#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
260#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
251 261 #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
252 262 #define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
253 263 #define MI_SEMAPHORE_UPDATE (1<<21)
@@ -314,6 +324,8 @@
314 324 #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
315 325 #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
316 326
327#define MI_PREDICATE_SRC0 (0x2400)
328#define MI_PREDICATE_SRC1 (0x2408)
317 329
318 330 #define MI_PREDICATE_RESULT_2 (0x2214)
319 331 #define LOWER_SLICE_ENABLED (1<<0)
@@ -564,6 +576,7 @@ enum punit_power_well {
564 576 #define PUNIT_REG_GPU_LFM 0xd3
565 577 #define PUNIT_REG_GPU_FREQ_REQ 0xd4
566 578 #define PUNIT_REG_GPU_FREQ_STS 0xd8
579#define GPLLENABLE (1<<4)
567 580 #define GENFREQSTATUS (1<<0)
568 581 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
569 582 #define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -2030,6 +2043,8 @@ enum punit_power_well {
2030 2043 #define DCC_ADDRESSING_MODE_MASK (3 << 0)
2031 2044 #define DCC_CHANNEL_XOR_DISABLE (1 << 10)
2032 2045 #define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
2046#define DCC2 0x10204
2047#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
2033 2048
2034 2049 /* Pineview MCH register contains DDR3 setting */
2035 2050 #define CSHRDDR3CTL 0x101a8
@@ -2313,7 +2328,6 @@ enum punit_power_well {
2313 2328
2314 2329 #define GEN6_GT_THREAD_STATUS_REG 0x13805c
2315 2330 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
2316#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
2317 2331
2318 2332 #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
2319 2333 #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -4904,6 +4918,18 @@ enum punit_power_well {
4904 4918 #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
4905 4919 #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
4906 4920
4921#define _PSA_CTL 0x68180
4922#define _PSB_CTL 0x68980
4923#define PS_ENABLE (1<<31)
4924#define _PSA_WIN_SZ 0x68174
4925#define _PSB_WIN_SZ 0x68974
4926#define _PSA_WIN_POS 0x68170
4927#define _PSB_WIN_POS 0x68970
4928
4929#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
4930#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
4931#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
4932
4907 4933 /* legacy palette */
4908 4934 #define _LGC_PALETTE_A 0x4a000
4909 4935 #define _LGC_PALETTE_B 0x4a800
@@ -5048,6 +5074,9 @@ enum punit_power_well {
5048 5074 #define GEN8_DE_PORT_IIR 0x44448
5049 5075 #define GEN8_DE_PORT_IER 0x4444c
5050 5076 #define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
5077#define GEN9_AUX_CHANNEL_D (1 << 27)
5078#define GEN9_AUX_CHANNEL_C (1 << 26)
5079#define GEN9_AUX_CHANNEL_B (1 << 25)
5051 5080 #define GEN8_AUX_CHANNEL_A (1 << 0)
5052 5081
5053 5082 #define GEN8_DE_MISC_ISR 0x44460
@@ -5131,6 +5160,7 @@ enum punit_power_well {
5131 5160 /* GEN8 chicken */
5132 5161 #define HDC_CHICKEN0 0x7300
5133 5162 #define HDC_FORCE_NON_COHERENT (1<<4)
5163#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
5134 5164 #define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
5135 5165
5136 5166 /* WaCatErrorRejectionIssue */
@@ -6010,11 +6040,12 @@ enum punit_power_well {
6010 6040 #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
6011 6041 #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
6012 6042 #define DISPLAY_IPS_CONTROL 0x19
6043#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
6013 6044 #define GEN6_PCODE_DATA 0x138128
6014 6045 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
6015 6046 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
6047#define GEN6_PCODE_DATA1 0x13812C
6016 6048
6017#define GEN9_PCODE_DATA1 0x13812C
6018 6049 #define GEN9_PCODE_READ_MEM_LATENCY 0x6
6019 6050 #define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
6020 6051 #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
@@ -6427,6 +6458,83 @@ enum punit_power_well {
6427 6458 #define LCPLL_CD_SOURCE_FCLK (1<<21)
6428 6459 #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
6429 6460
6461/*
6462 * SKL Clocks
6463 */
6464
6465/* CDCLK_CTL */
6466#define CDCLK_CTL 0x46000
6467#define CDCLK_FREQ_SEL_MASK (3<<26)
6468#define CDCLK_FREQ_450_432 (0<<26)
6469#define CDCLK_FREQ_540 (1<<26)
6470#define CDCLK_FREQ_337_308 (2<<26)
6471#define CDCLK_FREQ_675_617 (3<<26)
6472#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
6473
6474/* LCPLL_CTL */
6475#define LCPLL1_CTL 0x46010
6476#define LCPLL2_CTL 0x46014
6477#define LCPLL_PLL_ENABLE (1<<31)
6478
6479/* DPLL control1 */
6480#define DPLL_CTRL1 0x6C058
6481#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
6482#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
6483#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
6484#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
6485#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
6486#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
6487#define DPLL_CRTL1_LINK_RATE_2700 0
6488#define DPLL_CRTL1_LINK_RATE_1350 1
6489#define DPLL_CRTL1_LINK_RATE_810 2
6490#define DPLL_CRTL1_LINK_RATE_1620 3
6491#define DPLL_CRTL1_LINK_RATE_1080 4
6492#define DPLL_CRTL1_LINK_RATE_2160 5
6493
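Each DPLL id owns a contiguous 6-bit field in DPLL_CTRL1, which is what the (id)*6 arithmetic above encodes. A standalone check of the layout (a userspace sketch; the CRTL spelling is kept as in the patch):

#include <stdio.h>

#define DPLL_CTRL1_HDMI_MODE(id)	(1 << ((id) * 6 + 5))
#define DPLL_CRTL1_LINK_RATE_SHIFT(id)	((id) * 6 + 1)
#define DPLL_CTRL1_OVERRIDE(id)		(1 << ((id) * 6))

int main(void)
{
	/* DPLL id 1: override = bit 6, link rate = bits 9:7,
	 * SSC = bit 10, HDMI mode = bit 11. */
	printf("override bit %d\n", __builtin_ctz(DPLL_CTRL1_OVERRIDE(1)));
	printf("link rate shift %d\n", DPLL_CRTL1_LINK_RATE_SHIFT(1));
	printf("hdmi mode bit %d\n", __builtin_ctz(DPLL_CTRL1_HDMI_MODE(1)));
	return 0;
}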
6494/* DPLL control2 */
6495#define DPLL_CTRL2 0x6C05C
6496#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
6497#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
6498#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
6499#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
6500#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
6501
6502/* DPLL Status */
6503#define DPLL_STATUS 0x6C060
6504#define DPLL_LOCK(id) (1<<((id)*8))
6505
6506/* DPLL cfg */
6507#define DPLL1_CFGCR1 0x6C040
6508#define DPLL2_CFGCR1 0x6C048
6509#define DPLL3_CFGCR1 0x6C050
6510#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
6511#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
6512#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
6513#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
6514
6515#define DPLL1_CFGCR2 0x6C044
6516#define DPLL2_CFGCR2 0x6C04C
6517#define DPLL3_CFGCR2 0x6C054
6518#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
6519#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
6520#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
6521#define DPLL_CFGCR2_KDIV_MASK (3<<5)
6522#define DPLL_CFGCR2_KDIV(x) (x<<5)
6523#define DPLL_CFGCR2_KDIV_5 (0<<5)
6524#define DPLL_CFGCR2_KDIV_2 (1<<5)
6525#define DPLL_CFGCR2_KDIV_3 (2<<5)
6526#define DPLL_CFGCR2_KDIV_1 (3<<5)
6527#define DPLL_CFGCR2_PDIV_MASK (7<<2)
6528#define DPLL_CFGCR2_PDIV(x) (x<<2)
6529#define DPLL_CFGCR2_PDIV_1 (0<<2)
6530#define DPLL_CFGCR2_PDIV_2 (1<<2)
6531#define DPLL_CFGCR2_PDIV_3 (2<<2)
6532#define DPLL_CFGCR2_PDIV_7 (4<<2)
6533#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
6534
6535#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
6536#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
6537
6430 6538 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
6431 6539  * since on HSW we can't write to it using I915_WRITE. */
6432 6540 #define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c77a1f..dfe661743398 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
203 203 i915_save_display_reg(dev);
204 204
205 205 /* LVDS state */
206 if (HAS_PCH_SPLIT(dev)) {
207 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
208 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
209 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
206 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
207 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
208 else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
209 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
210 } else if (IS_VALLEYVIEW(dev)) {
211 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
212 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
213
214 dev_priv->regfile.saveBLC_HIST_CTL =
215 I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
216 dev_priv->regfile.saveBLC_HIST_CTL_B =
217 I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
218 } else {
219 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
220 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
221 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
222 if (IS_MOBILE(dev) && !IS_I830(dev))
223 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
224 }
225
226 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
227 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
228 210
211 /* Panel power sequencer */
229 212 if (HAS_PCH_SPLIT(dev)) {
213 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
230 214 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
231 215 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
232 216 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
233 } else {
217 } else if (!IS_VALLEYVIEW(dev)) {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
234 219 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
235 220 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
236 221 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
259 244 if (drm_core_check_feature(dev, DRIVER_MODESET))
260 245 mask = ~LVDS_PORT_EN;
261 246
247 /* LVDS state */
262 248 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
263 249 I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
264 250 else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
265 251 I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
266 252
267 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
253 /* Panel power sequencer */
268 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
269
270 254 if (HAS_PCH_SPLIT(dev)) {
271 255 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
272 256 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
273 257 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
274 258 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
275 I915_WRITE(RSTDBYCTL,
259 } else if (!IS_VALLEYVIEW(dev)) {
276 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
277 } else if (IS_VALLEYVIEW(dev)) {
278 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
279 dev_priv->regfile.saveBLC_HIST_CTL);
280 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
281 dev_priv->regfile.saveBLC_HIST_CTL);
282 } else {
283 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
284 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
285 260 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
286 261 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
287 262 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -368,6 +343,8 @@ int i915_restore_state(struct drm_device *dev)
368 343 I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
369 344 I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
370 345 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
346 I915_WRITE(RSTDBYCTL,
347 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
371 348 } else {
372 349 I915_WRITE(IER, dev_priv->regfile.saveIER);
373 350 I915_WRITE(IMR, dev_priv->regfile.saveIMR);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f5aa0067755a..751d4ad14d62 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
587 587 TP_printk("new_freq=%u", __entry->freq)
588 588 );
589 589
590/**
591 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
592 *
593 * With full ppgtt enabled, each process using drm will allocate at least one
594 * translation table. With these traces it is possible to keep track of the
595 * allocation and of the lifetime of the tables; this can be used during
596 * testing/debug to verify that we are not leaking ppgtts.
597 * These traces identify the ppgtt through the vm pointer, which is also printed
598 * by the i915_vma_bind and i915_vma_unbind tracepoints.
599 */
600DECLARE_EVENT_CLASS(i915_ppgtt,
601 TP_PROTO(struct i915_address_space *vm),
602 TP_ARGS(vm),
603
604 TP_STRUCT__entry(
605 __field(struct i915_address_space *, vm)
606 __field(u32, dev)
607 ),
608
609 TP_fast_assign(
610 __entry->vm = vm;
611 __entry->dev = vm->dev->primary->index;
612 ),
613
614 TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
615)
616
617DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
618 TP_PROTO(struct i915_address_space *vm),
619 TP_ARGS(vm)
620);
621
622DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
623 TP_PROTO(struct i915_address_space *vm),
624 TP_ARGS(vm)
625);
626
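A hedged sketch of how the pair is meant to be used (the call sites are an assumption and are not part of this hunk; in this series they would sit in the ppgtt alloc and release paths):

	/* at ppgtt allocation */
	trace_i915_ppgtt_create(&ppgtt->base);

	/* at the final unref */
	trace_i915_ppgtt_release(&ppgtt->base);

Every create matched by a release on the same vm pointer over a test run means no ppgtt leaked; an unmatched create points straight at the leak.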
627/**
628 * DOC: i915_context_create and i915_context_free tracepoints
629 *
630 * These tracepoints are used to track creation and deletion of contexts.
631 * If full ppgtt is enabled, they also print the address of the vm assigned to
632 * the context.
633 */
634DECLARE_EVENT_CLASS(i915_context,
635 TP_PROTO(struct intel_context *ctx),
636 TP_ARGS(ctx),
637
638 TP_STRUCT__entry(
639 __field(u32, dev)
640 __field(struct intel_context *, ctx)
641 __field(struct i915_address_space *, vm)
642 ),
643
644 TP_fast_assign(
645 __entry->ctx = ctx;
646 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
647 __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
648 ),
649
650 TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
651 __entry->dev, __entry->ctx, __entry->vm)
652)
653
654DEFINE_EVENT(i915_context, i915_context_create,
655 TP_PROTO(struct intel_context *ctx),
656 TP_ARGS(ctx)
657);
658
659DEFINE_EVENT(i915_context, i915_context_free,
660 TP_PROTO(struct intel_context *ctx),
661 TP_ARGS(ctx)
662);
663
664/**
665 * DOC: switch_mm tracepoint
666 *
667 * This tracepoint allows tracking of the mm switch, which is an important point
667 * in the lifetime of the vm in the legacy submission path. It is emitted
668 * only when full ppgtt is enabled.
670 */
671TRACE_EVENT(switch_mm,
672 TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
673
674 TP_ARGS(ring, to),
675
676 TP_STRUCT__entry(
677 __field(u32, ring)
678 __field(struct intel_context *, to)
679 __field(struct i915_address_space *, vm)
680 __field(u32, dev)
681 ),
682
683 TP_fast_assign(
684 __entry->ring = ring->id;
685 __entry->to = to;
686 __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
687 __entry->dev = ring->dev->primary->index;
688 ),
689
690 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
691 __entry->dev, __entry->ring, __entry->to, __entry->vm)
692);
693
590 694 #endif /* _I915_TRACE_H_ */
591 695
592 696 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 480da593e6c0..d10fe3e9c49f 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
270 270 }
271 271 /* FIXME: regfile.save TV & SDVO state */
272 272
273 /* Panel fitter */
274 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
275 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
276 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
277 }
278
273 279 /* Backlight */
274 280 if (INTEL_INFO(dev)->gen <= 4)
275 281 pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
284 290 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
285 291 if (INTEL_INFO(dev)->gen >= 4)
286 292 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
293 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
287 294 }
288 295
289 296 return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
313 320 if (INTEL_INFO(dev)->gen >= 4)
314 321 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
315 322 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
323 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
324 }
325
326 /* Panel fitter */
327 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
328 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
329 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
316 330 }
317 331
318 332 /* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 44c49dfe1096..2c7ed5cb29c0 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -107,7 +107,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
107 107 tmp &= ~bits_elda;
108 108 I915_WRITE(reg_elda, tmp);
109 109
110 for (i = 0; i < eld[2]; i++)
110 for (i = 0; i < drm_eld_size(eld) / 4; i++)
111 111 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
112 112 return false;
113 113
@@ -162,7 +162,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
162 162 len = (tmp >> 9) & 0x1f; /* ELD buffer size */
163 163 I915_WRITE(G4X_AUD_CNTL_ST, tmp);
164 164
165 len = min_t(int, eld[2], len);
165 len = min(drm_eld_size(eld) / 4, len);
166 166 DRM_DEBUG_DRIVER("ELD size %d\n", len);
167 167 for (i = 0; i < len; i++)
168 168 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
@@ -194,6 +194,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
194 194 /* Invalidate ELD */
195 195 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
196 196 tmp &= ~AUDIO_ELD_VALID(pipe);
197 tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
197 198 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
198 199 }
199 200
@@ -209,7 +210,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
209 210 int len, i;
210 211
211 212 DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
212 pipe_name(pipe), eld[2]);
213 pipe_name(pipe), drm_eld_size(eld));
213 214
214 215 /* Enable audio presence detect, invalidate ELD */
215 216 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -230,8 +231,8 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
230 231 I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
231 232
232 233 /* Up to 84 bytes of hw ELD buffer */
233 len = min_t(int, eld[2], 21);
234 len = min(drm_eld_size(eld), 84);
234 for (i = 0; i < len; i++)
235 for (i = 0; i < len / 4; i++)
235 236 I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
236 237
237 238 /* ELD valid */
@@ -320,7 +321,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
320 321 int aud_cntrl_st2;
321 322
322 323 DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
323 port_name(port), pipe_name(pipe), eld[2]);
324 port_name(port), pipe_name(pipe), drm_eld_size(eld));
324 325
325 326 /*
326 327 * FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -364,8 +365,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
364 365 I915_WRITE(aud_cntl_st, tmp);
365 366
366 367 /* Up to 84 bytes of hw ELD buffer */
367 len = min_t(int, eld[2], 21);
368 len = min(drm_eld_size(eld), 84);
368 for (i = 0; i < len; i++)
369 for (i = 0; i < len / 4; i++)
369 370 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
370 371
371 372 /* ELD valid */
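All four hunks above switch from reading eld[2] directly to drm_eld_size(), presumably added alongside these callers (drm_edid.c is touched in this merge). Assuming the standard ELD layout, where eld[2] holds the baseline block length in 4-byte units, its semantics are roughly:

/* ELD = 4-byte header block + baseline block, so total size in bytes: */
static inline int drm_eld_size_sketch(const uint8_t *eld)
{
	return 4 + eld[2] * 4;
}

That also explains the paired loop changes: the old code iterated eld[2] dwords directly, the new code divides the byte size by four.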
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 68703cecdefc..e6b45cd150d3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -670,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
670 670 return (refclk * n * 100) / (p * r);
671 671 }
672 672
673static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
674 uint32_t dpll)
675{
676 uint32_t cfgcr1_reg, cfgcr2_reg;
677 uint32_t cfgcr1_val, cfgcr2_val;
678 uint32_t p0, p1, p2, dco_freq;
679
680 cfgcr1_reg = GET_CFG_CR1_REG(dpll);
681 cfgcr2_reg = GET_CFG_CR2_REG(dpll);
682
683 cfgcr1_val = I915_READ(cfgcr1_reg);
684 cfgcr2_val = I915_READ(cfgcr2_reg);
685
686 p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
687 p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
688
689 if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
690 p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
691 else
692 p1 = 1;
693
694
695 switch (p0) {
696 case DPLL_CFGCR2_PDIV_1:
697 p0 = 1;
698 break;
699 case DPLL_CFGCR2_PDIV_2:
700 p0 = 2;
701 break;
702 case DPLL_CFGCR2_PDIV_3:
703 p0 = 3;
704 break;
705 case DPLL_CFGCR2_PDIV_7:
706 p0 = 7;
707 break;
708 }
709
710 switch (p2) {
711 case DPLL_CFGCR2_KDIV_5:
712 p2 = 5;
713 break;
714 case DPLL_CFGCR2_KDIV_2:
715 p2 = 2;
716 break;
717 case DPLL_CFGCR2_KDIV_3:
718 p2 = 3;
719 break;
720 case DPLL_CFGCR2_KDIV_1:
721 p2 = 1;
722 break;
723 }
724
725 dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
726
727 dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
728 1000) / 0x8000;
729
730 return dco_freq / (p0 * p1 * p2 * 5);
731}
732
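skl_calc_wrpll_link() above is the read-back inverse of skl_ddi_calculate_wrpll() further down. A worked check of the arithmetic (my numbers, not from the patch): dco_integer = 371 and dco_fraction = 0x2000 give dco_freq = 371 * 24 MHz + (8192 * 24 / 0x8000) MHz = 8904 + 6 = 8910 MHz; with p0 = 3, p1 = 2 and p2 = 2 the result is 8910 / (3 * 2 * 2 * 5) = 148.5 MHz, i.e. the AFE clock divided back down by 5 to the pixel clock.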
733
734static void skl_ddi_clock_get(struct intel_encoder *encoder,
735 struct intel_crtc_config *pipe_config)
736{
737 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
738 int link_clock = 0;
739 uint32_t dpll_ctl1, dpll;
740
741 dpll = pipe_config->ddi_pll_sel;
742
743 dpll_ctl1 = I915_READ(DPLL_CTRL1);
744
745 if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
746 link_clock = skl_calc_wrpll_link(dev_priv, dpll);
747 } else {
748 link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
749 link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
750
751 switch (link_clock) {
752 case DPLL_CRTL1_LINK_RATE_810:
753 link_clock = 81000;
754 break;
755 case DPLL_CRTL1_LINK_RATE_1350:
756 link_clock = 135000;
757 break;
758 case DPLL_CRTL1_LINK_RATE_2700:
759 link_clock = 270000;
760 break;
761 default:
762 WARN(1, "Unsupported link rate\n");
763 break;
764 }
765 link_clock *= 2;
766 }
767
768 pipe_config->port_clock = link_clock;
769
770 if (pipe_config->has_dp_encoder)
771 pipe_config->adjusted_mode.crtc_clock =
772 intel_dotclock_calculate(pipe_config->port_clock,
773 &pipe_config->dp_m_n);
774 else
775 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
776}
777
673 778 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
674 779 struct intel_crtc_config *pipe_config)
675 780 {
@@ -828,6 +933,228 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
828 933 return true;
829 934 }
830 935
936struct skl_wrpll_params {
937 uint32_t dco_fraction;
938 uint32_t dco_integer;
939 uint32_t qdiv_ratio;
940 uint32_t qdiv_mode;
941 uint32_t kdiv;
942 uint32_t pdiv;
943 uint32_t central_freq;
944};
945
946static void
947skl_ddi_calculate_wrpll(int clock /* in Hz */,
948 struct skl_wrpll_params *wrpll_params)
949{
950 uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
951 uint64_t dco_central_freq[3] = {8400000000ULL,
952 9000000000ULL,
953 9600000000ULL};
954 uint32_t min_dco_deviation = 400;
955 uint32_t min_dco_index = 3;
956 uint32_t P0[4] = {1, 2, 3, 7};
957 uint32_t P2[4] = {1, 2, 3, 5};
958 bool found = false;
959 uint32_t candidate_p = 0;
960 uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
961 uint32_t candidate_p2[3] = {0};
962 uint32_t dco_central_freq_deviation[3];
963 uint32_t i, P1, k, dco_count;
964 bool retry_with_odd = false;
965 uint64_t dco_freq;
966
967 /* Determine P0, P1 or P2 */
968 for (dco_count = 0; dco_count < 3; dco_count++) {
969 found = false;
970 candidate_p =
971 div64_u64(dco_central_freq[dco_count], afe_clock);
972 if (retry_with_odd == false)
973 candidate_p = (candidate_p % 2 == 0 ?
974 candidate_p : candidate_p + 1);
975
976 for (P1 = 1; P1 < candidate_p; P1++) {
977 for (i = 0; i < 4; i++) {
978 if (!(P0[i] != 1 || P1 == 1))
979 continue;
980
981 for (k = 0; k < 4; k++) {
982 if (P1 != 1 && P2[k] != 2)
983 continue;
984
985 if (candidate_p == P0[i] * P1 * P2[k]) {
986 /* Found possible P0, P1, P2 */
987 found = true;
988 candidate_p0[dco_count] = P0[i];
989 candidate_p1[dco_count] = P1;
990 candidate_p2[dco_count] = P2[k];
991 goto found;
992 }
993
994 }
995 }
996 }
997
998found:
999 if (found) {
1000 dco_central_freq_deviation[dco_count] =
1001 div64_u64(10000 *
1002 abs_diff((candidate_p * afe_clock),
1003 dco_central_freq[dco_count]),
1004 dco_central_freq[dco_count]);
1005
1006 if (dco_central_freq_deviation[dco_count] <
1007 min_dco_deviation) {
1008 min_dco_deviation =
1009 dco_central_freq_deviation[dco_count];
1010 min_dco_index = dco_count;
1011 }
1012 }
1013
1014 if (min_dco_index > 2 && dco_count == 2) {
1015 retry_with_odd = true;
1016 dco_count = 0;
1017 }
1018 }
1019
1020 if (min_dco_index > 2) {
1021 WARN(1, "No valid values found for the given pixel clock\n");
1022 } else {
1023 wrpll_params->central_freq = dco_central_freq[min_dco_index];
1024
1025 switch (dco_central_freq[min_dco_index]) {
1026 case 9600000000ULL:
1027 wrpll_params->central_freq = 0;
1028 break;
1029 case 9000000000ULL:
1030 wrpll_params->central_freq = 1;
1031 break;
1032 case 8400000000ULL:
1033 wrpll_params->central_freq = 3;
1034 }
1035
1036 switch (candidate_p0[min_dco_index]) {
1037 case 1:
1038 wrpll_params->pdiv = 0;
1039 break;
1040 case 2:
1041 wrpll_params->pdiv = 1;
1042 break;
1043 case 3:
1044 wrpll_params->pdiv = 2;
1045 break;
1046 case 7:
1047 wrpll_params->pdiv = 4;
1048 break;
1049 default:
1050 WARN(1, "Incorrect PDiv\n");
1051 }
1052
1053 switch (candidate_p2[min_dco_index]) {
1054 case 5:
1055 wrpll_params->kdiv = 0;
1056 break;
1057 case 2:
1058 wrpll_params->kdiv = 1;
1059 break;
1060 case 3:
1061 wrpll_params->kdiv = 2;
1062 break;
1063 case 1:
1064 wrpll_params->kdiv = 3;
1065 break;
1066 default:
1067 WARN(1, "Incorrect KDiv\n");
1068 }
1069
1070 wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
1071 wrpll_params->qdiv_mode =
1072 (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
1073
1074 dco_freq = candidate_p0[min_dco_index] *
1075 candidate_p1[min_dco_index] *
1076 candidate_p2[min_dco_index] * afe_clock;
1077
1078 /*
1079 * Intermediate values are in Hz.
1080 * Divide by MHz to match bspec.
1081 */
1082 wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
1083 wrpll_params->dco_fraction =
1084 div_u64(((div_u64(dco_freq, 24) -
1085 wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
1086
1087 }
1088}
1089
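Worked example of the divider search above (my arithmetic, not from the patch): a 148.5 MHz pixel clock gives afe_clock = 742.5 MHz. Against the 8.4 GHz central frequency, candidate_p = 11, rounded up to the even 12 = 3*2*2, but the DCO deviation |12 * 742.5 - 8400| / 8400 is about 6.1%, over the 4% budget (min_dco_deviation = 400 is in units of 0.01%). Against 9.0 GHz the same 12 = 3*2*2 lands at |8910 - 9000| / 9000 = 1% and is accepted, so p0 = 3, p1 = 2, p2 = 2, dco_freq = 8910 MHz, dco_integer = 371 and dco_fraction = 0.25 * 0x8000 = 0x2000 — matching the read-back example under skl_calc_wrpll_link().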
1090
1091static bool
1092skl_ddi_pll_select(struct intel_crtc *intel_crtc,
1093 struct intel_encoder *intel_encoder,
1094 int clock)
1095{
1096 struct intel_shared_dpll *pll;
1097 uint32_t ctrl1, cfgcr1, cfgcr2;
1098
1099 /*
1100 * See comment in intel_dpll_hw_state to understand why we always use 0
1101 * as the DPLL id in this function.
1102 */
1103
1104 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1105
1106 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
1107 struct skl_wrpll_params wrpll_params = { 0, };
1108
1109 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1110
1111 skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
1112
1113 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1114 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1115 wrpll_params.dco_integer;
1116
1117 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1118 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1119 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1120 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1121 wrpll_params.central_freq;
1122 } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
1123 struct drm_encoder *encoder = &intel_encoder->base;
1124 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1125
1126 switch (intel_dp->link_bw) {
1127 case DP_LINK_BW_1_62:
1128 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
1129 break;
1130 case DP_LINK_BW_2_7:
1131 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
1132 break;
1133 case DP_LINK_BW_5_4:
1134 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
1135 break;
1136 }
1137
1138 cfgcr1 = cfgcr2 = 0;
1139 } else /* eDP */
1140 return true;
1141
1142 intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
1143 intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
1144 intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
1145
1146 pll = intel_get_shared_dpll(intel_crtc);
1147 if (pll == NULL) {
1148 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
1149 pipe_name(intel_crtc->pipe));
1150 return false;
1151 }
1152
1153 /* shared DPLL id 0 is DPLL 1 */
1154 intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
1155
1156 return true;
1157}
831 1158
832 1159 /*
833 1160 * Tries to find a *shared* PLL for the CRTC and store it in
@@ -838,11 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
838 1165 */
839 1166 bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
840 1167 {
1168 struct drm_device *dev = intel_crtc->base.dev;
841 1169 struct intel_encoder *intel_encoder =
842 1170 intel_ddi_get_crtc_new_encoder(intel_crtc);
843 1171 int clock = intel_crtc->new_config->port_clock;
844 1172
845 return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
1173 if (IS_SKYLAKE(dev))
1174 return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
1175 else
1176 return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
846 1177 }
847 1178
848 1179 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1134,7 +1465,8 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1134 1465 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1135 1466 {
1136 1467 struct drm_encoder *encoder = &intel_encoder->base;
1137 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1468 struct drm_device *dev = encoder->dev;
1469 struct drm_i915_private *dev_priv = dev->dev_private;
1138 1470 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
1139 1471 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1140 1472 int type = intel_encoder->type;
@@ -1144,8 +1476,42 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1144 1476 intel_edp_panel_on(intel_dp);
1145 1477 }
1146 1478
1147 WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
1148 I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
1479 if (IS_SKYLAKE(dev)) {
1480 uint32_t dpll = crtc->config.ddi_pll_sel;
1481 uint32_t val;
1482
1483 /*
1484 * DPLL0 is used for eDP and is the only "private" DPLL (as
1485 * opposed to shared) on SKL
1486 */
1487 if (type == INTEL_OUTPUT_EDP) {
1488 WARN_ON(dpll != SKL_DPLL0);
1489
1490 val = I915_READ(DPLL_CTRL1);
1491
1492 val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
1493 DPLL_CTRL1_SSC(dpll) |
1494 DPLL_CRTL1_LINK_RATE_MASK(dpll));
1495 val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
1496
1497 I915_WRITE(DPLL_CTRL1, val);
1498 POSTING_READ(DPLL_CTRL1);
1499 }
1500
1501 /* DDI -> PLL mapping */
1502 val = I915_READ(DPLL_CTRL2);
1503
1504 val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
1505 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
1506 val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
1507 DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
1508
1509 I915_WRITE(DPLL_CTRL2, val);
1510
1511 } else {
1512 WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
1513 I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
1514 }
1149 1515
1150 1516 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1151 1517 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1155,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1155 1521 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1156 1522 intel_dp_start_link_train(intel_dp);
1157 1523 intel_dp_complete_link_train(intel_dp);
1158 if (port != PORT_A)
1524 if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
1159 1525 intel_dp_stop_link_train(intel_dp);
1160 1526 } else if (type == INTEL_OUTPUT_HDMI) {
1161 1527 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1169,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1169 1535 static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1170 1536 {
1171 1537 struct drm_encoder *encoder = &intel_encoder->base;
1172 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1538 struct drm_device *dev = encoder->dev;
1539 struct drm_i915_private *dev_priv = dev->dev_private;
1173 1540 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1174 1541 int type = intel_encoder->type;
1175 1542 uint32_t val;
@@ -1197,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1197 1564 intel_edp_panel_off(intel_dp);
1198 1565 }
1199 1566
1200 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1567 if (IS_SKYLAKE(dev))
1568 I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
1569 DPLL_CTRL2_DDI_CLK_OFF(port)));
1570 else
1571 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1201 1572 }
1202 1573
1203 1574 static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1224,11 +1595,11 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1224 1595 } else if (type == INTEL_OUTPUT_EDP) {
1225 1596 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1226 1597
1227 if (port == PORT_A)
1598 if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
1228 1599 intel_dp_stop_link_train(intel_dp);
1229 1600
1230 1601 intel_edp_backlight_on(intel_dp);
1231 intel_edp_psr_enable(intel_dp);
1602 intel_psr_enable(intel_dp);
1232 1603 }
1233 1604
1234 1605 if (intel_crtc->config.has_audio) {
@@ -1254,11 +1625,59 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1254 1625 if (type == INTEL_OUTPUT_EDP) {
1255 1626 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1256 1627
1257 intel_edp_psr_disable(intel_dp);
1628 intel_psr_disable(intel_dp);
1258 1629 intel_edp_backlight_off(intel_dp);
1259 1630 }
1260 1631 }
1261 1632
1633static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
1634{
1635 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
1636 uint32_t cdctl = I915_READ(CDCLK_CTL);
1637 uint32_t linkrate;
1638
1639 if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
1640 WARN(1, "LCPLL1 not enabled\n");
1641 return 24000; /* 24MHz is the cd freq with NSSC ref */
1642 }
1643
1644 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
1645 return 540000;
1646
1647 linkrate = (I915_READ(DPLL_CTRL1) &
1648 DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
1649
1650 if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
1651 linkrate == DPLL_CRTL1_LINK_RATE_1080) {
1652 /* vco 8640 */
1653 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
1654 case CDCLK_FREQ_450_432:
1655 return 432000;
1656 case CDCLK_FREQ_337_308:
1657 return 308570;
1658 case CDCLK_FREQ_675_617:
1659 return 617140;
1660 default:
1661 WARN(1, "Unknown cd freq selection\n");
1662 }
1663 } else {
1664 /* vco 8100 */
1665 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
1666 case CDCLK_FREQ_450_432:
1667 return 450000;
1668 case CDCLK_FREQ_337_308:
1669 return 337500;
1670 case CDCLK_FREQ_675_617:
1671 return 675000;
1672 default:
1673 WARN(1, "Unknown cd freq selection\n");
1674 }
1675 }
1676
1677 /* error case, do as if DPLL0 isn't enabled */
1678 return 24000;
1679}
1680
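For context on the decode order above (an inference from the code, not spelled out in the patch): the same CDCLK_FREQ_337_308 selector means 337.5 MHz when DPLL0 runs the 8100 MHz VCO but 308.57 MHz on the 8640 MHz VCO, and DPLL0's link rate (2160 or 1080 implies the 8640 VCO) is the only way to tell the two apart — hence the DPLL_CTRL1 read before consulting the frequency table.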
1262 1681 static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1263 1682 {
1264 1683 uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1300,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1300 1719 {
1301 1720 struct drm_device *dev = dev_priv->dev;
1302 1721
1722 if (IS_SKYLAKE(dev))
1723 return skl_get_cdclk_freq(dev_priv);
1724
1303 1725 if (IS_BROADWELL(dev))
1304 1726 return bdw_get_cdclk_freq(dev_priv);
1305 1727
@@ -1361,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
1361 1783 }
1362 1784 }
1363 1785
1786static const char * const skl_ddi_pll_names[] = {
1787 "DPLL 1",
1788 "DPLL 2",
1789 "DPLL 3",
1790};
1791
1792struct skl_dpll_regs {
1793 u32 ctl, cfgcr1, cfgcr2;
1794};
1795
1796/* this array is indexed by the *shared* pll id */
1797static const struct skl_dpll_regs skl_dpll_regs[3] = {
1798 {
1799 /* DPLL 1 */
1800 .ctl = LCPLL2_CTL,
1801 .cfgcr1 = DPLL1_CFGCR1,
1802 .cfgcr2 = DPLL1_CFGCR2,
1803 },
1804 {
1805 /* DPLL 2 */
1806 .ctl = WRPLL_CTL1,
1807 .cfgcr1 = DPLL2_CFGCR1,
1808 .cfgcr2 = DPLL2_CFGCR2,
1809 },
1810 {
1811 /* DPLL 3 */
1812 .ctl = WRPLL_CTL2,
1813 .cfgcr1 = DPLL3_CFGCR1,
1814 .cfgcr2 = DPLL3_CFGCR2,
1815 },
1816};
1817
1818static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1819 struct intel_shared_dpll *pll)
1820{
1821 uint32_t val;
1822 unsigned int dpll;
1823 const struct skl_dpll_regs *regs = skl_dpll_regs;
1824
1825 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
1826 dpll = pll->id + 1;
1827
1828 val = I915_READ(DPLL_CTRL1);
1829
1830 val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
1831 DPLL_CRTL1_LINK_RATE_MASK(dpll));
1832 val |= pll->config.hw_state.ctrl1 << (dpll * 6);
1833
1834 I915_WRITE(DPLL_CTRL1, val);
1835 POSTING_READ(DPLL_CTRL1);
1836
1837 I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
1838 I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
1839 POSTING_READ(regs[pll->id].cfgcr1);
1840 POSTING_READ(regs[pll->id].cfgcr2);
1841
1842 /* the enable bit is always bit 31 */
1843 I915_WRITE(regs[pll->id].ctl,
1844 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
1845
1846 if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
1847 DRM_ERROR("DPLL %d not locked\n", dpll);
1848}
1849
1850static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1851 struct intel_shared_dpll *pll)
1852{
1853 const struct skl_dpll_regs *regs = skl_dpll_regs;
1854
1855 /* the enable bit is always bit 31 */
1856 I915_WRITE(regs[pll->id].ctl,
1857 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
1858 POSTING_READ(regs[pll->id].ctl);
1859}
1860
1861static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1862 struct intel_shared_dpll *pll,
1863 struct intel_dpll_hw_state *hw_state)
1864{
1865 uint32_t val;
1866 unsigned int dpll;
1867 const struct skl_dpll_regs *regs = skl_dpll_regs;
1868
1869 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
1870 return false;
1871
1872 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
1873 dpll = pll->id + 1;
1874
1875 val = I915_READ(regs[pll->id].ctl);
1876 if (!(val & LCPLL_PLL_ENABLE))
1877 return false;
1878
1879 val = I915_READ(DPLL_CTRL1);
1880 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
1881
1882 /* avoid reading back stale values if HDMI mode is not enabled */
1883 if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
1884 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
1885 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
1886 }
1887
1888 return true;
1889}
1890
1891static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
1892{
1893 int i;
1894
1895 dev_priv->num_shared_dpll = 3;
1896
1897 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1898 dev_priv->shared_dplls[i].id = i;
1899 dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
1900 dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
1901 dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
1902 dev_priv->shared_dplls[i].get_hw_state =
1903 skl_ddi_pll_get_hw_state;
1904 }
1905}
1906
1364 1907 void intel_ddi_pll_init(struct drm_device *dev)
1365 1908 {
1366 1909 struct drm_i915_private *dev_priv = dev->dev_private;
1367 1910 uint32_t val = I915_READ(LCPLL_CTL);
1368 1911
1369 hsw_shared_dplls_init(dev_priv);
1370
1371 /* The LCPLL register should be turned on by the BIOS. For now let's
1372  * just check its state and print errors in case something is wrong.
1373  * Don't even try to turn it on.
1374  */
1912 if (IS_SKYLAKE(dev))
1913 skl_shared_dplls_init(dev_priv);
1914 else
1915 hsw_shared_dplls_init(dev_priv);
1375 1916
1376 1917 DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
1377 1918 intel_ddi_get_cdclk_freq(dev_priv));
1378 1919
1379 if (val & LCPLL_CD_SOURCE_FCLK)
1380 DRM_ERROR("CDCLK source is not LCPLL\n");
1920 if (IS_SKYLAKE(dev)) {
1921 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
1922 DRM_ERROR("LCPLL1 is disabled\n");
1923 } else {
1924 /*
1925 * The LCPLL register should be turned on by the BIOS. For now
1926 * let's just check its state and print errors in case
1927 * something is wrong. Don't even try to turn it on.
1928 */
1929
1930 if (val & LCPLL_CD_SOURCE_FCLK)
1931 DRM_ERROR("CDCLK source is not LCPLL\n");
1381 1932
1382 1933 if (val & LCPLL_PLL_DISABLE)
1383 1934 DRM_ERROR("LCPLL is disabled\n");
1935 }
1384 1936 }
1385 1937
1386 1938 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1475,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1475 2027 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1476 2028 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1477 2029 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
2030 struct intel_hdmi *intel_hdmi;
1478 2031 u32 temp, flags = 0;
2032 struct drm_device *dev = dev_priv->dev;
1479 2033
1480 2034 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1481 2035 if (temp & TRANS_DDI_PHSYNC)
@@ -1509,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1509 2063 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1510 2064 case TRANS_DDI_MODE_SELECT_HDMI:
1511 2065 pipe_config->has_hdmi_sink = true;
2066 intel_hdmi = enc_to_intel_hdmi(&encoder->base);
2067
2068 if (intel_hdmi->infoframe_enabled(&encoder->base))
2069 pipe_config->has_infoframe = true;
2070 break;
1512 2071 case TRANS_DDI_MODE_SELECT_DVI:
1513 2072 case TRANS_DDI_MODE_SELECT_FDI:
1514 2073 break;
@@ -1547,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1547 2106 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1548 2107 }
1549 2108
1550 hsw_ddi_clock_get(encoder, pipe_config);
2109 if (INTEL_INFO(dev)->gen <= 8)
2110 hsw_ddi_clock_get(encoder, pipe_config);
2111 else
2112 skl_ddi_clock_get(encoder, pipe_config);
1551 2113 }
1552 2114
1553 2115 static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e9a0df8a437b..853697fc4d4b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2931,8 +2931,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2931 2931 return ret;
2932 2932 }
2933 2933
2934 intel_update_pipe_size(intel_crtc);
2935
2936 2934 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2937 2935
2938 2936 if (intel_crtc->active)
@@ -4005,6 +4003,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4005 4003 }
4006 4004 }
4007 4005
4006static void skylake_pfit_enable(struct intel_crtc *crtc)
4007{
4008 struct drm_device *dev = crtc->base.dev;
4009 struct drm_i915_private *dev_priv = dev->dev_private;
4010 int pipe = crtc->pipe;
4011
4012 if (crtc->config.pch_pfit.enabled) {
4013 I915_WRITE(PS_CTL(pipe), PS_ENABLE);
4014 I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4015 I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4016 }
4017 }
4018
4008 static void ironlake_pfit_enable(struct intel_crtc *crtc) 4019 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4009 { 4020 {
4010 struct drm_device *dev = crtc->base.dev; 4021 struct drm_device *dev = crtc->base.dev;
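The skylake_pfit_enable() added above drives the gen9 pipe scaler (PS_*) registers with the same pch_pfit.pos/size values the ironlake path uses. For reference, those values pack two 16-bit halves per register; the helpers below are a standalone sketch of that layout (helper names are illustrative, and the field layout is assumed from the PF_WIN_* convention rather than stated in this hunk):

#include <stdint.h>

/* Assumed layout, mirroring PF_WIN_POS/PF_WIN_SZ: x (or width) in the
 * upper 16 bits, y (or height) in the lower 16 bits. */
static inline uint32_t pfit_win_pos(uint16_t x, uint16_t y)
{
	return ((uint32_t)x << 16) | y;
}

static inline uint32_t pfit_win_sz(uint16_t width, uint16_t height)
{
	return ((uint32_t)width << 16) | height;
}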
@@ -4388,7 +4399,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4388 4399
4389 intel_ddi_enable_pipe_clock(intel_crtc); 4400 intel_ddi_enable_pipe_clock(intel_crtc);
4390 4401
4391 ironlake_pfit_enable(intel_crtc); 4402 if (IS_SKYLAKE(dev))
4403 skylake_pfit_enable(intel_crtc);
4404 else
4405 ironlake_pfit_enable(intel_crtc);
4392 4406
4393 /* 4407 /*
4394 * On ILK+ LUT must be loaded before the pipe is running but with 4408 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -4422,6 +4436,21 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4422 intel_crtc_enable_planes(crtc); 4436 intel_crtc_enable_planes(crtc);
4423 } 4437 }
4424 4438
4439 static void skylake_pfit_disable(struct intel_crtc *crtc)
4440 {
4441 struct drm_device *dev = crtc->base.dev;
4442 struct drm_i915_private *dev_priv = dev->dev_private;
4443 int pipe = crtc->pipe;
4444
4445 /* To avoid upsetting the power well on Haswell, only disable the pfit if
4446 * it's in use. The hw state code will make sure we get this right. */
4447 if (crtc->config.pch_pfit.enabled) {
4448 I915_WRITE(PS_CTL(pipe), 0);
4449 I915_WRITE(PS_WIN_POS(pipe), 0);
4450 I915_WRITE(PS_WIN_SZ(pipe), 0);
4451 }
4452 }
4453
4425 static void ironlake_pfit_disable(struct intel_crtc *crtc) 4454 static void ironlake_pfit_disable(struct intel_crtc *crtc)
4426 { 4455 {
4427 struct drm_device *dev = crtc->base.dev; 4456 struct drm_device *dev = crtc->base.dev;
@@ -4534,7 +4563,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4534 4563
4535 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 4564 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4536 4565
4537 ironlake_pfit_disable(intel_crtc); 4566 if (IS_SKYLAKE(dev))
4567 skylake_pfit_disable(intel_crtc);
4568 else
4569 ironlake_pfit_disable(intel_crtc);
4538 4570
4539 intel_ddi_disable_pipe_clock(intel_crtc); 4571 intel_ddi_disable_pipe_clock(intel_crtc);
4540 4572
@@ -4907,10 +4939,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
4907 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4939 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4908 4940
4909 if (req_cdclk != dev_priv->vlv_cdclk_freq) { 4941 if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4942 /*
4943 * FIXME: We can end up here with all power domains off, yet
4944 * with a CDCLK frequency other than the minimum. To account
4945 * for this take the PIPE-A power domain, which covers the HW
4946 * blocks needed for the following programming. This can be
4947 * removed once it's guaranteed that we get here either with
4948 * the minimum CDCLK set, or the required power domains
4949 * enabled.
4950 */
4951 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
4952
4910 if (IS_CHERRYVIEW(dev)) 4953 if (IS_CHERRYVIEW(dev))
4911 cherryview_set_cdclk(dev, req_cdclk); 4954 cherryview_set_cdclk(dev, req_cdclk);
4912 else 4955 else
4913 valleyview_set_cdclk(dev, req_cdclk); 4956 valleyview_set_cdclk(dev, req_cdclk);
4957
4958 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
4914 } 4959 }
4915 } 4960 }
4916 4961
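The FIXME above documents why the cdclk write is now bracketed with a power-domain get/put. Pulled out of the hunk as a self-contained helper, the shape of the fix is reference-counted bracketing around the hardware access (a sketch against the i915 helpers named in the hunk, not new API):

static void vlv_set_cdclk_powered(struct drm_device *dev, int req_cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Hold PIPE-A so the blocks touched by the cdclk programming
	 * stay powered even if all pipes are currently off. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}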
@@ -5153,36 +5198,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
5153 { 5198 {
5154 } 5199 }
5155 5200
5156 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
5157 bool enabled)
5158 {
5159 struct drm_device *dev = crtc->dev;
5160 struct drm_i915_master_private *master_priv;
5161 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5162 int pipe = intel_crtc->pipe;
5163
5164 if (!dev->primary->master)
5165 return;
5166
5167 master_priv = dev->primary->master->driver_priv;
5168 if (!master_priv->sarea_priv)
5169 return;
5170
5171 switch (pipe) {
5172 case 0:
5173 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
5174 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
5175 break;
5176 case 1:
5177 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
5178 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
5179 break;
5180 default:
5181 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
5182 break;
5183 }
5184 }
5185
5186 /* Master function to enable/disable CRTC and corresponding power wells */ 5201 /* Master function to enable/disable CRTC and corresponding power wells */
5187 void intel_crtc_control(struct drm_crtc *crtc, bool enable) 5202 void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5188 { 5203 {
@@ -5226,8 +5241,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
5226 enable |= intel_encoder->connectors_active; 5241 enable |= intel_encoder->connectors_active;
5227 5242
5228 intel_crtc_control(crtc, enable); 5243 intel_crtc_control(crtc, enable);
5229
5230 intel_crtc_update_sarea(crtc, enable);
5231 } 5244 }
5232 5245
5233 static void intel_crtc_disable(struct drm_crtc *crtc) 5246 static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5242,7 +5255,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
5242 WARN_ON(!crtc->enabled); 5255 WARN_ON(!crtc->enabled);
5243 5256
5244 dev_priv->display.crtc_disable(crtc); 5257 dev_priv->display.crtc_disable(crtc);
5245 intel_crtc_update_sarea(crtc, false);
5246 dev_priv->display.off(crtc); 5258 dev_priv->display.off(crtc);
5247 5259
5248 if (crtc->primary->fb) { 5260 if (crtc->primary->fb) {
@@ -7549,6 +7561,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7549 &pipe_config->fdi_m_n, NULL); 7561 &pipe_config->fdi_m_n, NULL);
7550 } 7562 }
7551 7563
7564 static void skylake_get_pfit_config(struct intel_crtc *crtc,
7565 struct intel_crtc_config *pipe_config)
7566 {
7567 struct drm_device *dev = crtc->base.dev;
7568 struct drm_i915_private *dev_priv = dev->dev_private;
7569 uint32_t tmp;
7570
7571 tmp = I915_READ(PS_CTL(crtc->pipe));
7572
7573 if (tmp & PS_ENABLE) {
7574 pipe_config->pch_pfit.enabled = true;
7575 pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
7576 pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
7577 }
7578 }
7579
7552 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 7580 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7553 struct intel_crtc_config *pipe_config) 7581 struct intel_crtc_config *pipe_config)
7554 { 7582 {
@@ -7962,6 +7990,28 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
7962 return 0; 7990 return 0;
7963 } 7991 }
7964 7992
7993 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
7994 enum port port,
7995 struct intel_crtc_config *pipe_config)
7996 {
7997 u32 temp;
7998
7999 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8000 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
8001
8002 switch (pipe_config->ddi_pll_sel) {
8003 case SKL_DPLL1:
8004 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
8005 break;
8006 case SKL_DPLL2:
8007 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
8008 break;
8009 case SKL_DPLL3:
8010 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
8011 break;
8012 }
8013 }
8014
7965 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 8015 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
7966 enum port port, 8016 enum port port,
7967 struct intel_crtc_config *pipe_config) 8017 struct intel_crtc_config *pipe_config)
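skylake_get_ddi_pll() decodes which shared DPLL feeds a DDI port out of DPLL_CTRL2, where each port owns a small field starting at bit (port * 3 + 1). A standalone sketch of that decode, with the mask width assumed to be two bits to match the SKL_DPLL1..3 values read back above:

#include <stdint.h>

#define SKL_DDI_CLK_SEL_SHIFT(port)	((port) * 3 + 1)
#define SKL_DDI_CLK_SEL_MASK(port)	(0x3u << SKL_DDI_CLK_SEL_SHIFT(port)) /* assumed width */

static uint32_t skl_ddi_clk_sel(uint32_t dpll_ctrl2, unsigned port)
{
	/* Same shift/mask arithmetic as the hunk, minus the MMIO read. */
	return (dpll_ctrl2 & SKL_DDI_CLK_SEL_MASK(port)) >> SKL_DDI_CLK_SEL_SHIFT(port);
}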
@@ -7991,7 +8041,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7991 8041
7992 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 8042 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
7993 8043
7994 haswell_get_ddi_pll(dev_priv, port, pipe_config); 8044 if (IS_SKYLAKE(dev))
8045 skylake_get_ddi_pll(dev_priv, port, pipe_config);
8046 else
8047 haswell_get_ddi_pll(dev_priv, port, pipe_config);
7995 8048
7996 if (pipe_config->shared_dpll >= 0) { 8049 if (pipe_config->shared_dpll >= 0) {
7997 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 8050 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -8067,8 +8120,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
8067 intel_get_pipe_timings(crtc, pipe_config); 8120 intel_get_pipe_timings(crtc, pipe_config);
8068 8121
8069 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 8122 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
8070 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) 8123 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
8071 ironlake_get_pfit_config(crtc, pipe_config); 8124 if (IS_SKYLAKE(dev))
8125 skylake_get_pfit_config(crtc, pipe_config);
8126 else
8127 ironlake_get_pfit_config(crtc, pipe_config);
8128 }
8072 8129
8073 if (IS_HASWELL(dev)) 8130 if (IS_HASWELL(dev))
8074 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 8131 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -8292,7 +8349,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8292 uint32_t width, uint32_t height) 8349 uint32_t width, uint32_t height)
8293{ 8350{
8294 struct drm_device *dev = crtc->dev; 8351 struct drm_device *dev = crtc->dev;
8295 struct drm_i915_private *dev_priv = dev->dev_private; 8352 struct drm_i915_private *dev_priv = to_i915(dev);
8296 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8353 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8297 enum pipe pipe = intel_crtc->pipe; 8354 enum pipe pipe = intel_crtc->pipe;
8298 unsigned old_width; 8355 unsigned old_width;
@@ -8421,7 +8478,7 @@ __intel_framebuffer_create(struct drm_device *dev,
8421 8478
8422 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8479 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8423 if (!intel_fb) { 8480 if (!intel_fb) {
8424 drm_gem_object_unreference_unlocked(&obj->base); 8481 drm_gem_object_unreference(&obj->base);
8425 return ERR_PTR(-ENOMEM); 8482 return ERR_PTR(-ENOMEM);
8426 } 8483 }
8427 8484
@@ -8431,7 +8488,7 @@ __intel_framebuffer_create(struct drm_device *dev,
8431 8488
8432 return &intel_fb->base; 8489 return &intel_fb->base;
8433 err: 8490 err:
8434 drm_gem_object_unreference_unlocked(&obj->base); 8491 drm_gem_object_unreference(&obj->base);
8435 kfree(intel_fb); 8492 kfree(intel_fb);
8436 8493
8437 return ERR_PTR(ret); 8494 return ERR_PTR(ret);
@@ -9465,6 +9522,69 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
9465 return 0; 9522 return 0;
9466 } 9523 }
9467 9524
9525 static int intel_gen9_queue_flip(struct drm_device *dev,
9526 struct drm_crtc *crtc,
9527 struct drm_framebuffer *fb,
9528 struct drm_i915_gem_object *obj,
9529 struct intel_engine_cs *ring,
9530 uint32_t flags)
9531 {
9532 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9533 uint32_t plane = 0, stride;
9534 int ret;
9535
9536 switch(intel_crtc->pipe) {
9537 case PIPE_A:
9538 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
9539 break;
9540 case PIPE_B:
9541 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
9542 break;
9543 case PIPE_C:
9544 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
9545 break;
9546 default:
9547 WARN_ONCE(1, "unknown plane in flip command\n");
9548 return -ENODEV;
9549 }
9550
9551 switch (obj->tiling_mode) {
9552 case I915_TILING_NONE:
9553 stride = fb->pitches[0] >> 6;
9554 break;
9555 case I915_TILING_X:
9556 stride = fb->pitches[0] >> 9;
9557 break;
9558 default:
9559 WARN_ONCE(1, "unknown tiling in flip command\n");
9560 return -ENODEV;
9561 }
9562
9563 ret = intel_ring_begin(ring, 10);
9564 if (ret)
9565 return ret;
9566
9567 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9568 intel_ring_emit(ring, DERRMR);
9569 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9570 DERRMR_PIPEB_PRI_FLIP_DONE |
9571 DERRMR_PIPEC_PRI_FLIP_DONE));
9572 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9573 MI_SRM_LRM_GLOBAL_GTT);
9574 intel_ring_emit(ring, DERRMR);
9575 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9576 intel_ring_emit(ring, 0);
9577
9578 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
9579 intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
9580 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9581
9582 intel_mark_page_flip_active(intel_crtc);
9583 __intel_ring_advance(ring);
9584
9585 return 0;
9586 }
9587
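intel_gen9_queue_flip() encodes the framebuffer pitch into the flip command in units that depend on tiling: 64-byte units for linear (pitch >> 6) and 512-byte units for X-tiled (pitch >> 9), with the tiling mode OR'd into the low bits of the emitted dword. A minimal standalone restatement of that encoding:

#include <stdint.h>

enum { TILING_NONE = 0, TILING_X = 1 };	/* values as in I915_TILING_* */

static uint32_t gen9_flip_stride_dword(uint32_t pitch_bytes, int tiling)
{
	/* 512-byte units for X-tiled, 64-byte units for linear. */
	uint32_t stride = (tiling == TILING_X) ? pitch_bytes >> 9
					       : pitch_bytes >> 6;

	/* Matches the "stride << 6 | obj->tiling_mode" emitted above. */
	return stride << 6 | (uint32_t)tiling;
}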
9468 static int intel_default_queue_flip(struct drm_device *dev, 9588 static int intel_default_queue_flip(struct drm_device *dev,
9469 struct drm_crtc *crtc, 9589 struct drm_crtc *crtc,
9470 struct drm_framebuffer *fb, 9590 struct drm_framebuffer *fb,
@@ -9904,6 +10024,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
9904 pipe_config->dp_m2_n2.link_n, 10024 pipe_config->dp_m2_n2.link_n,
9905 pipe_config->dp_m2_n2.tu); 10025 pipe_config->dp_m2_n2.tu);
9906 10026
10027 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
10028 pipe_config->has_audio,
10029 pipe_config->has_infoframe);
10030
9907 DRM_DEBUG_KMS("requested mode:\n"); 10031 DRM_DEBUG_KMS("requested mode:\n");
9908 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 10032 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9909 DRM_DEBUG_KMS("adjusted mode:\n"); 10033 DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10372,6 +10496,7 @@ intel_pipe_config_compare(struct drm_device *dev,
10372 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 10496 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10373 IS_VALLEYVIEW(dev)) 10497 IS_VALLEYVIEW(dev))
10374 PIPE_CONF_CHECK_I(limited_color_range); 10498 PIPE_CONF_CHECK_I(limited_color_range);
10499 PIPE_CONF_CHECK_I(has_infoframe);
10375 10500
10376 PIPE_CONF_CHECK_I(has_audio); 10501 PIPE_CONF_CHECK_I(has_audio);
10377 10502
@@ -10428,6 +10553,9 @@ intel_pipe_config_compare(struct drm_device *dev,
10428 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 10553 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10429 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 10554 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10430 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 10555 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10556 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
10557 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
10558 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
10431 10559
10432 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 10560 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10433 PIPE_CONF_CHECK_I(pipe_bpp); 10561 PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10751,45 +10879,60 @@ static void update_scanline_offset(struct intel_crtc *crtc)
10751 crtc->scanline_offset = 1; 10879 crtc->scanline_offset = 1;
10752 } 10880 }
10753 10881
10882 static struct intel_crtc_config *
10883 intel_modeset_compute_config(struct drm_crtc *crtc,
10884 struct drm_display_mode *mode,
10885 struct drm_framebuffer *fb,
10886 unsigned *modeset_pipes,
10887 unsigned *prepare_pipes,
10888 unsigned *disable_pipes)
10889 {
10890 struct intel_crtc_config *pipe_config = NULL;
10891
10892 intel_modeset_affected_pipes(crtc, modeset_pipes,
10893 prepare_pipes, disable_pipes);
10894
10895 if ((*modeset_pipes) == 0)
10896 goto out;
10897
10898 /*
10899 * Note this needs changes when we start tracking multiple modes
10900 * and crtcs. At that point we'll need to compute the whole config
10901 * (i.e. one pipe_config for each crtc) rather than just the one
10902 * for this crtc.
10903 */
10904 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10905 if (IS_ERR(pipe_config)) {
10906 goto out;
10907 }
10908 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10909 "[modeset]");
10910 to_intel_crtc(crtc)->new_config = pipe_config;
10911
10912 out:
10913 return pipe_config;
10914 }
10915
10754 static int __intel_set_mode(struct drm_crtc *crtc, 10916 static int __intel_set_mode(struct drm_crtc *crtc,
10755 struct drm_display_mode *mode, 10917 struct drm_display_mode *mode,
10756 int x, int y, struct drm_framebuffer *fb) 10918 int x, int y, struct drm_framebuffer *fb,
10919 struct intel_crtc_config *pipe_config,
10920 unsigned modeset_pipes,
10921 unsigned prepare_pipes,
10922 unsigned disable_pipes)
10757 { 10923 {
10758 struct drm_device *dev = crtc->dev; 10924 struct drm_device *dev = crtc->dev;
10759 struct drm_i915_private *dev_priv = dev->dev_private; 10925 struct drm_i915_private *dev_priv = dev->dev_private;
10760 struct drm_display_mode *saved_mode; 10926 struct drm_display_mode *saved_mode;
10761 struct intel_crtc_config *pipe_config = NULL;
10762 struct intel_crtc *intel_crtc; 10927 struct intel_crtc *intel_crtc;
10763 unsigned disable_pipes, prepare_pipes, modeset_pipes;
10764 int ret = 0; 10928 int ret = 0;
10765 10929
10766 saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); 10930 saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
10767 if (!saved_mode) 10931 if (!saved_mode)
10768 return -ENOMEM; 10932 return -ENOMEM;
10769 10933
10770 intel_modeset_affected_pipes(crtc, &modeset_pipes,
10771 &prepare_pipes, &disable_pipes);
10772
10773 *saved_mode = crtc->mode; 10934 *saved_mode = crtc->mode;
10774 10935
10775 /* Hack: Because we don't (yet) support global modeset on multiple
10776 * crtcs, we don't keep track of the new mode for more than one crtc.
10777 * Hence simply check whether any bit is set in modeset_pipes in all the
10778 * pieces of code that are not yet converted to deal with multiple crtcs
10779 * changing their mode at the same time. */
10780 if (modeset_pipes) {
10781 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10782 if (IS_ERR(pipe_config)) {
10783 ret = PTR_ERR(pipe_config);
10784 pipe_config = NULL;
10785
10786 goto out;
10787 }
10788 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10789 "[modeset]");
10790 to_intel_crtc(crtc)->new_config = pipe_config;
10791 }
10792
10793 /* 10936 /*
10794 * See if the config requires any additional preparation, e.g. 10937 * See if the config requires any additional preparation, e.g.
10795 * to adjust global state with pipes off. We need to do this 10938 * to adjust global state with pipes off. We need to do this
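The signature change above is the core of this refactor: __intel_set_mode() no longer computes the pipe config itself but receives it, along with the affected-pipe masks, from intel_modeset_compute_config(). Callers therefore follow a compute-then-commit pattern; the sketch below mirrors the intel_set_mode() wrapper added later in this file:

static int set_mode_sketch(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   int x, int y, struct drm_framebuffer *fb)
{
	struct intel_crtc_config *pipe_config;
	unsigned modeset_pipes, prepare_pipes, disable_pipes;

	/* Phase 1: compute the new state and which pipes it touches. */
	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);
	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);

	/* Phase 2: commit the precomputed state to the hardware. */
	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
				    modeset_pipes, prepare_pipes,
				    disable_pipes);
}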
@@ -10830,6 +10973,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
10830 10973
10831 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 10974 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
10832 * to set it here already despite that we pass it down the callchain. 10975 * to set it here already despite that we pass it down the callchain.
10976 *
10977 * Note we'll need to fix this up when we start tracking multiple
10978 * pipes; here we assume a single modeset_pipe and only track the
10979 * single crtc and mode.
10833 */ 10980 */
10834 if (modeset_pipes) { 10981 if (modeset_pipes) {
10835 crtc->mode = *mode; 10982 crtc->mode = *mode;
@@ -10891,19 +11038,23 @@ done:
10891 if (ret && crtc->enabled) 11038 if (ret && crtc->enabled)
10892 crtc->mode = *saved_mode; 11039 crtc->mode = *saved_mode;
10893 11040
10894 out:
10895 kfree(pipe_config); 11041 kfree(pipe_config);
10896 kfree(saved_mode); 11042 kfree(saved_mode);
10897 return ret; 11043 return ret;
10898 } 11044 }
10899 11045
10900 static int intel_set_mode(struct drm_crtc *crtc, 11046 static int intel_set_mode_pipes(struct drm_crtc *crtc,
10901 struct drm_display_mode *mode, 11047 struct drm_display_mode *mode,
10902 int x, int y, struct drm_framebuffer *fb) 11048 int x, int y, struct drm_framebuffer *fb,
11049 struct intel_crtc_config *pipe_config,
11050 unsigned modeset_pipes,
11051 unsigned prepare_pipes,
11052 unsigned disable_pipes)
10903 { 11053 {
10904 int ret; 11054 int ret;
10905 11055
10906 ret = __intel_set_mode(crtc, mode, x, y, fb); 11056 ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
11057 prepare_pipes, disable_pipes);
10907 11058
10908 if (ret == 0) 11059 if (ret == 0)
10909 intel_modeset_check_state(crtc->dev); 11060 intel_modeset_check_state(crtc->dev);
@@ -10911,6 +11062,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
10911 return ret; 11062 return ret;
10912 } 11063 }
10913 11064
11065 static int intel_set_mode(struct drm_crtc *crtc,
11066 struct drm_display_mode *mode,
11067 int x, int y, struct drm_framebuffer *fb)
11068 {
11069 struct intel_crtc_config *pipe_config;
11070 unsigned modeset_pipes, prepare_pipes, disable_pipes;
11071
11072 pipe_config = intel_modeset_compute_config(crtc, mode, fb,
11073 &modeset_pipes,
11074 &prepare_pipes,
11075 &disable_pipes);
11076
11077 if (IS_ERR(pipe_config))
11078 return PTR_ERR(pipe_config);
11079
11080 return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
11081 modeset_pipes, prepare_pipes,
11082 disable_pipes);
11083 }
11084
10914 void intel_crtc_restore_mode(struct drm_crtc *crtc) 11085 void intel_crtc_restore_mode(struct drm_crtc *crtc)
10915 { 11086 {
10916 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); 11087 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11239,6 +11410,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11239 struct drm_device *dev; 11410 struct drm_device *dev;
11240 struct drm_mode_set save_set; 11411 struct drm_mode_set save_set;
11241 struct intel_set_config *config; 11412 struct intel_set_config *config;
11413 struct intel_crtc_config *pipe_config;
11414 unsigned modeset_pipes, prepare_pipes, disable_pipes;
11242 int ret; 11415 int ret;
11243 11416
11244 BUG_ON(!set); 11417 BUG_ON(!set);
@@ -11284,9 +11457,36 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11284 if (ret) 11457 if (ret)
11285 goto fail; 11458 goto fail;
11286 11459
11460 pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
11461 set->fb,
11462 &modeset_pipes,
11463 &prepare_pipes,
11464 &disable_pipes);
11465 if (IS_ERR(pipe_config)) {
11466 ret = PTR_ERR(pipe_config);
11467 goto fail;
11468 } else if (pipe_config) {
11469 if (to_intel_crtc(set->crtc)->new_config->has_audio !=
11470 to_intel_crtc(set->crtc)->config.has_audio)
11471 config->mode_changed = true;
11472
11473 /* Force mode sets for any infoframe stuff */
11474 if (to_intel_crtc(set->crtc)->new_config->has_infoframe ||
11475 to_intel_crtc(set->crtc)->config.has_infoframe)
11476 config->mode_changed = true;
11477 }
11478
11479 /* set_mode will free it in the mode_changed case */
11480 if (!config->mode_changed)
11481 kfree(pipe_config);
11482
11483 intel_update_pipe_size(to_intel_crtc(set->crtc));
11484
11287 if (config->mode_changed) { 11485 if (config->mode_changed) {
11288 ret = intel_set_mode(set->crtc, set->mode, 11486 ret = intel_set_mode_pipes(set->crtc, set->mode,
11289 set->x, set->y, set->fb); 11487 set->x, set->y, set->fb, pipe_config,
11488 modeset_pipes, prepare_pipes,
11489 disable_pipes);
11290 } else if (config->fb_changed) { 11490 } else if (config->fb_changed) {
11291 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 11491 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11292 11492
@@ -11559,8 +11759,8 @@ intel_commit_primary_plane(struct drm_plane *plane,
11559 struct drm_rect *src = &state->src; 11759 struct drm_rect *src = &state->src;
11560 11760
11561 crtc->primary->fb = fb; 11761 crtc->primary->fb = fb;
11562 crtc->x = src->x1; 11762 crtc->x = src->x1 >> 16;
11563 crtc->y = src->y1; 11763 crtc->y = src->y1 >> 16;
11564 11764
11565 intel_plane->crtc_x = state->orig_dst.x1; 11765 intel_plane->crtc_x = state->orig_dst.x1;
11566 intel_plane->crtc_y = state->orig_dst.y1; 11766 intel_plane->crtc_y = state->orig_dst.y1;
@@ -12009,7 +12209,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12009 12209
12010 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 12210 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
12011 12211
12012 if (!encoder) 12212 if (!encoder || WARN_ON(!encoder->crtc))
12013 return INVALID_PIPE; 12213 return INVALID_PIPE;
12014 12214
12015 return to_intel_crtc(encoder->crtc)->pipe; 12215 return to_intel_crtc(encoder->crtc)->pipe;
@@ -12244,7 +12444,7 @@ static void intel_setup_outputs(struct drm_device *dev)
12244 if (SUPPORTS_TV(dev)) 12444 if (SUPPORTS_TV(dev))
12245 intel_tv_init(dev); 12445 intel_tv_init(dev);
12246 12446
12247 intel_edp_psr_init(dev); 12447 intel_psr_init(dev);
12248 12448
12249 for_each_intel_encoder(dev, encoder) { 12449 for_each_intel_encoder(dev, encoder) {
12250 encoder->base.possible_crtcs = encoder->crtc_mask; 12450 encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12558,6 +12758,9 @@ static void intel_init_display(struct drm_device *dev)
12558 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 12758 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
12559 dev_priv->display.queue_flip = intel_gen7_queue_flip; 12759 dev_priv->display.queue_flip = intel_gen7_queue_flip;
12560 break; 12760 break;
12761 case 9:
12762 dev_priv->display.queue_flip = intel_gen9_queue_flip;
12763 break;
12561 } 12764 }
12562 12765
12563 intel_panel_init_backlight_funcs(dev); 12766 intel_panel_init_backlight_funcs(dev);
@@ -13262,8 +13465,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13262 struct drm_crtc *crtc = 13465 struct drm_crtc *crtc =
13263 dev_priv->pipe_to_crtc_mapping[pipe]; 13466 dev_priv->pipe_to_crtc_mapping[pipe];
13264 13467
13265 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 13468 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
13266 crtc->primary->fb); 13469 crtc->primary->fb);
13267 } 13470 }
13268 } else { 13471 } else {
13269 intel_modeset_update_staged_output_state(dev); 13472 intel_modeset_update_staged_output_state(dev);
@@ -13274,6 +13477,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13274 13477
13275 void intel_modeset_gem_init(struct drm_device *dev) 13478 void intel_modeset_gem_init(struct drm_device *dev)
13276 { 13479 {
13480 struct drm_i915_private *dev_priv = dev->dev_private;
13277 struct drm_crtc *c; 13481 struct drm_crtc *c;
13278 struct drm_i915_gem_object *obj; 13482 struct drm_i915_gem_object *obj;
13279 13483
@@ -13281,6 +13485,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
13281 intel_init_gt_powersave(dev); 13485 intel_init_gt_powersave(dev);
13282 mutex_unlock(&dev->struct_mutex); 13486 mutex_unlock(&dev->struct_mutex);
13283 13487
13488 /*
13489 * There may be no VBT; and if the BIOS enabled SSC we can
13490 * just keep using it to avoid unnecessary flicker. Whereas if the
13491 * BIOS isn't using it, don't assume it will work even if the VBT
13492 * indicates as much.
13493 */
13494 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13495 dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
13496 DREF_SSC1_ENABLE);
13497
13284 intel_modeset_init_hw(dev); 13498 intel_modeset_init_hw(dev);
13285 13499
13286 intel_setup_overlay(dev); 13500 intel_setup_overlay(dev);
@@ -13306,6 +13520,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
13306 } 13520 }
13307 } 13521 }
13308 mutex_unlock(&dev->struct_mutex); 13522 mutex_unlock(&dev->struct_mutex);
13523
13524 intel_backlight_register(dev);
13309 } 13525 }
13310 13526
13311 void intel_connector_unregister(struct intel_connector *intel_connector) 13527 void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13321,9 +13537,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
13321 struct drm_i915_private *dev_priv = dev->dev_private; 13537 struct drm_i915_private *dev_priv = dev->dev_private;
13322 struct drm_connector *connector; 13538 struct drm_connector *connector;
13323 13539
13540 intel_disable_gt_powersave(dev);
13541
13542 intel_backlight_unregister(dev);
13543
13324 /* 13544 /*
13325 * Interrupts and polling as the first thing to avoid creating havoc. 13545 * Interrupts and polling as the first thing to avoid creating havoc.
13326 * Too much stuff here (turning off rps, connectors, ...) would 13546 * Too much stuff here (turning off connectors, ...) would
13327 * experience fancy races otherwise. 13547 * experience fancy races otherwise.
13328 */ 13548 */
13329 intel_irq_uninstall(dev_priv); 13549 intel_irq_uninstall(dev_priv);
@@ -13340,8 +13560,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
13340 13560
13341 intel_disable_fbc(dev); 13561 intel_disable_fbc(dev);
13342 13562
13343 intel_disable_gt_powersave(dev);
13344
13345 ironlake_teardown_rc6(dev); 13563 ironlake_teardown_rc6(dev);
13346 13564
13347 mutex_unlock(&dev->struct_mutex); 13565 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 49288437e52a..d2529ec280c8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -227,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
227 return MODE_OK; 227 return MODE_OK;
228 } 228 }
229 229
230 static uint32_t 230 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
231 pack_aux(const uint8_t *src, int src_bytes)
232 { 231 {
233 int i; 232 int i;
234 uint32_t v = 0; 233 uint32_t v = 0;
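This hunk only renames and exports pack_aux()/unpack_aux() (the PSR code moving to intel_psr.c needs them); the behavior is unchanged: up to four bytes are packed big-endian into one 32-bit AUX data word. A standalone sketch of that packing, reconstructed from the function's known behavior rather than copied from this diff:

#include <stdint.h>
#include <assert.h>

static uint32_t pack_aux_sketch(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	/* The first byte ends up in the most significant position. */
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	const uint8_t msg[2] = { 0xAA, 0xBB };

	assert(pack_aux_sketch(msg, 2) == 0xAABB0000u);
	return 0;
}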
@@ -240,8 +239,7 @@ pack_aux(const uint8_t *src, int src_bytes)
240 return v; 239 return v;
241 } 240 }
242 241
243 static void 242 void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
244 unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
245 { 243 {
246 int i; 244 int i;
247 if (dst_bytes > 4) 245 if (dst_bytes > 4)
@@ -863,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
863 /* Load the send data into the aux channel data registers */ 861 /* Load the send data into the aux channel data registers */
864 for (i = 0; i < send_bytes; i += 4) 862 for (i = 0; i < send_bytes; i += 4)
865 I915_WRITE(ch_data + i, 863 I915_WRITE(ch_data + i,
866 pack_aux(send + i, send_bytes - i)); 864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
867 866
868 /* Send the command and wait for it to complete */ 867 /* Send the command and wait for it to complete */
869 I915_WRITE(ch_ctl, send_ctl); 868 I915_WRITE(ch_ctl, send_ctl);
@@ -917,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
917 recv_bytes = recv_size; 916 recv_bytes = recv_size;
918 917
919 for (i = 0; i < recv_bytes; i += 4) 918 for (i = 0; i < recv_bytes; i += 4)
920 unpack_aux(I915_READ(ch_data + i), 919 intel_dp_unpack_aux(I915_READ(ch_data + i),
921 recv + i, recv_bytes - i); 920 recv + i, recv_bytes - i);
922 921
923 ret = recv_bytes; 922 ret = recv_bytes;
924 out: 923 out:
@@ -1075,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
1075 } 1074 }
1076 1075
1077 static void 1076 static void
1077 skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
1078 {
1079 u32 ctrl1;
1080
1081 pipe_config->ddi_pll_sel = SKL_DPLL0;
1082 pipe_config->dpll_hw_state.cfgcr1 = 0;
1083 pipe_config->dpll_hw_state.cfgcr2 = 0;
1084
1085 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1086 switch (link_bw) {
1087 case DP_LINK_BW_1_62:
1088 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1089 SKL_DPLL0);
1090 break;
1091 case DP_LINK_BW_2_7:
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1093 SKL_DPLL0);
1094 break;
1095 case DP_LINK_BW_5_4:
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1097 SKL_DPLL0);
1098 break;
1099 }
1100 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1101 }
1102
1103 static void
1078 hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw) 1104 hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
1079 { 1105 {
1080 switch (link_bw) { 1106 switch (link_bw) {
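skl_edp_set_pll_config() pins eDP to DPLL0 and programs its CTRL1 link-rate field from the DP link bandwidth. The link-rate selects are named for half the nominal link rate (810/1350/2700 MHz for 1.62/2.7/5.4 GHz), consistent with a DDR link clock. The mapping, restated as a table (the select values stay symbolic, since only their names appear in the hunk):

struct skl_edp_rate {
	int dp_link_bw;		/* DP_LINK_BW_* code */
	int ctrl1_rate_sel;	/* DPLL_CRTL1_LINK_RATE_* select */
};

static const struct skl_edp_rate skl_edp_rates[] = {
	{ DP_LINK_BW_1_62, DPLL_CRTL1_LINK_RATE_810 },	/* 1.62 GHz link */
	{ DP_LINK_BW_2_7,  DPLL_CRTL1_LINK_RATE_1350 },	/* 2.7 GHz link */
	{ DP_LINK_BW_5_4,  DPLL_CRTL1_LINK_RATE_2700 },	/* 5.4 GHz link */
};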
@@ -1251,7 +1277,9 @@ found:
1251 &pipe_config->dp_m2_n2); 1277 &pipe_config->dp_m2_n2);
1252 } 1278 }
1253 1279
1254 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1280 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1281 skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
1282 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1255 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); 1283 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1256 else 1284 else
1257 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 1285 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -2067,385 +2095,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2067 } 2095 }
2068 } 2096 }
2069 2097
2070 static bool is_edp_psr(struct intel_dp *intel_dp)
2071 {
2072 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
2073 }
2074
2075 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
2076 {
2077 struct drm_i915_private *dev_priv = dev->dev_private;
2078
2079 if (!HAS_PSR(dev))
2080 return false;
2081
2082 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2083 }
2084
2085 static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
2086 struct edp_vsc_psr *vsc_psr)
2087 {
2088 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2089 struct drm_device *dev = dig_port->base.base.dev;
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2091 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
2092 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
2093 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
2094 uint32_t *data = (uint32_t *) vsc_psr;
2095 unsigned int i;
2096
2097 /* As per BSpec (Pipe Video Data Island Packet), we need to disable
2098 the video DIP being updated before programming the video DIP data buffer
2099 registers for the DIP being updated. */
2100 I915_WRITE(ctl_reg, 0);
2101 POSTING_READ(ctl_reg);
2102
2103 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
2104 if (i < sizeof(struct edp_vsc_psr))
2105 I915_WRITE(data_reg + i, *data++);
2106 else
2107 I915_WRITE(data_reg + i, 0);
2108 }
2109
2110 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
2111 POSTING_READ(ctl_reg);
2112 }
2113
2114 static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
2115 {
2116 struct edp_vsc_psr psr_vsc;
2117
2118 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
2119 memset(&psr_vsc, 0, sizeof(psr_vsc));
2120 psr_vsc.sdp_header.HB0 = 0;
2121 psr_vsc.sdp_header.HB1 = 0x7;
2122 psr_vsc.sdp_header.HB2 = 0x2;
2123 psr_vsc.sdp_header.HB3 = 0x8;
2124 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
2125 }
2126
2127 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
2128 {
2129 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2130 struct drm_device *dev = dig_port->base.base.dev;
2131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 uint32_t aux_clock_divider;
2133 int precharge = 0x3;
2134 bool only_standby = false;
2135 static const uint8_t aux_msg[] = {
2136 [0] = DP_AUX_NATIVE_WRITE << 4,
2137 [1] = DP_SET_POWER >> 8,
2138 [2] = DP_SET_POWER & 0xff,
2139 [3] = 1 - 1,
2140 [4] = DP_SET_POWER_D0,
2141 };
2142 int i;
2143
2144 BUILD_BUG_ON(sizeof(aux_msg) > 20);
2145
2146 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
2147
2148 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
2149 only_standby = true;
2150
2151 /* Enable PSR in sink */
2152 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
2153 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
2154 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
2155 else
2156 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
2157 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
2158
2159 /* Setup AUX registers */
2160 for (i = 0; i < sizeof(aux_msg); i += 4)
2161 I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
2162 pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
2163
2164 I915_WRITE(EDP_PSR_AUX_CTL(dev),
2165 DP_AUX_CH_CTL_TIME_OUT_400us |
2166 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
2167 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
2168 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
2169 }
2170
2171 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
2172 {
2173 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2174 struct drm_device *dev = dig_port->base.base.dev;
2175 struct drm_i915_private *dev_priv = dev->dev_private;
2176 uint32_t max_sleep_time = 0x1f;
2177 uint32_t idle_frames = 1;
2178 uint32_t val = 0x0;
2179 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
2180 bool only_standby = false;
2181
2182 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
2183 only_standby = true;
2184
2185 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
2186 val |= EDP_PSR_LINK_STANDBY;
2187 val |= EDP_PSR_TP2_TP3_TIME_0us;
2188 val |= EDP_PSR_TP1_TIME_0us;
2189 val |= EDP_PSR_SKIP_AUX_EXIT;
2190 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
2191 } else
2192 val |= EDP_PSR_LINK_DISABLE;
2193
2194 I915_WRITE(EDP_PSR_CTL(dev), val |
2195 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
2196 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
2197 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
2198 EDP_PSR_ENABLE);
2199 }
2200
2201 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
2202 {
2203 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2204 struct drm_device *dev = dig_port->base.base.dev;
2205 struct drm_i915_private *dev_priv = dev->dev_private;
2206 struct drm_crtc *crtc = dig_port->base.base.crtc;
2207 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2208
2209 lockdep_assert_held(&dev_priv->psr.lock);
2210 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
2211 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
2212
2213 dev_priv->psr.source_ok = false;
2214
2215 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
2216 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
2217 return false;
2218 }
2219
2220 if (!i915.enable_psr) {
2221 DRM_DEBUG_KMS("PSR disable by flag\n");
2222 return false;
2223 }
2224
2225 /* Below limitations aren't valid for Broadwell */
2226 if (IS_BROADWELL(dev))
2227 goto out;
2228
2229 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
2230 S3D_ENABLE) {
2231 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
2232 return false;
2233 }
2234
2235 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
2236 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
2237 return false;
2238 }
2239
2240 out:
2241 dev_priv->psr.source_ok = true;
2242 return true;
2243 }
2244
2245 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
2246 {
2247 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2248 struct drm_device *dev = intel_dig_port->base.base.dev;
2249 struct drm_i915_private *dev_priv = dev->dev_private;
2250
2251 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
2252 WARN_ON(dev_priv->psr.active);
2253 lockdep_assert_held(&dev_priv->psr.lock);
2254
2255 /* Enable/Re-enable PSR on the host */
2256 intel_edp_psr_enable_source(intel_dp);
2257
2258 dev_priv->psr.active = true;
2259 }
2260
2261 void intel_edp_psr_enable(struct intel_dp *intel_dp)
2262 {
2263 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2264 struct drm_i915_private *dev_priv = dev->dev_private;
2265
2266 if (!HAS_PSR(dev)) {
2267 DRM_DEBUG_KMS("PSR not supported on this platform\n");
2268 return;
2269 }
2270
2271 if (!is_edp_psr(intel_dp)) {
2272 DRM_DEBUG_KMS("PSR not supported by this panel\n");
2273 return;
2274 }
2275
2276 mutex_lock(&dev_priv->psr.lock);
2277 if (dev_priv->psr.enabled) {
2278 DRM_DEBUG_KMS("PSR already in use\n");
2279 goto unlock;
2280 }
2281
2282 if (!intel_edp_psr_match_conditions(intel_dp))
2283 goto unlock;
2284
2285 dev_priv->psr.busy_frontbuffer_bits = 0;
2286
2287 intel_edp_psr_setup_vsc(intel_dp);
2288
2289 /* Avoid continuous PSR exit by masking memup and hpd */
2290 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
2291 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
2292
2293 /* Enable PSR on the panel */
2294 intel_edp_psr_enable_sink(intel_dp);
2295
2296 dev_priv->psr.enabled = intel_dp;
2297 unlock:
2298 mutex_unlock(&dev_priv->psr.lock);
2299 }
2300
2301 void intel_edp_psr_disable(struct intel_dp *intel_dp)
2302 {
2303 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2304 struct drm_i915_private *dev_priv = dev->dev_private;
2305
2306 mutex_lock(&dev_priv->psr.lock);
2307 if (!dev_priv->psr.enabled) {
2308 mutex_unlock(&dev_priv->psr.lock);
2309 return;
2310 }
2311
2312 if (dev_priv->psr.active) {
2313 I915_WRITE(EDP_PSR_CTL(dev),
2314 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
2315
2316 /* Wait till PSR is idle */
2317 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
2318 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
2319 DRM_ERROR("Timed out waiting for PSR Idle State\n");
2320
2321 dev_priv->psr.active = false;
2322 } else {
2323 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
2324 }
2325
2326 dev_priv->psr.enabled = NULL;
2327 mutex_unlock(&dev_priv->psr.lock);
2328
2329 cancel_delayed_work_sync(&dev_priv->psr.work);
2330 }
2331
2332 static void intel_edp_psr_work(struct work_struct *work)
2333 {
2334 struct drm_i915_private *dev_priv =
2335 container_of(work, typeof(*dev_priv), psr.work.work);
2336 struct intel_dp *intel_dp = dev_priv->psr.enabled;
2337
2338 /* We have to make sure PSR is ready for re-enable,
2339 * otherwise it stays disabled until the next full enable/disable cycle.
2340 * PSR might take some time to get fully disabled
2341 * and be ready for re-enable.
2342 */
2343 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
2344 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
2345 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
2346 return;
2347 }
2348
2349 mutex_lock(&dev_priv->psr.lock);
2350 intel_dp = dev_priv->psr.enabled;
2351
2352 if (!intel_dp)
2353 goto unlock;
2354
2355 /*
2356 * The delayed work can race with an invalidate hence we need to
2357 * recheck. Since psr_flush first clears this and then reschedules we
2358 * won't ever miss a flush when bailing out here.
2359 */
2360 if (dev_priv->psr.busy_frontbuffer_bits)
2361 goto unlock;
2362
2363 intel_edp_psr_do_enable(intel_dp);
2364 unlock:
2365 mutex_unlock(&dev_priv->psr.lock);
2366 }
2367
2368 static void intel_edp_psr_do_exit(struct drm_device *dev)
2369 {
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371
2372 if (dev_priv->psr.active) {
2373 u32 val = I915_READ(EDP_PSR_CTL(dev));
2374
2375 WARN_ON(!(val & EDP_PSR_ENABLE));
2376
2377 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
2378
2379 dev_priv->psr.active = false;
2380 }
2381
2382 }
2383
2384 void intel_edp_psr_invalidate(struct drm_device *dev,
2385 unsigned frontbuffer_bits)
2386 {
2387 struct drm_i915_private *dev_priv = dev->dev_private;
2388 struct drm_crtc *crtc;
2389 enum pipe pipe;
2390
2391 mutex_lock(&dev_priv->psr.lock);
2392 if (!dev_priv->psr.enabled) {
2393 mutex_unlock(&dev_priv->psr.lock);
2394 return;
2395 }
2396
2397 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2398 pipe = to_intel_crtc(crtc)->pipe;
2399
2400 intel_edp_psr_do_exit(dev);
2401
2402 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
2403
2404 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
2405 mutex_unlock(&dev_priv->psr.lock);
2406 }
2407
2408 void intel_edp_psr_flush(struct drm_device *dev,
2409 unsigned frontbuffer_bits)
2410 {
2411 struct drm_i915_private *dev_priv = dev->dev_private;
2412 struct drm_crtc *crtc;
2413 enum pipe pipe;
2414
2415 mutex_lock(&dev_priv->psr.lock);
2416 if (!dev_priv->psr.enabled) {
2417 mutex_unlock(&dev_priv->psr.lock);
2418 return;
2419 }
2420
2421 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2422 pipe = to_intel_crtc(crtc)->pipe;
2423 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
2424
2425 /*
2426 * On Haswell sprite plane updates don't result in a psr invalidating
2427 * signal in the hardware. Which means we need to manually fake this in
2428 * software for all flushes, not just when we've seen a preceding
2429 * invalidation through frontbuffer rendering.
2430 */
2431 if (IS_HASWELL(dev) &&
2432 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
2433 intel_edp_psr_do_exit(dev);
2434
2435 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
2436 schedule_delayed_work(&dev_priv->psr.work,
2437 msecs_to_jiffies(100));
2438 mutex_unlock(&dev_priv->psr.lock);
2439 }
2440
2441 void intel_edp_psr_init(struct drm_device *dev)
2442 {
2443 struct drm_i915_private *dev_priv = dev->dev_private;
2444
2445 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
2446 mutex_init(&dev_priv->psr.lock);
2447 }
2448
2449 static void intel_disable_dp(struct intel_encoder *encoder) 2098 static void intel_disable_dp(struct intel_encoder *encoder)
2450 { 2099 {
2451 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2100 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -4052,8 +3701,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4052 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count); 3701 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4053 3702
4054 if (attempts == 0) { 3703 if (attempts == 0) {
4055 DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n"); 3704 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4056 return -EIO; 3705 return -ETIMEDOUT;
4057 } 3706 }
4058 3707
4059 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) 3708 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
@@ -5117,7 +4766,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5117 * hard to tell without seeing the user of this function. 4766 * hard to tell without seeing the user of this function.
5118 * Check locking and ordering once that lands. 4767 * Check locking and ordering once that lands.
5119 */ 4768 */
5120 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) { 4769 if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
5121 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); 4770 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
5122 return; 4771 return;
5123 } 4772 }
@@ -5233,6 +4882,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5233 bool has_dpcd; 4882 bool has_dpcd;
5234 struct drm_display_mode *scan; 4883 struct drm_display_mode *scan;
5235 struct edid *edid; 4884 struct edid *edid;
4885 enum pipe pipe = INVALID_PIPE;
5236 4886
5237 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED; 4887 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
5238 4888
@@ -5301,11 +4951,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5301 if (IS_VALLEYVIEW(dev)) { 4951 if (IS_VALLEYVIEW(dev)) {
5302 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 4952 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5303 register_reboot_notifier(&intel_dp->edp_notifier); 4953 register_reboot_notifier(&intel_dp->edp_notifier);
4954
4955 /*
4956 * Figure out the current pipe for the initial backlight setup.
4957 * If the current pipe isn't valid, try the PPS pipe, and if that
4958 * fails just assume pipe A.
4959 */
4960 if (IS_CHERRYVIEW(dev))
4961 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
4962 else
4963 pipe = PORT_TO_PIPE(intel_dp->DP);
4964
4965 if (pipe != PIPE_A && pipe != PIPE_B)
4966 pipe = intel_dp->pps_pipe;
4967
4968 if (pipe != PIPE_A && pipe != PIPE_B)
4969 pipe = PIPE_A;
4970
4971 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
4972 pipe_name(pipe));
5304 } 4973 }
5305 4974
5306 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4975 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5307 intel_connector->panel.backlight_power = intel_edp_backlight_power; 4976 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5308 intel_panel_setup_backlight(connector); 4977 intel_panel_setup_backlight(connector, pipe);
5309 4978
5310 return true; 4979 return true;
5311 } 4980 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5c622ad2e9aa..f0a46ecf3f3a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -292,6 +292,9 @@ struct intel_crtc_config {
292 * between pch encoders and cpu encoders. */ 292 * between pch encoders and cpu encoders. */
293 bool has_pch_encoder; 293 bool has_pch_encoder;
294 294
295 /* Are we sending infoframes on the attached port */
296 bool has_infoframe;
297
295 /* CPU Transcoder for the pipe. Currently this can only differ from the 298 /* CPU Transcoder for the pipe. Currently this can only differ from the
296 * pipe on Haswell (where we have a special eDP transcoder). */ 299 * pipe on Haswell (where we have a special eDP transcoder). */
297 enum transcoder cpu_transcoder; 300 enum transcoder cpu_transcoder;
@@ -340,7 +343,10 @@ struct intel_crtc_config {
340 /* Selected dpll when shared or DPLL_ID_PRIVATE. */ 343 /* Selected dpll when shared or DPLL_ID_PRIVATE. */
341 enum intel_dpll_id shared_dpll; 344 enum intel_dpll_id shared_dpll;
342 345
343 /* PORT_CLK_SEL for DDI ports. */ 346 /*
347 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
348 * - enum skl_dpll on SKL
349 */
344 uint32_t ddi_pll_sel; 350 uint32_t ddi_pll_sel;
345 351
346 /* Actual register state of the dpll, for shared dpll cross-checking. */ 352 /* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -552,6 +558,7 @@ struct intel_hdmi {
552 void (*set_infoframes)(struct drm_encoder *encoder, 558 void (*set_infoframes)(struct drm_encoder *encoder,
553 bool enable, 559 bool enable,
554 struct drm_display_mode *adjusted_mode); 560 struct drm_display_mode *adjusted_mode);
561 bool (*infoframe_enabled)(struct drm_encoder *encoder);
555 }; 562 };
556 563
557 struct intel_dp_mst_encoder; 564 struct intel_dp_mst_encoder;
@@ -784,8 +791,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
784 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 791 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
785 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 792 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 793 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
787 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 794 void gen6_reset_rps_interrupts(struct drm_device *dev);
788 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 795 void gen6_enable_rps_interrupts(struct drm_device *dev);
796 void gen6_disable_rps_interrupts(struct drm_device *dev);
789 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); 797 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
790 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); 798 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
791 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) 799 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -992,21 +1000,16 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp);
992 void intel_edp_backlight_off(struct intel_dp *intel_dp); 1000 void intel_edp_backlight_off(struct intel_dp *intel_dp);
993 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); 1001 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
994 void intel_edp_panel_on(struct intel_dp *intel_dp); 1002 void intel_edp_panel_on(struct intel_dp *intel_dp);
995 void intel_edp_psr_enable(struct intel_dp *intel_dp);
996 void intel_edp_psr_disable(struct intel_dp *intel_dp);
997 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); 1003 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
998 void intel_edp_psr_invalidate(struct drm_device *dev,
999 unsigned frontbuffer_bits);
1000 void intel_edp_psr_flush(struct drm_device *dev,
1001 unsigned frontbuffer_bits);
1002 void intel_edp_psr_init(struct drm_device *dev);
1003
1004 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); 1004 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
1005 void intel_dp_mst_suspend(struct drm_device *dev); 1005 void intel_dp_mst_suspend(struct drm_device *dev);
1006 void intel_dp_mst_resume(struct drm_device *dev); 1006 void intel_dp_mst_resume(struct drm_device *dev);
1007 int intel_dp_max_link_bw(struct intel_dp *intel_dp); 1007 int intel_dp_max_link_bw(struct intel_dp *intel_dp);
1008 void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 1008 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
1009 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); 1009 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
1010 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
1011 void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
1012
1010 /* intel_dp_mst.c */ 1013 /* intel_dp_mst.c */
1011 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1014 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
1012 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 1015 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1096,7 +1099,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
1096 int fitting_mode); 1099 int fitting_mode);
1097 void intel_panel_set_backlight_acpi(struct intel_connector *connector, 1100 void intel_panel_set_backlight_acpi(struct intel_connector *connector,
1098 u32 level, u32 max); 1101 u32 level, u32 max);
1099 int intel_panel_setup_backlight(struct drm_connector *connector); 1102 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
1100 void intel_panel_enable_backlight(struct intel_connector *connector); 1103 void intel_panel_enable_backlight(struct intel_connector *connector);
1101 void intel_panel_disable_backlight(struct intel_connector *connector); 1104 void intel_panel_disable_backlight(struct intel_connector *connector);
1102 void intel_panel_destroy_backlight(struct drm_connector *connector); 1105 void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1106,6 +1109,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1106 struct drm_device *dev, 1109 struct drm_device *dev,
1107 struct drm_display_mode *fixed_mode, 1110 struct drm_display_mode *fixed_mode,
1108 struct drm_connector *connector); 1111 struct drm_connector *connector);
1112 void intel_backlight_register(struct drm_device *dev);
1113 void intel_backlight_unregister(struct drm_device *dev);
1114
1115
1116 /* intel_psr.c */
1117 bool intel_psr_is_enabled(struct drm_device *dev);
1118 void intel_psr_enable(struct intel_dp *intel_dp);
1119 void intel_psr_disable(struct intel_dp *intel_dp);
1120 void intel_psr_invalidate(struct drm_device *dev,
1121 unsigned frontbuffer_bits);
1122 void intel_psr_flush(struct drm_device *dev,
1123 unsigned frontbuffer_bits);
1124 void intel_psr_init(struct drm_device *dev);
1109 1125
1110 /* intel_runtime_pm.c */ 1126 /* intel_runtime_pm.c */
1111 int intel_power_domains_init(struct drm_i915_private *); 1127 int intel_power_domains_init(struct drm_i915_private *);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 58cf2e6b78f4..79f6d72179c5 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -156,7 +156,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
156 156
157 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); 157 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
158 158
159 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits); 159 intel_psr_invalidate(dev, obj->frontbuffer_bits);
160 } 160 }
161 161
162 /** 162 /**
@@ -182,7 +182,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
182 182
183 intel_mark_fb_busy(dev, frontbuffer_bits, NULL); 183 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
184 184
185 intel_edp_psr_flush(dev, frontbuffer_bits); 185 intel_psr_flush(dev, frontbuffer_bits);
186 186
187 /* 187 /*
188 * FIXME: Unconditional fbc flushing here is a rather gross hack and 188 * FIXME: Unconditional fbc flushing here is a rather gross hack and
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29baa53aef90..ec873338e84d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
166 POSTING_READ(VIDEO_DIP_CTL); 166 POSTING_READ(VIDEO_DIP_CTL);
167 } 167 }
168 168
169 static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
170 {
171 struct drm_device *dev = encoder->dev;
172 struct drm_i915_private *dev_priv = dev->dev_private;
173 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
174 u32 val = I915_READ(VIDEO_DIP_CTL);
175
176 if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
177 return val & VIDEO_DIP_ENABLE;
178
179 return false;
180}
181
169static void ibx_write_infoframe(struct drm_encoder *encoder, 182static void ibx_write_infoframe(struct drm_encoder *encoder,
170 enum hdmi_infoframe_type type, 183 enum hdmi_infoframe_type type,
171 const void *frame, ssize_t len) 184 const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
204 POSTING_READ(reg); 217 POSTING_READ(reg);
205} 218}
206 219
220static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
221{
222 struct drm_device *dev = encoder->dev;
223 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
225 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
226 u32 val = I915_READ(reg);
227
228 return val & VIDEO_DIP_ENABLE;
229}
230
207static void cpt_write_infoframe(struct drm_encoder *encoder, 231static void cpt_write_infoframe(struct drm_encoder *encoder,
208 enum hdmi_infoframe_type type, 232 enum hdmi_infoframe_type type,
209 const void *frame, ssize_t len) 233 const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
245 POSTING_READ(reg); 269 POSTING_READ(reg);
246} 270}
247 271
272static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
273{
274 struct drm_device *dev = encoder->dev;
275 struct drm_i915_private *dev_priv = dev->dev_private;
276 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
277 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
278 u32 val = I915_READ(reg);
279
280 return val & VIDEO_DIP_ENABLE;
281}
282
248static void vlv_write_infoframe(struct drm_encoder *encoder, 283static void vlv_write_infoframe(struct drm_encoder *encoder,
249 enum hdmi_infoframe_type type, 284 enum hdmi_infoframe_type type,
250 const void *frame, ssize_t len) 285 const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
283 POSTING_READ(reg); 318 POSTING_READ(reg);
284} 319}
285 320
321static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
322{
323 struct drm_device *dev = encoder->dev;
324 struct drm_i915_private *dev_priv = dev->dev_private;
325 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
326 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
327 u32 val = I915_READ(reg);
328
329 return val & VIDEO_DIP_ENABLE;
330}
331
286static void hsw_write_infoframe(struct drm_encoder *encoder, 332static void hsw_write_infoframe(struct drm_encoder *encoder,
287 enum hdmi_infoframe_type type, 333 enum hdmi_infoframe_type type,
288 const void *frame, ssize_t len) 334 const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
320 POSTING_READ(ctl_reg); 366 POSTING_READ(ctl_reg);
321} 367}
322 368
369static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
370{
371 struct drm_device *dev = encoder->dev;
372 struct drm_i915_private *dev_priv = dev->dev_private;
373 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
374 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
375 u32 val = I915_READ(ctl_reg);
376
377 return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
378 VIDEO_DIP_ENABLE_VS_HSW);
379}
380
323/* 381/*
324 * The data we write to the DIP data buffer registers is 1 byte bigger than the 382 * The data we write to the DIP data buffer registers is 1 byte bigger than the
325 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting 383 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -724,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
724 if (tmp & HDMI_MODE_SELECT_HDMI) 782 if (tmp & HDMI_MODE_SELECT_HDMI)
725 pipe_config->has_hdmi_sink = true; 783 pipe_config->has_hdmi_sink = true;
726 784
785 if (intel_hdmi->infoframe_enabled(&encoder->base))
786 pipe_config->has_infoframe = true;
787
727 if (tmp & SDVO_AUDIO_ENABLE) 788 if (tmp & SDVO_AUDIO_ENABLE)
728 pipe_config->has_audio = true; 789 pipe_config->has_audio = true;
729 790
@@ -925,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
925 986
926 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; 987 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
927 988
989 if (pipe_config->has_hdmi_sink)
990 pipe_config->has_infoframe = true;
991
928 if (intel_hdmi->color_range_auto) { 992 if (intel_hdmi->color_range_auto) {
929 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 993 /* See CEA-861-E - 5.1 Default Encoding Parameters */
930 if (pipe_config->has_hdmi_sink && 994 if (pipe_config->has_hdmi_sink &&
@@ -1619,18 +1683,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1619 if (IS_VALLEYVIEW(dev)) { 1683 if (IS_VALLEYVIEW(dev)) {
1620 intel_hdmi->write_infoframe = vlv_write_infoframe; 1684 intel_hdmi->write_infoframe = vlv_write_infoframe;
1621 intel_hdmi->set_infoframes = vlv_set_infoframes; 1685 intel_hdmi->set_infoframes = vlv_set_infoframes;
1686 intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
1622 } else if (IS_G4X(dev)) { 1687 } else if (IS_G4X(dev)) {
1623 intel_hdmi->write_infoframe = g4x_write_infoframe; 1688 intel_hdmi->write_infoframe = g4x_write_infoframe;
1624 intel_hdmi->set_infoframes = g4x_set_infoframes; 1689 intel_hdmi->set_infoframes = g4x_set_infoframes;
1690 intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
1625 } else if (HAS_DDI(dev)) { 1691 } else if (HAS_DDI(dev)) {
1626 intel_hdmi->write_infoframe = hsw_write_infoframe; 1692 intel_hdmi->write_infoframe = hsw_write_infoframe;
1627 intel_hdmi->set_infoframes = hsw_set_infoframes; 1693 intel_hdmi->set_infoframes = hsw_set_infoframes;
1694 intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
1628 } else if (HAS_PCH_IBX(dev)) { 1695 } else if (HAS_PCH_IBX(dev)) {
1629 intel_hdmi->write_infoframe = ibx_write_infoframe; 1696 intel_hdmi->write_infoframe = ibx_write_infoframe;
1630 intel_hdmi->set_infoframes = ibx_set_infoframes; 1697 intel_hdmi->set_infoframes = ibx_set_infoframes;
1698 intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
1631 } else { 1699 } else {
1632 intel_hdmi->write_infoframe = cpt_write_infoframe; 1700 intel_hdmi->write_infoframe = cpt_write_infoframe;
1633 intel_hdmi->set_infoframes = cpt_set_infoframes; 1701 intel_hdmi->set_infoframes = cpt_set_infoframes;
1702 intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
1634 } 1703 }
1635 1704
1636 if (HAS_DDI(dev)) 1705 if (HAS_DDI(dev))
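[Editor's note] Every infoframe_enabled() variant added above has the same shape: read the platform's VIDEO_DIP control register and report whether the enable bit(s) are set, so intel_hdmi_get_config() can mirror hardware state into pipe_config->has_infoframe. A condensed, hedged model of the dispatch — the struct and function names other than infoframe_enabled are illustrative, not from the patch:

/* Hedged sketch of the per-platform readout vtable used above. */
struct drm_encoder;	/* opaque here; the real type lives in drm headers */

struct example_hdmi_ops {
	bool (*infoframe_enabled)(struct drm_encoder *encoder);
};

static void example_get_config(const struct example_hdmi_ops *ops,
			       struct drm_encoder *encoder,
			       bool *has_infoframe)
{
	/* One indirect call replaces per-platform register knowledge at
	 * the state-readout site. */
	if (ops->infoframe_enabled(encoder))
		*has_infoframe = true;
}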
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6025ac754c37..e588376227ea 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,11 +136,10 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
 
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
 #define RING_EXECLIST_QFULL		(1 << 0x2)
 #define RING_EXECLIST1_VALID		(1 << 0x3)
 #define RING_EXECLIST0_VALID		(1 << 0x4)
@@ -204,6 +203,9 @@ enum {
 };
 #define GEN8_CTX_ID_SHIFT 32
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+		struct intel_context *ctx);
+
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
  * @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
 {
 	WARN_ON(i915.enable_ppgtt == -1);
 
+	if (INTEL_INFO(dev)->gen >= 9)
+		return 1;
+
 	if (enable_execlists == 0)
 		return 0;
 
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 				 struct drm_i915_gem_object *ctx_obj0,
 				 struct drm_i915_gem_object *ctx_obj1)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t temp = 0;
 	uint32_t desc[4];
 	unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
 	 */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (IS_CHERRYVIEW(dev_priv->dev)) {
+	if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
 		if (dev_priv->uncore.fw_rendercount++ == 0)
 			dev_priv->uncore.funcs.force_wake_get(dev_priv,
 							      FORCEWAKE_RENDER);
 		if (dev_priv->uncore.fw_mediacount++ == 0)
 			dev_priv->uncore.funcs.force_wake_get(dev_priv,
 							      FORCEWAKE_MEDIA);
+		if (INTEL_INFO(dev)->gen >= 9) {
+			if (dev_priv->uncore.fw_blittercount++ == 0)
+				dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							FORCEWAKE_BLITTER);
+		}
 	} else {
 		if (dev_priv->uncore.forcewake_count++ == 0)
 			dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
 	/* Release Force Wakeup (see the big comment above). */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (IS_CHERRYVIEW(dev_priv->dev)) {
+	if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
 		if (--dev_priv->uncore.fw_rendercount == 0)
 			dev_priv->uncore.funcs.force_wake_put(dev_priv,
 							      FORCEWAKE_RENDER);
 		if (--dev_priv->uncore.fw_mediacount == 0)
 			dev_priv->uncore.funcs.force_wake_put(dev_priv,
 							      FORCEWAKE_MEDIA);
+		if (INTEL_INFO(dev)->gen >= 9) {
+			if (--dev_priv->uncore.fw_blittercount == 0)
+				dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							FORCEWAKE_BLITTER);
+		}
 	} else {
 		if (--dev_priv->uncore.forcewake_count == 0)
 			dev_priv->uncore.funcs.force_wake_put(dev_priv,
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+				    struct drm_i915_gem_object *ring_obj,
+				    u32 tail)
 {
 	struct page *page;
 	uint32_t *reg_state;
@@ -350,6 +368,7 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
 	reg_state = kmap_atomic(page);
 
 	reg_state[CTX_RING_TAIL+1] = tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
 
 	kunmap_atomic(reg_state);
 
@@ -360,21 +379,25 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
 				      struct intel_context *to0, u32 tail0,
 				      struct intel_context *to1, u32 tail1)
 {
-	struct drm_i915_gem_object *ctx_obj0;
+	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
 	struct drm_i915_gem_object *ctx_obj1 = NULL;
+	struct intel_ringbuffer *ringbuf1 = NULL;
 
-	ctx_obj0 = to0->engine[ring->id].state;
 	BUG_ON(!ctx_obj0);
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-	execlists_ctx_write_tail(ctx_obj0, tail0);
+	execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
 	if (to1) {
+		ringbuf1 = to1->engine[ring->id].ringbuf;
 		ctx_obj1 = to1->engine[ring->id].state;
 		BUG_ON(!ctx_obj1);
 		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
-		execlists_ctx_write_tail(ctx_obj1, tail1);
+		execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
 	}
 
 	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -384,7 +407,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
 	struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
 	struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	assert_spin_locked(&ring->execlist_lock);
 
@@ -401,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 			 * will update tail past first request's workload */
 			cursor->elsp_submitted = req0->elsp_submitted;
 			list_del(&req0->execlist_link);
-			queue_work(dev_priv->wq, &req0->work);
+			list_add_tail(&req0->execlist_link,
+				&ring->execlist_retired_req_list);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
@@ -423,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 					   u32 request_id)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_ctx_submit_request *head_req;
 
 	assert_spin_locked(&ring->execlist_lock);
@@ -441,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 
 			if (--head_req->elsp_submitted <= 0) {
 				list_del(&head_req->execlist_link);
-				queue_work(dev_priv->wq, &head_req->work);
+				list_add_tail(&head_req->execlist_link,
+					&ring->execlist_retired_req_list);
 				return true;
 			}
 		}
@@ -510,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
 		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
 }
 
-static void execlists_free_request_task(struct work_struct *work)
-{
-	struct intel_ctx_submit_request *req =
-		container_of(work, struct intel_ctx_submit_request, work);
-	struct drm_device *dev = req->ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_unreference(req->ctx);
-	mutex_unlock(&dev->struct_mutex);
-
-	kfree(req);
-}
-
 static int execlists_context_queue(struct intel_engine_cs *ring,
 				   struct intel_context *to,
 				   u32 tail)
@@ -540,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		return -ENOMEM;
 	req->ctx = to;
 	i915_gem_context_reference(req->ctx);
+
+	if (to != ring->default_context)
+		intel_lr_context_pin(ring, to);
+
 	req->ring = ring;
 	req->tail = tail;
-	INIT_WORK(&req->work, execlists_free_request_task);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -561,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 
 		if (to == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
-			queue_work(dev_priv->wq, &tail_req->work);
+			list_add_tail(&tail_req->execlist_link,
+				&ring->execlist_retired_req_list);
 		}
 	}
 
@@ -731,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 	return 0;
 }
 
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+	struct intel_ctx_submit_request *req, *tmp;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	unsigned long flags;
+	struct list_head retired_list;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	if (list_empty(&ring->execlist_retired_req_list))
+		return;
+
+	INIT_LIST_HEAD(&retired_list);
+	spin_lock_irqsave(&ring->execlist_lock, flags);
+	list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+	spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+		struct intel_context *ctx = req->ctx;
+		struct drm_i915_gem_object *ctx_obj =
+				ctx->engine[ring->id].state;
+
+		if (ctx_obj && (ctx != ring->default_context))
+			intel_lr_context_unpin(ring, ctx);
+		intel_runtime_pm_put(dev_priv);
+		i915_gem_context_unreference(req->ctx);
+		list_del(&req->execlist_link);
+		kfree(req);
+	}
+}
+
 void intel_logical_ring_stop(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -791,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
 	execlists_context_queue(ring, ctx, ringbuf->tail);
 }
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	int ret = 0;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	if (ctx->engine[ring->id].unpin_count++ == 0) {
+		ret = i915_gem_obj_ggtt_pin(ctx_obj,
+				GEN8_LR_CONTEXT_ALIGN, 0);
+		if (ret)
+			goto reset_unpin_count;
+
+		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+		if (ret)
+			goto unpin_ctx_obj;
+	}
+
+	return ret;
+
+unpin_ctx_obj:
+	i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+	ctx->engine[ring->id].unpin_count = 0;
+
+	return ret;
+}
+
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+	if (ctx_obj) {
+		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+		if (--ctx->engine[ring->id].unpin_count == 0) {
+			intel_unpin_ringbuffer_obj(ringbuf);
+			i915_gem_object_ggtt_unpin(ctx_obj);
+		}
+	}
+}
+
 static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 				    struct intel_context *ctx)
 {
+	int ret;
+
 	if (ring->outstanding_lazy_seqno)
 		return 0;
 
@@ -804,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 	if (request == NULL)
 		return -ENOMEM;
 
+	if (ctx != ring->default_context) {
+		ret = intel_lr_context_pin(ring, ctx);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+	}
+
 	/* Hold a reference to the context this request belongs to
 	 * (we will need it when the time comes to emit/retire the
 	 * request).
@@ -989,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 	return 0;
 }
 
+static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
+					       struct intel_context *ctx)
+{
+	int ret, i;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *w = &dev_priv->workarounds;
+
+	if (WARN_ON(w->count == 0))
+		return 0;
+
+	ring->gpu_caches_dirty = true;
+	ret = logical_ring_flush_all_caches(ringbuf);
+	if (ret)
+		return ret;
+
+	ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	for (i = 0; i < w->count; i++) {
+		intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+	}
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+	intel_logical_ring_advance(ringbuf);
+
+	ring->gpu_caches_dirty = true;
+	ret = logical_ring_flush_all_caches(ringbuf);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1032,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 
 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	return ret;
+	return init_workarounds_ring(ring);
 }
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	init_waitqueue_head(&ring->irq_queue);
 
 	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
 	ring->next_context_status_buffer = 0;
 
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
 	ring->init = gen8_init_render_ring;
+	ring->init_context = intel_logical_ring_workarounds_emit;
 	ring->cleanup = intel_fini_pipe_control;
 	ring->get_seqno = gen8_get_seqno;
 	ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct page *page;
 	uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
 	reg_state[CTX_RING_TAIL+1] = 0;
 	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	/* Ring buffer start address is not known until the buffer is pinned.
+	 * It is written to the context image in execlists_update_context()
+	 */
 	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
 	reg_state[CTX_RING_BUFFER_CONTROL+1] =
 			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 
 		if (ctx_obj) {
+			struct intel_ringbuffer *ringbuf =
+					ctx->engine[i].ringbuf;
+			struct intel_engine_cs *ring = ringbuf->ring;
+
+			if (ctx == ring->default_context) {
+				intel_unpin_ringbuffer_obj(ringbuf);
+				i915_gem_object_ggtt_unpin(ctx_obj);
+			}
 			intel_destroy_ringbuffer_obj(ringbuf);
 			kfree(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+	WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
 
 	switch (ring->id) {
 	case RCS:
-		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+		if (INTEL_INFO(ring->dev)->gen >= 9)
+			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+		else
+			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
 		break;
 	case VCS:
 	case BCS:
@@ -1649,7 +1794,7 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
 	return ret;
 }
 
-static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *default_ctx_obj)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1659,15 +1804,11 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
 	ring->status_page.page_addr =
 			kmap(sg_page(default_ctx_obj->pages->sgl));
-	if (ring->status_page.page_addr == NULL)
-		return -ENOMEM;
 	ring->status_page.obj = default_ctx_obj;
 
 	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
 			(u32)ring->status_page.gfx_addr);
 	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-
-	return 0;
 }
 
 /**
@@ -1686,6 +1827,7 @@ static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
+	const bool is_global_default_ctx = (ctx == ring->default_context);
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
@@ -1705,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
-		drm_gem_object_unreference(&ctx_obj->base);
-		return ret;
+	if (is_global_default_ctx) {
+		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+					ret);
+			drm_gem_object_unreference(&ctx_obj->base);
+			return ret;
+		}
 	}
 
 	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
 	if (!ringbuf) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
 				ring->name);
-		i915_gem_object_ggtt_unpin(ctx_obj);
-		drm_gem_object_unreference(&ctx_obj->base);
 		ret = -ENOMEM;
-		return ret;
+		goto error_unpin_ctx;
 	}
 
 	ringbuf->ring = ring;
@@ -1732,43 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	ringbuf->space = ringbuf->size;
 	ringbuf->last_retired_head = -1;
 
-	/* TODO: For now we put this in the mappable region so that we can reuse
-	 * the existing ringbuffer code which ioremaps it. When we start
-	 * creating many contexts, this will no longer work and we must switch
-	 * to a kmapish interface.
-	 */
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_DEBUG_DRIVER(
+					"Failed to allocate ringbuffer obj %s: %d\n",
 				ring->name, ret);
-		goto error;
+			goto error_free_rbuf;
+		}
+
+		if (is_global_default_ctx) {
+			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+			if (ret) {
+				DRM_ERROR(
+					"Failed to pin and map ringbuffer %s: %d\n",
+					ring->name, ret);
+				goto error_destroy_rbuf;
+			}
+		}
+
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
 	}
 
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx == ring->default_context) {
-		ret = lrc_setup_hardware_status_page(ring, ctx_obj);
-		if (ret) {
-			DRM_ERROR("Failed to setup hardware status page\n");
-			goto error;
-		}
-	}
+	if (ctx == ring->default_context)
+		lrc_setup_hardware_status_page(ring, ctx_obj);
 
 	if (ring->id == RCS && !ctx->rcs_initialized) {
+		if (ring->init_context) {
+			ret = ring->init_context(ring, ctx);
+			if (ret)
+				DRM_ERROR("ring init context: %d\n", ret);
+		}
+
 		ret = intel_lr_context_render_state_init(ring, ctx);
 		if (ret) {
 			DRM_ERROR("Init render state failed: %d\n", ret);
 			ctx->engine[ring->id].ringbuf = NULL;
 			ctx->engine[ring->id].state = NULL;
-			intel_destroy_ringbuffer_obj(ringbuf);
 			goto error;
 		}
 		ctx->rcs_initialized = true;
@@ -1777,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	return 0;
 
 error:
+	if (is_global_default_ctx)
+		intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+	intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
 	kfree(ringbuf);
-	i915_gem_object_ggtt_unpin(ctx_obj);
+error_unpin_ctx:
+	if (is_global_default_ctx)
+		i915_gem_object_ggtt_unpin(ctx_obj);
 	drm_gem_object_unreference(&ctx_obj->base);
 	return ret;
 }
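[Editor's note] The intel_lr_context_pin()/intel_lr_context_unpin() pair introduced above turns GGTT pinning of execlist contexts into a per-engine reference count: the context backing object and ring buffer are pinned on the first outstanding reference and released when the last retired request drops its reference. A stripped-down, hedged model of that invariant — the i915 GEM calls are replaced by illustrative stand-ins:

/* Hedged sketch of the unpin_count scheme; the pinned flag stands in
 * for the real GGTT pin/unpin side effects. */
struct example_engine_ctx {
	int unpin_count;	/* outstanding references to pinned state */
	int pinned;		/* 1 while the backing store is "in the GGTT" */
};

static int example_lr_context_pin(struct example_engine_ctx *ctx)
{
	if (ctx->unpin_count++ == 0)
		ctx->pinned = 1;	/* first reference: pin for real */
	return 0;
}

static void example_lr_context_unpin(struct example_engine_ctx *ctx)
{
	if (--ctx->unpin_count == 0)
		ctx->pinned = 0;	/* last reference gone: unpin */
}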
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4bf28c5..14b216b9be7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
 #ifndef _INTEL_LRC_H_
 #define _INTEL_LRC_H_
 
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
 /* Execlists regs */
 #define RING_ELSP(ring)			((ring)->mmio_base+0x230)
 #define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+		struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
 	u32 tail;
 
 	struct list_head execlist_link;
-	struct work_struct work;
 
 	int elsp_submitted;
 };
 
 void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2b50c98dd6b0..c03d457a5150 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1116,7 +1116,7 @@ out:
 	drm_connector_register(connector);
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
-	intel_panel_setup_backlight(connector);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index b001c90312e7..4d63839bd9b4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return 0;
+
 	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
@@ -536,12 +539,15 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
+	struct intel_panel *panel = &connector->panel;
+	u32 val = 0;
 
 	mutex_lock(&dev_priv->backlight_lock);
 
-	val = dev_priv->display.get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
+	if (panel->backlight.enabled) {
+		val = dev_priv->display.get_backlight(connector);
+		val = intel_panel_compute_brightness(connector, val);
+	}
 
 	mutex_unlock(&dev_priv->backlight_lock);
 
@@ -602,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 tmp;
 
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
 	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
 }
@@ -625,10 +634,9 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 hw_level;
 
-	if (!panel->backlight.present || pipe == INVALID_PIPE)
+	if (!panel->backlight.present)
 		return;
 
 	mutex_lock(&dev_priv->backlight_lock);
@@ -656,6 +664,12 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 hw_level;
 
+	/*
+	 * INVALID_PIPE may occur during driver init because
+	 * connection_mutex isn't held across the entire backlight
+	 * setup + modeset readout, and the BIOS can issue the
+	 * requests at any time.
+	 */
 	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
 
@@ -717,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 tmp;
 
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
 	intel_panel_actually_set_backlight(connector, 0);
 
 	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -728,9 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe = intel_get_pipe_from_connector(connector);
 
-	if (!panel->backlight.present || pipe == INVALID_PIPE)
+	if (!panel->backlight.present)
 		return;
 
 	/*
@@ -906,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 ctl, ctl2;
 
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return;
+
 	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
 	if (ctl2 & BLM_PWM_ENABLE) {
 		DRM_DEBUG_KMS("backlight already enabled\n");
@@ -934,7 +953,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 
-	if (!panel->backlight.present || pipe == INVALID_PIPE)
+	if (!panel->backlight.present)
 		return;
 
 	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
@@ -1026,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
 	if (WARN_ON(panel->backlight.device))
 		return -ENODEV;
 
+	if (!panel->backlight.present)
+		return 0;
+
 	WARN_ON(panel->backlight.max == 0);
 
 	memset(&props, 0, sizeof(props));
@@ -1061,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
 		panel->backlight.device = NULL;
 		return -ENODEV;
 	}
+
+	DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+		      connector->base.name);
+
 	return 0;
 }
 
@@ -1115,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
 	return scale(min, 0, 255, 0, panel->backlight.max);
 }
 
-static int bdw_setup_backlight(struct intel_connector *connector)
+static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1141,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	return 0;
 }
 
-static int pch_setup_backlight(struct intel_connector *connector)
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1168,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	return 0;
 }
 
-static int i9xx_setup_backlight(struct intel_connector *connector)
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1200,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	return 0;
 }
 
-static int i965_setup_backlight(struct intel_connector *connector)
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1230,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	return 0;
 }
 
-static int vlv_setup_backlight(struct intel_connector *connector)
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_panel *panel = &connector->panel;
-	enum pipe pipe;
+	enum pipe p;
 	u32 ctl, ctl2, val;
 
-	for_each_pipe(dev_priv, pipe) {
-		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+	for_each_pipe(dev_priv, p) {
+		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
 
 		/* Skip if the modulation freq is already set */
 		if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
 			continue;
 
 		cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-		I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+		I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
 			   cur_val);
 	}
 
-	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+		return -ENODEV;
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
 	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
-	ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+	ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
 	panel->backlight.max = ctl >> 16;
 	if (!panel->backlight.max)
 		return -ENODEV;
 
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
-	val = _vlv_get_backlight(dev, PIPE_A);
+	val = _vlv_get_backlight(dev, pipe);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
 	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1269,7 +1298,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
 	return 0;
 }
 
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1288,7 +1317,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 
 	/* set level and max in panel struct */
 	mutex_lock(&dev_priv->backlight_lock);
-	ret = dev_priv->display.setup_backlight(intel_connector);
+	ret = dev_priv->display.setup_backlight(intel_connector, pipe);
 	mutex_unlock(&dev_priv->backlight_lock);
 
 	if (ret) {
@@ -1297,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 		return ret;
 	}
 
-	intel_backlight_device_register(intel_connector);
-
 	panel->backlight.present = true;
 
-	DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
-		      "sysfs interface %sregistered\n",
+	DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+		      connector->name,
 		      panel->backlight.enabled ? "enabled" : "disabled",
-		      panel->backlight.level, panel->backlight.max,
-		      panel->backlight.device ? "" : "not ");
+		      panel->backlight.level, panel->backlight.max);
 
 	return 0;
 }
@@ -1316,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
 	struct intel_panel *panel = &intel_connector->panel;
 
 	panel->backlight.present = false;
-	intel_backlight_device_unregister(intel_connector);
 }
 
 /* Set up chip specific backlight functions */
@@ -1379,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
 		drm_mode_destroy(intel_connector->base.dev,
 				panel->downclock_mode);
 }
+
+void intel_backlight_register(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+		intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+		intel_backlight_device_unregister(connector);
+}
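[Editor's note] intel_backlight_register()/intel_backlight_unregister() above decouple sysfs registration from intel_panel_setup_backlight(), so the backlight device is only exposed once every connector is initialised — and intel_backlight_device_register() itself now bails out early when no backlight is present. A reduced, hedged model of that deferred-registration walk; the list plumbing and connector type are illustrative stand-ins for the DRM connector list:

/* Hedged sketch of the deferred registration walk over all connectors. */
#include <stdbool.h>

struct example_connector {
	struct example_connector *next;
	bool backlight_present;		/* set by the setup_backlight hook */
	bool sysfs_registered;
};

static void example_backlight_register(struct example_connector *head)
{
	for (struct example_connector *c = head; c; c = c->next) {
		/* mirrors the new !panel->backlight.present early return */
		if (!c->backlight_present)
			continue;
		c->sysfs_registered = true;
	}
}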
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8a0788dcf106..9af0af49382e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4451,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 			   dev_priv->rps.min_freq_softlimit);
 
 	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
-				& GENFREQSTATUS) == 0, 5))
+				& GENFREQSTATUS) == 0, 100))
 		DRM_ERROR("timed out waiting for Punit\n");
 
 	vlv_force_gfx_clock(dev_priv, false);
@@ -4504,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 		 "Odd GPU freq value\n"))
 		val &= ~1;
 
-	if (val != dev_priv->rps.cur_freq) {
-		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-				 dev_priv->rps.cur_freq,
-				 vlv_gpu_freq(dev_priv, val), val);
-
+	if (val != dev_priv->rps.cur_freq)
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-	}
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
@@ -4519,26 +4513,6 @@
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-				   ~dev_priv->pm_rps_events);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-	 * gen8_enable_rps will clean up. */
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-}
-
 static void gen9_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4546,36 +4520,12 @@ static void gen9_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
-				~dev_priv->pm_rps_events);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-}
-
 static void gen6_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
-	if (IS_BROADWELL(dev))
-		gen8_disable_rps_interrupts(dev);
-	else
-		gen6_disable_rps_interrupts(dev);
 }
 
 static void cherryview_disable_rps(struct drm_device *dev)
@@ -4583,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	gen8_disable_rps_interrupts(dev);
 }
 
 static void valleyview_disable_rps(struct drm_device *dev)
@@ -4598,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
-	gen6_disable_rps_interrupts(dev);
 }
 
 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -4663,47 +4609,46 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return i915.enable_rc6;
 }
 
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t rp_state_cap;
+	u32 ddcc_status = 0;
+	int ret;
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON(dev_priv->rps.pm_iir);
-	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON(dev_priv->rps.pm_iir);
-	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	/* All of these values are in units of 50MHz */
 	dev_priv->rps.cur_freq = 0;
-	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
 	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
 	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
-	/* XXX: only BYT has a special efficient freq */
-	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	/* hw_max = RP0 until we check for overclocking */
 	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = sandybridge_pcode_read(dev_priv,
+					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					&ddcc_status);
+		if (0 == ret)
+			dev_priv->rps.efficient_freq =
+				(ddcc_status >> 8) & 0xff;
+	}
+
 	/* Preserve min/max settings in case of re-init */
 	if (dev_priv->rps.max_freq_softlimit == 0)
 		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+	if (dev_priv->rps.min_freq_softlimit == 0) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4645 dev_priv->rps.min_freq_softlimit =
4646 /* max(RPe, 450 MHz) */
4647 max(dev_priv->rps.efficient_freq, (u8) 9);
4648 else
4649 dev_priv->rps.min_freq_softlimit =
4650 dev_priv->rps.min_freq;
4651 }
4707} 4652}
4708 4653
4709static void gen9_enable_rps(struct drm_device *dev) 4654static void gen9_enable_rps(struct drm_device *dev)
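
For reference, the RP_STATE_CAP decode in gen6_init_rps_frequencies() above packs each operating point into one byte, in 50 MHz units. A minimal user-space sketch of the same unpacking, with a made-up register value standing in for the I915_READ():

    #include <stdint.h>
    #include <stdio.h>

    /* Same decode as gen6_init_rps_frequencies(): GEN6_RP_STATE_CAP packs
     * RP0 in bits 7:0, RP1 in bits 15:8 and RPn (min) in bits 23:16,
     * all in 50 MHz units. */
    int main(void)
    {
        uint32_t rp_state_cap = 0x00090c16;         /* hypothetical readout */
        unsigned rp0 = (rp_state_cap >> 0) & 0xff;  /* 22 -> 1100 MHz (max) */
        unsigned rp1 = (rp_state_cap >> 8) & 0xff;  /* 12 -> 600 MHz */
        unsigned rpn = (rp_state_cap >> 16) & 0xff; /*  9 -> 450 MHz (min) */

        printf("RP0 %u MHz, RP1 %u MHz, RPn %u MHz\n",
               rp0 * 50, rp1 * 50, rpn * 50);
        return 0;
    }

The opcode 9 floor in the HSW/BDW softlimit clamp corresponds to 9 * 50 = 450 MHz, which is what the max(RPe, 450 MHz) comment refers to.
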
@@ -4749,7 +4694,7 @@ static void gen8_enable_rps(struct drm_device *dev)
4749{ 4694{
4750 struct drm_i915_private *dev_priv = dev->dev_private; 4695 struct drm_i915_private *dev_priv = dev->dev_private;
4751 struct intel_engine_cs *ring; 4696 struct intel_engine_cs *ring;
4752 uint32_t rc6_mask = 0, rp_state_cap; 4697 uint32_t rc6_mask = 0;
4753 int unused; 4698 int unused;
4754 4699
4755 /* 1a: Software RC state - RC0 */ 4700 /* 1a: Software RC state - RC0 */
@@ -4762,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
4762 /* 2a: Disable RC states. */ 4707 /* 2a: Disable RC states. */
4763 I915_WRITE(GEN6_RC_CONTROL, 0); 4708 I915_WRITE(GEN6_RC_CONTROL, 0);
4764 4709
4765 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4710 /* Initialize rps frequencies */
4766 parse_rp_state_cap(dev_priv, rp_state_cap); 4711 gen6_init_rps_frequencies(dev);
4767 4712
4768 /* 2b: Program RC6 thresholds.*/ 4713 /* 2b: Program RC6 thresholds.*/
4769 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 4714 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4821,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
4821 4766
4822 /* 6: Ring frequency + overclocking (our driver does this later) */ 4767 /* 6: Ring frequency + overclocking (our driver does this later) */
4823 4768
4824 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8); 4769 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4825 4770 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4826 gen8_enable_rps_interrupts(dev);
4827 4771
4828 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 4772 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4829} 4773}
@@ -4832,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
4832{ 4776{
4833 struct drm_i915_private *dev_priv = dev->dev_private; 4777 struct drm_i915_private *dev_priv = dev->dev_private;
4834 struct intel_engine_cs *ring; 4778 struct intel_engine_cs *ring;
4835 u32 rp_state_cap;
4836 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 4779 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4837 u32 gtfifodbg; 4780 u32 gtfifodbg;
4838 int rc6_mode; 4781 int rc6_mode;
@@ -4856,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
4856 4799
4857 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 4800 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4858 4801
4859 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4802 /* Initialize rps frequencies */
4860 4803 gen6_init_rps_frequencies(dev);
4861 parse_rp_state_cap(dev_priv, rp_state_cap);
4862 4804
4863 /* disable the counters and set deterministic thresholds */ 4805 /* disable the counters and set deterministic thresholds */
4864 I915_WRITE(GEN6_RC_CONTROL, 0); 4806 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4921,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
4921 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4863 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4922 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4864 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4923 4865
4924 gen6_enable_rps_interrupts(dev);
4925
4926 rc6vids = 0; 4866 rc6vids = 0;
4927 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 4867 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4928 if (IS_GEN6(dev) && ret) { 4868 if (IS_GEN6(dev) && ret) {
@@ -4975,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
4975 * to use for memory access. We do this by specifying the IA frequency 4915 * to use for memory access. We do this by specifying the IA frequency
4976 * the PCU should use as a reference to determine the ring frequency. 4916 * the PCU should use as a reference to determine the ring frequency.
4977 */ 4917 */
4978 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; 4918 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
4979 gpu_freq--) { 4919 gpu_freq--) {
4980 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; 4920 int diff = dev_priv->rps.max_freq - gpu_freq;
4981 unsigned int ia_freq = 0, ring_freq = 0; 4921 unsigned int ia_freq = 0, ring_freq = 0;
4982 4922
4983 if (INTEL_INFO(dev)->gen >= 8) { 4923 if (INTEL_INFO(dev)->gen >= 8) {
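
The ring frequency table walk above now spans the full hardware range instead of the softlimits. A toy sketch of just the iteration order, with made-up limits; the per-entry IA/ring frequency math and the GEN6_PCODE_WRITE_MIN_FREQ_TABLE writes are elided:

    #include <stdio.h>

    /* Iteration order of the ring frequency table walk, from the
     * hardware max down to the hardware min, in 50 MHz units. */
    int main(void)
    {
        int max_freq = 22, min_freq = 9; /* made-up hardware limits */

        for (int gpu_freq = max_freq; gpu_freq >= min_freq; gpu_freq--) {
            int diff = max_freq - gpu_freq; /* entry index, as above */
            printf("entry %2d: GPU %d MHz\n", diff, gpu_freq * 50);
        }
        return 0;
    }
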
@@ -5132,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
5132 5072
5133 pcbr = I915_READ(VLV_PCBR); 5073 pcbr = I915_READ(VLV_PCBR);
5134 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { 5074 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5075 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5135 paddr = (dev_priv->mm.stolen_base + 5076 paddr = (dev_priv->mm.stolen_base +
5136 (gtt->stolen_size - pctx_size)); 5077 (gtt->stolen_size - pctx_size));
5137 5078
5138 pctx_paddr = (paddr & (~4095)); 5079 pctx_paddr = (paddr & (~4095));
5139 I915_WRITE(VLV_PCBR, pctx_paddr); 5080 I915_WRITE(VLV_PCBR, pctx_paddr);
5140 } 5081 }
5082
5083 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5141} 5084}
5142 5085
5143static void valleyview_setup_pctx(struct drm_device *dev) 5086static void valleyview_setup_pctx(struct drm_device *dev)
@@ -5163,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5163 goto out; 5106 goto out;
5164 } 5107 }
5165 5108
5109 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5110
5166 /* 5111 /*
5167 * From the Gunit register HAS: 5112 * From the Gunit register HAS:
5168 * The Gfx driver is expected to program this register and ensure 5113 * The Gfx driver is expected to program this register and ensure
@@ -5181,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5181 I915_WRITE(VLV_PCBR, pctx_paddr); 5126 I915_WRITE(VLV_PCBR, pctx_paddr);
5182 5127
5183out: 5128out:
5129 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5184 dev_priv->vlv_pctx = pctx; 5130 dev_priv->vlv_pctx = pctx;
5185} 5131}
5186 5132
@@ -5217,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
5217 dev_priv->mem_freq = 1333; 5163 dev_priv->mem_freq = 1333;
5218 break; 5164 break;
5219 } 5165 }
5220 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); 5166 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5221 5167
5222 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 5168 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5223 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5169 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -5259,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5259 5205
5260 mutex_lock(&dev_priv->rps.hw_lock); 5206 mutex_lock(&dev_priv->rps.hw_lock);
5261 5207
5262 val = vlv_punit_read(dev_priv, CCK_FUSE_REG); 5208 mutex_lock(&dev_priv->dpio_lock);
5209 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5210 mutex_unlock(&dev_priv->dpio_lock);
5211
5263 switch ((val >> 2) & 0x7) { 5212 switch ((val >> 2) & 0x7) {
5264 case 0: 5213 case 0:
5265 case 1: 5214 case 1:
@@ -5283,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5283 dev_priv->mem_freq = 1600; 5232 dev_priv->mem_freq = 1600;
5284 break; 5233 break;
5285 } 5234 }
5286 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); 5235 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5287 5236
5288 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 5237 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5289 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5238 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -5369,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
5369 /* For now we assume BIOS is allocating and populating the PCBR */ 5318 /* For now we assume BIOS is allocating and populating the PCBR */
5370 pcbr = I915_READ(VLV_PCBR); 5319 pcbr = I915_READ(VLV_PCBR);
5371 5320
5372 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
5373
5374 /* 3: Enable RC6 */ 5321 /* 3: Enable RC6 */
5375 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5322 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5376 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5323 (pcbr >> VLV_PCBR_ADDR_SHIFT))
@@ -5400,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
5400 5347
5401 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5348 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5402 5349
5403 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 5350 /* RPS code assumes GPLL is used */
5351 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5352
5353 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5404 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5354 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5405 5355
5406 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5356 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5414,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
5414 5364
5415 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5365 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5416 5366
5417 gen8_enable_rps_interrupts(dev);
5418
5419 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 5367 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5420} 5368}
5421 5369
@@ -5480,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
5480 5428
5481 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5429 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5482 5430
5483 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 5431 /* RPS code assumes GPLL is used */
5432 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5433
5434 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5484 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5435 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5485 5436
5486 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5437 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5494,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
5494 5445
5495 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5446 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5496 5447
5497 gen6_enable_rps_interrupts(dev);
5498
5499 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 5448 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5500} 5449}
5501 5450
@@ -6254,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
6254{ 6203{
6255 struct drm_i915_private *dev_priv = dev->dev_private; 6204 struct drm_i915_private *dev_priv = dev->dev_private;
6256 6205
6257 /* Interrupts should be disabled already to avoid re-arming. */ 6206 if (INTEL_INFO(dev)->gen < 6)
6258 WARN_ON(intel_irqs_enabled(dev_priv)); 6207 return;
6259 6208
6260 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6209 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6261 6210
6262 cancel_work_sync(&dev_priv->rps.work); 6211 /*
6212 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6213 * is added for it.
6214 */
6215 if (INTEL_INFO(dev)->gen < 9)
6216 gen6_disable_rps_interrupts(dev);
6263 6217
6264 /* Force GPU to min freq during suspend */ 6218 /* Force GPU to min freq during suspend */
6265 gen6_rps_idle(dev_priv); 6219 gen6_rps_idle(dev_priv);
@@ -6269,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
6269{ 6223{
6270 struct drm_i915_private *dev_priv = dev->dev_private; 6224 struct drm_i915_private *dev_priv = dev->dev_private;
6271 6225
6272 /* Interrupts should be disabled already to avoid re-arming. */
6273 WARN_ON(intel_irqs_enabled(dev_priv));
6274
6275 if (IS_IRONLAKE_M(dev)) { 6226 if (IS_IRONLAKE_M(dev)) {
6276 ironlake_disable_drps(dev); 6227 ironlake_disable_drps(dev);
6277 ironlake_disable_rc6(dev); 6228 ironlake_disable_rc6(dev);
@@ -6287,6 +6238,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
6287 valleyview_disable_rps(dev); 6238 valleyview_disable_rps(dev);
6288 else 6239 else
6289 gen6_disable_rps(dev); 6240 gen6_disable_rps(dev);
6241
6290 dev_priv->rps.enabled = false; 6242 dev_priv->rps.enabled = false;
6291 mutex_unlock(&dev_priv->rps.hw_lock); 6243 mutex_unlock(&dev_priv->rps.hw_lock);
6292 } 6244 }
@@ -6301,6 +6253,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6301 6253
6302 mutex_lock(&dev_priv->rps.hw_lock); 6254 mutex_lock(&dev_priv->rps.hw_lock);
6303 6255
6256 /*
6257 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
6258 * added for it.
6259 */
6260 if (INTEL_INFO(dev)->gen < 9)
6261 gen6_reset_rps_interrupts(dev);
6262
6304 if (IS_CHERRYVIEW(dev)) { 6263 if (IS_CHERRYVIEW(dev)) {
6305 cherryview_enable_rps(dev); 6264 cherryview_enable_rps(dev);
6306 } else if (IS_VALLEYVIEW(dev)) { 6265 } else if (IS_VALLEYVIEW(dev)) {
@@ -6315,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6315 __gen6_update_ring_freq(dev); 6274 __gen6_update_ring_freq(dev);
6316 } 6275 }
6317 dev_priv->rps.enabled = true; 6276 dev_priv->rps.enabled = true;
6277
6278 if (INTEL_INFO(dev)->gen < 9)
6279 gen6_enable_rps_interrupts(dev);
6280
6318 mutex_unlock(&dev_priv->rps.hw_lock); 6281 mutex_unlock(&dev_priv->rps.hw_lock);
6319 6282
6320 intel_runtime_pm_put(dev_priv); 6283 intel_runtime_pm_put(dev_priv);
@@ -6953,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
6953 /* WaDisableSDEUnitClockGating:chv */ 6916 /* WaDisableSDEUnitClockGating:chv */
6954 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6917 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6955 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6918 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6956
6957 /* WaDisableGunitClockGating:chv (pre-production hw) */
6958 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
6959 GINT_DIS);
6960
6961 /* WaDisableFfDopClockGating:chv (pre-production hw) */
6962 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6963 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
6964
6965 /* WaDisableDopClockGating:chv (pre-production hw) */
6966 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6967 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
6968} 6919}
6969 6920
6970static void g4x_init_clock_gating(struct drm_device *dev) 6921static void g4x_init_clock_gating(struct drm_device *dev)
@@ -7135,7 +7086,7 @@ void intel_init_pm(struct drm_device *dev)
7135 i915_ironlake_get_mem_freq(dev); 7086 i915_ironlake_get_mem_freq(dev);
7136 7087
7137 /* For FIFO watermark updates */ 7088 /* For FIFO watermark updates */
7138 if (IS_GEN9(dev)) { 7089 if (INTEL_INFO(dev)->gen >= 9) {
7139 skl_setup_wm_latency(dev); 7090 skl_setup_wm_latency(dev);
7140 7091
7141 dev_priv->display.init_clock_gating = gen9_init_clock_gating; 7092 dev_priv->display.init_clock_gating = gen9_init_clock_gating;
@@ -7222,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
7222 } 7173 }
7223} 7174}
7224 7175
7225int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) 7176int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7226{ 7177{
7227 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7178 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7228 7179
@@ -7232,8 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
7232 } 7183 }
7233 7184
7234 I915_WRITE(GEN6_PCODE_DATA, *val); 7185 I915_WRITE(GEN6_PCODE_DATA, *val);
7235 if (INTEL_INFO(dev_priv)->gen >= 9) 7186 I915_WRITE(GEN6_PCODE_DATA1, 0);
7236 I915_WRITE(GEN9_PCODE_DATA1, 0);
7237 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7187 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7238 7188
7239 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7189 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7248,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
7248 return 0; 7198 return 0;
7249} 7199}
7250 7200
7251int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) 7201int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
7252{ 7202{
7253 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7203 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7254 7204
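
Both pcode helpers follow the same mailbox handshake: refuse if a request is still pending, write the payload to GEN6_PCODE_DATA, latch GEN6_PCODE_READY plus the command into the mailbox register, and poll until the PCU clears the ready bit. A toy user-space model of that sequence (the command id and the instantly answering "firmware" are made up; the driver polls with a 500 ms wait_for() timeout):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCODE_READY (1u << 31)

    static uint32_t mailbox, data; /* stand-ins for the MMIO registers */

    static void pcu_firmware(void) /* pretend PCU: bump the value, ack */
    {
        data++;
        mailbox &= ~PCODE_READY;
    }

    static int pcode_rw(uint32_t mbox, uint32_t *val)
    {
        if (mailbox & PCODE_READY)
            return -EAGAIN; /* previous request still in flight */
        data = *val;
        mailbox = PCODE_READY | mbox;
        pcu_firmware(); /* real code waits up to 500 ms here */
        if (mailbox & PCODE_READY)
            return -ETIMEDOUT;
        *val = data;
        return 0;
    }

    int main(void)
    {
        uint32_t val = 41;
        int ret = pcode_rw(0x5, &val); /* command id is made up */
        printf("ret=%d val=%u\n", ret, val);
        return 0;
    }
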
@@ -7271,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
7271 return 0; 7221 return 0;
7272} 7222}
7273 7223
7274static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 7224static int vlv_gpu_freq_div(unsigned int czclk_freq)
7275{ 7225{
7276 int div; 7226 switch (czclk_freq) {
7277 7227 case 200:
7278 /* 4 x czclk */ 7228 return 10;
7279 switch (dev_priv->mem_freq) { 7229 case 267:
7280 case 800: 7230 return 12;
7281 div = 10; 7231 case 320:
7282 break; 7232 case 333:
7283 case 1066: 7233 return 16;
7284 div = 12; 7234 case 400:
7285 break; 7235 return 20;
7286 case 1333:
7287 div = 16;
7288 break;
7289 default: 7236 default:
7290 return -1; 7237 return -1;
7291 } 7238 }
7239}
7292 7240
7293 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); 7241static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7242{
7243 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7244
7245 div = vlv_gpu_freq_div(czclk_freq);
7246 if (div < 0)
7247 return div;
7248
7249 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
7294} 7250}
7295 7251
7296static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 7252static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7297{ 7253{
7298 int mul; 7254 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7299 7255
7300 /* 4 x czclk */ 7256 mul = vlv_gpu_freq_div(czclk_freq);
7301 switch (dev_priv->mem_freq) { 7257 if (mul < 0)
7302 case 800: 7258 return mul;
7303 mul = 10;
7304 break;
7305 case 1066:
7306 mul = 12;
7307 break;
7308 case 1333:
7309 mul = 16;
7310 break;
7311 default:
7312 return -1;
7313 }
7314 7259
7315 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 7260 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
7316} 7261}
7317 7262
7318static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7263static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7319{ 7264{
7320 int div, freq; 7265 int div, czclk_freq = dev_priv->rps.cz_freq;
7321
7322 switch (dev_priv->rps.cz_freq) {
7323 case 200:
7324 div = 5;
7325 break;
7326 case 267:
7327 div = 6;
7328 break;
7329 case 320:
7330 case 333:
7331 case 400:
7332 div = 8;
7333 break;
7334 default:
7335 return -1;
7336 }
7337 7266
7338 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2); 7267 div = vlv_gpu_freq_div(czclk_freq) / 2;
7268 if (div < 0)
7269 return div;
7339 7270
7340 return freq; 7271 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
7341} 7272}
7342 7273
7343static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 7274static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7344{ 7275{
7345 int mul, opcode; 7276 int mul, czclk_freq = dev_priv->rps.cz_freq;
7346 7277
7347 switch (dev_priv->rps.cz_freq) { 7278 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7348 case 200: 7279 if (mul < 0)
7349 mul = 5; 7280 return mul;
7350 break;
7351 case 267:
7352 mul = 6;
7353 break;
7354 case 320:
7355 case 333:
7356 case 400:
7357 mul = 8;
7358 break;
7359 default:
7360 return -1;
7361 }
7362 7281
7363 /* CHV needs even values */ 7282 /* CHV needs even values */
7364 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); 7283 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
7365
7366 return opcode;
7367} 7284}
7368 7285
7369int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7286int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
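
The rewritten conversion keys everything off the CZ clock: on BYT czclk is mem_freq / 4 (rounded) and on CHV it is the stored cz_freq with half the divider, so one lookup table serves all four helpers. A worked example of the BYT opcode-to-MHz direction, using a hypothetical 1066 MHz mem_freq and a made-up opcode:

    #include <stdio.h>

    /* Same table as vlv_gpu_freq_div() above. */
    static int vlv_gpu_freq_div(unsigned int czclk_freq)
    {
        switch (czclk_freq) {
        case 200: return 10;
        case 267: return 12;
        case 320:
        case 333: return 16;
        case 400: return 20;
        default:  return -1;
        }
    }

    static int round_div(int a, int b) /* DIV_ROUND_CLOSEST, positives */
    {
        return (a + b / 2) / b;
    }

    int main(void)
    {
        int czclk = round_div(1066, 4); /* hypothetical mem_freq -> 267 */
        int div = vlv_gpu_freq_div(czclk);
        int opcode = 0xd9;              /* made-up RPS opcode */

        if (div < 0)
            return 1;
        printf("opcode 0x%x -> %d MHz\n", opcode,
               round_div(czclk * (opcode + 6 - 0xbd), div));
        return 0;
    }

This prints "opcode 0xd9 -> 757 MHz"; byt_freq_opcode() simply inverts the same expression.
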
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 000000000000..716b8a961eea
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Panel Self Refresh (PSR/SRD)
26 *
 27 * Since Haswell the display controller supports Panel Self-Refresh on
 28 * display panels which have a remote frame buffer (RFB) implemented
 29 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 30 * lower standby states when the system is idle but the display is on, as
 31 * it eliminates display refresh requests to DDR memory completely, as
 32 * long as the frame buffer for that display is unchanged.
33 *
34 * Panel Self Refresh must be supported by both Hardware (source) and
35 * Panel (sink).
36 *
37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38 * to power down the link and memory controller. For DSI panels the same idea
39 * is called "manual mode".
40 *
41 * The implementation uses the hardware-based PSR support which automatically
42 * enters/exits self-refresh mode. The hardware takes care of sending the
43 * required DP aux message and could even retrain the link (that part isn't
44 * enabled yet though). The hardware also keeps track of any frontbuffer
45 * changes to know when to exit self-refresh mode again. Unfortunately that
 46 * part doesn't work too well, which is why the i915 PSR support uses the
47 * software frontbuffer tracking to make sure it doesn't miss a screen
48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49 * get called by the frontbuffer tracking code. Note that because of locking
50 * issues the self-refresh re-enable code is done from a work queue, which
 51 * must be correctly synchronized/cancelled when shutting down the pipe.
52 */
53
54#include <drm/drmP.h>
55
56#include "intel_drv.h"
57#include "i915_drv.h"
58
59static bool is_edp_psr(struct intel_dp *intel_dp)
60{
61 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
62}
63
64bool intel_psr_is_enabled(struct drm_device *dev)
65{
66 struct drm_i915_private *dev_priv = dev->dev_private;
67
68 if (!HAS_PSR(dev))
69 return false;
70
71 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
72}
73
74static void intel_psr_write_vsc(struct intel_dp *intel_dp,
75 struct edp_vsc_psr *vsc_psr)
76{
77 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
78 struct drm_device *dev = dig_port->base.base.dev;
79 struct drm_i915_private *dev_priv = dev->dev_private;
80 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
81 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
82 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
83 uint32_t *data = (uint32_t *) vsc_psr;
84 unsigned int i;
85
 86 /* As per BSpec (Pipe Video Data Island Packet), we need to disable
 87 the video DIP before programming the video DIP data buffer
 88 registers for the DIP being updated. */
89 I915_WRITE(ctl_reg, 0);
90 POSTING_READ(ctl_reg);
91
92 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
93 if (i < sizeof(struct edp_vsc_psr))
94 I915_WRITE(data_reg + i, *data++);
95 else
96 I915_WRITE(data_reg + i, 0);
97 }
98
99 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
100 POSTING_READ(ctl_reg);
101}
102
103static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
104{
105 struct edp_vsc_psr psr_vsc;
106
107 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
108 memset(&psr_vsc, 0, sizeof(psr_vsc));
109 psr_vsc.sdp_header.HB0 = 0;
110 psr_vsc.sdp_header.HB1 = 0x7;
111 psr_vsc.sdp_header.HB2 = 0x2;
112 psr_vsc.sdp_header.HB3 = 0x8;
113 intel_psr_write_vsc(intel_dp, &psr_vsc);
114}
115
116static void intel_psr_enable_sink(struct intel_dp *intel_dp)
117{
118 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
119 struct drm_device *dev = dig_port->base.base.dev;
120 struct drm_i915_private *dev_priv = dev->dev_private;
121 uint32_t aux_clock_divider;
122 int precharge = 0x3;
123 bool only_standby = false;
124 static const uint8_t aux_msg[] = {
125 [0] = DP_AUX_NATIVE_WRITE << 4,
126 [1] = DP_SET_POWER >> 8,
127 [2] = DP_SET_POWER & 0xff,
128 [3] = 1 - 1,
129 [4] = DP_SET_POWER_D0,
130 };
131 int i;
132
133 BUILD_BUG_ON(sizeof(aux_msg) > 20);
134
135 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
136
137 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
138 only_standby = true;
139
140 /* Enable PSR in sink */
141 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
142 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
143 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
144 else
145 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
146 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
147
148 /* Setup AUX registers */
149 for (i = 0; i < sizeof(aux_msg); i += 4)
150 I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
151 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
152
153 I915_WRITE(EDP_PSR_AUX_CTL(dev),
154 DP_AUX_CH_CTL_TIME_OUT_400us |
155 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
156 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
157 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
158}
159
160static void intel_psr_enable_source(struct intel_dp *intel_dp)
161{
162 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = dig_port->base.base.dev;
164 struct drm_i915_private *dev_priv = dev->dev_private;
165 uint32_t max_sleep_time = 0x1f;
166 uint32_t idle_frames = 1;
167 uint32_t val = 0x0;
168 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
169 bool only_standby = false;
170
171 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
172 only_standby = true;
173
174 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
175 val |= EDP_PSR_LINK_STANDBY;
176 val |= EDP_PSR_TP2_TP3_TIME_0us;
177 val |= EDP_PSR_TP1_TIME_0us;
178 val |= EDP_PSR_SKIP_AUX_EXIT;
179 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
180 } else
181 val |= EDP_PSR_LINK_DISABLE;
182
183 I915_WRITE(EDP_PSR_CTL(dev), val |
184 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
185 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
186 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
187 EDP_PSR_ENABLE);
188}
189
190static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
191{
192 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
193 struct drm_device *dev = dig_port->base.base.dev;
194 struct drm_i915_private *dev_priv = dev->dev_private;
195 struct drm_crtc *crtc = dig_port->base.base.crtc;
196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
197
198 lockdep_assert_held(&dev_priv->psr.lock);
199 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
200 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
201
202 dev_priv->psr.source_ok = false;
203
204 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
205 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
206 return false;
207 }
208
209 if (!i915.enable_psr) {
210 DRM_DEBUG_KMS("PSR disable by flag\n");
211 return false;
212 }
213
214 /* Below limitations aren't valid for Broadwell */
215 if (IS_BROADWELL(dev))
216 goto out;
217
218 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
219 S3D_ENABLE) {
220 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
221 return false;
222 }
223
224 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
225 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
226 return false;
227 }
228
229 out:
230 dev_priv->psr.source_ok = true;
231 return true;
232}
233
234static void intel_psr_do_enable(struct intel_dp *intel_dp)
235{
236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
237 struct drm_device *dev = intel_dig_port->base.base.dev;
238 struct drm_i915_private *dev_priv = dev->dev_private;
239
240 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
241 WARN_ON(dev_priv->psr.active);
242 lockdep_assert_held(&dev_priv->psr.lock);
243
244 /* Enable/Re-enable PSR on the host */
245 intel_psr_enable_source(intel_dp);
246
247 dev_priv->psr.active = true;
248}
249
250/**
251 * intel_psr_enable - Enable PSR
252 * @intel_dp: Intel DP
253 *
254 * This function can only be called after the pipe is fully trained and enabled.
255 */
256void intel_psr_enable(struct intel_dp *intel_dp)
257{
258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
259 struct drm_device *dev = intel_dig_port->base.base.dev;
260 struct drm_i915_private *dev_priv = dev->dev_private;
261
262 if (!HAS_PSR(dev)) {
263 DRM_DEBUG_KMS("PSR not supported on this platform\n");
264 return;
265 }
266
267 if (!is_edp_psr(intel_dp)) {
268 DRM_DEBUG_KMS("PSR not supported by this panel\n");
269 return;
270 }
271
272 mutex_lock(&dev_priv->psr.lock);
273 if (dev_priv->psr.enabled) {
274 DRM_DEBUG_KMS("PSR already in use\n");
275 goto unlock;
276 }
277
278 if (!intel_psr_match_conditions(intel_dp))
279 goto unlock;
280
281 dev_priv->psr.busy_frontbuffer_bits = 0;
282
283 intel_psr_setup_vsc(intel_dp);
284
285 /* Avoid continuous PSR exit by masking memup and hpd */
286 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
287 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
288
289 /* Enable PSR on the panel */
290 intel_psr_enable_sink(intel_dp);
291
292 dev_priv->psr.enabled = intel_dp;
293unlock:
294 mutex_unlock(&dev_priv->psr.lock);
295}
296
297/**
298 * intel_psr_disable - Disable PSR
299 * @intel_dp: Intel DP
300 *
301 * This function needs to be called before disabling pipe.
302 */
303void intel_psr_disable(struct intel_dp *intel_dp)
304{
305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 struct drm_device *dev = intel_dig_port->base.base.dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308
309 mutex_lock(&dev_priv->psr.lock);
310 if (!dev_priv->psr.enabled) {
311 mutex_unlock(&dev_priv->psr.lock);
312 return;
313 }
314
315 if (dev_priv->psr.active) {
316 I915_WRITE(EDP_PSR_CTL(dev),
317 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
318
319 /* Wait till PSR is idle */
320 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
321 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
322 DRM_ERROR("Timed out waiting for PSR Idle State\n");
323
324 dev_priv->psr.active = false;
325 } else {
326 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
327 }
328
329 dev_priv->psr.enabled = NULL;
330 mutex_unlock(&dev_priv->psr.lock);
331
332 cancel_delayed_work_sync(&dev_priv->psr.work);
333}
334
335static void intel_psr_work(struct work_struct *work)
336{
337 struct drm_i915_private *dev_priv =
338 container_of(work, typeof(*dev_priv), psr.work.work);
339 struct intel_dp *intel_dp = dev_priv->psr.enabled;
340
341 /* We have to make sure PSR is ready for re-enable
342 * otherwise it stays disabled until the next full enable/disable cycle.
343 * PSR might take some time to get fully disabled
344 * and be ready for re-enable.
345 */
346 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
347 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
348 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
349 return;
350 }
351
352 mutex_lock(&dev_priv->psr.lock);
353 intel_dp = dev_priv->psr.enabled;
354
355 if (!intel_dp)
356 goto unlock;
357
358 /*
359 * The delayed work can race with an invalidate hence we need to
360 * recheck. Since psr_flush first clears this and then reschedules we
361 * won't ever miss a flush when bailing out here.
362 */
363 if (dev_priv->psr.busy_frontbuffer_bits)
364 goto unlock;
365
366 intel_psr_do_enable(intel_dp);
367unlock:
368 mutex_unlock(&dev_priv->psr.lock);
369}
370
371static void intel_psr_exit(struct drm_device *dev)
372{
373 struct drm_i915_private *dev_priv = dev->dev_private;
374
375 if (dev_priv->psr.active) {
376 u32 val = I915_READ(EDP_PSR_CTL(dev));
377
378 WARN_ON(!(val & EDP_PSR_ENABLE));
379
380 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
381
382 dev_priv->psr.active = false;
383 }
384
385}
386
387/**
388 * intel_psr_invalidate - Invalidate PSR
389 * @dev: DRM device
390 * @frontbuffer_bits: frontbuffer plane tracking bits
391 *
392 * Since the hardware frontbuffer tracking has gaps we need to integrate
393 * with the software frontbuffer tracking. This function gets called every
394 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
395 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
396 *
397 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
398 */
399void intel_psr_invalidate(struct drm_device *dev,
400 unsigned frontbuffer_bits)
401{
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct drm_crtc *crtc;
404 enum pipe pipe;
405
406 mutex_lock(&dev_priv->psr.lock);
407 if (!dev_priv->psr.enabled) {
408 mutex_unlock(&dev_priv->psr.lock);
409 return;
410 }
411
412 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
413 pipe = to_intel_crtc(crtc)->pipe;
414
415 intel_psr_exit(dev);
416
417 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
418
419 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
420 mutex_unlock(&dev_priv->psr.lock);
421}
422
423/**
424 * intel_psr_flush - Flush PSR
425 * @dev: DRM device
426 * @frontbuffer_bits: frontbuffer plane tracking bits
427 *
428 * Since the hardware frontbuffer tracking has gaps we need to integrate
429 * with the software frontbuffer tracking. This function gets called every
430 * time frontbuffer rendering has completed and flushed out to memory. PSR
431 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
432 *
433 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
434 */
435void intel_psr_flush(struct drm_device *dev,
436 unsigned frontbuffer_bits)
437{
438 struct drm_i915_private *dev_priv = dev->dev_private;
439 struct drm_crtc *crtc;
440 enum pipe pipe;
441
442 mutex_lock(&dev_priv->psr.lock);
443 if (!dev_priv->psr.enabled) {
444 mutex_unlock(&dev_priv->psr.lock);
445 return;
446 }
447
448 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
449 pipe = to_intel_crtc(crtc)->pipe;
450 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
451
452 /*
453 * On Haswell sprite plane updates don't result in a PSR invalidating
454 * signal in the hardware, which means we need to manually fake this in
455 * software for all flushes, not just when we've seen a preceding
456 * invalidation through frontbuffer rendering.
457 */
458 if (IS_HASWELL(dev) &&
459 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
460 intel_psr_exit(dev);
461
462 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
463 schedule_delayed_work(&dev_priv->psr.work,
464 msecs_to_jiffies(100));
465 mutex_unlock(&dev_priv->psr.lock);
466}
467
468/**
469 * intel_psr_init - Init basic PSR work and mutex.
470 * @dev: DRM device
471 *
472 * This function is called only once at driver load to initialize basic
473 * PSR stuff.
474 */
475void intel_psr_init(struct drm_device *dev)
476{
477 struct drm_i915_private *dev_priv = dev->dev_private;
478
479 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
480 mutex_init(&dev_priv->psr.lock);
481}
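
The invalidate/flush contract documented in intel_psr_invalidate() and intel_psr_flush() above reduces to a small state machine: dirtying render exits self-refresh and sets busy bits, and PSR may only be re-armed once every bit has been flushed. A toy user-space model of that rule (the bit values are arbitrary, and the immediate re-enable stands in for the driver's delayed work under psr.lock):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned busy_frontbuffer_bits;
    static bool psr_active;

    static void psr_invalidate(unsigned bits)
    {
        psr_active = false;          /* exit self-refresh right away */
        busy_frontbuffer_bits |= bits;
    }

    static void psr_flush(unsigned bits)
    {
        busy_frontbuffer_bits &= ~bits;
        if (!busy_frontbuffer_bits)
            psr_active = true;       /* driver re-arms from delayed work */
    }

    int main(void)
    {
        psr_invalidate(0x1 | 0x2);   /* two planes start rendering */
        psr_flush(0x1);
        printf("active after partial flush: %d\n", psr_active); /* 0 */
        psr_flush(0x2);
        printf("active after full flush: %d\n", psr_active);    /* 1 */
        return 0;
    }
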
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f457146ff6a4..1d01b51ff058 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
589 goto out; 589 goto out;
590 } 590 }
591 591
592 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 592 ringbuf->head = I915_READ_HEAD(ring);
593 i915_kernel_lost_context(ring->dev); 593 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
594 else { 594 ringbuf->space = intel_ring_space(ringbuf);
595 ringbuf->head = I915_READ_HEAD(ring); 595 ringbuf->last_retired_head = -1;
596 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
597 ringbuf->space = intel_ring_space(ringbuf);
598 ringbuf->last_retired_head = -1;
599 }
600 596
601 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 597 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
602 598
@@ -665,7 +661,8 @@ err:
665 return ret; 661 return ret;
666} 662}
667 663
668static int intel_ring_workarounds_emit(struct intel_engine_cs *ring) 664static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
665 struct intel_context *ctx)
669{ 666{
670 int ret, i; 667 int ret, i;
671 struct drm_device *dev = ring->dev; 668 struct drm_device *dev = ring->dev;
@@ -788,25 +785,25 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
788 struct drm_i915_private *dev_priv = dev->dev_private; 785 struct drm_i915_private *dev_priv = dev->dev_private;
789 786
790 /* WaDisablePartialInstShootdown:chv */ 787 /* WaDisablePartialInstShootdown:chv */
791 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
792 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
793
794 /* WaDisableThreadStallDopClockGating:chv */ 788 /* WaDisableThreadStallDopClockGating:chv */
795 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 789 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
796 STALL_DOP_GATING_DISABLE); 790 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
797 791 STALL_DOP_GATING_DISABLE);
798 /* WaDisableDopClockGating:chv (pre-production hw) */
799 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
800 DOP_CLOCK_GATING_DISABLE);
801 792
802 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ 793 /* Use Force Non-Coherent whenever executing a 3D context. This is a
803 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 794 * workaround for a possible hang in the unlikely event a TLB
804 GEN8_SAMPLER_POWER_BYPASS_DIS); 795 * invalidation occurs during a PSD flush.
796 */
797 /* WaForceEnableNonCoherent:chv */
798 /* WaHdcDisableFetchWhenMasked:chv */
799 WA_SET_BIT_MASKED(HDC_CHICKEN0,
800 HDC_FORCE_NON_COHERENT |
801 HDC_DONOT_FETCH_MEM_WHEN_MASKED);
805 802
806 return 0; 803 return 0;
807} 804}
808 805
809static int init_workarounds_ring(struct intel_engine_cs *ring) 806int init_workarounds_ring(struct intel_engine_cs *ring)
810{ 807{
811 struct drm_device *dev = ring->dev; 808 struct drm_device *dev = ring->dev;
812 struct drm_i915_private *dev_priv = dev->dev_private; 809 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1721,13 +1718,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1721 return 0; 1718 return 0;
1722} 1719}
1723 1720
1724void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1721void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1725{ 1722{
1726 if (!ringbuf->obj)
1727 return;
1728
1729 iounmap(ringbuf->virtual_start); 1723 iounmap(ringbuf->virtual_start);
1724 ringbuf->virtual_start = NULL;
1730 i915_gem_object_ggtt_unpin(ringbuf->obj); 1725 i915_gem_object_ggtt_unpin(ringbuf->obj);
1726}
1727
1728int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1729 struct intel_ringbuffer *ringbuf)
1730{
1731 struct drm_i915_private *dev_priv = to_i915(dev);
1732 struct drm_i915_gem_object *obj = ringbuf->obj;
1733 int ret;
1734
1735 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1736 if (ret)
1737 return ret;
1738
1739 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1740 if (ret) {
1741 i915_gem_object_ggtt_unpin(obj);
1742 return ret;
1743 }
1744
1745 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
1746 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
1747 if (ringbuf->virtual_start == NULL) {
1748 i915_gem_object_ggtt_unpin(obj);
1749 return -EINVAL;
1750 }
1751
1752 return 0;
1753}
1754
1755void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1756{
1731 drm_gem_object_unreference(&ringbuf->obj->base); 1757 drm_gem_object_unreference(&ringbuf->obj->base);
1732 ringbuf->obj = NULL; 1758 ringbuf->obj = NULL;
1733} 1759}
@@ -1735,12 +1761,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1735int intel_alloc_ringbuffer_obj(struct drm_device *dev, 1761int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1736 struct intel_ringbuffer *ringbuf) 1762 struct intel_ringbuffer *ringbuf)
1737{ 1763{
1738 struct drm_i915_private *dev_priv = to_i915(dev);
1739 struct drm_i915_gem_object *obj; 1764 struct drm_i915_gem_object *obj;
1740 int ret;
1741
1742 if (ringbuf->obj)
1743 return 0;
1744 1765
1745 obj = NULL; 1766 obj = NULL;
1746 if (!HAS_LLC(dev)) 1767 if (!HAS_LLC(dev))
@@ -1753,30 +1774,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1753 /* mark ring buffers as read-only from GPU side by default */ 1774 /* mark ring buffers as read-only from GPU side by default */
1754 obj->gt_ro = 1; 1775 obj->gt_ro = 1;
1755 1776
1756 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1757 if (ret)
1758 goto err_unref;
1759
1760 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1761 if (ret)
1762 goto err_unpin;
1763
1764 ringbuf->virtual_start =
1765 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1766 ringbuf->size);
1767 if (ringbuf->virtual_start == NULL) {
1768 ret = -EINVAL;
1769 goto err_unpin;
1770 }
1771
1772 ringbuf->obj = obj; 1777 ringbuf->obj = obj;
1773 return 0;
1774 1778
1775err_unpin: 1779 return 0;
1776 i915_gem_object_ggtt_unpin(obj);
1777err_unref:
1778 drm_gem_object_unreference(&obj->base);
1779 return ret;
1780} 1780}
1781 1781
1782static int intel_init_ring_buffer(struct drm_device *dev, 1782static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1813,10 +1813,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1813 goto error; 1813 goto error;
1814 } 1814 }
1815 1815
1816 ret = intel_alloc_ringbuffer_obj(dev, ringbuf); 1816 if (ringbuf->obj == NULL) {
1817 if (ret) { 1817 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1818 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1818 if (ret) {
1819 goto error; 1819 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
1820 ring->name, ret);
1821 goto error;
1822 }
1823
1824 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
1825 if (ret) {
1826 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
1827 ring->name, ret);
1828 intel_destroy_ringbuffer_obj(ringbuf);
1829 goto error;
1830 }
1820 } 1831 }
1821 1832
1822 /* Workaround an erratum on the i830 which causes a hang if 1833 /* Workaround an erratum on the i830 which causes a hang if
@@ -1857,6 +1868,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1857 intel_stop_ring_buffer(ring); 1868 intel_stop_ring_buffer(ring);
1858 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1869 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1859 1870
1871 intel_unpin_ringbuffer_obj(ringbuf);
1860 intel_destroy_ringbuffer_obj(ringbuf); 1872 intel_destroy_ringbuffer_obj(ringbuf);
1861 ring->preallocated_lazy_request = NULL; 1873 ring->preallocated_lazy_request = NULL;
1862 ring->outstanding_lazy_seqno = 0; 1874 ring->outstanding_lazy_seqno = 0;
@@ -1942,13 +1954,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1942 break; 1954 break;
1943 } 1955 }
1944 1956
1945 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1946 dev->primary->master) {
1947 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1948 if (master_priv->sarea_priv)
1949 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1950 }
1951
1952 msleep(1); 1957 msleep(1);
1953 1958
1954 if (dev_priv->mm.interruptible && signal_pending(current)) { 1959 if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2439,91 +2444,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2439 return intel_init_ring_buffer(dev, ring); 2444 return intel_init_ring_buffer(dev, ring);
2440} 2445}
2441 2446
2442int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2443{
2444 struct drm_i915_private *dev_priv = dev->dev_private;
2445 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2446 struct intel_ringbuffer *ringbuf = ring->buffer;
2447 int ret;
2448
2449 if (ringbuf == NULL) {
2450 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2451 if (!ringbuf)
2452 return -ENOMEM;
2453 ring->buffer = ringbuf;
2454 }
2455
2456 ring->name = "render ring";
2457 ring->id = RCS;
2458 ring->mmio_base = RENDER_RING_BASE;
2459
2460 if (INTEL_INFO(dev)->gen >= 6) {
2461 /* non-kms not supported on gen6+ */
2462 ret = -ENODEV;
2463 goto err_ringbuf;
2464 }
2465
2466 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2467 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2468 * the special gen5 functions. */
2469 ring->add_request = i9xx_add_request;
2470 if (INTEL_INFO(dev)->gen < 4)
2471 ring->flush = gen2_render_ring_flush;
2472 else
2473 ring->flush = gen4_render_ring_flush;
2474 ring->get_seqno = ring_get_seqno;
2475 ring->set_seqno = ring_set_seqno;
2476 if (IS_GEN2(dev)) {
2477 ring->irq_get = i8xx_ring_get_irq;
2478 ring->irq_put = i8xx_ring_put_irq;
2479 } else {
2480 ring->irq_get = i9xx_ring_get_irq;
2481 ring->irq_put = i9xx_ring_put_irq;
2482 }
2483 ring->irq_enable_mask = I915_USER_INTERRUPT;
2484 ring->write_tail = ring_write_tail;
2485 if (INTEL_INFO(dev)->gen >= 4)
2486 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2487 else if (IS_I830(dev) || IS_845G(dev))
2488 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2489 else
2490 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2491 ring->init = init_render_ring;
2492 ring->cleanup = render_ring_cleanup;
2493
2494 ring->dev = dev;
2495 INIT_LIST_HEAD(&ring->active_list);
2496 INIT_LIST_HEAD(&ring->request_list);
2497
2498 ringbuf->size = size;
2499 ringbuf->effective_size = ringbuf->size;
2500 if (IS_I830(ring->dev) || IS_845G(ring->dev))
2501 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2502
2503 ringbuf->virtual_start = ioremap_wc(start, size);
2504 if (ringbuf->virtual_start == NULL) {
2505 DRM_ERROR("can not ioremap virtual address for"
2506 " ring buffer\n");
2507 ret = -ENOMEM;
2508 goto err_ringbuf;
2509 }
2510
2511 if (!I915_NEED_GFX_HWS(dev)) {
2512 ret = init_phys_status_page(ring);
2513 if (ret)
2514 goto err_vstart;
2515 }
2516
2517 return 0;
2518
2519err_vstart:
2520 iounmap(ringbuf->virtual_start);
2521err_ringbuf:
2522 kfree(ringbuf);
2523 ring->buffer = NULL;
2524 return ret;
2525}
2526
2527int intel_init_bsd_ring_buffer(struct drm_device *dev) 2447int intel_init_bsd_ring_buffer(struct drm_device *dev)
2528{ 2448{
2529 struct drm_i915_private *dev_priv = dev->dev_private; 2449 struct drm_i915_private *dev_priv = dev->dev_private;
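
The ringbuffer refactor above separates allocating the backing object from pinning and mapping it, with teardown in the reverse order intel_cleanup_ring_buffer() now uses. A toy model of that lifecycle, where malloc and a plain pointer stand in for the GEM object and the ioremap_wc() mapping:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ringbuf { void *obj; void *virtual_start; size_t size; };

    static int alloc_obj(struct ringbuf *rb)
    {
        rb->obj = malloc(rb->size);  /* intel_alloc_ringbuffer_obj() */
        return rb->obj ? 0 : -ENOMEM;
    }

    static int pin_and_map(struct ringbuf *rb)
    {
        rb->virtual_start = rb->obj; /* GGTT pin + ioremap_wc() */
        return 0;
    }

    static void unpin(struct ringbuf *rb)   { rb->virtual_start = NULL; }
    static void destroy(struct ringbuf *rb) { free(rb->obj); rb->obj = NULL; }

    int main(void)
    {
        struct ringbuf rb = { .size = 4096 };

        if (alloc_obj(&rb) || pin_and_map(&rb))
            return 1;
        memset(rb.virtual_start, 0, rb.size); /* emit commands */
        unpin(&rb);   /* mapping dropped, object kept around */
        destroy(&rb); /* matches the new cleanup order above */
        puts("ok");
        return 0;
    }
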
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 96479c89f4bd..fe426cff598b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -148,7 +148,8 @@ struct intel_engine_cs {
148 148
149 int (*init)(struct intel_engine_cs *ring); 149 int (*init)(struct intel_engine_cs *ring);
150 150
151 int (*init_context)(struct intel_engine_cs *ring); 151 int (*init_context)(struct intel_engine_cs *ring,
152 struct intel_context *ctx);
152 153
153 void (*write_tail)(struct intel_engine_cs *ring, 154 void (*write_tail)(struct intel_engine_cs *ring,
154 u32 value); 155 u32 value);
@@ -235,6 +236,7 @@ struct intel_engine_cs {
235 /* Execlists */ 236 /* Execlists */
236 spinlock_t execlist_lock; 237 spinlock_t execlist_lock;
237 struct list_head execlist_queue; 238 struct list_head execlist_queue;
239 struct list_head execlist_retired_req_list;
238 u8 next_context_status_buffer; 240 u8 next_context_status_buffer;
239 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ 241 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
240 int (*emit_request)(struct intel_ringbuffer *ringbuf); 242 int (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
381#define I915_GEM_HWS_SCRATCH_INDEX 0x30 383#define I915_GEM_HWS_SCRATCH_INDEX 0x30
382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 384#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
383 385
386void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
387int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
388 struct intel_ringbuffer *ringbuf);
384void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 389void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
385int intel_alloc_ringbuffer_obj(struct drm_device *dev, 390int intel_alloc_ringbuffer_obj(struct drm_device *dev,
386 struct intel_ringbuffer *ringbuf); 391 struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
424u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 429u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
425void intel_ring_setup_status_page(struct intel_engine_cs *ring); 430void intel_ring_setup_status_page(struct intel_engine_cs *ring);
426 431
432int init_workarounds_ring(struct intel_engine_cs *ring);
433
427static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) 434static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
428{ 435{
429 return ringbuf->tail; 436 return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
441 ring->trace_irq_seqno = seqno; 448 ring->trace_irq_seqno = seqno;
442} 449}
443 450
444/* DRI warts */
445int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
446
447#endif /* _INTEL_RINGBUFFER_H_ */ 451#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index dcbecffc6b5f..f5a78d53e297 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -577,6 +577,23 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
577 power_well->data != PIPE_C); 577 power_well->data != PIPE_C);
578 578
579 chv_set_pipe_power_well(dev_priv, power_well, true); 579 chv_set_pipe_power_well(dev_priv, power_well, true);
580
581 if (power_well->data == PIPE_A) {
582 spin_lock_irq(&dev_priv->irq_lock);
583 valleyview_enable_display_irqs(dev_priv);
584 spin_unlock_irq(&dev_priv->irq_lock);
585
586 /*
587 * During driver initialization/resume we can avoid restoring the
588 * part of the HW/SW state that will be inited anyway explicitly.
589 */
590 if (dev_priv->power_domains.initializing)
591 return;
592
593 intel_hpd_init(dev_priv);
594
595 i915_redisable_vga_power_on(dev_priv->dev);
596 }
580} 597}
581 598
582static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 599static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
@@ -586,6 +603,12 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
586 power_well->data != PIPE_B && 603 power_well->data != PIPE_B &&
587 power_well->data != PIPE_C); 604 power_well->data != PIPE_C);
588 605
606 if (power_well->data == PIPE_A) {
607 spin_lock_irq(&dev_priv->irq_lock);
608 valleyview_disable_display_irqs(dev_priv);
609 spin_unlock_irq(&dev_priv->irq_lock);
610 }
611
589 chv_set_pipe_power_well(dev_priv, power_well, false); 612 chv_set_pipe_power_well(dev_priv, power_well, false);
590 613
591 if (power_well->data == PIPE_A) 614 if (power_well->data == PIPE_A)
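
The power-well hunks above order the display-interrupt state around the pipe-A well: interrupts come up only after power-on and go down before power-off, with an init-time shortcut that skips the HPD and VGA restore driver load performs explicitly anyway. A toy sketch of that ordering (all names are stand-ins, not driver API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool well_on, irqs_on, initializing;

    static void well_enable(void)
    {
        well_on = true;
        irqs_on = true;      /* only valid once the well is powered */
        if (initializing)
            return;          /* state gets set up explicitly later */
        puts("reinit hotplug, redisable VGA");
    }

    static void well_disable(void)
    {
        irqs_on = false;     /* quiesce before cutting power */
        well_on = false;
    }

    int main(void)
    {
        well_enable();
        well_disable();
        printf("well=%d irqs=%d\n", well_on, irqs_on);
        return 0;
    }
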
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 64076555153a..7d9c340f7693 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1264,10 +1264,11 @@ intel_prepare_sprite_plane(struct drm_plane *plane,
         struct drm_device *dev = plane->dev;
         struct drm_crtc *crtc = state->crtc;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        struct intel_plane *intel_plane = to_intel_plane(plane);
         enum pipe pipe = intel_crtc->pipe;
         struct drm_framebuffer *fb = state->fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+        struct drm_i915_gem_object *old_obj = intel_plane->obj;
         int ret;
 
         if (old_obj != obj) {
@@ -1302,7 +1303,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
         enum pipe pipe = intel_crtc->pipe;
         struct drm_framebuffer *fb = state->fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+        struct drm_i915_gem_object *old_obj = intel_plane->obj;
         int crtc_x, crtc_y;
         unsigned int crtc_w, crtc_h;
         uint32_t src_x, src_y, src_w, src_h;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6a0c3fb2cbf0..1a3e485a4f97 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -49,17 +49,11 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 {
-        u32 gt_thread_status_mask;
-
-        if (IS_HASWELL(dev_priv->dev))
-                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
-        else
-                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
         /* w/a for a sporadic read returning 0 by waiting for the GT
          * thread to wake up.
          */
-        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                 DRM_ERROR("GT thread status wait timed out\n");
 }
 
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
         /* WaRsForcewakeWaitTC0:ivb,hsw */
-        if (INTEL_INFO(dev_priv->dev)->gen < 8)
-                __gen6_gt_wait_for_thread_c0(dev_priv);
+        __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                     FORCEWAKE_ACK_TIMEOUT_MS))
                         DRM_ERROR("Timed out: waiting for media to ack.\n");
         }
-
-        /* WaRsForcewakeWaitTC0:vlv */
-        if (!IS_CHERRYVIEW(dev_priv->dev))
-                __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -681,6 +670,34 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
          REG_RANGE((reg), 0x14000, 0x14400) || \
          REG_RANGE((reg), 0x22000, 0x24000))
 
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+        REG_RANGE((reg), 0xC00, 0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+        (REG_RANGE((reg), 0x2000, 0x4000) || \
+         REG_RANGE((reg), 0x5200, 0x8000) || \
+         REG_RANGE((reg), 0x8300, 0x8500) || \
+         REG_RANGE((reg), 0x8C00, 0x8D00) || \
+         REG_RANGE((reg), 0xB000, 0xB480) || \
+         REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+        (REG_RANGE((reg), 0x8800, 0x8A00) || \
+         REG_RANGE((reg), 0xD000, 0xD800) || \
+         REG_RANGE((reg), 0x12000, 0x14000) || \
+         REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+         REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+        REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+        ((reg) < 0x40000 && \
+         !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+         !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
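The FORCEWAKE_GEN9_*_RANGE_OFFSET() macros partition the MMIO space below 0x40000 into forcewake domains: the uncore range needs no forcewake, the common range needs render and media both awake, and the blitter range is simply the complement of the others. A standalone re-expression in plain C, for illustration only (REG_RANGE() is a half-open [start, end) check; the enum and function names below are not the driver's):

#include <stdbool.h>
#include <stdint.h>

static bool in_range(uint32_t reg, uint32_t start, uint32_t end)
{
        return reg >= start && reg < end;       /* half-open, like REG_RANGE() */
}

enum fw_domain { FW_NONE, FW_RENDER, FW_MEDIA, FW_COMMON, FW_BLITTER };

static enum fw_domain gen9_fw_domain(uint32_t reg)
{
        if (reg >= 0x40000 || in_range(reg, 0xC00, 0x2000))
                return FW_NONE;                 /* uncore: no forcewake */
        if (in_range(reg, 0x2000, 0x4000) || in_range(reg, 0x5200, 0x8000) ||
            in_range(reg, 0x8300, 0x8500) || in_range(reg, 0x8C00, 0x8D00) ||
            in_range(reg, 0xB000, 0xB480) || in_range(reg, 0xE000, 0xE800))
                return FW_RENDER;
        if (in_range(reg, 0x8800, 0x8A00) || in_range(reg, 0xD000, 0xD800) ||
            in_range(reg, 0x12000, 0x14000) || in_range(reg, 0x1A000, 0x1EA00) ||
            in_range(reg, 0x30000, 0x40000))
                return FW_MEDIA;
        if (in_range(reg, 0x9400, 0x9800))
                return FW_COMMON;               /* render + media */
        return FW_BLITTER;                      /* rest below 0x40000 */
}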
@@ -811,6 +828,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
         REG_READ_FOOTER; \
 }
 
+#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
+        ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+        REG_READ_HEADER(x); \
+        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+                val = __raw_i915_read##x(dev_priv, reg); \
+        } else { \
+                unsigned fwengine = 0; \
+                if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_rendercount == 0) \
+                                fwengine = FORCEWAKE_RENDER; \
+                } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_mediacount == 0) \
+                                fwengine = FORCEWAKE_MEDIA; \
+                } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_rendercount == 0) \
+                                fwengine |= FORCEWAKE_RENDER; \
+                        if (dev_priv->uncore.fw_mediacount == 0) \
+                                fwengine |= FORCEWAKE_MEDIA; \
+                } else { \
+                        if (dev_priv->uncore.fw_blittercount == 0) \
+                                fwengine = FORCEWAKE_BLITTER; \
+                } \
+                if (fwengine) \
+                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+                val = __raw_i915_read##x(dev_priv, reg); \
+                if (fwengine) \
+                        dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+        } \
+        REG_READ_FOOTER; \
+}
+
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
 __chv_read(8)
 __chv_read(16)
 __chv_read(32)
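__gen9_read() takes a forcewake reference only when the target domain's refcount is zero, i.e. when nothing else is already holding it awake, and drops it immediately after the raw read; registers in the common range OR both render and media together. A simplified, self-contained sketch of this wake-on-demand pattern (single domain only; every type and helper is an illustrative stand-in, not the driver's API):

#include <stdbool.h>
#include <stdint.h>

enum fw { FW_RENDER, FW_MEDIA, FW_BLITTER, FW_NR, FW_SKIP };

struct uncore {
        unsigned int refcount[FW_NR];
        void (*fw_get)(struct uncore *u, enum fw d);
        void (*fw_put)(struct uncore *u, enum fw d);
        uint32_t (*raw_read)(uint32_t reg);
};

static uint32_t wake_on_demand_read(struct uncore *u, uint32_t reg, enum fw d)
{
        bool wake = d != FW_SKIP && u->refcount[d] == 0;
        uint32_t val;

        if (wake)
                u->fw_get(u, d);        /* wake the domain for this access */
        val = u->raw_read(reg);
        if (wake)
                u->fw_put(u, d);        /* and let it idle again */
        return val;
}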
@@ -832,6 +888,7 @@ __gen4_read(16)
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __gen9_read
 #undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
@@ -969,6 +1026,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
         REG_WRITE_FOOTER; \
 }
 
+static const u32 gen9_shadowed_regs[] = {
+        RING_TAIL(RENDER_RING_BASE),
+        RING_TAIL(GEN6_BSD_RING_BASE),
+        RING_TAIL(VEBOX_RING_BASE),
+        RING_TAIL(BLT_RING_BASE),
+        FORCEWAKE_BLITTER_GEN9,
+        FORCEWAKE_RENDER_GEN9,
+        FORCEWAKE_MEDIA_GEN9,
+        GEN6_RPNSWREQ,
+        GEN6_RC_VIDEO_FREQ,
+        /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+        int i;
+        for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+                if (reg == gen9_shadowed_regs[i])
+                        return true;
+
+        return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+                bool trace) { \
+        REG_WRITE_HEADER; \
+        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+            is_gen9_shadowed(dev_priv, reg)) { \
+                __raw_i915_write##x(dev_priv, reg, val); \
+        } else { \
+                unsigned fwengine = 0; \
+                if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_rendercount == 0) \
+                                fwengine = FORCEWAKE_RENDER; \
+                } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_mediacount == 0) \
+                                fwengine = FORCEWAKE_MEDIA; \
+                } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+                        if (dev_priv->uncore.fw_rendercount == 0) \
+                                fwengine |= FORCEWAKE_RENDER; \
+                        if (dev_priv->uncore.fw_mediacount == 0) \
+                                fwengine |= FORCEWAKE_MEDIA; \
+                } else { \
+                        if (dev_priv->uncore.fw_blittercount == 0) \
+                                fwengine = FORCEWAKE_BLITTER; \
+                } \
+                if (fwengine) \
+                        dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                        fwengine); \
+                __raw_i915_write##x(dev_priv, reg, val); \
+                if (fwengine) \
+                        dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                        fwengine); \
+        } \
+        REG_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
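On the write side, is_gen9_shadowed() short-circuits forcewake for shadowed registers (ring tails, forcewake requests, RPS requests), which can be written without waking the domain. Membership is a linear scan, the right call for a nine-entry table; if the table ever grew large, a sorted array probed with bsearch(3) would be the natural alternative. A hedged sketch of that variant, assuming the caller keeps the table sorted ascending:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static int cmp_u32(const void *a, const void *b)
{
        uint32_t x = *(const uint32_t *)a;
        uint32_t y = *(const uint32_t *)b;

        return (x > y) - (x < y);
}

/* Hypothetical variant of is_gen9_shadowed(); tbl must be sorted. */
static bool is_shadowed_sorted(const uint32_t *tbl, size_t n, uint32_t reg)
{
        return bsearch(&reg, tbl, n, sizeof(*tbl), cmp_u32) != NULL;
}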
@@ -994,6 +1114,7 @@ __gen4_write(16)
 __gen4_write(32)
 __gen4_write(64)
 
+#undef __gen9_write
 #undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
@@ -1077,6 +1198,13 @@ void intel_uncore_init(struct drm_device *dev)
 
         switch (INTEL_INFO(dev)->gen) {
         default:
+                WARN_ON(1);
+                return;
+        case 9:
+                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+                ASSIGN_READ_MMIO_VFUNCS(gen9);
+                break;
+        case 8:
                 if (IS_CHERRYVIEW(dev)) {
                         ASSIGN_WRITE_MMIO_VFUNCS(chv);
                         ASSIGN_READ_MMIO_VFUNCS(chv);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index b1da7cd40db8..490b90866baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1699,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
         drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
         memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
 
-        nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+        nvif_mthd(disp->disp, 0, &args,
+                  sizeof(args.base) + drm_eld_size(args.data));
 }
 
 static void
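The nouveau hunk swaps an open-coded ELD payload length for drm_eld_size(). An ELD is a 4-byte header block plus a baseline block whose length, in 4-byte units, is stored in byte 2, so the old args.data[2] * 4 expression missed the header bytes. A standalone sketch of what the helper computes (see drm_eld_size() in include/drm/drm_edid.h; this re-statement is for illustration only):

#include <stdint.h>

/* ELD size = 4-byte header block + baseline block length (byte 2,
 * counted in 4-byte units). */
static int eld_size(const uint8_t *eld)
{
        return 4 + eld[2] * 4;
}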
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 2ec0efcaa719..250262265ee3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
 #define I915_PARAM_HAS_WT                27
 #define I915_PARAM_CMD_PARSER_VERSION    28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
 
 typedef struct drm_i915_getparam {
         int param;
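I915_PARAM_HAS_COHERENT_PHYS_GTT advertises a new capability through the long-standing GETPARAM ioctl. A minimal userspace probe, assuming the kernel UAPI header is on the include path; on kernels predating the parameter the ioctl simply fails and the probe reports false:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>       /* kernel UAPI header; install path may vary */

/* Returns nonzero if the kernel reports coherent phys-GTT access. */
static int has_coherent_phys_gtt(int drm_fd)
{
        int value = 0;
        drm_i915_getparam_t gp = {
                .param = I915_PARAM_HAS_COHERENT_PHYS_GTT,
                .value = &value,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;
        return value;
}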