author		Dave Airlie <airlied@redhat.com>	2014-04-30 19:11:37 -0400
committer	Dave Airlie <airlied@redhat.com>	2014-04-30 19:11:37 -0400
commit		885ac04ab3a226d28147853d6d98eee3897a5636 (patch)
tree		6310f259c2f91525574fb330f62b56579450350e
parent		8aa9e85adac609588eeec356e5a85059b3b819ba (diff)
parent		c79057922ed6c2c6df1214e6ab4414fea1b23db2 (diff)
Merge tag 'drm-intel-next-2014-04-16' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2014-04-16:
- vlv infoframe fixes from Jesse
- dsi/mipi fixes from Shobhit
- gen8 pageflip fixes for LRI/SRM from Damien
- cmd parser fixes from Brad Volkin
- some prep patches for CHV, DRRS, ...
- and tons of little things all over

drm-intel-next-2014-04-04:
- cmd parser for gen7 but only in enforcing and not yet granting mode -
  the batch copying stuff is still missing. Also performance is a bit ...
  rough (Brad Volkin + OACONTROL fix from Ken).
- deprecate UMS harder (i.e. CONFIG_BROKEN)
- interrupt rework from Paulo Zanoni
- runtime PM support for bdw and snb, again from Paulo
- a pile of refactorings from various people all over the place to prep
  for new stuff (irq reworks, power domain polish, ...)

Conflicts:
	drivers/gpu/drm/i915/i915_gem_context.c
-rw-r--r--	drivers/gpu/drm/drm_cache.c			4
-rw-r--r--	drivers/gpu/drm/i915/Kconfig			2
-rw-r--r--	drivers/gpu/drm/i915/dvo_ch7xxx.c		2
-rw-r--r--	drivers/gpu/drm/i915/dvo_ivch.c			2
-rw-r--r--	drivers/gpu/drm/i915/dvo_ns2501.c		24
-rw-r--r--	drivers/gpu/drm/i915/dvo_sil164.c		2
-rw-r--r--	drivers/gpu/drm/i915/dvo_tfp410.c		2
-rw-r--r--	drivers/gpu/drm/i915/i915_cmd_parser.c		616
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c		25
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c			3
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c			53
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h			265
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c			5
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c		18
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c		6
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c		73
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.h		283
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c		23
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c			504
-rw-r--r--	drivers/gpu/drm/i915/i915_params.c		8
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h			116
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c		242
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.h		60
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c		283
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c			197
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h		35
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.c		125
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.h		4
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi_cmd.c		4
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi_cmd.h		5
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c		38
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c		7
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c		8
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c			219
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c		21
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h		4
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c		4
-rw-r--r--	drivers/gpu/drm/i915/intel_sideband.c		8
-rw-r--r--	drivers/gpu/drm/i915/intel_uncore.c		24
-rw-r--r--	include/drm/drmP.h				2
-rw-r--r--	include/uapi/drm/i915_drm.h			1
42 files changed, 2317 insertions(+), 1013 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 534cb89b160d..ae251b8abd0e 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -131,11 +131,11 @@ drm_clflush_sg(struct sg_table *st)
 EXPORT_SYMBOL(drm_clflush_sg);
 
 void
-drm_clflush_virt_range(char *addr, unsigned long length)
+drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		char *end = addr + length;
+		void *end = addr + length;
 		mb();
 		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
 			clflush(addr);
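The switch from char * to void * above saves callers a cast; the flush loop itself is unchanged and still walks the range in cache-line-sized steps. As a rough userspace model of that loop (hypothetical helper names, not part of the patch; the line size is a parameter here instead of boot_cpu_data.x86_clflush_size):

#include <stddef.h>

/* Hypothetical stand-in for the CLFLUSH instruction. */
static void flush_line(const void *p) { (void)p; }

/* Models drm_clflush_virt_range(): with a 64-byte line size, flushing
 * 100 bytes starting at a line-aligned address touches two lines
 * (offsets 0x00 and 0x40). */
static void clflush_range_model(void *addr, size_t length, size_t line_size)
{
	char *p = addr;
	char *end = p + length;

	for (; p < end; p += line_size)
		flush_line(p);
}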
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index bea2d67196fb..e4e3c01b8cbc 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -71,7 +71,7 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
 
 config DRM_I915_UMS
 	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
-	depends on DRM_I915
+	depends on DRM_I915 && BROKEN
 	default n
 	help
 	  Choose this option if you still need userspace modesetting.
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a0f5bdd69491..80449f475960 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -160,7 +160,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0f1865d7d4d8..0f2587ff347c 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -195,7 +195,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	if (i2c_transfer(adapter, msgs, 3) == 3) {
 		*data = (in_buf[1] << 8) | in_buf[0];
 		return true;
-	};
+	}
 
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 8155ded79079..74f2af7c2d3e 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -121,7 +121,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!ns->quiet) {
 		DRM_DEBUG_KMS
@@ -233,9 +233,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode)
 {
 	DRM_DEBUG_KMS
-	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
-	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
-	     mode->vtotal);
+	    ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+	     mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
 	/*
 	 * Currently, these are all the modes I have data from.
@@ -261,9 +260,8 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
 	DRM_DEBUG_KMS
-	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
-	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
-	     mode->vtotal);
+	    ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+	     mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
 	/*
 	 * Where do I find the native resolution for which scaling is not required???
@@ -277,8 +275,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 	if (mode->hdisplay == 800 && mode->vdisplay == 600) {
 		/* mode 277 */
 		ns->reg_8_shadow &= ~NS2501_8_BPAS;
-		DRM_DEBUG_KMS("%s: switching to 800x600\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 800x600\n");
 
 		/*
 		 * No, I do not know where this data comes from.
@@ -341,8 +338,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 
 	} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
 		/* mode 274 */
-		DRM_DEBUG_KMS("%s: switching to 640x480\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 640x480\n");
 		/*
 		 * No, I do not know where this data comes from.
 		 * It is just what the video bios left in the DVO, so
@@ -406,8 +402,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 
 	} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
 		/* mode 280 */
-		DRM_DEBUG_KMS("%s: switching to 1024x768\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 1024x768\n");
 		/*
 		 * This might or might not work, actually. I'm silently
 		 * assuming here that the native panel resolution is
@@ -458,8 +453,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 	unsigned char ch;
 
-	DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
-		      __FUNCTION__, enable);
+	DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
 	ch = ns->reg_8_shadow;
 
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 7b3e9e936200..fa0114967076 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -93,7 +93,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 12ea4b164692..7853719a0e81 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -118,7 +118,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 4cf6d020d513..9bac0979a294 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -86,6 +86,367 @@
  * general bitmasking mechanism.
  */
 
+#define STD_MI_OPCODE_MASK  0xFF800000
+#define STD_3D_OPCODE_MASK  0xFFFF0000
+#define STD_2D_OPCODE_MASK  0xFFC00000
+#define STD_MFX_OPCODE_MASK 0xFFFF0000
+
+#define CMD(op, opm, f, lm, fl, ...)			\
+	{						\
+		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
+		.cmd = { (op), (opm) },			\
+		.length = { (lm) },			\
+		__VA_ARGS__				\
+	}
+
+/* Convenience macros to compress the tables */
+#define SMI STD_MI_OPCODE_MASK
+#define S3D STD_3D_OPCODE_MASK
+#define S2D STD_2D_OPCODE_MASK
+#define SMFX STD_MFX_OPCODE_MASK
+#define F true
+#define S CMD_DESC_SKIP
+#define R CMD_DESC_REJECT
+#define W CMD_DESC_REGISTER
+#define B CMD_DESC_BITMASK
+#define M CMD_DESC_MASTER
+
+/*            Command                     Mask  Fixed Len  Action
+	      ---------------------------------------------------------- */
+static const struct drm_i915_cmd_descriptor common_cmds[] = {
+	CMD( MI_NOOP,                      SMI,  F, 1,     S ),
+	CMD( MI_USER_INTERRUPT,            SMI,  F, 1,     R ),
+	CMD( MI_WAIT_FOR_EVENT,            SMI,  F, 1,     M ),
+	CMD( MI_ARB_CHECK,                 SMI,  F, 1,     S ),
+	CMD( MI_REPORT_HEAD,               SMI,  F, 1,     S ),
+	CMD( MI_SUSPEND_FLUSH,             SMI,  F, 1,     S ),
+	CMD( MI_SEMAPHORE_MBOX,            SMI, !F, 0xFF,  R ),
+	CMD( MI_STORE_DWORD_INDEX,         SMI, !F, 0xFF,  R ),
+	CMD( MI_LOAD_REGISTER_IMM(1),      SMI, !F, 0xFF,  W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_STORE_REGISTER_MEM(1),     SMI, !F, 0xFF,  W | B,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC },
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_LOAD_REGISTER_MEM,         SMI, !F, 0xFF,  W | B,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC },
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_BATCH_BUFFER_START,        SMI, !F, 0xFF,  S ),
+};
+
+static const struct drm_i915_cmd_descriptor render_cmds[] = {
+	CMD( MI_FLUSH,                     SMI,  F, 1,     S ),
+	CMD( MI_ARB_ON_OFF,                SMI,  F, 1,     R ),
+	CMD( MI_PREDICATE,                 SMI,  F, 1,     S ),
+	CMD( MI_TOPOLOGY_FILTER,           SMI,  F, 1,     S ),
+	CMD( MI_DISPLAY_FLIP,              SMI, !F, 0xFF,  R ),
+	CMD( MI_SET_CONTEXT,               SMI, !F, 0xFF,  R ),
+	CMD( MI_URB_CLEAR,                 SMI, !F, 0xFF,  S ),
+	CMD( MI_STORE_DWORD_IMM,           SMI, !F, 0x3F,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT,                SMI, !F, 0xFF,  R ),
+	CMD( MI_CLFLUSH,                   SMI, !F, 0x3FF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_REPORT_PERF_COUNT,         SMI, !F, 0x3F,  B,
+	     .bits = {{
+			.offset = 1,
+			.mask = MI_REPORT_PERF_COUNT_GGTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D,  F, 1,     S ),
+	CMD( PIPELINE_SELECT,              S3D,  F, 1,     S ),
+	CMD( MEDIA_VFE_STATE,              S3D, !F, 0xFFFF, B,
+	     .bits = {{
+			.offset = 2,
+			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
+			.expected = 0,
+	     }}, ),
+	CMD( GPGPU_OBJECT,                 S3D, !F, 0xFF,  S ),
+	CMD( GPGPU_WALKER,                 S3D, !F, 0xFF,  S ),
+	CMD( GFX_OP_3DSTATE_SO_DECL_LIST,  S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_PIPE_CONTROL(5),       S3D, !F, 0xFF,  B,
+	     .bits = {{
+			.offset = 1,
+			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+				 PIPE_CONTROL_STORE_DATA_INDEX),
+			.expected = 0,
+			.condition_offset = 1,
+			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
+	     }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
+	CMD( MI_SET_PREDICATE,             SMI,  F, 1,     S ),
+	CMD( MI_RS_CONTROL,                SMI,  F, 1,     S ),
+	CMD( MI_URB_ATOMIC_ALLOC,          SMI,  F, 1,     S ),
+	CMD( MI_RS_CONTEXT,                SMI,  F, 1,     S ),
+	CMD( MI_LOAD_SCAN_LINES_INCL,      SMI, !F, 0x3F,  M ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL,      SMI, !F, 0x3F,  R ),
+	CMD( MI_LOAD_REGISTER_REG,         SMI, !F, 0xFF,  R ),
+	CMD( MI_RS_STORE_DATA_IMM,         SMI, !F, 0xFF,  S ),
+	CMD( MI_LOAD_URB_MEM,              SMI, !F, 0xFF,  S ),
+	CMD( MI_STORE_URB_MEM,             SMI, !F, 0xFF,  S ),
+	CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
+	CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
+
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor video_cmds[] = {
+	CMD( MI_ARB_ON_OFF,                SMI,  F, 1,     R ),
+	CMD( MI_STORE_DWORD_IMM,           SMI, !F, 0xFF,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT,                SMI, !F, 0x3F,  R ),
+	CMD( MI_FLUSH_DW,                  SMI, !F, 0x3F,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	/*
+	 * MFX_WAIT doesn't fit the way we handle length for most commands.
+	 * It has a length field but it uses a non-standard length bias.
+	 * It is always 1 dword though, so just treat it as fixed length.
+	 */
+	CMD( MFX_WAIT,                     SMFX, F, 1,     S ),
+};
+
+static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+	CMD( MI_ARB_ON_OFF,                SMI,  F, 1,     R ),
+	CMD( MI_STORE_DWORD_IMM,           SMI, !F, 0xFF,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT,                SMI, !F, 0x3F,  R ),
+	CMD( MI_FLUSH_DW,                  SMI, !F, 0x3F,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+	CMD( MI_DISPLAY_FLIP,              SMI, !F, 0xFF,  R ),
+	CMD( MI_STORE_DWORD_IMM,           SMI, !F, 0x3FF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT,                SMI, !F, 0x3F,  R ),
+	CMD( MI_FLUSH_DW,                  SMI, !F, 0x3F,  B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( COLOR_BLT,                    S2D, !F, 0x3F,  S ),
+	CMD( SRC_COPY_BLT,                 S2D, !F, 0x3F,  S ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+	CMD( MI_LOAD_SCAN_LINES_INCL,      SMI, !F, 0x3F,  M ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL,      SMI, !F, 0x3F,  R ),
+};
+
+#undef CMD
+#undef SMI
+#undef S3D
+#undef S2D
+#undef SMFX
+#undef F
+#undef S
+#undef R
+#undef W
+#undef B
+#undef M
+
+static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ video_cmds, ARRAY_SIZE(video_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+};
+
+/*
+ * Register whitelists, sorted by increasing register offset.
+ *
+ * Some registers that userspace accesses are 64 bits. The register
+ * access commands only allow 32-bit accesses. Hence, we have to include
+ * entries for both halves of the 64-bit registers.
+ */
+
+/* Convenience macro for adding 64-bit registers */
+#define REG64(addr) (addr), (addr + sizeof(u32))
+
+static const u32 gen7_render_regs[] = {
+	REG64(HS_INVOCATION_COUNT),
+	REG64(DS_INVOCATION_COUNT),
+	REG64(IA_VERTICES_COUNT),
+	REG64(IA_PRIMITIVES_COUNT),
+	REG64(VS_INVOCATION_COUNT),
+	REG64(GS_INVOCATION_COUNT),
+	REG64(GS_PRIMITIVES_COUNT),
+	REG64(CL_INVOCATION_COUNT),
+	REG64(CL_PRIMITIVES_COUNT),
+	REG64(PS_INVOCATION_COUNT),
+	REG64(PS_DEPTH_COUNT),
+	OACONTROL, /* Only allowed for LRI and SRM. See below. */
+	GEN7_3DPRIM_END_OFFSET,
+	GEN7_3DPRIM_START_VERTEX,
+	GEN7_3DPRIM_VERTEX_COUNT,
+	GEN7_3DPRIM_INSTANCE_COUNT,
+	GEN7_3DPRIM_START_INSTANCE,
+	GEN7_3DPRIM_BASE_VERTEX,
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+	GEN7_SO_WRITE_OFFSET(0),
+	GEN7_SO_WRITE_OFFSET(1),
+	GEN7_SO_WRITE_OFFSET(2),
+	GEN7_SO_WRITE_OFFSET(3),
+};
+
+static const u32 gen7_blt_regs[] = {
+	BCS_SWCTRL,
+};
+
+static const u32 ivb_master_regs[] = {
+	FORCEWAKE_MT,
+	DERRMR,
+	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
+	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
+	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+};
+
+static const u32 hsw_master_regs[] = {
+	FORCEWAKE_MT,
+	DERRMR,
+};
+
+#undef REG64
+
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -137,12 +498,13 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
-static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+static bool validate_cmds_sorted(struct intel_ring_buffer *ring)
 {
 	int i;
+	bool ret = true;
 
 	if (!ring->cmd_tables || ring->cmd_table_count == 0)
-		return;
+		return true;
 
 	for (i = 0; i < ring->cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
@@ -154,35 +516,45 @@ static void validate_cmds_sorted(struct intel_ring_buffer *ring)
 				&table->table[i];
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
-			if (curr < previous)
+			if (curr < previous) {
 				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
 					  ring->id, i, j, curr, previous);
+				ret = false;
+			}
 
 			previous = curr;
 		}
 	}
+
+	return ret;
 }
 
-static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
 {
 	int i;
 	u32 previous = 0;
+	bool ret = true;
 
 	for (i = 0; i < reg_count; i++) {
 		u32 curr = reg_table[i];
 
-		if (curr < previous)
+		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
 				  ring_id, i, curr, previous);
+			ret = false;
+		}
 
 		previous = curr;
 	}
+
+	return ret;
 }
 
-static void validate_regs_sorted(struct intel_ring_buffer *ring)
+static bool validate_regs_sorted(struct intel_ring_buffer *ring)
 {
-	check_sorted(ring->id, ring->reg_table, ring->reg_count);
-	check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+		check_sorted(ring->id, ring->master_reg_table,
+			     ring->master_reg_count);
 }
 
 /**
@@ -200,15 +572,58 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
 
 	switch (ring->id) {
 	case RCS:
+		if (IS_HASWELL(ring->dev)) {
+			ring->cmd_tables = hsw_render_ring_cmds;
+			ring->cmd_table_count =
+				ARRAY_SIZE(hsw_render_ring_cmds);
+		} else {
+			ring->cmd_tables = gen7_render_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+		}
+
+		ring->reg_table = gen7_render_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
 		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
+		ring->cmd_tables = gen7_video_cmds;
+		ring->cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
 		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
+		if (IS_HASWELL(ring->dev)) {
+			ring->cmd_tables = hsw_blt_ring_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+		} else {
+			ring->cmd_tables = gen7_blt_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+		}
+
+		ring->reg_table = gen7_blt_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
 		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
+		ring->cmd_tables = hsw_vebox_cmds;
+		ring->cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
 		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
@@ -218,8 +633,8 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
 		BUG();
 	}
 
-	validate_cmds_sorted(ring);
-	validate_regs_sorted(ring);
+	BUG_ON(!validate_cmds_sorted(ring));
+	BUG_ON(!validate_regs_sorted(ring));
 }
 
 static const struct drm_i915_cmd_descriptor*
@@ -331,13 +746,111 @@ finish:
  */
 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
 	/* No command tables indicates a platform without parsing */
 	if (!ring->cmd_tables)
 		return false;
 
+	/*
+	 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
+	 * disabled. That will cause all of the parser's PPGTT checks to
+	 * fail. For now, disable parsing when PPGTT is off.
+	 */
+	if (!dev_priv->mm.aliasing_ppgtt)
+		return false;
+
 	return (i915.enable_cmd_parser == 1);
 }
 
+static bool check_cmd(const struct intel_ring_buffer *ring,
+		      const struct drm_i915_cmd_descriptor *desc,
+		      const u32 *cmd,
+		      const bool is_master,
+		      bool *oacontrol_set)
+{
+	if (desc->flags & CMD_DESC_REJECT) {
+		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		return false;
+	}
+
+	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+				 *cmd);
+		return false;
+	}
+
+	if (desc->flags & CMD_DESC_REGISTER) {
+		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+		/*
+		 * OACONTROL requires some special handling for writes. We
+		 * want to make sure that any batch which enables OA also
+		 * disables it before the end of the batch. The goal is to
+		 * prevent one process from snooping on the perf data from
+		 * another process. To do that, we need to check the value
+		 * that will be written to the register. Hence, limit
+		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+		 */
+		if (reg_addr == OACONTROL) {
+			if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+				return false;
+
+			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+				*oacontrol_set = (cmd[2] != 0);
+		}
+
+		if (!valid_reg(ring->reg_table,
+			       ring->reg_count, reg_addr)) {
+			if (!is_master ||
+			    !valid_reg(ring->master_reg_table,
+				       ring->master_reg_count,
+				       reg_addr)) {
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+						 reg_addr,
+						 *cmd,
+						 ring->id);
+				return false;
+			}
+		}
+	}
+
+	if (desc->flags & CMD_DESC_BITMASK) {
+		int i;
+
+		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+			u32 dword;
+
+			if (desc->bits[i].mask == 0)
+				break;
+
+			if (desc->bits[i].condition_mask != 0) {
+				u32 offset =
+					desc->bits[i].condition_offset;
+				u32 condition = cmd[offset] &
+					desc->bits[i].condition_mask;
+
+				if (condition == 0)
+					continue;
+			}
+
+			dword = cmd[desc->bits[i].offset] &
+				desc->bits[i].mask;
+
+			if (dword != desc->bits[i].expected) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+						 *cmd,
+						 desc->bits[i].mask,
+						 desc->bits[i].expected,
+						 dword, ring->id);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
 #define LENGTH_BIAS 2
 
 /**
@@ -361,6 +874,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
 	int needs_clflush = 0;
+	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
 	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
 	if (ret) {
@@ -402,7 +916,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 		length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
 
 		if ((batch_end - cmd) < length) {
-			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
+			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
 					 *cmd,
 					 length,
 					 (unsigned long)(batch_end - cmd));
@@ -410,68 +924,19 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 			break;
 		}
 
-		if (desc->flags & CMD_DESC_REJECT) {
-			DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
-			ret = -EINVAL;
-			break;
-		}
-
-		if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-			DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-					 *cmd);
+		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if (desc->flags & CMD_DESC_REGISTER) {
-			u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
-			if (!valid_reg(ring->reg_table,
-				       ring->reg_count, reg_addr)) {
-				if (!is_master ||
-				    !valid_reg(ring->master_reg_table,
-					       ring->master_reg_count,
-					       reg_addr)) {
-					DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-							 reg_addr,
-							 *cmd,
-							 ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-		}
-
-		if (desc->flags & CMD_DESC_BITMASK) {
-			int i;
-
-			for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
-				u32 dword;
-
-				if (desc->bits[i].mask == 0)
-					break;
-
-				dword = cmd[desc->bits[i].offset] &
-					desc->bits[i].mask;
-
-				if (dword != desc->bits[i].expected) {
-					DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
-							 *cmd,
-							 desc->bits[i].mask,
-							 desc->bits[i].expected,
-							 dword, ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-
-			if (ret)
-				break;
-		}
-
 		cmd += length;
 	}
 
+	if (oacontrol_set) {
+		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+		ret = -EINVAL;
+	}
+
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
@@ -483,3 +948,22 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 
 	return ret;
 }
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+	/*
+	 * Command parser version history
+	 *
+	 * 1. Initial version. Checks batches and reports violations, but leaves
+	 *    hardware parsing enabled (so does not allow new use cases).
+	 */
+	return 1;
+}
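To see the new OACONTROL tracking in check_cmd() end to end, consider a hypothetical batch fragment (illustrative only, not part of the patch): the MI_LOAD_REGISTER_IMM write leaves a non-zero value in OACONTROL, so check_cmd() sets *oacontrol_set; nothing clears it before the batch ends, and i915_parse_cmds() therefore fails the whole batch with -EINVAL after the scan loop.

/* Sketch of a batch the parser now rejects: OA counters are enabled
 * via LRI but never disabled before MI_BATCH_BUFFER_END. */
static const u32 leaky_oa_batch[] = {
	MI_LOAD_REGISTER_IMM(1),
	OACONTROL,		/* cmd[1]: register offset */
	0x1,			/* cmd[2] != 0 -> oacontrol_set = true */
	MI_BATCH_BUFFER_END,	/* batch ends with OA still enabled */
};

Adding a second LRI that writes 0 to OACONTROL before the batch end would make the same batch pass.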
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 195fe5bc0aac..1e83ae45041c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -966,7 +966,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
@@ -991,6 +991,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 rpmodectl, rpinclimit, rpdeclimit;
 	u32 rpstat, cagf, reqf;
 	u32 rpupei, rpcurup, rpprevup;
 	u32 rpdownei, rpcurdown, rpprevdown;
@@ -1011,6 +1012,10 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	reqf >>= 25;
 	reqf *= GT_FREQUENCY_MULTIPLIER;
 
+	rpmodectl = I915_READ(GEN6_RP_CONTROL);
+	rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+	rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
 	rpstat = I915_READ(GEN6_RPSTAT1);
 	rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 	rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -1027,14 +1032,23 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev->struct_mutex);
 
+	seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+		   I915_READ(GEN6_PMIER),
+		   I915_READ(GEN6_PMIMR),
+		   I915_READ(GEN6_PMISR),
+		   I915_READ(GEN6_PMIIR),
+		   I915_READ(GEN6_PMINTRMSK));
 	seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
-	seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 	seq_printf(m, "Render p-state ratio: %d\n",
 		   (gt_perf_status & 0xff00) >> 8);
 	seq_printf(m, "Render p-state VID: %d\n",
 		   gt_perf_status & 0xff);
 	seq_printf(m, "Render p-state limit: %d\n",
 		   rp_state_limits & 0xff);
+	seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+	seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+	seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+	seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 	seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 	seq_printf(m, "CAGF: %dMHz\n", cagf);
 	seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
@@ -1816,8 +1830,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		u64 pdp = I915_READ(ring->mmio_base + offset + 4);
 		pdp <<= 32;
 		pdp |= I915_READ(ring->mmio_base + offset);
-		for (i = 0; i < 4; i++)
-			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+		seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
 }
@@ -2044,7 +2057,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev)) {
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -3774,7 +3787,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
3774 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 3787 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
3775 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 3788 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
3776 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 3789 {"i915_rstdby_delays", i915_rstdby_delays, 0},
3777 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 3790 {"i915_frequency_info", i915_frequency_info, 0},
3778 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 3791 {"i915_delayfreq_table", i915_delayfreq_table, 0},
3779 {"i915_inttoext_table", i915_inttoext_table, 0}, 3792 {"i915_inttoext_table", i915_inttoext_table, 0},
3780 {"i915_drpc_info", i915_drpc_info, 0}, 3793 {"i915_drpc_info", i915_drpc_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..0b38f88c35f0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1017,6 +1017,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
 		value = 1;
 		break;
+	case I915_PARAM_CMD_PARSER_VERSION:
+		value = i915_cmd_parser_get_version();
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
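With the new I915_PARAM_CMD_PARSER_VERSION case above, userspace can ask which parser version the kernel implements before relying on parser-permitted operations. A minimal probe sketch using libdrm's drmIoctl() (error handling elided; on older kernels the ioctl fails with EINVAL, as the default case above shows, which callers can treat as "no parser"):

#include <xf86drm.h>
#include <i915_drm.h>

/* Returns the cmd parser version, or -1 on pre-parser kernels. */
static int i915_probe_cmd_parser_version(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_CMD_PARSER_VERSION;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;
	return value;
}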
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 82f4d1f47d3b..5d8250f7145d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -891,7 +891,36 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }
 
-static int i915_runtime_suspend(struct device *device)
+static void snb_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_runtime_pm_disable_interrupts(dev);
+}
+
+static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	hsw_enable_pc8(dev_priv);
+}
+
+static void snb_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_runtime_pm_restore_interrupts(dev);
+	intel_init_pch_refclk(dev);
+	i915_gem_init_swizzling(dev);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	hsw_disable_pc8(dev_priv);
+}
+
+static int intel_runtime_suspend(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -902,8 +931,12 @@ static int i915_runtime_suspend(struct device *device)
 
 	DRM_DEBUG_KMS("Suspending device\n");
 
-	if (HAS_PC8(dev))
-		hsw_enable_pc8(dev_priv);
+	if (IS_GEN6(dev))
+		snb_runtime_suspend(dev_priv);
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_runtime_suspend(dev_priv);
+	else
+		WARN_ON(1);
 
 	i915_gem_release_all_mmaps(dev_priv);
 
@@ -923,7 +956,7 @@ static int i915_runtime_suspend(struct device *device)
 	return 0;
 }
 
-static int i915_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -936,8 +969,12 @@ static int i915_runtime_resume(struct device *device)
 	intel_opregion_notify_adapter(dev, PCI_D0);
 	dev_priv->pm.suspended = false;
 
-	if (HAS_PC8(dev))
-		hsw_disable_pc8(dev_priv);
+	if (IS_GEN6(dev))
+		snb_runtime_resume(dev_priv);
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_runtime_resume(dev_priv);
+	else
+		WARN_ON(1);
 
 	DRM_DEBUG_KMS("Device resumed\n");
 	return 0;
@@ -954,8 +991,8 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.poweroff = i915_pm_poweroff,
 	.restore_early = i915_pm_resume_early,
 	.restore = i915_pm_resume,
-	.runtime_suspend = i915_runtime_suspend,
-	.runtime_resume = i915_runtime_resume,
+	.runtime_suspend = intel_runtime_suspend,
+	.runtime_resume = intel_runtime_resume,
 };
 
 static const struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ec82f6bff122..7d6acb401fd9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,6 +35,7 @@
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
+#include "i915_gem_gtt.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -358,7 +359,7 @@ struct drm_i915_error_state {
 	u64 bbaddr;
 	u64 acthd;
 	u32 fault_reg;
-	u32 faddr;
+	u64 faddr;
 	u32 rc_psmi; /* sleep state */
 	u32 semaphore_mboxes[I915_NUM_RINGS - 1];
 
@@ -572,168 +573,6 @@ enum i915_cache_level {
 	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 };
 
-typedef uint32_t gen6_gtt_pte_t;
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
-
-	struct list_head vma_link; /* Link in the object's VMA list */
-
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-	void (*bind_vma)(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags);
-};
-
-struct i915_address_space {
-	struct drm_mm mm;
-	struct drm_device *dev;
-	struct list_head global_link;
-	unsigned long start; /* Start offset always 0 for dri2 */
-	size_t total; /* size addr space maps (ex. 2GB for ggtt) */
-
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
-
-	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
-	/* FIXME: Need a more generic return type */
-	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-				     enum i915_cache_level level,
-				     bool valid); /* Create a valid PTE */
-	void (*clear_range)(struct i915_address_space *vm,
-			    uint64_t start,
-			    uint64_t length,
-			    bool use_scratch);
-	void (*insert_entries)(struct i915_address_space *vm,
-			       struct sg_table *st,
-			       uint64_t start,
-			       enum i915_cache_level cache_level);
-	void (*cleanup)(struct i915_address_space *vm);
-};
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_gtt {
-	struct i915_address_space base;
-	size_t stolen_size; /* Total size of stolen memory */
-
-	unsigned long mappable_end; /* End offset that we can CPU map */
-	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
-	phys_addr_t mappable_base; /* PA of our GMADR */
-
-	/** "Graphics Stolen Memory" holds the global PTEs */
-	void __iomem *gsm;
-
-	bool do_idle_maps;
-
-	int mtrr;
-
-	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
-			 size_t *stolen, phys_addr_t *mappable_base,
-			 unsigned long *mappable_end);
-};
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
-#define GEN8_LEGACY_PDPS 4
-struct i915_hw_ppgtt {
-	struct i915_address_space base;
-	struct kref ref;
-	struct drm_mm_node node;
-	unsigned num_pd_entries;
-	unsigned num_pd_pages; /* gen8+ */
-	union {
-		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
-	};
-	struct page *pd_pages;
-	union {
-		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
-	};
-	union {
-		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[4];
-	};
-
-	struct i915_hw_context *ctx;
-
-	int (*enable)(struct i915_hw_ppgtt *ppgtt);
-	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_ring_buffer *ring,
-			 bool synchronous);
-	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
-};
-
 struct i915_ctx_hang_stats {
 	/* This context had batch pending when hang was declared */
 	unsigned batch_pending;
@@ -794,6 +633,10 @@ struct i915_fbc {
 	} no_fbc_reason;
 };
 
+struct i915_drrs {
+	struct intel_connector *connector;
+};
+
 struct i915_psr {
 	bool sink_support;
 	bool source_ok;
@@ -1260,8 +1103,12 @@ struct i915_gpu_error {
 	 */
 	wait_queue_head_t reset_queue;
 
-	/* For gpu hang simulation. */
-	unsigned int stop_rings;
+	/* Userspace knobs for gpu hang simulation;
+	 * combines both a ring mask, and extra flags
+	 */
+	u32 stop_rings;
+#define I915_STOP_RING_ALLOW_BAN	(1 << 31)
+#define I915_STOP_RING_ALLOW_WARN	(1 << 30)
 
 	/* For missed irq/seqno simulation. */
 	unsigned int test_irq_rings;
@@ -1281,6 +1128,12 @@ struct ddi_vbt_port_info {
 	uint8_t supports_dp:1;
 };
 
+enum drrs_support_type {
+	DRRS_NOT_SUPPORTED = 0,
+	STATIC_DRRS_SUPPORT = 1,
+	SEAMLESS_DRRS_SUPPORT = 2
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1296,6 +1149,8 @@ struct intel_vbt_data {
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
+	enum drrs_support_type drrs_type;
+
 	/* eDP */
 	int edp_rate;
 	int edp_lanes;
@@ -1315,6 +1170,12 @@ struct intel_vbt_data {
 	/* MIPI DSI */
 	struct {
 		u16 panel_id;
+		struct mipi_config *config;
+		struct mipi_pps_data *pps;
+		u8 seq_version;
+		u32 size;
+		u8 *data;
+		u8 *sequence[MIPI_SEQ_MAX];
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1366,23 +1227,13 @@ struct ilk_wm_values {
  * goes back to false exactly before we reenable the IRQs. We use this variable
  * to check if someone is trying to enable/disable IRQs while they're supposed
  * to be disabled. This shouldn't happen and we'll print some error messages in
- * case it happens, but if it actually happens we'll also update the variables
- * inside struct regsave so when we restore the IRQs they will contain the
- * latest expected values.
+ * case it happens.
  *
  * For more, read the Documentation/power/runtime_pm.txt.
  */
 struct i915_runtime_pm {
 	bool suspended;
 	bool irqs_disabled;
-
-	struct {
-		uint32_t deimr;
-		uint32_t sdeimr;
-		uint32_t gtimr;
-		uint32_t gtier;
-		uint32_t gen6_pmimr;
-	} regsave;
 };
 
 enum intel_pipe_crc_source {
@@ -1415,7 +1266,7 @@ struct intel_pipe_crc {
 	wait_queue_head_t wq;
 };
 
-typedef struct drm_i915_private {
+struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
 
@@ -1484,6 +1335,7 @@ typedef struct drm_i915_private {
 	struct timer_list hotplug_reenable_timer;
 
 	struct i915_fbc fbc;
+	struct i915_drrs drrs;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
 
@@ -1501,6 +1353,7 @@ typedef struct drm_i915_private {
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
+	unsigned int vlv_cdclk_freq;
 
 	/**
 	 * wq - Driver workqueue for GEM.
@@ -1524,7 +1377,7 @@ typedef struct drm_i915_private {
 	struct mutex modeset_restore_lock;
 
 	struct list_head vm_list; /* Global list of all address spaces */
-	struct i915_gtt gtt; /* VMA representing the global address space */
+	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
 
@@ -1620,7 +1473,7 @@ typedef struct drm_i915_private {
 	struct i915_dri1_state dri1;
 	/* Old ums support infrastructure, same warning applies. */
 	struct i915_ums_state ums;
-} drm_i915_private_t;
+};
 
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
@@ -1894,11 +1747,17 @@ struct drm_i915_cmd_descriptor {
 	 * the expected value, the parser rejects it. Only valid if flags has
 	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
 	 * are valid.
+	 *
+	 * If the check specifies a non-zero condition_mask then the parser
+	 * only performs the check when the bits specified by condition_mask
+	 * are non-zero.
 	 */
 	struct {
 		u32 offset;
 		u32 mask;
 		u32 expected;
+		u32 condition_offset;
+		u32 condition_mask;
 	} bits[MAX_CMD_DESC_BITMASKS];
 };
 
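[Editor's note: a minimal sketch of how one bits[] entry with the new condition fields could be evaluated. The helper name check_cmd_bitmask and the raw-command pointer are hypothetical; only the field semantics come from the comment above, this is not the driver's actual check loop.]

static bool check_cmd_bitmask(const struct drm_i915_cmd_descriptor *desc,
			      int i, const u32 *cmd)
{
	/* New behaviour: skip the check unless the condition bits are set. */
	if (desc->bits[i].condition_mask &&
	    (cmd[desc->bits[i].condition_offset] &
	     desc->bits[i].condition_mask) == 0)
		return true;

	return (cmd[desc->bits[i].offset] & desc->bits[i].mask) ==
		desc->bits[i].expected;
}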
@@ -1940,8 +1799,9 @@ struct drm_i915_cmd_table {
 		 (dev)->pdev->device == 0x0106 || \
 		 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
+#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
@@ -2022,8 +1882,8 @@ struct drm_i915_cmd_table {
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
-#define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
-#define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))
+#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
+				 IS_BROADWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2080,6 +1940,7 @@ struct i915_params {
 	bool prefault_disable;
 	bool reset;
 	bool disable_display;
+	bool disable_vtd_wa;
 };
 extern struct i915_params i915 __read_mostly;
 
@@ -2302,6 +2163,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
 }
 
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
+}
+
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
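[Editor's note: my reading of the helpers above, not text from the patch: stop_rings == 0 means hangs are real, so both banning and warning stay allowed; a test that pretend-hangs rings via the debugfs stop_rings value can opt back into either behaviour by setting the top bits alongside the ring mask. A hypothetical value, assuming the render ring index RCS:]

	u32 stop_rings = (1 << RCS) |		/* pretend-hang render ring */
			 I915_STOP_RING_ALLOW_BAN |
			 I915_STOP_RING_ALLOW_WARN; /* keep old ban+warn semantics */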
@@ -2466,23 +2339,12 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
-/* i915_gem_gtt.c */
-void i915_check_and_clear_faults(struct drm_device *dev);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-			       unsigned long mappable_end, unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
+/* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
 		intel_gtt_chipset_flush();
 }
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
-bool intel_enable_ppgtt(struct drm_device *dev, bool full);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
@@ -2550,6 +2412,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(int type);
 
 /* i915_cmd_parser.c */
+int i915_cmd_parser_get_version(void);
 void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
 int i915_parse_cmds(struct intel_ring_buffer *ring,
@@ -2701,20 +2564,6 @@ void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
-void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-	(((reg) >= 0x2000 && (reg) < 0x4000) || \
-	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
-	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
-	 ((reg) >= 0x2E000 && (reg) < 0x30000))
-
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
-	(((reg) >= 0x12000 && (reg) < 0x14000) || \
-	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
-	 ((reg) >= 0x30000 && (reg) < 0x40000))
-
 #define FORCEWAKE_RENDER	(1 << 0)
 #define FORCEWAKE_MEDIA		(1 << 1)
 #define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..8f5ffab15894 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2277,8 +2277,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 		if (!i915_gem_context_is_default(ctx)) {
 			DRM_DEBUG("context hanging too fast, banning!\n");
 			return true;
-		} else if (dev_priv->gpu_error.stop_rings == 0) {
-			DRM_ERROR("gpu hanging too fast, banning!\n");
+		} else if (i915_stop_ring_allow_ban(dev_priv)) {
+			if (i915_stop_ring_allow_warn(dev_priv))
+				DRM_ERROR("gpu hanging too fast, banning!\n");
 			return true;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d72db15afa02..f77b4c126465 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -240,7 +240,15 @@ __create_hw_context(struct drm_device *dev,
 		goto err_out;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 7) {
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
 		ret = i915_gem_object_set_cache_level(ctx->obj,
 						      I915_CACHE_L3_LLC);
 		/* Failure shouldn't ever happen this early */
@@ -549,7 +557,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+	if (IS_GEN6(ring->dev)) {
 		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
@@ -559,8 +567,8 @@ mi_set_context(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
-	if (IS_GEN7(ring->dev))
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw */
+	if (INTEL_INFO(ring->dev)->gen >= 7)
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 	else
 		intel_ring_emit(ring, MI_NOOP);
@@ -578,7 +586,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 	 */
 	intel_ring_emit(ring, MI_NOOP);
 
-	if (IS_GEN7(ring->dev))
+	if (INTEL_INFO(ring->dev)->gen >= 7)
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	else
 		intel_ring_emit(ring, MI_NOOP);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9bb533e0d762..321102a8374b 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -161,12 +161,8 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	int ret;
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return;
 
+	mutex_lock(&dev->struct_mutex);
 	if (--obj->vmapping_count == 0) {
 		vunmap(obj->dma_buf_vmapping);
 		obj->dma_buf_vmapping = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..0ec8621eb4f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1132,7 +1132,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
 		goto pre_mutex_err;
 	}
 
 	i915_gem_context_reference(ctx);
 
@@ -1142,6 +1142,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	eb = eb_create(args);
 	if (eb == NULL) {
+		i915_gem_context_unreference(ctx);
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 62a5c3627b90..0d514ff9b94c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,59 +55,6 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 	return HAS_ALIASING_PPGTT(dev);
 }
 
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
-
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
-#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
-
-#define GEN6_PDE_VALID			(1 << 0)
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID			(1 << 0)
-#define GEN6_PTE_UNCACHED		(1 << 1)
-#define HSW_PTE_UNCACHED		(0)
-#define GEN6_PTE_CACHE_LLC		(2 << 1)
-#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
-#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in
- * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
-					 (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
-
-#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
-
-/* GEN8 legacy style address is defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 | 11:0
- * PDPE  |  PDE  |  PTE  | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- */
-#define GEN8_PDPE_SHIFT			30
-#define GEN8_PDPE_MASK			0x3
-#define GEN8_PDE_SHIFT			21
-#define GEN8_PDE_MASK			0x1ff
-#define GEN8_PTE_SHIFT			12
-#define GEN8_PTE_MASK			0x1ff
-
-#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
-#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
 
 static void ppgtt_bind_vma(struct i915_vma *vma,
 			   enum i915_cache_level cache_level,
@@ -187,9 +134,6 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-#define BYT_PTE_WRITEABLE		(1 << 1)
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
-
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
 				     bool valid)
@@ -1057,8 +1001,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
-#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool retried = false;
@@ -1848,17 +1790,6 @@ static int ggtt_probe_common(struct drm_device *dev,
  * writing this data shouldn't be harmful even in those cases. */
 static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
 {
-#define GEN8_PPAT_UC		(0<<0)
-#define GEN8_PPAT_WC		(1<<0)
-#define GEN8_PPAT_WT		(2<<0)
-#define GEN8_PPAT_WB		(3<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
-/* FIXME(BDW): Bspec is completely confused about cache control bits. */
-#define GEN8_PPAT_LLC		(1<<2)
-#define GEN8_PPAT_LLCELLC	(2<<2)
-#define GEN8_PPAT_LLCeLLC	(3<<2)
-#define GEN8_PPAT_AGE(x)	(x<<4)
-#define GEN8_PPAT(i, x)		((uint64_t) (x) << ((i) * 8))
 	uint64_t pat;
 
 	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
@@ -2031,6 +1962,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
 		 gtt->base.total >> 20);
 	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped)
+		DRM_INFO("VT-d active for gfx access\n");
+#endif
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
new file mode 100644
index 000000000000..b5e8ac0f5ce4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ */
+
+#ifndef __I915_GEM_GTT_H__
+#define __I915_GEM_GTT_H__
+
+typedef uint32_t gen6_gtt_pte_t;
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+
+#define I915_PPGTT_PT_ENTRIES		(PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define GEN6_PTE_VALID			(1 << 0)
+
+#define GEN6_PPGTT_PD_ENTRIES		512
+#define GEN6_PD_SIZE			(GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
+#define GEN6_PDE_VALID			(1 << 0)
+
+#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
+#define BYT_PTE_WRITEABLE		(1 << 1)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
+					 (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED		(0)
+#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
+
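[Editor's note: worked example of the bit split (my arithmetic, using the macro above): for 0xb (WB, eLLC+LLC, age 0) the low three bits 0b011 land in PTE bits 3:1 giving 0x6, and the fourth bit moves up to bit 11 giving 0x800, so the macro yields 0x806. A build-time check of that claim could look like this; it assumes a C11 translation unit, where the kernel itself would use BUILD_BUG_ON:]

_Static_assert(HSW_CACHEABILITY_CONTROL(0xb) == 0x806,
	       "bits 2:0 -> PTE 3:1 (0x6), bit 3 -> PTE 11 (0x800)");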
+/* GEN8 legacy style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 |  11:0
+ * PDPE  |  PDE  |  PTE  | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ */
+#define GEN8_PDPE_SHIFT			30
+#define GEN8_PDPE_MASK			0x3
+#define GEN8_PDE_SHIFT			21
+#define GEN8_PDE_MASK			0x1ff
+#define GEN8_PTE_SHIFT			12
+#define GEN8_PTE_MASK			0x1ff
+#define GEN8_LEGACY_PDPS		4
+#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+
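[Editor's note: to make the table above concrete, a hypothetical helper (not part of the patch) that splits a gen8 GPU address with these shifts and masks; for example, address 0x40201000 decodes to PDPE 1, PDE 1, PTE 1, offset 0.]

static inline void gen8_addr_decode(uint32_t addr, uint32_t *pdpe,
				    uint32_t *pde, uint32_t *pte)
{
	*pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
	*pde  = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;
	*pte  = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;
}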
+#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
+#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
+
+#define GEN8_PPAT_AGE(x)		(x<<4)
+#define GEN8_PPAT_LLCeLLC		(3<<2)
+#define GEN8_PPAT_LLCELLC		(2<<2)
+#define GEN8_PPAT_LLC			(1<<2)
+#define GEN8_PPAT_WB			(3<<0)
+#define GEN8_PPAT_WT			(2<<0)
+#define GEN8_PPAT_WC			(1<<0)
+#define GEN8_PPAT_UC			(0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
+#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
+
+enum i915_cache_level;
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+	void (*bind_vma)(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags);
+};
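[Editor's note: the bind/unbind function pointers replace direct calls into the GTT code; a call site would look roughly like the sketch below. Illustrative shape only; flags other than GLOBAL_BIND and all error handling are omitted, and the cache_level field is assumed from the GEM object.]

	vma->bind_vma(vma, vma->obj->cache_level, GLOBAL_BIND);
	/* ... use the mapping ... */
	vma->unbind_vma(vma);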
+
+struct i915_address_space {
+	struct drm_mm mm;
+	struct drm_device *dev;
+	struct list_head global_link;
+	unsigned long start;	/* Start offset always 0 for dri2 */
+	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+
+	struct {
+		dma_addr_t addr;
+		struct page *page;
+	} scratch;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/* FIXME: Need a more generic return type */
+	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+				     enum i915_cache_level level,
+				     bool valid); /* Create a valid PTE */
+	void (*clear_range)(struct i915_address_space *vm,
+			    uint64_t start,
+			    uint64_t length,
+			    bool use_scratch);
+	void (*insert_entries)(struct i915_address_space *vm,
+			       struct sg_table *st,
+			       uint64_t start,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_address_space *vm);
+};
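[Editor's note: a sketch of what a pte_encode callback looks like, modelled on the gen6-style encoders this series moves around. example_pte_encode is hypothetical; the PTE bits are the ones defined above, and real encoders distinguish more cache levels.]

static gen6_gtt_pte_t example_pte_encode(dma_addr_t addr,
					 enum i915_cache_level level,
					 bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;

	pte |= GEN6_PTE_ADDR_ENCODE(addr);	/* fold addr bits 39:32 into 11:4 */
	pte |= (level == I915_CACHE_NONE) ? GEN6_PTE_UNCACHED
					  : GEN6_PTE_CACHE_LLC;
	return pte;
}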
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	struct i915_address_space base;
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+
+	int mtrr;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			 size_t *stolen, phys_addr_t *mappable_base,
+			 unsigned long *mappable_end);
+};
+
+struct i915_hw_ppgtt {
+	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
+	unsigned num_pd_entries;
+	unsigned num_pd_pages; /* gen8+ */
+	union {
+		struct page **pt_pages;
+		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
+	};
+	struct page *pd_pages;
+	union {
+		uint32_t pd_offset;
+		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
+	};
+	union {
+		dma_addr_t *pt_dma_addr;
+		dma_addr_t *gen8_pt_dma_addr[4];
+	};
+
+	struct i915_hw_context *ctx;
+
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct intel_ring_buffer *ring,
+			 bool synchronous);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
+
+bool intel_enable_ppgtt(struct drm_device *dev, bool full);
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 12f1d43b2d68..4865ade71f29 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -257,7 +257,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
 	}
 	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
-	err_printf(m, "  FADDR: 0x%08x\n", ring->faddr);
+	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+		   lower_32_bits(ring->faddr));
 	if (INTEL_INFO(dev)->gen >= 6) {
 		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
 		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
@@ -452,16 +453,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
-			offset = 0;
-			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   obj->pages[0][elt],
-					   obj->pages[0][elt+1],
-					   obj->pages[0][elt+2],
-					   obj->pages[0][elt+3]);
-				offset += 16;
-			}
+			print_error_obj(m, obj);
 		}
 	}
 
@@ -781,8 +773,10 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
 		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
 		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
 			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+		}
 		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
@@ -875,10 +869,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
-			ering->ctx = i915_error_object_create_sized(dev_priv,
-								    obj,
-								    &dev_priv->gtt.base,
-								    1);
+			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f98ba4e6e70b..afa55199b829 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,17 +80,64 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
+/* IIR can theoretically queue up two events. Be paranoid. */
+#define GEN8_IRQ_RESET_NDX(type, which) do { \
+	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IMR(which)); \
+	I915_WRITE(GEN8_##type##_IER(which), 0); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	POSTING_READ(GEN8_##type##_IIR(which)); \
+} while (0)
+
+#define GEN5_IRQ_RESET(type) do { \
+	I915_WRITE(type##IMR, 0xffffffff); \
+	POSTING_READ(type##IMR); \
+	I915_WRITE(type##IER, 0); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+	I915_WRITE(type##IIR, 0xffffffff); \
+	POSTING_READ(type##IIR); \
+} while (0)
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
+	u32 val = I915_READ(reg); \
+	if (val) { \
+		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
+		     (reg), val); \
+		I915_WRITE((reg), 0xffffffff); \
+		POSTING_READ(reg); \
+		I915_WRITE((reg), 0xffffffff); \
+		POSTING_READ(reg); \
+	} \
+} while (0)
+
+#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
+	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
+	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
+	POSTING_READ(GEN8_##type##_IER(which)); \
+} while (0)
+
+#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
+	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
+	I915_WRITE(type##IMR, (imr_val)); \
+	I915_WRITE(type##IER, (ier_val)); \
+	POSTING_READ(type##IER); \
+} while (0)
+
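[Editor's note: the reset/init pairing these macros establish shows up throughout the rest of this patch; the GT registers, for instance, are handled as follows (taken from the hunks below):]

	GEN5_IRQ_RESET(GT);					/* preinstall/reset */
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);	/* postinstall */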
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.deimr &= ~mask;
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	if ((dev_priv->irq_mask & mask) != 0) {
 		dev_priv->irq_mask &= ~mask;
@@ -104,11 +151,8 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.deimr |= mask;
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	if ((dev_priv->irq_mask & mask) != mask) {
 		dev_priv->irq_mask |= mask;
@@ -129,13 +173,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
-					       interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	dev_priv->gt_irq_mask &= ~interrupt_mask;
 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
@@ -167,13 +206,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
-						    interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	new_val = dev_priv->pm_irq_mask;
 	new_val &= ~interrupt_mask;
@@ -313,14 +347,8 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (dev_priv->pm.irqs_disabled &&
-	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
-		WARN(1, "IRQs disabled\n");
-		dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
-		dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
-						interrupt_mask);
+	if (WARN_ON(dev_priv->pm.irqs_disabled))
 		return;
-	}
 
 	I915_WRITE(SDEIMR, sdeimr);
 	POSTING_READ(SDEIMR);
@@ -503,8 +531,10 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
-			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
 		return;
 
 	if ((pipestat & enable_mask) == enable_mask)
@@ -527,8 +557,10 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
-			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
+	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask))
 		return;
 
 	if ((pipestat & enable_mask) == 0)
@@ -1619,6 +1651,33 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 		gmbus_irq_handler(dev);
 }
 
+static void i9xx_hpd_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+	if (IS_G4X(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+
+		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+	}
+
+	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+		dp_aux_irq_handler(dev);
+
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -1641,19 +1700,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		valleyview_pipestat_irq_handler(dev, iir);
 
 	/* Consume port.  Then clear IIR or we'll miss events */
-	if (iir & I915_DISPLAY_PORT_INTERRUPT) {
-		u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
-		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
-
-		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-			dp_aux_irq_handler(dev);
-
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-		I915_READ(PORT_HOTPLUG_STAT);
-	}
-
+	if (iir & I915_DISPLAY_PORT_INTERRUPT)
+		i9xx_hpd_irq_handler(dev);
 
 	if (pm_iir)
 		gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -2022,7 +2070,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 		if (pipe_iir & GEN8_PIPE_VBLANK)
 			drm_handle_vblank(dev, pipe);
 
-		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+		if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
 			intel_prepare_page_flip(dev, pipe);
 			intel_finish_page_flip_plane(dev, pipe);
 		}
@@ -2511,6 +2559,56 @@ ring_idle(struct intel_ring_buffer *ring, u32 seqno)
 		   i915_seqno_passed(seqno, ring_last_seqno(ring)));
 }
 
+static bool
+ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+{
+	if (INTEL_INFO(dev)->gen >= 8) {
+		/*
+		 * FIXME: gen8 semaphore support - currently we don't emit
+		 * semaphores on bdw anyway, but this needs to be addressed when
+		 * we merge that code.
+		 */
+		return false;
+	} else {
+		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+				 MI_SEMAPHORE_REGISTER);
+	}
+}
+
+static struct intel_ring_buffer *
+semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_ring_buffer *signaller;
+	int i;
+
+	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
+		/*
+		 * FIXME: gen8 semaphore support - currently we don't emit
+		 * semaphores on bdw anyway, but this needs to be addressed when
+		 * we merge that code.
+		 */
+		return NULL;
+	} else {
+		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+		for_each_ring(signaller, dev_priv, i) {
+			if (ring == signaller)
+				continue;
+
+			if (sync_bits ==
+			    signaller->semaphore_register[ring->id])
+				return signaller;
+		}
+	}
+
+	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
+		  ring->id, ipehr);
+
+	return NULL;
+}
+
 static struct intel_ring_buffer *
 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 {
@@ -2519,8 +2617,7 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 	int i;
 
 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-	if ((ipehr & ~(0x3 << 16)) !=
-	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
 		return NULL;
 
 	/*
@@ -2552,7 +2649,7 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 		return NULL;
 
 	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
-	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+	return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
 static int semaphore_passed(struct intel_ring_buffer *ring)
@@ -2759,57 +2856,68 @@ void i915_queue_hangcheck(struct drm_device *dev)
 		   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 
-static void ibx_irq_preinstall(struct drm_device *dev)
+static void ibx_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (HAS_PCH_NOP(dev))
 		return;
 
-	/* south display irq */
-	I915_WRITE(SDEIMR, 0xffffffff);
-	/*
-	 * SDEIER is also touched by the interrupt handler to work around missed
-	 * PCH interrupts. Hence we can't update it after the interrupt handler
-	 * is enabled - instead we unconditionally enable all PCH interrupt
-	 * sources here, but then only unmask them as needed with SDEIMR.
-	 */
+	GEN5_IRQ_RESET(SDE);
+
+	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+		I915_WRITE(SERR_INT, 0xffffffff);
+}
+
+/*
+ * SDEIER is also touched by the interrupt handler to work around missed PCH
+ * interrupts. Hence we can't update it after the interrupt handler is enabled -
+ * instead we unconditionally enable all PCH interrupt sources here, but then
+ * only unmask them as needed with SDEIMR.
+ *
+ * This function needs to be called before interrupts are enabled.
+ */
+static void ibx_irq_pre_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	WARN_ON(I915_READ(SDEIER) != 0);
 	I915_WRITE(SDEIER, 0xffffffff);
 	POSTING_READ(SDEIER);
 }
 
-static void gen5_gt_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* and GT */
-	I915_WRITE(GTIMR, 0xffffffff);
-	I915_WRITE(GTIER, 0x0);
-	POSTING_READ(GTIER);
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		/* and PM */
-		I915_WRITE(GEN6_PMIMR, 0xffffffff);
-		I915_WRITE(GEN6_PMIER, 0x0);
-		POSTING_READ(GEN6_PMIER);
-	}
+	GEN5_IRQ_RESET(GT);
+	if (INTEL_INFO(dev)->gen >= 6)
+		GEN5_IRQ_RESET(GEN6_PM);
 }
 
 /* drm_dma.h hooks
 */
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE(HWSTAM, 0xeffe);
+	I915_WRITE(HWSTAM, 0xffffffff);
 
-	I915_WRITE(DEIMR, 0xffffffff);
-	I915_WRITE(DEIER, 0x0);
-	POSTING_READ(DEIER);
+	GEN5_IRQ_RESET(DE);
+	if (IS_GEN7(dev))
+		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
-	gen5_gt_irq_preinstall(dev);
+	gen5_gt_irq_reset(dev);
+
+	ibx_irq_reset(dev);
+}
 
-	ibx_irq_preinstall(dev);
+static void ironlake_irq_preinstall(struct drm_device *dev)
+{
+	ironlake_irq_reset(dev);
 }
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2827,7 +2935,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 
-	gen5_gt_irq_preinstall(dev);
+	gen5_gt_irq_reset(dev);
 
 	I915_WRITE(DPINVGTT, 0xff);
 
@@ -2841,7 +2949,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 }
 
-static void gen8_irq_preinstall(struct drm_device *dev)
+static void gen8_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
@@ -2849,43 +2957,24 @@ static void gen8_irq_preinstall(struct drm_device *dev)
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
 	POSTING_READ(GEN8_MASTER_IRQ);
 
-	/* IIR can theoretically queue up two events. Be paranoid */
-#define GEN8_IRQ_INIT_NDX(type, which) do { \
-		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
-		POSTING_READ(GEN8_##type##_IMR(which)); \
-		I915_WRITE(GEN8_##type##_IER(which), 0); \
-		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-		POSTING_READ(GEN8_##type##_IIR(which)); \
-		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-	} while (0)
-
-#define GEN8_IRQ_INIT(type) do { \
-		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
-		POSTING_READ(GEN8_##type##_IMR); \
-		I915_WRITE(GEN8_##type##_IER, 0); \
-		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-		POSTING_READ(GEN8_##type##_IIR); \
-		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-	} while (0)
-
-	GEN8_IRQ_INIT_NDX(GT, 0);
-	GEN8_IRQ_INIT_NDX(GT, 1);
-	GEN8_IRQ_INIT_NDX(GT, 2);
-	GEN8_IRQ_INIT_NDX(GT, 3);
+	GEN8_IRQ_RESET_NDX(GT, 0);
+	GEN8_IRQ_RESET_NDX(GT, 1);
+	GEN8_IRQ_RESET_NDX(GT, 2);
+	GEN8_IRQ_RESET_NDX(GT, 3);
 
-	for_each_pipe(pipe) {
-		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
-	}
+	for_each_pipe(pipe)
+		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
-	GEN8_IRQ_INIT(DE_PORT);
-	GEN8_IRQ_INIT(DE_MISC);
-	GEN8_IRQ_INIT(PCU);
-#undef GEN8_IRQ_INIT
-#undef GEN8_IRQ_INIT_NDX
+	GEN5_IRQ_RESET(GEN8_DE_PORT_);
+	GEN5_IRQ_RESET(GEN8_DE_MISC_);
+	GEN5_IRQ_RESET(GEN8_PCU_);
 
-	POSTING_READ(GEN8_PCU_IIR);
+	ibx_irq_reset(dev);
+}
 
-	ibx_irq_preinstall(dev);
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+	gen8_irq_reset(dev);
 }
 
 static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -2931,15 +3020,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 	if (HAS_PCH_NOP(dev))
 		return;
 
-	if (HAS_PCH_IBX(dev)) {
+	if (HAS_PCH_IBX(dev))
 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
-	} else {
+	else
 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
-		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
-	}
-
-	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
 	I915_WRITE(SDEIMR, ~mask);
 }
 
@@ -2965,10 +3051,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 	}
 
-	I915_WRITE(GTIIR, I915_READ(GTIIR));
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	I915_WRITE(GTIER, gt_irqs);
-	POSTING_READ(GTIER);
+	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		pm_irqs |= dev_priv->pm_rps_events;
@@ -2977,10 +3060,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
 		dev_priv->pm_irq_mask = 0xffffffff;
-		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
-		I915_WRITE(GEN6_PMIER, pm_irqs);
-		POSTING_READ(GEN6_PMIER);
+		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
 	}
 }
 
@@ -2997,8 +3077,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 			      DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
-
-		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
@@ -3011,11 +3089,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	dev_priv->irq_mask = ~display_mask;
 
-	/* should always can generate irq */
-	I915_WRITE(DEIIR, I915_READ(DEIIR));
-	I915_WRITE(DEIMR, dev_priv->irq_mask);
-	I915_WRITE(DEIER, display_mask | extra_mask);
-	POSTING_READ(DEIER);
+	I915_WRITE(HWSTAM, 0xeffe);
+
+	ibx_irq_pre_postinstall(dev);
+
+	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
 
 	gen5_gt_irq_postinstall(dev);
 
@@ -3175,21 +3253,14 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 	};
 
-	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
-		u32 tmp = I915_READ(GEN8_GT_IIR(i));
-		if (tmp)
-			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
-				  i, tmp);
-		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
-		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
-	}
-	POSTING_READ(GEN8_GT_IER(0));
+	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
+		GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
 }
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
-	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
+	uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
 		GEN8_PIPE_CDCLK_CRC_DONE |
 		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3199,25 +3270,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3199 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3270 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3200 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3271 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3201 3272
3202 for_each_pipe(pipe) { 3273 for_each_pipe(pipe)
3203 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 3274 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3204 if (tmp) 3275 de_pipe_enables);
3205 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3206 pipe, tmp);
3207 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3208 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
3209 }
3210 POSTING_READ(GEN8_DE_PIPE_ISR(0));
3211 3276
3212 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 3277 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3213 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
3214 POSTING_READ(GEN8_DE_PORT_IER);
3215} 3278}
3216 3279
3217static int gen8_irq_postinstall(struct drm_device *dev) 3280static int gen8_irq_postinstall(struct drm_device *dev)
3218{ 3281{
3219 struct drm_i915_private *dev_priv = dev->dev_private; 3282 struct drm_i915_private *dev_priv = dev->dev_private;
3220 3283
3284 ibx_irq_pre_postinstall(dev);
3285
3221 gen8_gt_irq_postinstall(dev_priv); 3286 gen8_gt_irq_postinstall(dev_priv);
3222 gen8_de_irq_postinstall(dev_priv); 3287 gen8_de_irq_postinstall(dev_priv);
3223 3288
@@ -3232,41 +3297,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
3232static void gen8_irq_uninstall(struct drm_device *dev) 3297static void gen8_irq_uninstall(struct drm_device *dev)
3233{ 3298{
3234 struct drm_i915_private *dev_priv = dev->dev_private; 3299 struct drm_i915_private *dev_priv = dev->dev_private;
3235 int pipe;
3236 3300
3237 if (!dev_priv) 3301 if (!dev_priv)
3238 return; 3302 return;
3239 3303
3240 I915_WRITE(GEN8_MASTER_IRQ, 0); 3304 intel_hpd_irq_uninstall(dev_priv);
3241
3242#define GEN8_IRQ_FINI_NDX(type, which) do { \
3243 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3244 I915_WRITE(GEN8_##type##_IER(which), 0); \
3245 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3246 } while (0)
3247
3248#define GEN8_IRQ_FINI(type) do { \
3249 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3250 I915_WRITE(GEN8_##type##_IER, 0); \
3251 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3252 } while (0)
3253
3254 GEN8_IRQ_FINI_NDX(GT, 0);
3255 GEN8_IRQ_FINI_NDX(GT, 1);
3256 GEN8_IRQ_FINI_NDX(GT, 2);
3257 GEN8_IRQ_FINI_NDX(GT, 3);
3258
3259 for_each_pipe(pipe) {
3260 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3261 }
3262
3263 GEN8_IRQ_FINI(DE_PORT);
3264 GEN8_IRQ_FINI(DE_MISC);
3265 GEN8_IRQ_FINI(PCU);
3266#undef GEN8_IRQ_FINI
3267#undef GEN8_IRQ_FINI_NDX
3268 3305
3269 POSTING_READ(GEN8_PCU_IIR); 3306 gen8_irq_reset(dev);
3270} 3307}
3271 3308
3272static void valleyview_irq_uninstall(struct drm_device *dev) 3309static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3309,26 +3346,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3309 3346
3310 intel_hpd_irq_uninstall(dev_priv); 3347 intel_hpd_irq_uninstall(dev_priv);
3311 3348
3312 I915_WRITE(HWSTAM, 0xffffffff); 3349 ironlake_irq_reset(dev);
3313
3314 I915_WRITE(DEIMR, 0xffffffff);
3315 I915_WRITE(DEIER, 0x0);
3316 I915_WRITE(DEIIR, I915_READ(DEIIR));
3317 if (IS_GEN7(dev))
3318 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3319
3320 I915_WRITE(GTIMR, 0xffffffff);
3321 I915_WRITE(GTIER, 0x0);
3322 I915_WRITE(GTIIR, I915_READ(GTIIR));
3323
3324 if (HAS_PCH_NOP(dev))
3325 return;
3326
3327 I915_WRITE(SDEIMR, 0xffffffff);
3328 I915_WRITE(SDEIER, 0x0);
3329 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
3330 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3331 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3332} 3350}
3333 3351
3334static void i8xx_irq_preinstall(struct drm_device * dev) 3352static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -3636,16 +3654,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3636 break; 3654 break;
3637 3655
3638 /* Consume port. Then clear IIR or we'll miss events */ 3656 /* Consume port. Then clear IIR or we'll miss events */
3639 if ((I915_HAS_HOTPLUG(dev)) && 3657 if (I915_HAS_HOTPLUG(dev) &&
3640 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3658 iir & I915_DISPLAY_PORT_INTERRUPT)
3641 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3659 i9xx_hpd_irq_handler(dev);
3642 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3643
3644 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3645
3646 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3647 POSTING_READ(PORT_HOTPLUG_STAT);
3648 }
3649 3660
3650 I915_WRITE(IIR, iir & ~flip_mask); 3661 I915_WRITE(IIR, iir & ~flip_mask);
3651 new_iir = I915_READ(IIR); /* Flush posted writes */ 3662 new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -3879,22 +3890,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3879 ret = IRQ_HANDLED; 3890 ret = IRQ_HANDLED;
3880 3891
3881 /* Consume port. Then clear IIR or we'll miss events */ 3892 /* Consume port. Then clear IIR or we'll miss events */
3882 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3893 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3883 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3894 i9xx_hpd_irq_handler(dev);
3884 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3885 HOTPLUG_INT_STATUS_G4X :
3886 HOTPLUG_INT_STATUS_I915);
3887
3888 intel_hpd_irq_handler(dev, hotplug_trigger,
3889 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
3890
3891 if (IS_G4X(dev) &&
3892 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
3893 dp_aux_irq_handler(dev);
3894
3895 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3896 I915_READ(PORT_HOTPLUG_STAT);
3897 }
3898 3895
3899 I915_WRITE(IIR, iir & ~flip_mask); 3896 I915_WRITE(IIR, iir & ~flip_mask);
3900 new_iir = I915_READ(IIR); /* Flush posted writes */ 3897 new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -4121,57 +4118,20 @@ void intel_hpd_init(struct drm_device *dev)
4121} 4118}
4122 4119
4123/* Disable interrupts so we can allow runtime PM. */ 4120/* Disable interrupts so we can allow runtime PM. */
4124void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) 4121void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4125{ 4122{
4126 struct drm_i915_private *dev_priv = dev->dev_private; 4123 struct drm_i915_private *dev_priv = dev->dev_private;
4127 unsigned long irqflags;
4128
4129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4130
4131 dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
4132 dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
4133 dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
4134 dev_priv->pm.regsave.gtier = I915_READ(GTIER);
4135 dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
4136
4137 ironlake_disable_display_irq(dev_priv, 0xffffffff);
4138 ibx_disable_display_interrupt(dev_priv, 0xffffffff);
4139 ilk_disable_gt_irq(dev_priv, 0xffffffff);
4140 snb_disable_pm_irq(dev_priv, 0xffffffff);
4141 4124
4125 dev->driver->irq_uninstall(dev);
4142 dev_priv->pm.irqs_disabled = true; 4126 dev_priv->pm.irqs_disabled = true;
4143
4144 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4145} 4127}
4146 4128
4147/* Restore interrupts so we can recover from runtime PM. */ 4129/* Restore interrupts so we can recover from runtime PM. */
4148void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) 4130void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4149{ 4131{
4150 struct drm_i915_private *dev_priv = dev->dev_private; 4132 struct drm_i915_private *dev_priv = dev->dev_private;
4151 unsigned long irqflags;
4152 uint32_t val;
4153
4154 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4155
4156 val = I915_READ(DEIMR);
4157 WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
4158
4159 val = I915_READ(SDEIMR);
4160 WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
4161
4162 val = I915_READ(GTIMR);
4163 WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
4164
4165 val = I915_READ(GEN6_PMIMR);
4166 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
4167 4133
4168 dev_priv->pm.irqs_disabled = false; 4134 dev_priv->pm.irqs_disabled = false;
4169 4135 dev->driver->irq_preinstall(dev);
4170 ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); 4136 dev->driver->irq_postinstall(dev);
4171 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
4172 ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
4173 snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
4174 I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
4175
4176 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4177} 4137}
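
Note on the GEN5_IRQ_INIT()/GEN8_IRQ_INIT_NDX() calls that replace the open-coded register writes above: the macros themselves are added earlier in i915_irq.c and are not part of this excerpt. A minimal sketch of what such a helper plausibly expands to, assuming it simply mirrors the IIR/IMR/IER sequence it replaces (the real macro may additionally assert that IIR is already clear after the preinstall reset):

	/* sketch only -- not the exact macro from this patch */
	#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
		I915_WRITE(type##IIR, I915_READ(type##IIR)); /* ack stale bits */ \
		I915_WRITE(type##IMR, (imr_val));            /* set the mask */ \
		I915_WRITE(type##IER, (ier_val));            /* enable sources */ \
		POSTING_READ(type##IER);                     /* flush the write */ \
	} while (0)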
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d1d7980f0e01..d05a2afa17dc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ struct i915_params i915 __read_mostly = {
 	.reset = true,
 	.invert_brightness = 0,
 	.disable_display = 0,
-	.enable_cmd_parser = 0,
+	.enable_cmd_parser = 1,
+	.disable_vtd_wa = 0,
 };

 module_param_named(modeset, i915.modeset, int, 0400);
@@ -149,6 +150,9 @@ MODULE_PARM_DESC(invert_brightness,
 module_param_named(disable_display, i915.disable_display, bool, 0600);
 MODULE_PARM_DESC(disable_display, "Disable display (default: false)");

+module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
+MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
+
 module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
-		"Enable command parsing (1=enabled, 0=disabled [default])");
+		"Enable command parsing (1=enabled [default], 0=disabled)");
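
Since enable_cmd_parser is now on by default, turning the parser back off (or disabling the VT-d workarounds) is a plain module-parameter override; for example, on the kernel command line:

	i915.enable_cmd_parser=0 i915.disable_vtd_wa=1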
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c77af69c2d8f..8f845556503e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,6 +190,8 @@
  * Memory interface instructions used by the kernel
  */
 #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define MI_GLOBAL_GTT (1<<22)

 #define MI_NOOP MI_INSTR(0, 0)
 #define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
@@ -244,7 +246,8 @@
 #define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
 #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
 #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
 #define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_MASK (3<<16)
 #define MI_SET_CONTEXT MI_INSTR(0x18, 0)
 #define MI_MM_SPACE_GTT (1<<8)
 #define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -262,13 +265,16 @@
  * - One can actually load arbitrary many arbitrary registers: Simply issue x
  *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
-#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
-#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
+#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
 #define MI_SRM_LRM_GLOBAL_GTT (1<<22)
 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
 #define MI_FLUSH_DW_STORE_INDEX (1<<21)
 #define MI_INVALIDATE_TLB (1<<18)
 #define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_NOTIFY (1<<8)
 #define MI_INVALIDATE_BSD (1<<7)
 #define MI_FLUSH_DW_USE_GTT (1<<2)
 #define MI_FLUSH_DW_USE_PPGTT (0<<2)
@@ -330,9 +336,12 @@
 #define DISPLAY_PLANE_B (1<<20)
 #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
 #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_MMIO_WRITE (1<<23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
 #define PIPE_CONTROL_CS_STALL (1<<20)
 #define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
 #define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
 #define PIPE_CONTROL_DEPTH_STALL (1<<13)
 #define PIPE_CONTROL_WRITE_FLUSH (1<<12)
 #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
@@ -347,6 +356,94 @@
 #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
 #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */

+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
+#define MI_PREDICATE MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
+#define MI_CLFLUSH MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
+#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+
+/*
+ * Registers used only by the command parser
+ */
+#define BCS_SWCTRL 0x22200
+
+#define HS_INVOCATION_COUNT 0x2300
+#define DS_INVOCATION_COUNT 0x2308
+#define IA_VERTICES_COUNT 0x2310
+#define IA_PRIMITIVES_COUNT 0x2318
+#define VS_INVOCATION_COUNT 0x2320
+#define GS_INVOCATION_COUNT 0x2328
+#define GS_PRIMITIVES_COUNT 0x2330
+#define CL_INVOCATION_COUNT 0x2338
+#define CL_PRIMITIVES_COUNT 0x2340
+#define PS_INVOCATION_COUNT 0x2348
+#define PS_DEPTH_COUNT 0x2350
+
+/* There are 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
+
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
+
+#define GEN7_3DPRIM_END_OFFSET 0x2420
+#define GEN7_3DPRIM_START_VERTEX 0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
+#define GEN7_3DPRIM_START_INSTANCE 0x243C
+#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+
+#define OACONTROL 0x2360
+
+#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
+#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
+#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
+	_GEN7_PIPEA_DE_LOAD_SL, \
+	_GEN7_PIPEB_DE_LOAD_SL)

 /*
  * Reset registers
@@ -748,6 +845,7 @@ enum punit_power_well {
 #define RING_INSTDONE(base) ((base)+0x6c)
 #define RING_INSTPS(base) ((base)+0x70)
 #define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
 #define RING_INSTPM(base) ((base)+0xc0)
 #define RING_MI_MODE(base) ((base)+0x9c)
 #define INSTPS 0x02070 /* 965+ only */
@@ -842,7 +940,7 @@ enum punit_power_well {
 #define GFX_MODE_GEN7 0x0229c
 #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
 #define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
 #define GFX_SURFACE_FAULT_ENABLE (1<<12)
 #define GFX_REPLAY_MODE (1<<11)
 #define GFX_PSMI_GRANULARITY (1<<10)
@@ -973,6 +1071,7 @@ enum punit_power_well {
 #define ECO_FLIP_DONE (1<<0)

 #define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1<<0)
 #define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
 #define CACHE_MODE_1 0x7004 /* IVB+ */
 #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
@@ -3258,6 +3357,7 @@ enum punit_power_well {
 #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
 #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
 #define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
+#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
 #define PIPECONF_CXSR_DOWNCLOCK (1<<16)
 #define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
 #define PIPECONF_BPC_MASK (0x7 << 5)
@@ -3535,9 +3635,9 @@ enum punit_power_well {
 #define PIPE_PIXEL_MASK 0x00ffffff
 #define PIPE_PIXEL_SHIFT 0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040)
-#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044)
-#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)

 /* Cursor A & B regs */
 #define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
@@ -4120,7 +4220,7 @@ enum punit_power_well {
 #define GEN8_PIPE_SPRITE_FAULT (1 << 9)
 #define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
 #define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
-#define GEN8_PIPE_FLIP_DONE (1 << 4)
+#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
 #define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
 #define GEN8_PIPE_VSYNC (1 << 1)
 #define GEN8_PIPE_VBLANK (1 << 0)
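
The parenthesization fix to MI_LOAD_REGISTER_IMM()/MI_STORE_REGISTER_MEM() above matters whenever the argument is an expression rather than a literal. A small worked example of the MI_INSTR() encoding, with the values computed by hand from the macros in this hunk (not quoted from hardware documentation):

	#include <stdio.h>
	#include <stdint.h>

	#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
	#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)

	int main(void)
	{
		int n = 1;
		/* (0x22 << 23) | (2*1 - 1) = 0x11000001 */
		printf("LRI(1)   = 0x%08x\n", (uint32_t)MI_LOAD_REGISTER_IMM(1));
		/* with the old, unparenthesized 2*x-1, passing n+1 would have
		 * expanded to 2*n+1-1 and encoded the wrong dword length */
		printf("LRI(n+1) = 0x%08x\n", (uint32_t)MI_LOAD_REGISTER_IMM(n + 1));
		return 0;
	}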
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fa486c5fbb02..fba9efd09e87 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -206,7 +206,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
-	int i, downclock;
+	int i, downclock, drrs_mode;

 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
@@ -218,6 +218,28 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,

 	panel_type = lvds_options->panel_type;

+	drrs_mode = (lvds_options->dps_panel_type_bits
+				>> (panel_type * 2)) & MODE_MASK;
+	/*
+	 * VBT has static DRRS = 0 and seamless DRRS = 2.
+	 * The below piece of code is required to adjust vbt.drrs_type
+	 * to match the enum drrs_support_type.
+	 */
+	switch (drrs_mode) {
+	case 0:
+		dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+		DRM_DEBUG_KMS("DRRS supported mode is static\n");
+		break;
+	case 2:
+		dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+		DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+		break;
+	default:
+		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+		DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+		break;
+	}
+
 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
 	if (!lvds_lfp_data)
 		return;
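
To make the bit math in the hunk above concrete: dps_panel_type_bits packs two DRRS bits per panel, so panel N's mode sits at bit 2*N. A worked extraction under an assumed (hypothetical) VBT value:

	uint32_t dps_panel_type_bits = 0x00000020; /* hypothetical VBT dword */
	int panel_type = 2;
	int drrs_mode = (dps_panel_type_bits >> (panel_type * 2)) & 0x3; /* MODE_MASK */
	/* 0x20 >> 4 == 0x2, masked to 2 -> seamless DRRS in the switch above */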
@@ -526,6 +548,16 @@ parse_driver_features(struct drm_i915_private *dev_priv,

 	if (driver->dual_frequency)
 		dev_priv->render_reclock_avail = true;
+
+	DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+	/*
+	 * If DRRS is not supported, drrs_type has to be set to 0.
+	 * This is because the VBT is configured such that static DRRS
+	 * is 0 and "DRRS not supported" is represented by
+	 * driver->drrs_enabled=false
+	 */
+	if (!driver->drrs_enabled)
+		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
 }

 static void
@@ -604,19 +636,217 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	}
 }

+static u8 *goto_next_sequence(u8 *data, int *size)
+{
+	u16 len;
+	int tmp = *size;
+
+	if (--tmp < 0)
+		return NULL;
+
+	/* goto first element */
+	data++;
+	while (1) {
+		switch (*data) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			/*
+			 * skip by this element payload size
+			 * skip elem id, command flag and data type
+			 */
+			tmp -= 5;
+			if (tmp < 0)
+				return NULL;
+
+			data += 3;
+			len = *((u16 *)data);
+
+			tmp -= len;
+			if (tmp < 0)
+				return NULL;
+
+			/* skip by len */
+			data = data + 2 + len;
+			break;
+		case MIPI_SEQ_ELEM_DELAY:
+			/* skip by elem id, and delay is 4 bytes */
+			tmp -= 5;
+			if (tmp < 0)
+				return NULL;
+
+			data += 5;
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			tmp -= 3;
+			if (tmp < 0)
+				return NULL;
+
+			data += 3;
+			break;
+		default:
+			DRM_ERROR("Unknown element\n");
+			return NULL;
+		}
+
+		/* end of sequence ? */
+		if (*data == 0)
+			break;
+	}
+
+	/* goto next sequence or end of block byte */
+	if (--tmp < 0)
+		return NULL;
+
+	data++;
+
+	/* update amount of data left for the sequence block to be parsed */
+	*size = tmp;
+	return data;
+}
+
 static void
 parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
-	struct bdb_mipi *mipi;
+	struct bdb_mipi_config *start;
+	struct bdb_mipi_sequence *sequence;
+	struct mipi_config *config;
+	struct mipi_pps_data *pps;
+	u8 *data, *seq_data;
+	int i, panel_id, seq_size;
+	u16 block_size;
+
+	/* Initialize this to undefined indicating no generic MIPI support */
+	dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+	/* Block #40 is already parsed and panel_fixed_mode is
+	 * stored in dev_priv->lfp_lvds_vbt_mode
+	 * reuse this when needed
+	 */

-	mipi = find_section(bdb, BDB_MIPI_CONFIG);
-	if (!mipi) {
-		DRM_DEBUG_KMS("No MIPI BDB found");
+	/* Parse #52 for panel index used from panel_type already
+	 * parsed
+	 */
+	start = find_section(bdb, BDB_MIPI_CONFIG);
+	if (!start) {
+		DRM_DEBUG_KMS("No MIPI config BDB found");
 		return;
 	}

-	/* XXX: add more info */
+	DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
+								panel_type);
+
+	/*
+	 * get hold of the correct configuration block and pps data as per
+	 * the panel_type as index
+	 */
+	config = &start->config[panel_type];
+	pps = &start->pps[panel_type];
+
+	/* store as of now full data. Trim when we realise all is not needed */
+	dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.config)
+		return;
+
+	dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.pps) {
+		kfree(dev_priv->vbt.dsi.config);
+		return;
+	}
+
+	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+
+	/* Check if we have sequence block as well */
+	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+	if (!sequence) {
+		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+
+	block_size = get_blocksize(sequence);
+
+	/*
+	 * parse the sequence block for individual sequences
+	 */
+	dev_priv->vbt.dsi.seq_version = sequence->version;
+
+	seq_data = &sequence->data[0];
+
+	/*
+	 * sequence block is variable length and hence we need to parse and
+	 * get the sequence data for specific panel id
+	 */
+	for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
+		panel_id = *seq_data;
+		seq_size = *((u16 *) (seq_data + 1));
+		if (panel_id == panel_type)
+			break;
+
+		/* skip the sequence including seq header of 3 bytes */
+		seq_data = seq_data + 3 + seq_size;
+		if ((seq_data - &sequence->data[0]) > block_size) {
+			DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
+			return;
+		}
+	}
+
+	if (i == MAX_MIPI_CONFIGURATIONS) {
+		DRM_ERROR("Sequence block detected but no valid configuration\n");
+		return;
+	}
+
+	/* check if found sequence is completely within the sequence block
+	 * just being paranoid */
+	if (seq_size > block_size) {
+		DRM_ERROR("Corrupted sequence/size, bailing out\n");
+		return;
+	}
+
+	/* skip the panel id(1 byte) and seq size(2 bytes) */
+	dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.data)
+		return;
+
+	/*
+	 * loop into the sequence data and split into multiple sequences
+	 * There are only 5 types of sequences as of now
+	 */
+	data = dev_priv->vbt.dsi.data;
+	dev_priv->vbt.dsi.size = seq_size;
+
+	/* two consecutive 0x00 indicate end of all sequences */
+	while (1) {
+		int seq_id = *data;
+		if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
+			dev_priv->vbt.dsi.sequence[seq_id] = data;
+			DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
+		} else {
+			DRM_ERROR("undefined sequence\n");
+			goto err;
+		}
+
+		/* partial parsing to skip elements */
+		data = goto_next_sequence(data, &seq_size);
+
+		if (data == NULL) {
+			DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+			goto err;
+		}
+
+		if (*data == 0)
+			break; /* end of sequence reached */
+	}
+
+	DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+	return;
+err:
+	kfree(dev_priv->vbt.dsi.data);
+	dev_priv->vbt.dsi.data = NULL;
+
+	/* error during parsing so set all pointers to null
+	 * because of partial parsing */
+	memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
 }

 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
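
For reference while reading goto_next_sequence() and the panel-id loop above, this is the record layout they imply; it is reconstructed from the parsing code, not quoted from the VBT specification:

	/*
	 * one per-panel record inside the MIPI sequence block:
	 *   u8  panel_id;        matched against panel_type
	 *   u16 seq_size;        bytes of sequence data that follow
	 *   u8  seq_id;          MIPI_SEQ_* value
	 *   ...elements until a 0x00 byte:
	 *     SEND_PKT: u8 elem_id, u8 cmd_flag, u8 data_type,
	 *               u16 len, u8 payload[len]
	 *     DELAY:    u8 elem_id, u32 delay (4 bytes)
	 *     GPIO:     u8 elem_id plus 2 more bytes
	 *   u8 0x00;             sequence terminator
	 * two consecutive 0x00 bytes end the whole list of sequences
	 */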
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f27f7b282465..6009debebaaf 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -282,6 +282,9 @@ struct bdb_general_definitions {
 	union child_device_config devices[0];
 } __packed;

+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
+
 struct bdb_lvds_options {
 	u8 panel_type;
 	u8 rsvd1;
@@ -294,6 +297,18 @@ struct bdb_lvds_options {
 	u8 lvds_edid:1;
 	u8 rsvd2:1;
 	u8 rsvd4;
+	/* LVDS Panel channel bits stored here */
+	u32 lvds_panel_channel_bits;
+	/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+	u16 ssc_bits;
+	u16 ssc_freq;
+	u16 ssc_ddt;
+	/* Panel color depth defined here */
+	u16 panel_color_depth;
+	/* LVDS panel type bits stored here */
+	u32 dps_panel_type_bits;
+	/* LVDS backlight control type bits stored here */
+	u32 blt_control_type_bits;
 } __packed;

 /* LFP pointer table contains entries to the struct below */
@@ -482,6 +497,20 @@ struct bdb_driver_features {

 	u8 hdmi_termination;
 	u8 custom_vbt_version;
+	/* Driver features data block */
+	u16 rmpm_enabled:1;
+	u16 s2ddt_enabled:1;
+	u16 dpst_enabled:1;
+	u16 bltclt_enabled:1;
+	u16 adb_enabled:1;
+	u16 drrs_enabled:1;
+	u16 grs_enabled:1;
+	u16 gpmt_enabled:1;
+	u16 tbt_enabled:1;
+	u16 psr_enabled:1;
+	u16 ips_enabled:1;
+	u16 reserved3:4;
+	u16 pc_feature_valid:1;
 } __packed;

 #define EDP_18BPP 0
@@ -870,4 +899,35 @@ struct bdb_mipi_sequence {
 	u8 data[0];
 };

+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+	MIPI_SEQ_UNDEFINED = 0,
+	MIPI_SEQ_ASSERT_RESET,
+	MIPI_SEQ_INIT_OTP,
+	MIPI_SEQ_DISPLAY_ON,
+	MIPI_SEQ_DISPLAY_OFF,
+	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+	MIPI_SEQ_ELEM_UNDEFINED = 0,
+	MIPI_SEQ_ELEM_SEND_PKT,
+	MIPI_SEQ_ELEM_DELAY,
+	MIPI_SEQ_ELEM_GPIO,
+	MIPI_SEQ_ELEM_STATUS,
+	MIPI_SEQ_ELEM_MAX
+};
+
+enum mipi_gpio_pin_index {
+	MIPI_GPIO_UNDEFINED = 0,
+	MIPI_GPIO_PANEL_ENABLE,
+	MIPI_GPIO_BL_ENABLE,
+	MIPI_GPIO_PWM_ENABLE,
+	MIPI_GPIO_RESET_N,
+	MIPI_GPIO_PWR_DOWN_R,
+	MIPI_GPIO_STDBY_RST_N,
+	MIPI_GPIO_MAX
+};
+
 #endif /* _I830_BIOS_H_ */
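
A sketch of how the dsi code can consume what parse_mipi() stored, assuming vbt.dsi.sequence[] is indexed by the enum mipi_seq values above (send_mipi_sequence() is a hypothetical helper, not part of this patch):

	const u8 *seq = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
	if (seq)
		send_mipi_sequence(intel_dsi, seq); /* first byte is the seq_id */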
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 69bcc42a0e44..b39d0367dd68 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -765,7 +765,7 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
 	frame = I915_READ(frame_reg);

 	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
-		DRM_DEBUG_KMS("vblank wait timed out\n");
+		WARN(1, "vblank wait timed out\n");
 }

 /**
@@ -1804,16 +1804,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)

 	I915_WRITE(reg, val | PIPECONF_ENABLE);
 	POSTING_READ(reg);
-
-	/*
-	 * There's no guarantee the pipe will really start running now. It
-	 * depends on the Gen, the output type and the relative order between
-	 * pipe and plane enabling. Avoid waiting on HSW+ since it's not
-	 * necessary.
-	 * TODO: audit the previous gens.
-	 */
-	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-		intel_wait_for_vblank(dev_priv->dev, pipe);
 }

 /**
@@ -2166,15 +2156,6 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
 	u32 dspcntr;
 	u32 reg;

-	switch (plane) {
-	case 0:
-	case 1:
-		break;
-	default:
-		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
-		return -EINVAL;
-	}
-
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;

@@ -2267,16 +2248,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
 	u32 dspcntr;
 	u32 reg;

-	switch (plane) {
-	case 0:
-	case 1:
-	case 2:
-		break;
-	default:
-		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
-		return -EINVAL;
-	}
-
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;

@@ -3602,10 +3573,13 @@ void hsw_disable_ips(struct intel_crtc *crtc)
 		return;

 	assert_plane_enabled(dev_priv, crtc->plane);
-	if (IS_BROADWELL(crtc->base.dev)) {
+	if (IS_BROADWELL(dev)) {
 		mutex_lock(&dev_priv->rps.hw_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		mutex_unlock(&dev_priv->rps.hw_lock);
+		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
+		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+			DRM_ERROR("Timed out waiting for IPS disable\n");
 	} else {
 		I915_WRITE(IPS_CTL, 0);
 		POSTING_READ(IPS_CTL);
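
The added 42ms poll exists because the pcode write only requests the IPS disable; completion shows up later in IPS_CTL. Expanded into an explicit loop, the wait_for() above is roughly equivalent to this sketch (the real macro is shared i915 infrastructure and handles timing more carefully):

	unsigned long timeout = jiffies + msecs_to_jiffies(42);

	while (I915_READ(IPS_CTL) & IPS_ENABLE) {	/* still enabled? */
		if (time_after(jiffies, timeout)) {
			DRM_ERROR("Timed out waiting for IPS disable\n");
			break;
		}
		usleep_range(50, 100);			/* back off briefly */
	}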
@@ -3662,6 +3636,46 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 	hsw_enable_ips(intel_crtc);
 }

+static void ilk_crtc_enable_planes(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+	intel_enable_planes(crtc);
+	intel_crtc_update_cursor(crtc, true);
+
+	hsw_enable_ips(intel_crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void ilk_crtc_disable_planes(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+
+	if (dev_priv->fbc.plane == plane)
+		intel_disable_fbc(dev);
+
+	hsw_disable_ips(intel_crtc);
+
+	intel_crtc_update_cursor(crtc, false);
+	intel_disable_planes(crtc);
+	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3669,7 +3683,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;

 	WARN_ON(!crtc->enabled);

@@ -3705,23 +3718,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)

 	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
-	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
-	intel_enable_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);

 	if (intel_crtc->config.has_pch_encoder)
 		ironlake_pch_enable(crtc);

-	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
-	mutex_unlock(&dev->struct_mutex);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);

 	if (HAS_PCH_CPT(dev))
 		cpt_verify_modeset(dev, intel_crtc->pipe);

+	ilk_crtc_enable_planes(crtc);
+
 	/*
 	 * There seems to be a race in PCH platform hw (at least on some
 	 * outputs) where an enabled pipe still completes any pageflip right
@@ -3739,47 +3747,6 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }

-static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-
-	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
-	intel_enable_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
-
-	hsw_enable_ips(intel_crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
-	mutex_unlock(&dev->struct_mutex);
-}
-
-static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-
-	intel_crtc_wait_for_pending_flips(crtc);
-	drm_vblank_off(dev, pipe);
-
-	/* FBC must be disabled before disabling the plane on HSW. */
-	if (dev_priv->fbc.plane == plane)
-		intel_disable_fbc(dev);
-
-	hsw_disable_ips(intel_crtc);
-
-	intel_crtc_update_cursor(crtc, false);
-	intel_disable_planes(crtc);
-	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
-}
-
 /*
  * This implements the workaround described in the "notes" section of the mode
  * set sequence documentation. When going from no pipes or single pipe to
@@ -3862,7 +3829,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
 	haswell_mode_set_planes_workaround(intel_crtc);
-	haswell_crtc_enable_planes(crtc);
+	ilk_crtc_enable_planes(crtc);
 }

 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3887,26 +3854,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	u32 reg, temp;

-
 	if (!intel_crtc->active)
 		return;

+	ilk_crtc_disable_planes(crtc);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);

-	intel_crtc_wait_for_pending_flips(crtc);
-	drm_vblank_off(dev, pipe);
-
-	if (dev_priv->fbc.plane == plane)
-		intel_disable_fbc(dev);
-
-	intel_crtc_update_cursor(crtc, false);
-	intel_disable_planes(crtc);
-	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
-
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

@@ -3965,7 +3922,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	if (!intel_crtc->active)
 		return;

-	haswell_crtc_disable_planes(crtc);
+	ilk_crtc_disable_planes(crtc);

 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		intel_opregion_notify_encoder(encoder, false);
@@ -4207,6 +4164,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, cmd;

+	WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
+	dev_priv->vlv_cdclk_freq = cdclk;
+
 	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
 		cmd = 2;
 	else if (cdclk == 266)
@@ -4261,7 +4221,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	intel_i2c_reset(dev);
 }

-static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
 {
 	int cur_cdclk, vco;
 	int divider;
@@ -4282,10 +4242,6 @@ static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
 				 int max_pixclk)
 {
-	int cur_cdclk;
-
-	cur_cdclk = valleyview_cur_cdclk(dev_priv);
-
 	/*
 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
 	 * 200MHz
@@ -4327,9 +4283,9 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc;
 	int max_pixclk = intel_mode_max_pixclk(dev_priv);
-	int cur_cdclk = valleyview_cur_cdclk(dev_priv);

-	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
+	    dev_priv->vlv_cdclk_freq)
 		return;

 	/* disable/enable all currently active pipes while we change cdclk */
@@ -4343,10 +4299,9 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int max_pixclk = intel_mode_max_pixclk(dev_priv);
-	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
 	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

-	if (req_cdclk != cur_cdclk)
+	if (req_cdclk != dev_priv->vlv_cdclk_freq)
 		valleyview_set_cdclk(dev, req_cdclk);
 	modeset_update_crtc_power_domains(dev);
 }
@@ -4387,7 +4342,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)

 	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
 	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
@@ -4426,7 +4383,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)

 	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+
 	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	/* The fixup needs to happen before cursor is enabled */
@@ -5245,9 +5204,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
 	crtc->config.dpll_hw_state.dpll_md = dpll_md;

-	if (crtc->config.has_dp_encoder)
-		intel_dp_set_m_n(crtc);
-
 	mutex_unlock(&dev_priv->dpio_lock);
 }

@@ -5325,9 +5281,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
 		crtc->config.dpll_hw_state.dpll_md = dpll_md;
 	}
-
-	if (crtc->config.has_dp_encoder)
-		intel_dp_set_m_n(crtc);
 }

 static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -5656,6 +5609,9 @@ skip_dpll:
 		dspcntr |= DISPPLANE_SEL_PIPE_B;
 	}

+	if (intel_crtc->config.has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc);
+
 	intel_set_pipe_timings(intel_crtc);

 	/* pipesrc and dspsize control the size that is scaled from,
@@ -6880,8 +6836,6 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	struct intel_crtc *crtc;
-	unsigned long irqflags;
-	uint32_t val;

 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
 		WARN(crtc->active, "CRTC for pipe %c enabled\n",
@@ -6902,14 +6856,29 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	     "Utility pin enabled\n");
 	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	val = I915_READ(DEIMR);
-	WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
-	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
-	val = I915_READ(SDEIMR);
-	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
-	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	/*
+	 * In theory we can still leave IRQs enabled, as long as only the HPD
+	 * interrupts remain enabled. We used to check for that, but since it's
+	 * gen-specific and since we only disable LCPLL after we fully disable
+	 * the interrupts, the check below should be enough.
+	 */
+	WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (IS_HASWELL(dev)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
+					    val))
+			DRM_ERROR("Failed to disable D_COMP\n");
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	} else {
+		I915_WRITE(D_COMP, val);
+	}
+	POSTING_READ(D_COMP);
 }

 /*
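
With hsw_write_dcomp() in place, every D_COMP update becomes a plain read-modify-write through the helper, which routes HSW via the pcode mailbox and other platforms via MMIO; the next two hunks are exactly this pattern:

	uint32_t val = I915_READ(D_COMP);
	val |= D_COMP_COMP_DISABLE;	/* or _FORCE / ~_DISABLE on restore */
	hsw_write_dcomp(dev_priv, val);	/* mailbox on HSW, MMIO otherwise */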
@@ -6949,11 +6918,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,

 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_DISABLE;
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
-		DRM_ERROR("Failed to disable D_COMP\n");
-	mutex_unlock(&dev_priv->rps.hw_lock);
-	POSTING_READ(D_COMP);
+	hsw_write_dcomp(dev_priv, val);
 	ndelay(100);

 	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
@@ -7008,11 +6973,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
-		DRM_ERROR("Failed to enable D_COMP\n");
-	mutex_unlock(&dev_priv->rps.hw_lock);
-	POSTING_READ(D_COMP);
+	hsw_write_dcomp(dev_priv, val);

 	val = I915_READ(LCPLL_CTL);
 	val &= ~LCPLL_PLL_DISABLE;
@@ -7066,8 +7027,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	uint32_t val;

-	WARN_ON(!HAS_PC8(dev));
-
 	DRM_DEBUG_KMS("Enabling package C8+\n");

 	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7077,7 +7036,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 	}

 	lpt_disable_clkout_dp(dev);
-	hsw_runtime_pm_disable_interrupts(dev);
+	intel_runtime_pm_disable_interrupts(dev);
 	hsw_disable_lcpll(dev_priv, true, true);
 }

@@ -7086,12 +7045,10 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	uint32_t val;

-	WARN_ON(!HAS_PC8(dev));
-
 	DRM_DEBUG_KMS("Disabling package C8+\n");

 	hsw_restore_lcpll(dev_priv);
-	hsw_runtime_pm_restore_interrupts(dev);
+	intel_runtime_pm_restore_interrupts(dev);
 	lpt_init_pch_refclk(dev);

 	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7107,6 +7064,11 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }

+static void snb_modeset_global_resources(struct drm_device *dev)
+{
+	modeset_update_crtc_power_domains(dev);
+}
+
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
 	modeset_update_crtc_power_domains(dev);
@@ -7374,7 +7336,6 @@ static void haswell_write_eld(struct drm_connector *connector,
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
-	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t eldv;
 	uint32_t i;
@@ -7387,17 +7348,14 @@ static void haswell_write_eld(struct drm_connector *connector,
 	int aud_config = HSW_AUD_CFG(pipe);
 	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

-
-	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
-
 	/* Audio output enable */
 	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
 	tmp = I915_READ(aud_cntrl_st2);
 	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
 	I915_WRITE(aud_cntrl_st2, tmp);
+	POSTING_READ(aud_cntrl_st2);

-	/* Wait for 1 vertical blank */
-	intel_wait_for_vblank(dev, pipe);
+	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);

 	/* Set ELD valid state */
 	tmp = I915_READ(aud_cntrl_st2);
@@ -8836,8 +8794,16 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	}

 	len = 4;
-	if (ring->id == RCS)
+	if (ring->id == RCS) {
 		len += 6;
+		/*
+		 * On Gen 8, SRM is now taking an extra dword to accommodate
+		 * 48-bit addresses, and we need a NOOP for the batch size to
+		 * stay even.
+		 */
+		if (IS_GEN8(dev))
+			len += 2;
+	}

 	/*
 	 * BSpec MI_DISPLAY_FLIP for IVB:
@@ -8872,10 +8838,18 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
-				      MI_SRM_LRM_GLOBAL_GTT);
+		if (IS_GEN8(dev))
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+					      MI_SRM_LRM_GLOBAL_GTT);
+		else
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+					      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+		if (IS_GEN8(dev)) {
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
+		}
 	}

 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
@@ -10578,16 +10552,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10578 10552
10579 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 10553 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10580 10554
10581 if (IS_GEN2(dev)) {
10582 intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
10583 intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
10584 } else {
10585 intel_crtc->max_cursor_width = CURSOR_WIDTH;
10586 intel_crtc->max_cursor_height = CURSOR_HEIGHT;
10587 }
10588 dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
10589 dev->mode_config.cursor_height = intel_crtc->max_cursor_height;
10590
10591 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 10555 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10592 for (i = 0; i < 256; i++) { 10556 for (i = 0; i < 256; i++) {
10593 intel_crtc->lut_r[i] = i; 10557 intel_crtc->lut_r[i] = i;
@@ -11088,6 +11052,8 @@ static void intel_init_display(struct drm_device *dev)
11088 } else if (IS_GEN6(dev)) { 11052 } else if (IS_GEN6(dev)) {
11089 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 11053 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
11090 dev_priv->display.write_eld = ironlake_write_eld; 11054 dev_priv->display.write_eld = ironlake_write_eld;
11055 dev_priv->display.modeset_global_resources =
11056 snb_modeset_global_resources;
11091 } else if (IS_IVYBRIDGE(dev)) { 11057 } else if (IS_IVYBRIDGE(dev)) {
11092 /* FIXME: detect B0+ stepping and use auto training */ 11058 /* FIXME: detect B0+ stepping and use auto training */
11093 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 11059 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
@@ -11338,6 +11304,15 @@ void intel_modeset_init(struct drm_device *dev)
11338 dev->mode_config.max_width = 8192; 11304 dev->mode_config.max_width = 8192;
11339 dev->mode_config.max_height = 8192; 11305 dev->mode_config.max_height = 8192;
11340 } 11306 }
11307
11308 if (IS_GEN2(dev)) {
11309 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
11310 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
11311 } else {
11312 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
11313 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
11314 }
11315
11341 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 11316 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
11342 11317
11343 DRM_DEBUG_KMS("%d display pipe%s available.\n", 11318 DRM_DEBUG_KMS("%d display pipe%s available.\n",
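A note on the intel_gen7_queue_flip() hunks above: on Gen8 the MI_STORE_REGISTER_MEM command takes one extra address dword for 48-bit addressing, so the RCS path pads with an MI_NOOP to keep the total dword count even. A stand-alone sketch of the length arithmetic (plain C, not driver code; the helper name is invented):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the len computation in the diff: 4 base dwords, +6 for the
     * LRI/SRM DERRMR save/restore on RCS, +2 on gen8 (the extra SRM
     * address dword plus one MI_NOOP so the total stays even) */
    static int flip_batch_len(bool rcs, int gen)
    {
            int len = 4;

            if (rcs) {
                    len += 6;
                    if (gen == 8)
                            len += 2;
            }
            return len;
    }

    int main(void)
    {
            assert(flip_batch_len(true, 7) % 2 == 0);   /* 10 dwords */
            assert(flip_batch_len(true, 8) % 2 == 0);   /* 12 dwords */
            printf("gen7: %d, gen8: %d\n",
                   flip_batch_len(true, 7), flip_batch_len(true, 8));
            return 0;
    }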
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index dfa85289f28f..44df493ad399 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -738,6 +738,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 	}
 }
 
+static void
+intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+	I915_WRITE(PIPE_DATA_M2(transcoder),
+		TU_SIZE(m_n->tu) | m_n->gmch_m);
+	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
+	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
+	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
+}
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_config *pipe_config)
@@ -842,6 +856,14 @@ found:
 			       pipe_config->port_clock,
 			       &pipe_config->dp_m_n);
 
+	if (intel_connector->panel.downclock_mode != NULL &&
+		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
+			intel_link_compute_m_n(bpp, lane_count,
+				intel_connector->panel.downclock_mode->clock,
+				pipe_config->port_clock,
+				&pipe_config->dp_m2_n2);
+	}
+
 	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
 	return true;
@@ -1044,7 +1066,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
 	u32 pp;
 	u32 pp_stat_reg, pp_ctrl_reg;
 	bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -1057,7 +1082,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	intel_runtime_pm_get(dev_priv);
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
 
 	DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
@@ -1104,6 +1130,11 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
 	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
+		struct intel_digital_port *intel_dig_port =
+						dp_to_dig_port(intel_dp);
+		struct intel_encoder *intel_encoder = &intel_dig_port->base;
+		enum intel_display_power_domain power_domain;
+
 		DRM_DEBUG_KMS("Turning eDP VDD off\n");
 
 		pp = ironlake_get_pp_control(intel_dp);
@@ -1122,7 +1153,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 		if ((pp & POWER_TARGET_ON) == 0)
 			intel_dp->last_power_cycle = jiffies;
 
-		intel_runtime_pm_put(dev_priv);
+		power_domain = intel_display_port_power_domain(intel_encoder);
+		intel_display_power_put(dev_priv, power_domain);
 	}
 }
 
@@ -1206,8 +1238,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
 
 void intel_edp_panel_off(struct intel_dp *intel_dp)
 {
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
 	u32 pp;
 	u32 pp_ctrl_reg;
 
@@ -1237,7 +1272,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
 	wait_panel_off(intel_dp);
 
 	/* We got a reference when we enabled the VDD. */
-	intel_runtime_pm_put(dev_priv);
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_put(dev_priv, power_domain);
 }
 
 void intel_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1778,17 +1814,23 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 		intel_dp_link_down(intel_dp);
 }
 
-static void intel_post_disable_dp(struct intel_encoder *encoder)
+static void g4x_post_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
-	struct drm_device *dev = encoder->base.dev;
 
-	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
-		intel_dp_link_down(intel_dp);
-		if (!IS_VALLEYVIEW(dev))
-			ironlake_edp_pll_off(intel_dp);
-	}
+	if (port != PORT_A)
+		return;
+
+	intel_dp_link_down(intel_dp);
+	ironlake_edp_pll_off(intel_dp);
+}
+
+static void vlv_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_dp_link_down(intel_dp);
 }
 
 static void intel_enable_dp(struct intel_encoder *encoder)
@@ -3613,6 +3655,130 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		      I915_READ(pp_div_reg));
 }
 
+void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp = NULL;
+	struct intel_crtc_config *config = NULL;
+	struct intel_crtc *intel_crtc = NULL;
+	struct intel_connector *intel_connector = dev_priv->drrs.connector;
+	u32 reg, val;
+	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
+
+	if (refresh_rate <= 0) {
+		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+		return;
+	}
+
+	if (intel_connector == NULL) {
+		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
+		return;
+	}
+
+	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
+		return;
+	}
+
+	encoder = intel_attached_encoder(&intel_connector->base);
+	intel_dp = enc_to_intel_dp(&encoder->base);
+	intel_crtc = encoder->new_crtc;
+
+	if (!intel_crtc) {
+		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+		return;
+	}
+
+	config = &intel_crtc->config;
+
+	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
+		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+		return;
+	}
+
+	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
+		index = DRRS_LOW_RR;
+
+	if (index == intel_dp->drrs_state.refresh_rate_type) {
+		DRM_DEBUG_KMS(
+			"DRRS requested for previously set RR...ignoring\n");
+		return;
+	}
+
+	if (!intel_crtc->active) {
+		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+		return;
+	}
+
+	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
+		val = I915_READ(reg);
+		if (index > DRRS_HIGH_RR) {
+			val |= PIPECONF_EDP_RR_MODE_SWITCH;
+			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
+		} else {
+			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+		}
+		I915_WRITE(reg, val);
+	}
+
+	/*
+	 * mutex taken to ensure that there is no race between different
+	 * drrs calls trying to update refresh rate. This scenario may occur
+	 * in future when idleness detection based DRRS in kernel and
+	 * possible calls from user space to set different RR are made.
+	 */
+
+	mutex_lock(&intel_dp->drrs_state.mutex);
+
+	intel_dp->drrs_state.refresh_rate_type = index;
+
+	mutex_unlock(&intel_dp->drrs_state.mutex);
+
+	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+}
+
+static struct drm_display_mode *
+intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
+			struct intel_connector *intel_connector,
+			struct drm_display_mode *fixed_mode)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *downclock_mode = NULL;
+
+	if (INTEL_INFO(dev)->gen <= 6) {
+		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+		return NULL;
+	}
+
+	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+		DRM_INFO("VBT doesn't support DRRS\n");
+		return NULL;
+	}
+
+	downclock_mode = intel_find_panel_downclock
+					(dev, fixed_mode, connector);
+
+	if (!downclock_mode) {
+		DRM_INFO("DRRS not supported\n");
+		return NULL;
+	}
+
+	dev_priv->drrs.connector = intel_connector;
+
+	mutex_init(&intel_dp->drrs_state.mutex);
+
+	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
+
+	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
+	DRM_INFO("seamless DRRS supported for eDP panel.\n");
+	return downclock_mode;
+}
+
 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 				     struct intel_connector *intel_connector,
 				     struct edp_power_seq *power_seq)
@@ -3623,10 +3789,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *fixed_mode = NULL;
+	struct drm_display_mode *downclock_mode = NULL;
 	bool has_dpcd;
 	struct drm_display_mode *scan;
 	struct edid *edid;
 
+	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
+
 	if (!is_edp(intel_dp))
 		return true;
 
@@ -3677,6 +3846,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
 			fixed_mode = drm_mode_duplicate(dev, scan);
+			downclock_mode = intel_dp_drrs_init(
+						intel_dig_port,
+						intel_connector, fixed_mode);
 			break;
 		}
 	}
@@ -3690,7 +3862,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
-	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
 	intel_panel_setup_backlight(connector);
 
 	return true;
@@ -3841,16 +4013,17 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->compute_config = intel_dp_compute_config;
 	intel_encoder->mode_set = intel_dp_mode_set;
 	intel_encoder->disable = intel_disable_dp;
-	intel_encoder->post_disable = intel_post_disable_dp;
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	if (IS_VALLEYVIEW(dev)) {
 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
+		intel_encoder->post_disable = vlv_post_disable_dp;
 	} else {
 		intel_encoder->pre_enable = g4x_pre_enable_dp;
 		intel_encoder->enable = g4x_enable_dp;
+		intel_encoder->post_disable = g4x_post_disable_dp;
 	}
 
 	intel_dig_port->port = port;
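For orientation on the DRRS additions above: the driver precomputes a second link M/N pair (dp_m2_n2) for the panel's downclocked mode, then flips PIPECONF_EDP_RR_MODE_SWITCH at runtime to move between the two rates. A toy model of the rate-type selection, stand-alone C with simplified types (not the driver's structs):

    #include <stdio.h>

    enum edp_drrs_refresh_rate_type { DRRS_HIGH_RR, DRRS_LOW_RR };

    /* same decision as intel_dp_set_drrs_state(): only an exact match on
     * the downclocked vrefresh selects the low-RR (M2/N2) divider set */
    static enum edp_drrs_refresh_rate_type
    drrs_pick(int refresh_rate, int downclock_vrefresh)
    {
            return refresh_rate == downclock_vrefresh ?
                    DRRS_LOW_RR : DRRS_HIGH_RR;
    }

    int main(void)
    {
            printf("%d %d\n", drrs_pick(40, 40), drrs_pick(60, 40)); /* 1 0 */
            return 0;
    }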
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 328b1a70264b..b885df150910 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -81,8 +81,8 @@
81/* Maximum cursor sizes */ 81/* Maximum cursor sizes */
82#define GEN2_CURSOR_WIDTH 64 82#define GEN2_CURSOR_WIDTH 64
83#define GEN2_CURSOR_HEIGHT 64 83#define GEN2_CURSOR_HEIGHT 64
84#define CURSOR_WIDTH 256 84#define MAX_CURSOR_WIDTH 256
85#define CURSOR_HEIGHT 256 85#define MAX_CURSOR_HEIGHT 256
86 86
87#define INTEL_I2C_BUS_DVO 1 87#define INTEL_I2C_BUS_DVO 1
88#define INTEL_I2C_BUS_SDVO 2 88#define INTEL_I2C_BUS_SDVO 2
@@ -306,6 +306,9 @@ struct intel_crtc_config {
306 int pipe_bpp; 306 int pipe_bpp;
307 struct intel_link_m_n dp_m_n; 307 struct intel_link_m_n dp_m_n;
308 308
309 /* m2_n2 for eDP downclock */
310 struct intel_link_m_n dp_m2_n2;
311
309 /* 312 /*
310 * Frequence the dpll for the port should run at. Differs from the 313 * Frequence the dpll for the port should run at. Differs from the
311 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also 314 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -343,6 +346,9 @@ struct intel_pipe_wm {
343 struct intel_wm_level wm[5]; 346 struct intel_wm_level wm[5];
344 uint32_t linetime; 347 uint32_t linetime;
345 bool fbc_wm_enabled; 348 bool fbc_wm_enabled;
349 bool pipe_enabled;
350 bool sprites_enabled;
351 bool sprites_scaled;
346}; 352};
347 353
348struct intel_crtc { 354struct intel_crtc {
@@ -374,7 +380,6 @@ struct intel_crtc {
374 uint32_t cursor_addr; 380 uint32_t cursor_addr;
375 int16_t cursor_x, cursor_y; 381 int16_t cursor_x, cursor_y;
376 int16_t cursor_width, cursor_height; 382 int16_t cursor_width, cursor_height;
377 int16_t max_cursor_width, max_cursor_height;
378 bool cursor_visible; 383 bool cursor_visible;
379 384
380 struct intel_plane_config plane_config; 385 struct intel_plane_config plane_config;
@@ -484,6 +489,17 @@ struct intel_hdmi {
484 489
485#define DP_MAX_DOWNSTREAM_PORTS 0x10 490#define DP_MAX_DOWNSTREAM_PORTS 0x10
486 491
492/**
493 * HIGH_RR is the highest eDP panel refresh rate read from EDID
494 * LOW_RR is the lowest eDP panel refresh rate found from EDID
495 * parsing for same resolution.
496 */
497enum edp_drrs_refresh_rate_type {
498 DRRS_HIGH_RR,
499 DRRS_LOW_RR,
500 DRRS_MAX_RR, /* RR count */
501};
502
487struct intel_dp { 503struct intel_dp {
488 uint32_t output_reg; 504 uint32_t output_reg;
489 uint32_t aux_ch_ctl_reg; 505 uint32_t aux_ch_ctl_reg;
@@ -522,6 +538,12 @@ struct intel_dp {
522 bool has_aux_irq, 538 bool has_aux_irq,
523 int send_bytes, 539 int send_bytes,
524 uint32_t aux_clock_divider); 540 uint32_t aux_clock_divider);
541 struct {
542 enum drrs_support_type type;
543 enum edp_drrs_refresh_rate_type refresh_rate_type;
544 struct mutex mutex;
545 } drrs_state;
546
525}; 547};
526 548
527struct intel_digital_port { 549struct intel_digital_port {
@@ -629,8 +651,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
629void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 651void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
630void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 652void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
631void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 653void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
632void hsw_runtime_pm_disable_interrupts(struct drm_device *dev); 654void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
633void hsw_runtime_pm_restore_interrupts(struct drm_device *dev); 655void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
634 656
635 657
636/* intel_crt.c */ 658/* intel_crt.c */
@@ -666,6 +688,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
666const char *intel_output_name(int output); 688const char *intel_output_name(int output);
667bool intel_has_pending_fb_unpin(struct drm_device *dev); 689bool intel_has_pending_fb_unpin(struct drm_device *dev);
668int intel_pch_rawclk(struct drm_device *dev); 690int intel_pch_rawclk(struct drm_device *dev);
691int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
669void intel_mark_busy(struct drm_device *dev); 692void intel_mark_busy(struct drm_device *dev);
670void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 693void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
671 struct intel_ring_buffer *ring); 694 struct intel_ring_buffer *ring);
@@ -774,7 +797,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
774void intel_edp_psr_enable(struct intel_dp *intel_dp); 797void intel_edp_psr_enable(struct intel_dp *intel_dp);
775void intel_edp_psr_disable(struct intel_dp *intel_dp); 798void intel_edp_psr_disable(struct intel_dp *intel_dp);
776void intel_edp_psr_update(struct drm_device *dev); 799void intel_edp_psr_update(struct drm_device *dev);
777 800void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
778 801
779/* intel_dsi.c */ 802/* intel_dsi.c */
780bool intel_dsi_init(struct drm_device *dev); 803bool intel_dsi_init(struct drm_device *dev);
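Background on the new dp_m2_n2 field: a DP M/N pair encodes the ratio of stream bandwidth to link bandwidth, so the downclocked mode simply gets a second ratio computed from its lower pixel clock. Rough arithmetic below, stand-alone; the driver does this in intel_link_compute_m_n(), and the constants here are purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* illustrative values: 24bpp stream over 4 lanes */
            uint64_t bpp = 24, nlanes = 4;
            uint64_t pixel_clock_khz = 100000;  /* downclock_mode->clock */
            uint64_t link_clock_khz = 270000;   /* port_clock */

            /* data M/N = stream bits / link capacity (8 bits per lane-clock) */
            double m_over_n = (double)(bpp * pixel_clock_khz) /
                              (double)(nlanes * 8 * link_clock_khz);

            printf("data M/N ratio for the downclocked mode: %.4f\n", m_over_n);
            return 0;
    }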
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 33656647f8bc..4e271c768fd0 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -110,6 +110,15 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 
 	DRM_DEBUG_KMS("\n");
 
+	mutex_lock(&dev_priv->dpio_lock);
+	/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
+	 * needed every time after power gate */
+	vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* bandgap reset is needed every time we do power gate */
+	band_gap_reset(dev_priv);
+
 	val = I915_READ(MIPI_PORT_CTRL(pipe));
 	I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
 	usleep_range(1000, 1500);
@@ -122,21 +131,6 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 	I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
 	usleep_range(2000, 2500);
 }
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
-{
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
-	DRM_DEBUG_KMS("\n");
-
-	if (intel_dsi->dev.dev_ops->panel_reset)
-		intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
-
-	/* put device in ready state */
-	intel_dsi_device_ready(encoder);
-
-	if (intel_dsi->dev.dev_ops->send_otp_cmds)
-		intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
-}
 
 static void intel_dsi_enable(struct intel_encoder *encoder)
 {
@@ -153,18 +147,63 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 		I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
 	else {
 		msleep(20); /* XXX */
-		dpi_send_cmd(intel_dsi, TURN_ON);
+		dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
 		msleep(100);
 
+		if (intel_dsi->dev.dev_ops->enable)
+			intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+
 		/* assert ip_tg_enable signal */
 		temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
 		temp = temp | intel_dsi->port_bits;
 		I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
 		POSTING_READ(MIPI_PORT_CTRL(pipe));
 	}
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
 
-	if (intel_dsi->dev.dev_ops->enable)
-		intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+	/* Disable DPOunit clock gating, can stall pipe
+	 * and we need DPLL REFA always enabled */
+	tmp = I915_READ(DPLL(pipe));
+	tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+	I915_WRITE(DPLL(pipe), tmp);
+
+	tmp = I915_READ(DSPCLK_GATE_D);
+	tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, tmp);
+
+	/* put device in ready state */
+	intel_dsi_device_ready(encoder);
+
+	if (intel_dsi->dev.dev_ops->panel_reset)
+		intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+	if (intel_dsi->dev.dev_ops->send_otp_cmds)
+		intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+
+	/* Enable port in pre-enable phase itself because as per hw team
+	 * recommendation, port should be enabled before plane & pipe */
+	intel_dsi_enable(encoder);
+}
+
+static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	/* for DSI port enable has to be done before pipe
+	 * and plane enable, so port enable is done in
+	 * pre_enable phase itself unlike other encoders
+	 */
 }
 
 static void intel_dsi_disable(struct intel_encoder *encoder)
@@ -179,7 +218,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("\n");
 
 	if (is_vid_mode(intel_dsi)) {
-		dpi_send_cmd(intel_dsi, SHUTDOWN);
+		/* Send Shutdown command to the panel in LP mode */
+		dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
 		msleep(10);
 
 		/* de-assert ip_tg_enable signal */
@@ -190,6 +230,23 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 		msleep(2);
 	}
 
+	/* Panel commands can be sent when clock is in LP11 */
+	I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
+
+	temp = I915_READ(MIPI_CTRL(pipe));
+	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+	I915_WRITE(MIPI_CTRL(pipe), temp |
+			intel_dsi->escape_clk_div <<
+			ESCAPE_CLOCK_DIVIDER_SHIFT);
+
+	I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
+
+	temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
+	temp &= ~VID_MODE_FORMAT_MASK;
+	I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
+
+	I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
+
 	/* if disable packets are sent before sending shutdown packet then in
 	 * some next enable sequence send turn on packet error is observed */
 	if (intel_dsi->dev.dev_ops->disable)
@@ -227,14 +284,21 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 
 	vlv_disable_dsi_pll(encoder);
 }
+
 static void intel_dsi_post_disable(struct intel_encoder *encoder)
 {
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 val;
 
 	DRM_DEBUG_KMS("\n");
 
 	intel_dsi_clear_device_ready(encoder);
 
+	val = I915_READ(DSPCLK_GATE_D);
+	val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
+
 	if (intel_dsi->dev.dev_ops->disable_panel_power)
 		intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
 }
@@ -379,9 +443,6 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
 
 	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-	/* XXX: Location of the call */
-	band_gap_reset(dev_priv);
-
 	/* escape clock divider, 20MHz, shared for A and C. device ready must be
 	 * off when doing this! txclkesc? */
 	tmp = I915_READ(MIPI_CTRL(0));
@@ -452,10 +513,17 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
 	/* dphy stuff */
 
 	/* in terms of low power clock */
-	I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+	I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
+
+	val = 0;
+	if (intel_dsi->eotp_pkt == 0)
+		val |= EOT_DISABLE;
+
+	if (intel_dsi->clock_stop)
+		val |= CLOCKSTOP;
 
 	/* recovery disables */
-	I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+	I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
 
 	/* in terms of txbyteclkhs. actual high to low switch +
 	 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
@@ -484,9 +552,14 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
 			intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
 	if (is_vid_mode(intel_dsi))
+		/* Some panels might have resolution which is not a multiple of
+		 * 64 like 1366 x 768. Enable RANDOM resolution support for such
+		 * panels by default */
 		I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
 				intel_dsi->video_frmt_cfg_bits |
-				intel_dsi->video_mode_format);
+				intel_dsi->video_mode_format |
+				IP_TG_CONFIG |
+				RANDOM_DPI_DISPLAY_RESOLUTION);
 }
 
 static enum drm_connector_status
@@ -594,7 +667,7 @@ bool intel_dsi_init(struct drm_device *dev)
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
-	intel_encoder->enable = intel_dsi_enable;
+	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->mode_set = intel_dsi_mode_set;
 	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
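The intel_dsi.c reshuffle above moves the whole port bring-up into pre_enable (the hardware wants the DSI port running before pipe and plane come up) and leaves the enable hook as a documented nop. A stand-alone sketch of the hook-table shape, with a simplified struct rather than the driver's intel_encoder:

    #include <stdio.h>

    struct encoder_hooks {
            void (*pre_enable)(void);
            void (*enable)(void);
    };

    static void dsi_pre_enable(void)
    {
            printf("DSI: port on before pipe/plane enable\n");
    }

    static void dsi_enable_nop(void)
    {
            /* intentionally empty: the port is already running */
    }

    int main(void)
    {
            struct encoder_hooks dsi = {
                    .pre_enable = dsi_pre_enable,
                    .enable = dsi_enable_nop,
            };

            dsi.pre_enable();   /* modeset calls this first */
            dsi.enable();       /* then this, now a nop for DSI */
            return 0;
    }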
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index b4a27cec882f..550714c7860e 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -95,8 +95,10 @@ struct intel_dsi {
 	u32 video_mode_format;
 
 	/* eot for MIPI_EOT_DISABLE register */
-	u32 eot_disable;
+	u8 eotp_pkt;
+	u8 clock_stop;
 
+	u8 escape_clk_div;
 	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 7c40f981d2c7..3eeb21b9fddf 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -389,7 +389,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
  *
  * XXX: commands with data in MIPI_DPI_DATA?
  */
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
 {
 	struct drm_encoder *encoder = &intel_dsi->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -399,7 +399,7 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
 	u32 mask;
 
 	/* XXX: pipe, hs */
-	if (intel_dsi->hs)
+	if (hs)
 		cmd &= ~DPI_LP_MODE;
 	else
 		cmd |= DPI_LP_MODE;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 54c8a234a2e0..9a18cbfa5460 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include "intel_dsi.h"
 
+#define DPI_LP_MODE_EN false
+#define DPI_HS_MODE_EN true
+
 void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
 
 int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
@@ -47,7 +50,7 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
 int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
 			u8 *reqdata, int reqlen, u8 *buf, int buflen);
 
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
 
 /* XXX: questionable write helpers */
 static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
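The DPI_LP_MODE_EN/DPI_HS_MODE_EN defines above exist so that dpi_send_cmd() call sites read as intent rather than a bare true/false. Minimal stand-alone illustration (the command value is invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define DPI_LP_MODE_EN false
    #define DPI_HS_MODE_EN true

    static void dpi_send_cmd_sketch(unsigned int cmd, bool hs)
    {
            printf("cmd %#x sent in %s mode\n", cmd, hs ? "HS" : "LP");
    }

    int main(void)
    {
            /* reads better than dpi_send_cmd_sketch(0x2, false) */
            dpi_send_cmd_sketch(0x2 /* illustrative SHUTDOWN value */,
                                DPI_LP_MODE_EN);
            return 0;
    }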
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 157267aa3561..b606162cc17c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -557,10 +557,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
+	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
@@ -576,9 +578,19 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 		return;
 	}
 
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		if (val & VIDEO_DIP_ENABLE) {
+			val &= ~VIDEO_DIP_ENABLE;
+			I915_WRITE(reg, val);
+			POSTING_READ(reg);
+		}
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
+		 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -638,8 +650,8 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 	else
 		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
-	/* Required on CPT */
-	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+	if (intel_hdmi->has_hdmi_sink &&
+	    (HAS_PCH_CPT(dev) || IS_VALLEYVIEW(dev)))
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
 	if (intel_hdmi->has_audio) {
@@ -657,8 +669,6 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 
 	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
 	POSTING_READ(intel_hdmi->hdmi_reg);
-
-	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
 }
 
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -1104,13 +1114,26 @@ done:
 	return 0;
 }
 
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+
+	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+}
+
 static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	u32 val;
@@ -1144,6 +1167,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+
 	intel_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport);
@@ -1339,6 +1364,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
+		intel_encoder->pre_enable = intel_hdmi_pre_enable;
 		intel_encoder->enable = intel_enable_hdmi;
 	}
 
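On the vlv_set_infoframes() change above: when the DIP port select field has to move to another port while DIP is live, the code disables DIP with one register write first, then reprograms the port bits. A stand-alone model of that ordering (the masks are invented for the sketch, not the real register layout):

    #include <stdint.h>
    #include <stdio.h>

    #define DIP_ENABLE    (1u << 31)
    #define DIP_PORT_MASK (3u << 29)

    static uint32_t mmio;              /* stand-in for the DIP ctl register */

    static void write_reg(uint32_t v) { mmio = v; }

    static void steer_dip(uint32_t port)
    {
            uint32_t val = mmio;

            if (port != (val & DIP_PORT_MASK)) {
                    if (val & DIP_ENABLE) {
                            /* disable first: moving a live DIP misbehaves */
                            val &= ~DIP_ENABLE;
                            write_reg(val);
                    }
                    val &= ~DIP_PORT_MASK;
                    val |= port;
            }
            write_reg(val | DIP_ENABLE);
    }

    int main(void)
    {
            mmio = DIP_ENABLE | (1u << 29);   /* live on one port */
            steer_dip(2u << 29);              /* move to another */
            printf("final reg: %#x\n", mmio);
            return 0;
    }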
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1ecf916474a..1b1541dfb440 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,13 +111,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
-	/* gen2/3 store dither state in pfit control, needs to match */
-	if (INTEL_INFO(dev)->gen < 4) {
-		tmp = I915_READ(PFIT_CONTROL);
-
-		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	dotclock = pipe_config->port_clock;
 
 	if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0eead16aeda7..44ad415e3706 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -308,16 +308,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
 				 PFIT_FILTER_FUZZY);
 
+	/* Make sure pre-965 set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 out:
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
 
-	/* Make sure pre-965 set dither correctly for 18bpp panels. */
-	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 	pipe_config->gmch_pfit.control = pfit_control;
 	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
 	pipe_config->gmch_pfit.lvds_border_bits = border;
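The intel_panel.c hunk is purely an ordering fix: OR-ing the dither bit in after the "fitter disabled, zero everything" clamp could leave a stale PANEL_8TO6_DITHER_ENABLE in an otherwise-disabled pfit value, which is what the now-removed gen2/3 readout hack in intel_lvds.c had been compensating for. The pitfall in isolation, as a stand-alone toy with made-up bits:

    #include <stdint.h>
    #include <stdio.h>

    #define FIT_ENABLE (1u << 31)
    #define DITHER     (1u << 3)

    int main(void)
    {
            uint32_t ctl = 0;              /* fitter never enabled */

            /* old order: clamp, then dither -> leaks a stray bit */
            uint32_t old_ctl = ctl;
            if (!(old_ctl & FIT_ENABLE))
                    old_ctl = 0;
            old_ctl |= DITHER;

            /* new order: dither, then clamp -> cleanly zero */
            uint32_t new_ctl = ctl | DITHER;
            if (!(new_ctl & FIT_ENABLE))
                    new_ctl = 0;

            printf("old=%#x new=%#x\n", old_ctl, new_ctl); /* old=0x8 new=0 */
            return 0;
    }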
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 19e94c3edc19..75c1c766b507 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1831,6 +1831,40 @@ static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 		return 512;
 }
 
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+					 int level, bool is_sprite)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		/* BDW primary/sprite plane watermarks */
+		return level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
+		/* IVB/HSW primary/sprite plane watermarks */
+		return level == 0 ? 127 : 1023;
+	else if (!is_sprite)
+		/* ILK/SNB primary plane watermarks */
+		return level == 0 ? 127 : 511;
+	else
+		/* ILK/SNB sprite plane watermarks */
+		return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+					  int level)
+{
+	if (INTEL_INFO(dev)->gen >= 7)
+		return level == 0 ? 63 : 255;
+	else
+		return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
+}
+
 /* Calculate the maximum primary/sprite plane watermark */
 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 				     int level,
@@ -1839,7 +1873,6 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 				     bool is_sprite)
 {
 	unsigned int fifo_size = ilk_display_fifo_size(dev);
-	unsigned int max;
 
 	/* if sprites aren't enabled, sprites get nothing */
 	if (is_sprite && !config->sprites_enabled)
@@ -1870,19 +1903,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 	}
 
 	/* clamp to max that the registers can hold */
-	if (INTEL_INFO(dev)->gen >= 8)
-		max = level == 0 ? 255 : 2047;
-	else if (INTEL_INFO(dev)->gen >= 7)
-		/* IVB/HSW primary/sprite plane watermarks */
-		max = level == 0 ? 127 : 1023;
-	else if (!is_sprite)
-		/* ILK/SNB primary plane watermarks */
-		max = level == 0 ? 127 : 511;
-	else
-		/* ILK/SNB sprite plane watermarks */
-		max = level == 0 ? 63 : 255;
-
-	return min(fifo_size, max);
+	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
 }
 
 /* Calculate the maximum cursor plane watermark */
@@ -1895,20 +1916,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 		return 64;
 
 	/* otherwise just report max that registers can hold */
-	if (INTEL_INFO(dev)->gen >= 7)
-		return level == 0 ? 63 : 255;
-	else
-		return level == 0 ? 31 : 63;
-}
-
-/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
-{
-	/* max that registers can hold */
-	if (INTEL_INFO(dev)->gen >= 8)
-		return 31;
-	else
-		return 15;
+	return ilk_cursor_wm_reg_max(dev, level);
 }
 
 static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1920,7 +1928,7 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
 	max->cur = ilk_cursor_wm_max(dev, level, config);
-	max->fbc = ilk_fbc_wm_max(dev);
+	max->fbc = ilk_fbc_wm_reg_max(dev);
 }
 
 static bool ilk_validate_wm_level(int level,
@@ -2115,38 +2123,52 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
-				      struct ilk_pipe_wm_parameters *p,
-				      struct intel_wm_config *config)
+				      struct ilk_pipe_wm_parameters *p)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct drm_plane *plane;
 
-	p->active = intel_crtc_active(crtc);
-	if (p->active) {
-		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
-		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
-		p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
-		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
-		p->cur.horiz_pixels = intel_crtc->cursor_width;
-		/* TODO: for now, assume primary and cursor planes are always enabled. */
-		p->pri.enabled = true;
-		p->cur.enabled = true;
-	}
+	if (!intel_crtc_active(crtc))
+		return;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		config->num_pipes_active += intel_crtc_active(crtc);
+	p->active = true;
+	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
+	p->cur.bytes_per_pixel = 4;
+	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+	p->cur.horiz_pixels = intel_crtc->cursor_width;
+	/* TODO: for now, assume primary and cursor planes are always enabled. */
+	p->pri.enabled = true;
+	p->cur.enabled = true;
 
 	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
 
-		if (intel_plane->pipe == pipe)
+		if (intel_plane->pipe == pipe) {
 			p->spr = intel_plane->wm;
+			break;
+		}
+	}
+}
+
+static void ilk_compute_wm_config(struct drm_device *dev,
+				  struct intel_wm_config *config)
+{
+	struct intel_crtc *intel_crtc;
+
+	/* Compute the currently _active_ config */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
 
-		config->sprites_enabled |= intel_plane->wm.enabled;
-		config->sprites_scaled |= intel_plane->wm.scaled;
+		if (!wm->pipe_enabled)
+			continue;
+
+		config->sprites_enabled |= wm->sprites_enabled;
+		config->sprites_scaled |= wm->sprites_scaled;
+		config->num_pipes_active++;
 	}
 }
@@ -2169,6 +2191,10 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2169 /* LP0 watermarks always use 1/2 DDB partitioning */ 2191 /* LP0 watermarks always use 1/2 DDB partitioning */
2170 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2192 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2171 2193
2194 pipe_wm->pipe_enabled = params->active;
2195 pipe_wm->sprites_enabled = params->spr.enabled;
2196 pipe_wm->sprites_scaled = params->spr.scaled;
2197
2172 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2198 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2173 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2199 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2174 max_level = 1; 2200 max_level = 1;
@@ -2198,8 +2224,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
2198 const struct intel_crtc *intel_crtc; 2224 const struct intel_crtc *intel_crtc;
2199 2225
2200 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { 2226 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2201 const struct intel_wm_level *wm = 2227 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2202 &intel_crtc->wm.active.wm[level]; 2228 const struct intel_wm_level *wm = &active->wm[level];
2229
2230 if (!active->pipe_enabled)
2231 continue;
2203 2232
2204 if (!wm->enable) 2233 if (!wm->enable)
2205 return; 2234 return;
@@ -2558,7 +2587,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
2558 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 2587 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2559 struct intel_wm_config config = {}; 2588 struct intel_wm_config config = {};
2560 2589
2561 ilk_compute_wm_parameters(crtc, &params, &config); 2590 ilk_compute_wm_parameters(crtc, &params);
2562 2591
2563 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 2592 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2564 2593
@@ -2567,6 +2596,8 @@ static void ilk_update_wm(struct drm_crtc *crtc)
2567 2596
2568 intel_crtc->wm.active = pipe_wm; 2597 intel_crtc->wm.active = pipe_wm;
2569 2598
2599 ilk_compute_wm_config(dev, &config);
2600
2570 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 2601 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2571 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 2602 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2572 2603
@@ -2633,7 +2664,9 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2633 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2664 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2634 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 2665 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2635 2666
2636 if (intel_crtc_active(crtc)) { 2667 active->pipe_enabled = intel_crtc_active(crtc);
2668
2669 if (active->pipe_enabled) {
2637 u32 tmp = hw->wm_pipe[pipe]; 2670 u32 tmp = hw->wm_pipe[pipe];
2638 2671
2639 /* 2672 /*
@@ -2674,8 +2707,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
2674 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 2707 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2675 2708
2676 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 2709 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2677 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 2710 if (INTEL_INFO(dev)->gen >= 7) {
2678 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 2711 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2712 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2713 }
2679 2714
2680 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2715 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2681 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 2716 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
@@ -3051,7 +3086,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	if (val != dev_priv->rps.cur_freq) {
 		gen6_set_rps_thresholds(dev_priv, val);
 
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			I915_WRITE(GEN6_RPNSWREQ,
 				   HSW_FREQUENCY(val));
 		else
@@ -3252,6 +3287,27 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
+{
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+	/* XXX: only BYT has a special efficient freq */
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+}
+
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
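
The fields parse_rp_state_cap() extracts are plain 8-bit values in units of 50 MHz, read out of RP_STATE_CAP. A detached sketch of the same decode (the register value here is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rp_state_cap = 0x040b16;	/* hypothetical readout */
	unsigned int rp0 = (rp_state_cap >>  0) & 0xff;	/* max non-overclocked */
	unsigned int rp1 = (rp_state_cap >>  8) & 0xff;	/* "efficient" freq on most parts */
	unsigned int rpn = (rp_state_cap >> 16) & 0xff;	/* minimum */

	/* all fields are in 50 MHz units */
	printf("RP0 %u MHz, RP1 %u MHz, RPn %u MHz\n",
	       rp0 * 50, rp1 * 50, rpn * 50);
	return 0;
}
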
@@ -3270,6 +3326,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* 2b: Program RC6 thresholds.*/
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3289,8 +3346,10 @@ static void gen8_enable_rps(struct drm_device *dev)
 		   rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
-	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
-	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+	I915_WRITE(GEN6_RPNSWREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
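
The dropped /* Request 500 MHz */ comments only made sense because HSW_FREQUENCY() takes a value in 50 MHz units, so HSW_FREQUENCY(10) was a hardcoded 500 MHz request; using rp1_freq requests what the hardware actually reported. A sketch of the encoding, assuming the HSW_FREQUENCY() definition of this era (frequency request in bits 31:24):

#define HSW_FREQUENCY(x)	((x) << 24)	/* assumed, from i915_reg.h */

/* Decode a GEN6_RPNSWREQ value back to MHz (50 MHz units). */
static unsigned int hsw_req_to_mhz(unsigned int req)
{
	return ((req >> 24) & 0xff) * 50;	/* HSW_FREQUENCY(10) -> 500 */
}
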
@@ -3356,23 +3415,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* All of these values are in units of 50MHz */
-	dev_priv->rps.cur_freq = 0;
-	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
-	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
-	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
-	/* XXX: only BYT has a special efficient freq */
-	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	/* hw_max = RP0 until we check for overclocking */
-	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4626,6 +4669,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ilk */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 
 	ibx_init_clock_gating(dev);
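
All of the WaDisable_RenderCache_OperationalFlush writes in this patch go through i915's masked registers: the top 16 bits of the written value select which of the low 16 bits the hardware actually updates, so a single write can clear one bit without a read-modify-write. The helpers, as they are defined in i915_reg.h:

/* upper half = write-enable mask, lower half = new value */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */

/* _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE) therefore clears only that
 * bit; bits whose mask half is 0 are left untouched by the hardware. */
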
@@ -4701,6 +4747,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN6_GT_MODE,
 		   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/*
 	 * BSpec recoomends 8x4 when MSAA is used,
 	 * however in practice 16x4 seems fastest.
@@ -4940,6 +4989,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_FF_THREAD_MODE,
 		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 
+	/* WaDisable_RenderCache_OperationalFlush:hsw */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* enable HiZ Raw Stall Optimization */
 	I915_WRITE(CACHE_MODE_0_GEN7,
 		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
@@ -4992,6 +5044,9 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ivb */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -5086,6 +5141,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
+	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
+			 dev_priv->vlv_cdclk_freq);
+
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5103,6 +5162,9 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
 				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:vlv */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* WaForceL3Serialization:vlv */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -5172,6 +5234,9 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:g4x */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 }
 
@@ -5186,6 +5251,9 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 	I915_WRITE16(DEUC, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
@@ -5200,6 +5268,9 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(RENCLK_GATE_D2, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void gen3_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 79fb4cc2137c..eb3dd26b94de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,12 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+static bool intel_ring_stopped(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
 	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+	if (intel_ring_stopped(ring))
 		return;
 	ring->write_tail(ring, ring->tail);
 }
@@ -601,13 +605,15 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
+	/* WaEnableFlushTlbInvalidationMode:snb */
 	if (INTEL_INFO(dev)->gen == 6)
 		I915_WRITE(GFX_MODE,
-			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
+	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
 	if (IS_GEN7(dev))
 		I915_WRITE(GFX_MODE_GEN7,
-			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
@@ -624,13 +630,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	 */
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
-	/* This is not explicitly set for GEN6, so read the register.
-	 * see intel_ring_mi_set_context() for why we care.
-	 * TODO: consider explicitly setting the bit for GEN5
-	 */
-	ring->itlb_before_ctx_switch =
-		!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2b91c4b4d34b..413cdc74ed53 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -153,10 +153,6 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 
-	/**
-	 * Do an explicit TLB flush before MI_SET_CONTEXT
-	 */
-	bool itlb_before_ctx_switch;
 	struct i915_hw_context *default_context;
 	struct i915_hw_context *last_context;
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d27155adf5db..46be00d66df3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	if (ret < 0)
 		goto err1;
 
-	ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-				&drm_connector->kdev->kobj,
+	ret = sysfs_create_link(&drm_connector->kdev->kobj,
+				&encoder->ddc.dev.kobj,
 				encoder->ddc.dev.kobj.name);
 	if (ret < 0)
 		goto err2;
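
The hunk above fixes an argument-order bug: per include/linux/sysfs.h the link is created in the first kobject and points at the second, so the old call created the symlink under the DDC i2c adapter instead of under the connector.

/* prototype from include/linux/sysfs.h: creates kobj/name -> target */
int sysfs_create_link(struct kobject *kobj, struct kobject *target,
		      const char *name);
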
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0954f132726e..b1a5514e695a 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -182,6 +182,14 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 
 	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_READ, reg, &val);
+
+	/*
+	 * FIXME: There might be some registers where all 1's is a valid value,
+	 * so ideally we should check the register offset instead...
+	 */
+	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
+	     pipe_name(pipe), reg, val);
+
 	return val;
 }
 
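
A sideband read from a powered-down PHY tends to float to all 1's, which is why 0xffffffff serves as a cheap went-nowhere sentinel here; the FIXME concedes it is not a perfect test. The check, as a detached one-liner:

#include <stdbool.h>
#include <stdint.h>

/* hypothetical helper: treat an all-ones readout as a dead read */
static inline bool dpio_read_looks_dead(uint32_t val)
{
	return val == UINT32_C(0xffffffff);
}
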
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f729dc71d5be..2a72bab106d5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -253,8 +253,7 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
 
 }
 
-void vlv_force_wake_get(struct drm_i915_private *dev_priv,
-			int fw_engine)
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;
 
@@ -273,8 +272,7 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void vlv_force_wake_put(struct drm_i915_private *dev_priv,
-			int fw_engine)
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;
 
@@ -486,6 +484,17 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	((reg) < 0x40000 && (reg) != FORCEWAKE)
 
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(((reg) >= 0x2000 && (reg) < 0x4000) || \
+	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
+	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
+	 ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+	(((reg) >= 0x12000 && (reg) < 0x14000) || \
+	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
+	 ((reg) >= 0x30000 && (reg) < 0x40000))
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
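
These range macros are presumably meant to be consumed by the VLV register access paths to decide which forcewake well a given offset needs. A hypothetical consumer (the FORCEWAKE_* flag values below are assumptions, not taken from this patch):

#define FORCEWAKE_RENDER	(1 << 0)	/* assumed flag values */
#define FORCEWAKE_MEDIA		(1 << 1)

static int vlv_fw_engine_for_reg(unsigned int reg)
{
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	return 0;	/* offset needs no forcewake */
}
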
@@ -852,12 +861,15 @@ void intel_uncore_fini(struct drm_device *dev)
 	intel_uncore_forcewake_reset(dev, false);
 }
 
+#define GEN_RANGE(l, h) GENMASK(h, l)
+
 static const struct register_whitelist {
 	uint64_t offset;
 	uint32_t size;
-	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+	uint32_t gen_bitmask;
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
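
GEN_RANGE(4, 8) expands to GENMASK(8, 4), bits 4 through 8 inclusive, which is exactly the 0x1F0 literal it replaces: gens 4 through 8. A quick self-check with a simplified GENMASK (the kernel's version also guards against shift overflow):

#define GENMASK(h, l)	((((1U << ((h) - (l) + 1)) - 1)) << (l))	/* simplified */
#define GEN_RANGE(l, h)	GENMASK(h, l)

_Static_assert(GEN_RANGE(4, 8) == 0x1F0, "mask unchanged by the rewrite");
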
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a7c2a862b4f4..f3c9a63e3707 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1310,7 +1310,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 void drm_clflush_sg(struct sg_table *st);
-void drm_clflush_virt_range(char *addr, unsigned long length);
+void drm_clflush_virt_range(void *addr, unsigned long length);
 
 				/* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct drm_device *dev, void *data,
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 126bfaa8bb6b..8a3e4ef00c3d 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -337,6 +337,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT	 26
 #define I915_PARAM_HAS_WT		 27
+#define I915_PARAM_CMD_PARSER_VERSION	 28
 
 typedef struct drm_i915_getparam {
 	int param;
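
With the new getparam, userspace can probe the command parser version before depending on it. A sketch of the query using libdrm's drmIoctl(); on an older kernel the ioctl fails, which callers can treat as version 0:

#include <errno.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int i915_cmd_parser_version(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;	/* e.g. pre-parser kernels reject the param */
	return value;
}
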