aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
commit8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
treea8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/gpu/drm/i915
parent406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/dvo.h23
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c23
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c29
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c588
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c22
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c32
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1205
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1349
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c813
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1132
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3691
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c533
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c32
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c305
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c793
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c699
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c201
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c80
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c2334
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1481
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c803
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c403
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h109
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c9
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c114
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h32
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c426
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1514
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9145
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2060
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h431
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c131
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c54
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c840
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c554
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c473
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c84
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c190
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c494
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c230
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4458
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1554
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h129
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c957
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h567
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c730
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c264
52 files changed, 13310 insertions, 28954 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242..0ae6a7c5020 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,45 +3,37 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
7 i915_debugfs.o \ 7 i915_debugfs.o \
8 i915_suspend.o \ 8 i915_suspend.o \
9 i915_gem.o \ 9 i915_gem.o \
10 i915_gem_context.o \
11 i915_gem_debug.o \ 10 i915_gem_debug.o \
12 i915_gem_evict.o \ 11 i915_gem_evict.o \
13 i915_gem_execbuffer.o \ 12 i915_gem_execbuffer.o \
14 i915_gem_gtt.o \ 13 i915_gem_gtt.o \
15 i915_gem_stolen.o \
16 i915_gem_tiling.o \ 14 i915_gem_tiling.o \
17 i915_sysfs.o \
18 i915_trace_points.o \ 15 i915_trace_points.o \
19 intel_display.o \ 16 intel_display.o \
20 intel_crt.o \ 17 intel_crt.o \
21 intel_lvds.o \ 18 intel_lvds.o \
22 intel_bios.o \ 19 intel_bios.o \
23 intel_ddi.o \
24 intel_dp.o \ 20 intel_dp.o \
25 intel_hdmi.o \ 21 intel_hdmi.o \
26 intel_sdvo.o \ 22 intel_sdvo.o \
27 intel_modes.o \ 23 intel_modes.o \
28 intel_panel.o \ 24 intel_panel.o \
29 intel_pm.o \
30 intel_i2c.o \ 25 intel_i2c.o \
31 intel_fb.o \ 26 intel_fb.o \
32 intel_tv.o \ 27 intel_tv.o \
33 intel_dvo.o \ 28 intel_dvo.o \
34 intel_ringbuffer.o \ 29 intel_ringbuffer.o \
35 intel_overlay.o \ 30 intel_overlay.o \
36 intel_sprite.o \
37 intel_opregion.o \ 31 intel_opregion.o \
38 dvo_ch7xxx.o \ 32 dvo_ch7xxx.o \
39 dvo_ch7017.o \ 33 dvo_ch7017.o \
40 dvo_ivch.o \ 34 dvo_ivch.o \
41 dvo_tfp410.o \ 35 dvo_tfp410.o \
42 dvo_sil164.o \ 36 dvo_sil164.o
43 dvo_ns2501.o \
44 i915_gem_dmabuf.o
45 37
46i915-$(CONFIG_COMPAT) += i915_ioc32.o 38i915-$(CONFIG_COMPAT) += i915_ioc32.o
47 39
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad8010..8c2ad014c47 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -24,8 +24,9 @@
24#define _INTEL_DVO_H 24#define _INTEL_DVO_H
25 25
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <drm/drmP.h> 27#include "drmP.h"
28#include <drm/drm_crtc.h> 28#include "drm.h"
29#include "drm_crtc.h"
29#include "intel_drv.h" 30#include "intel_drv.h"
30 31
31struct intel_dvo_device { 32struct intel_dvo_device {
@@ -57,12 +58,13 @@ struct intel_dvo_dev_ops {
57 void (*create_resources)(struct intel_dvo_device *dvo); 58 void (*create_resources)(struct intel_dvo_device *dvo);
58 59
59 /* 60 /*
60 * Turn on/off output. 61 * Turn on/off output or set intermediate power levels if available.
61 * 62 *
62 * Because none of our dvo drivers support an intermediate power levels, 63 * Unsupported intermediate modes drop to the lower power setting.
63 * we don't expose this in the interfac. 64 * If the mode is DPMSModeOff, the output must be disabled,
65 * as the DPLL may be disabled afterwards.
64 */ 66 */
65 void (*dpms)(struct intel_dvo_device *dvo, bool enable); 67 void (*dpms)(struct intel_dvo_device *dvo, int mode);
66 68
67 /* 69 /*
68 * Callback for testing a video mode for a given output. 70 * Callback for testing a video mode for a given output.
@@ -84,7 +86,7 @@ struct intel_dvo_dev_ops {
84 * buses with clock limitations. 86 * buses with clock limitations.
85 */ 87 */
86 bool (*mode_fixup)(struct intel_dvo_device *dvo, 88 bool (*mode_fixup)(struct intel_dvo_device *dvo,
87 const struct drm_display_mode *mode, 89 struct drm_display_mode *mode,
88 struct drm_display_mode *adjusted_mode); 90 struct drm_display_mode *adjusted_mode);
89 91
90 /* 92 /*
@@ -113,12 +115,6 @@ struct intel_dvo_dev_ops {
113 */ 115 */
114 enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); 116 enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
115 117
116 /*
117 * Probe the current hw status, returning true if the connected output
118 * is active.
119 */
120 bool (*get_hw_state)(struct intel_dvo_device *dev);
121
122 /** 118 /**
123 * Query the device for the modes it provides. 119 * Query the device for the modes it provides.
124 * 120 *
@@ -144,6 +140,5 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
144extern struct intel_dvo_dev_ops ivch_ops; 140extern struct intel_dvo_dev_ops ivch_ops;
145extern struct intel_dvo_dev_ops tfp410_ops; 141extern struct intel_dvo_dev_ops tfp410_ops;
146extern struct intel_dvo_dev_ops ch7017_ops; 142extern struct intel_dvo_dev_ops ch7017_ops;
147extern struct intel_dvo_dev_ops ns2501_ops;
148 143
149#endif /* _INTEL_DVO_H */ 144#endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 86b27d1d90c..d3e8c540f77 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
163}; 163};
164 164
165static void ch7017_dump_regs(struct intel_dvo_device *dvo); 165static void ch7017_dump_regs(struct intel_dvo_device *dvo);
166static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); 166static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
167 167
168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) 168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
169{ 169{
@@ -227,7 +227,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
227 default: 227 default:
228 DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " 228 DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
229 "slave %d.\n", 229 "slave %d.\n",
230 val, adapter->name, dvo->slave_addr); 230 val, adapter->name,dvo->slave_addr);
231 goto fail; 231 goto fail;
232 } 232 }
233 233
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | 309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
310 (mode->hdisplay & 0x0700) >> 8; 310 (mode->hdisplay & 0x0700) >> 8;
311 311
312 ch7017_dpms(dvo, false); 312 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, 313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
314 horizontal_active_pixel_input); 314 horizontal_active_pixel_input);
315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, 315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
331} 331}
332 332
333/* set the CH7017 power state */ 333/* set the CH7017 power state */
334static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) 334static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
335{ 335{
336 uint8_t val; 336 uint8_t val;
337 337
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
345 CH7017_DAC3_POWER_DOWN | 345 CH7017_DAC3_POWER_DOWN |
346 CH7017_TV_POWER_DOWN_EN); 346 CH7017_TV_POWER_DOWN_EN);
347 347
348 if (enable) { 348 if (mode == DRM_MODE_DPMS_ON) {
349 /* Turn on the LVDS */ 349 /* Turn on the LVDS */
350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, 350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
351 val & ~CH7017_LVDS_POWER_DOWN_EN); 351 val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,18 +359,6 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
359 msleep(20); 359 msleep(20);
360} 360}
361 361
362static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
363{
364 uint8_t val;
365
366 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
367
368 if (val & CH7017_LVDS_POWER_DOWN_EN)
369 return false;
370 else
371 return true;
372}
373
374static void ch7017_dump_regs(struct intel_dvo_device *dvo) 362static void ch7017_dump_regs(struct intel_dvo_device *dvo)
375{ 363{
376 uint8_t val; 364 uint8_t val;
@@ -408,7 +396,6 @@ struct intel_dvo_dev_ops ch7017_ops = {
408 .mode_valid = ch7017_mode_valid, 396 .mode_valid = ch7017_mode_valid,
409 .mode_set = ch7017_mode_set, 397 .mode_set = ch7017_mode_set,
410 .dpms = ch7017_dpms, 398 .dpms = ch7017_dpms,
411 .get_hw_state = ch7017_get_hw_state,
412 .dump_regs = ch7017_dump_regs, 399 .dump_regs = ch7017_dump_regs,
413 .destroy = ch7017_destroy, 400 .destroy = ch7017_destroy,
414}; 401};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 3edd981e077..7eaa94e4ff0 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -111,7 +111,7 @@ static char *ch7xxx_get_id(uint8_t vid)
111/** Reads an 8 bit register */ 111/** Reads an 8 bit register */
112static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 112static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
113{ 113{
114 struct ch7xxx_priv *ch7xxx = dvo->dev_priv; 114 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
115 struct i2c_adapter *adapter = dvo->i2c_bus; 115 struct i2c_adapter *adapter = dvo->i2c_bus;
116 u8 out_buf[2]; 116 u8 out_buf[2];
117 u8 in_buf[2]; 117 u8 in_buf[2];
@@ -289,33 +289,21 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
289} 289}
290 290
291/* set the CH7xxx power state */ 291/* set the CH7xxx power state */
292static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) 292static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
293{ 293{
294 if (enable) 294 if (mode == DRM_MODE_DPMS_ON)
295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); 295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
296 else 296 else
297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); 297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
298} 298}
299 299
300static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
301{
302 u8 val;
303
304 ch7xxx_readb(dvo, CH7xxx_PM, &val);
305
306 if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
307 return true;
308 else
309 return false;
310}
311
312static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 300static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
313{ 301{
314 int i; 302 int i;
315 303
316 for (i = 0; i < CH7xxx_NUM_REGS; i++) { 304 for (i = 0; i < CH7xxx_NUM_REGS; i++) {
317 uint8_t val; 305 uint8_t val;
318 if ((i % 8) == 0) 306 if ((i % 8) == 0 )
319 DRM_LOG_KMS("\n %02X: ", i); 307 DRM_LOG_KMS("\n %02X: ", i);
320 ch7xxx_readb(dvo, i, &val); 308 ch7xxx_readb(dvo, i, &val);
321 DRM_LOG_KMS("%02X ", val); 309 DRM_LOG_KMS("%02X ", val);
@@ -338,7 +326,6 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
338 .mode_valid = ch7xxx_mode_valid, 326 .mode_valid = ch7xxx_mode_valid,
339 .mode_set = ch7xxx_mode_set, 327 .mode_set = ch7xxx_mode_set,
340 .dpms = ch7xxx_dpms, 328 .dpms = ch7xxx_dpms,
341 .get_hw_state = ch7xxx_get_hw_state,
342 .dump_regs = ch7xxx_dump_regs, 329 .dump_regs = ch7xxx_dump_regs,
343 .destroy = ch7xxx_destroy, 330 .destroy = ch7xxx_destroy,
344}; 331};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index baaf65bf0bd..a12ed9414cc 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
288} 288}
289 289
290/** Sets the power state of the panel connected to the ivch */ 290/** Sets the power state of the panel connected to the ivch */
291static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) 291static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
292{ 292{
293 int i; 293 int i;
294 uint16_t vr01, vr30, backlight; 294 uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
297 if (!ivch_read(dvo, VR01, &vr01)) 297 if (!ivch_read(dvo, VR01, &vr01))
298 return; 298 return;
299 299
300 if (enable) 300 if (mode == DRM_MODE_DPMS_ON)
301 backlight = 1; 301 backlight = 1;
302 else 302 else
303 backlight = 0; 303 backlight = 0;
304 ivch_write(dvo, VR80, backlight); 304 ivch_write(dvo, VR80, backlight);
305 305
306 if (enable) 306 if (mode == DRM_MODE_DPMS_ON)
307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; 307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
308 else 308 else
309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); 309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
315 if (!ivch_read(dvo, VR30, &vr30)) 315 if (!ivch_read(dvo, VR30, &vr30))
316 break; 316 break;
317 317
318 if (((vr30 & VR30_PANEL_ON) != 0) == enable) 318 if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
319 break; 319 break;
320 udelay(1000); 320 udelay(1000);
321 } 321 }
@@ -323,20 +323,6 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
323 udelay(16 * 1000); 323 udelay(16 * 1000);
324} 324}
325 325
326static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
327{
328 uint16_t vr01;
329
330 /* Set the new power state of the panel. */
331 if (!ivch_read(dvo, VR01, &vr01))
332 return false;
333
334 if (vr01 & VR01_LCD_ENABLE)
335 return true;
336 else
337 return false;
338}
339
340static void ivch_mode_set(struct intel_dvo_device *dvo, 326static void ivch_mode_set(struct intel_dvo_device *dvo,
341 struct drm_display_mode *mode, 327 struct drm_display_mode *mode,
342 struct drm_display_mode *adjusted_mode) 328 struct drm_display_mode *adjusted_mode)
@@ -358,8 +344,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
358 (adjusted_mode->hdisplay - 1)) >> 2; 344 (adjusted_mode->hdisplay - 1)) >> 2;
359 y_ratio = (((mode->vdisplay - 1) << 16) / 345 y_ratio = (((mode->vdisplay - 1) << 16) /
360 (adjusted_mode->vdisplay - 1)) >> 2; 346 (adjusted_mode->vdisplay - 1)) >> 2;
361 ivch_write(dvo, VR42, x_ratio); 347 ivch_write (dvo, VR42, x_ratio);
362 ivch_write(dvo, VR41, y_ratio); 348 ivch_write (dvo, VR41, y_ratio);
363 } else { 349 } else {
364 vr01 &= ~VR01_PANEL_FIT_ENABLE; 350 vr01 &= ~VR01_PANEL_FIT_ENABLE;
365 vr40 &= ~VR40_CLOCK_GATING_ENABLE; 351 vr40 &= ~VR40_CLOCK_GATING_ENABLE;
@@ -424,10 +410,9 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
424 } 410 }
425} 411}
426 412
427struct intel_dvo_dev_ops ivch_ops = { 413struct intel_dvo_dev_ops ivch_ops= {
428 .init = ivch_init, 414 .init = ivch_init,
429 .dpms = ivch_dpms, 415 .dpms = ivch_dpms,
430 .get_hw_state = ivch_get_hw_state,
431 .mode_valid = ivch_mode_valid, 416 .mode_valid = ivch_mode_valid,
432 .mode_set = ivch_mode_set, 417 .mode_set = ivch_mode_set,
433 .detect = ivch_detect, 418 .detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
deleted file mode 100644
index c4a255be697..00000000000
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ /dev/null
@@ -1,588 +0,0 @@
1/*
2 *
3 * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "dvo.h"
30#include "i915_reg.h"
31#include "i915_drv.h"
32
33#define NS2501_VID 0x1305
34#define NS2501_DID 0x6726
35
36#define NS2501_VID_LO 0x00
37#define NS2501_VID_HI 0x01
38#define NS2501_DID_LO 0x02
39#define NS2501_DID_HI 0x03
40#define NS2501_REV 0x04
41#define NS2501_RSVD 0x05
42#define NS2501_FREQ_LO 0x06
43#define NS2501_FREQ_HI 0x07
44
45#define NS2501_REG8 0x08
46#define NS2501_8_VEN (1<<5)
47#define NS2501_8_HEN (1<<4)
48#define NS2501_8_DSEL (1<<3)
49#define NS2501_8_BPAS (1<<2)
50#define NS2501_8_RSVD (1<<1)
51#define NS2501_8_PD (1<<0)
52
53#define NS2501_REG9 0x09
54#define NS2501_9_VLOW (1<<7)
55#define NS2501_9_MSEL_MASK (0x7<<4)
56#define NS2501_9_TSEL (1<<3)
57#define NS2501_9_RSEN (1<<2)
58#define NS2501_9_RSVD (1<<1)
59#define NS2501_9_MDI (1<<0)
60
61#define NS2501_REGC 0x0c
62
63struct ns2501_priv {
64 //I2CDevRec d;
65 bool quiet;
66 int reg_8_shadow;
67 int reg_8_set;
68 // Shadow registers for i915
69 int dvoc;
70 int pll_a;
71 int srcdim;
72 int fw_blc;
73};
74
75#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
76
77/*
78 * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
79 * laptops does not react on the i2c bus unless
80 * both the PLL is running and the display is configured in its native
81 * resolution.
82 * This function forces the DVO on, and stores the registers it touches.
83 * Afterwards, registers are restored to regular values.
84 *
85 * This is pretty much a hack, though it works.
86 * Without that, ns2501_readb and ns2501_writeb fail
87 * when switching the resolution.
88 */
89
90static void enable_dvo(struct intel_dvo_device *dvo)
91{
92 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
93 struct i2c_adapter *adapter = dvo->i2c_bus;
94 struct intel_gmbus *bus = container_of(adapter,
95 struct intel_gmbus,
96 adapter);
97 struct drm_i915_private *dev_priv = bus->dev_priv;
98
99 DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
100
101 ns->dvoc = I915_READ(DVO_C);
102 ns->pll_a = I915_READ(_DPLL_A);
103 ns->srcdim = I915_READ(DVOC_SRCDIM);
104 ns->fw_blc = I915_READ(FW_BLC);
105
106 I915_WRITE(DVOC, 0x10004084);
107 I915_WRITE(_DPLL_A, 0xd0820000);
108 I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
109 I915_WRITE(FW_BLC, 0x1080304);
110
111 I915_WRITE(DVOC, 0x90004084);
112}
113
114/*
115 * Restore the I915 registers modified by the above
116 * trigger function.
117 */
118static void restore_dvo(struct intel_dvo_device *dvo)
119{
120 struct i2c_adapter *adapter = dvo->i2c_bus;
121 struct intel_gmbus *bus = container_of(adapter,
122 struct intel_gmbus,
123 adapter);
124 struct drm_i915_private *dev_priv = bus->dev_priv;
125 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
126
127 I915_WRITE(DVOC, ns->dvoc);
128 I915_WRITE(_DPLL_A, ns->pll_a);
129 I915_WRITE(DVOC_SRCDIM, ns->srcdim);
130 I915_WRITE(FW_BLC, ns->fw_blc);
131}
132
133/*
134** Read a register from the ns2501.
135** Returns true if successful, false otherwise.
136** If it returns false, it might be wise to enable the
137** DVO with the above function.
138*/
139static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
140{
141 struct ns2501_priv *ns = dvo->dev_priv;
142 struct i2c_adapter *adapter = dvo->i2c_bus;
143 u8 out_buf[2];
144 u8 in_buf[2];
145
146 struct i2c_msg msgs[] = {
147 {
148 .addr = dvo->slave_addr,
149 .flags = 0,
150 .len = 1,
151 .buf = out_buf,
152 },
153 {
154 .addr = dvo->slave_addr,
155 .flags = I2C_M_RD,
156 .len = 1,
157 .buf = in_buf,
158 }
159 };
160
161 out_buf[0] = addr;
162 out_buf[1] = 0;
163
164 if (i2c_transfer(adapter, msgs, 2) == 2) {
165 *ch = in_buf[0];
166 return true;
167 };
168
169 if (!ns->quiet) {
170 DRM_DEBUG_KMS
171 ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
172 adapter->name, dvo->slave_addr);
173 }
174
175 return false;
176}
177
178/*
179** Write a register to the ns2501.
180** Returns true if successful, false otherwise.
181** If it returns false, it might be wise to enable the
182** DVO with the above function.
183*/
184static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
185{
186 struct ns2501_priv *ns = dvo->dev_priv;
187 struct i2c_adapter *adapter = dvo->i2c_bus;
188 uint8_t out_buf[2];
189
190 struct i2c_msg msg = {
191 .addr = dvo->slave_addr,
192 .flags = 0,
193 .len = 2,
194 .buf = out_buf,
195 };
196
197 out_buf[0] = addr;
198 out_buf[1] = ch;
199
200 if (i2c_transfer(adapter, &msg, 1) == 1) {
201 return true;
202 }
203
204 if (!ns->quiet) {
205 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
206 addr, adapter->name, dvo->slave_addr);
207 }
208
209 return false;
210}
211
212/* National Semiconductor 2501 driver for chip on i2c bus
213 * scan for the chip on the bus.
214 * Hope the VBIOS initialized the PLL correctly so we can
215 * talk to it. If not, it will not be seen and not detected.
216 * Bummer!
217 */
218static bool ns2501_init(struct intel_dvo_device *dvo,
219 struct i2c_adapter *adapter)
220{
221 /* this will detect the NS2501 chip on the specified i2c bus */
222 struct ns2501_priv *ns;
223 unsigned char ch;
224
225 ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
226 if (ns == NULL)
227 return false;
228
229 dvo->i2c_bus = adapter;
230 dvo->dev_priv = ns;
231 ns->quiet = true;
232
233 if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
234 goto out;
235
236 if (ch != (NS2501_VID & 0xff)) {
237 DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
238 ch, adapter->name, dvo->slave_addr);
239 goto out;
240 }
241
242 if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
243 goto out;
244
245 if (ch != (NS2501_DID & 0xff)) {
246 DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
247 ch, adapter->name, dvo->slave_addr);
248 goto out;
249 }
250 ns->quiet = false;
251 ns->reg_8_set = 0;
252 ns->reg_8_shadow =
253 NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
254
255 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
256 return true;
257
258out:
259 kfree(ns);
260 return false;
261}
262
263static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
264{
265 /*
266 * This is a Laptop display, it doesn't have hotplugging.
267 * Even if not, the detection bit of the 2501 is unreliable as
268 * it only works for some display types.
269 * It is even more unreliable as the PLL must be active for
270 * allowing reading from the chiop.
271 */
272 return connector_status_connected;
273}
274
275static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
276 struct drm_display_mode *mode)
277{
278 DRM_DEBUG_KMS
279 ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
280 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
281 mode->vtotal);
282
283 /*
284 * Currently, these are all the modes I have data from.
285 * More might exist. Unclear how to find the native resolution
286 * of the panel in here so we could always accept it
287 * by disabling the scaler.
288 */
289 if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
290 (mode->hdisplay == 640 && mode->vdisplay == 480) ||
291 (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
292 return MODE_OK;
293 } else {
294 return MODE_ONE_SIZE; /* Is this a reasonable error? */
295 }
296}
297
298static void ns2501_mode_set(struct intel_dvo_device *dvo,
299 struct drm_display_mode *mode,
300 struct drm_display_mode *adjusted_mode)
301{
302 bool ok;
303 bool restore = false;
304 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
305
306 DRM_DEBUG_KMS
307 ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
308 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
309 mode->vtotal);
310
311 /*
312 * Where do I find the native resolution for which scaling is not required???
313 *
314 * First trigger the DVO on as otherwise the chip does not appear on the i2c
315 * bus.
316 */
317 do {
318 ok = true;
319
320 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
321 /* mode 277 */
322 ns->reg_8_shadow &= ~NS2501_8_BPAS;
323 DRM_DEBUG_KMS("%s: switching to 800x600\n",
324 __FUNCTION__);
325
326 /*
327 * No, I do not know where this data comes from.
328 * It is just what the video bios left in the DVO, so
329 * I'm just copying it here over.
330 * This also means that I cannot support any other modes
331 * except the ones supported by the bios.
332 */
333 ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
334 ok &= ns2501_writeb(dvo, 0x1b, 0x19);
335 ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
336 ok &= ns2501_writeb(dvo, 0x1d, 0x02);
337
338 ok &= ns2501_writeb(dvo, 0x34, 0x03);
339 ok &= ns2501_writeb(dvo, 0x35, 0xff);
340
341 ok &= ns2501_writeb(dvo, 0x80, 0x27);
342 ok &= ns2501_writeb(dvo, 0x81, 0x03);
343 ok &= ns2501_writeb(dvo, 0x82, 0x41);
344 ok &= ns2501_writeb(dvo, 0x83, 0x05);
345
346 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
347 ok &= ns2501_writeb(dvo, 0x8e, 0x04);
348 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
349
350 ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
351 ok &= ns2501_writeb(dvo, 0x91, 0x07);
352 ok &= ns2501_writeb(dvo, 0x94, 0x00);
353 ok &= ns2501_writeb(dvo, 0x95, 0x00);
354
355 ok &= ns2501_writeb(dvo, 0x96, 0x00);
356
357 ok &= ns2501_writeb(dvo, 0x99, 0x00);
358 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
359
360 ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
361 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
362 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
363 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
364
365 ok &= ns2501_writeb(dvo, 0xa4, 0x80);
366
367 ok &= ns2501_writeb(dvo, 0xb6, 0x00);
368
369 ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
370 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
371
372 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
373 ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
374
375 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
376 ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
377
378 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
379 ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
380
381 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
382 ok &= ns2501_writeb(dvo, 0xc7, 0x73);
383 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
384
385 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
386 /* mode 274 */
387 DRM_DEBUG_KMS("%s: switching to 640x480\n",
388 __FUNCTION__);
389 /*
390 * No, I do not know where this data comes from.
391 * It is just what the video bios left in the DVO, so
392 * I'm just copying it here over.
393 * This also means that I cannot support any other modes
394 * except the ones supported by the bios.
395 */
396 ns->reg_8_shadow &= ~NS2501_8_BPAS;
397
398 ok &= ns2501_writeb(dvo, 0x11, 0xa0);
399 ok &= ns2501_writeb(dvo, 0x1b, 0x11);
400 ok &= ns2501_writeb(dvo, 0x1c, 0x54);
401 ok &= ns2501_writeb(dvo, 0x1d, 0x03);
402
403 ok &= ns2501_writeb(dvo, 0x34, 0x03);
404 ok &= ns2501_writeb(dvo, 0x35, 0xff);
405
406 ok &= ns2501_writeb(dvo, 0x80, 0xff);
407 ok &= ns2501_writeb(dvo, 0x81, 0x07);
408 ok &= ns2501_writeb(dvo, 0x82, 0x3d);
409 ok &= ns2501_writeb(dvo, 0x83, 0x05);
410
411 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
412 ok &= ns2501_writeb(dvo, 0x8e, 0x10);
413 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
414
415 ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
416 ok &= ns2501_writeb(dvo, 0x91, 0x07);
417 ok &= ns2501_writeb(dvo, 0x94, 0x00);
418 ok &= ns2501_writeb(dvo, 0x95, 0x00);
419
420 ok &= ns2501_writeb(dvo, 0x96, 0x05);
421
422 ok &= ns2501_writeb(dvo, 0x99, 0x00);
423 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
424
425 ok &= ns2501_writeb(dvo, 0x9c, 0x24);
426 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
427 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
428 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
429
430 ok &= ns2501_writeb(dvo, 0xa4, 0x84);
431
432 ok &= ns2501_writeb(dvo, 0xb6, 0x09);
433
434 ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
435 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
436
437 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
438 ok &= ns2501_writeb(dvo, 0xc1, 0x90);
439
440 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
441 ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
442
443 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
444 ok &= ns2501_writeb(dvo, 0xc5, 0x16);
445
446 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
447 ok &= ns2501_writeb(dvo, 0xc7, 0x02);
448 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
449
450 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
451 /* mode 280 */
452 DRM_DEBUG_KMS("%s: switching to 1024x768\n",
453 __FUNCTION__);
454 /*
455 * This might or might not work, actually. I'm silently
456 * assuming here that the native panel resolution is
457 * 1024x768. If not, then this leaves the scaler disabled
458 * generating a picture that is likely not the expected.
459 *
460 * Problem is that I do not know where to take the panel
461 * dimensions from.
462 *
463 * Enable the bypass, scaling not required.
464 *
465 * The scaler registers are irrelevant here....
466 *
467 */
468 ns->reg_8_shadow |= NS2501_8_BPAS;
469 ok &= ns2501_writeb(dvo, 0x37, 0x44);
470 } else {
471 /*
472 * Data not known. Bummer!
473 * Hopefully, the code should not go here
474 * as mode_OK delivered no other modes.
475 */
476 ns->reg_8_shadow |= NS2501_8_BPAS;
477 }
478 ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
479
480 if (!ok) {
481 if (restore)
482 restore_dvo(dvo);
483 enable_dvo(dvo);
484 restore = true;
485 }
486 } while (!ok);
487 /*
488 * Restore the old i915 registers before
489 * forcing the ns2501 on.
490 */
491 if (restore)
492 restore_dvo(dvo);
493}
494
495/* set the NS2501 power state */
496static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
497{
498 unsigned char ch;
499
500 if (!ns2501_readb(dvo, NS2501_REG8, &ch))
501 return false;
502
503 if (ch & NS2501_8_PD)
504 return true;
505 else
506 return false;
507}
508
509/* set the NS2501 power state */
510static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
511{
512 bool ok;
513 bool restore = false;
514 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
515 unsigned char ch;
516
517 DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
518 __FUNCTION__, enable);
519
520 ch = ns->reg_8_shadow;
521
522 if (enable)
523 ch |= NS2501_8_PD;
524 else
525 ch &= ~NS2501_8_PD;
526
527 if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
528 ns->reg_8_set = 1;
529 ns->reg_8_shadow = ch;
530
531 do {
532 ok = true;
533 ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
534 ok &=
535 ns2501_writeb(dvo, 0x34,
536 enable ? 0x03 : 0x00);
537 ok &=
538 ns2501_writeb(dvo, 0x35,
539 enable ? 0xff : 0x00);
540 if (!ok) {
541 if (restore)
542 restore_dvo(dvo);
543 enable_dvo(dvo);
544 restore = true;
545 }
546 } while (!ok);
547
548 if (restore)
549 restore_dvo(dvo);
550 }
551}
552
553static void ns2501_dump_regs(struct intel_dvo_device *dvo)
554{
555 uint8_t val;
556
557 ns2501_readb(dvo, NS2501_FREQ_LO, &val);
558 DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
559 ns2501_readb(dvo, NS2501_FREQ_HI, &val);
560 DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
561 ns2501_readb(dvo, NS2501_REG8, &val);
562 DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
563 ns2501_readb(dvo, NS2501_REG9, &val);
564 DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
565 ns2501_readb(dvo, NS2501_REGC, &val);
566 DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
567}
568
569static void ns2501_destroy(struct intel_dvo_device *dvo)
570{
571 struct ns2501_priv *ns = dvo->dev_priv;
572
573 if (ns) {
574 kfree(ns);
575 dvo->dev_priv = NULL;
576 }
577}
578
579struct intel_dvo_dev_ops ns2501_ops = {
580 .init = ns2501_init,
581 .detect = ns2501_detect,
582 .mode_valid = ns2501_mode_valid,
583 .mode_set = ns2501_mode_set,
584 .dpms = ns2501_dpms,
585 .get_hw_state = ns2501_get_hw_state,
586 .dump_regs = ns2501_dump_regs,
587 .destroy = ns2501_destroy,
588};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 4debd32e3e4..e4b4091df94 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -104,7 +104,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
104 104
105static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 105static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
106{ 106{
107 struct sil164_priv *sil = dvo->dev_priv; 107 struct sil164_priv *sil= dvo->dev_priv;
108 struct i2c_adapter *adapter = dvo->i2c_bus; 108 struct i2c_adapter *adapter = dvo->i2c_bus;
109 uint8_t out_buf[2]; 109 uint8_t out_buf[2];
110 struct i2c_msg msg = { 110 struct i2c_msg msg = {
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
208} 208}
209 209
210/* set the SIL164 power state */ 210/* set the SIL164 power state */
211static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) 211static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
212{ 212{
213 int ret; 213 int ret;
214 unsigned char ch; 214 unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
217 if (ret == false) 217 if (ret == false)
218 return; 218 return;
219 219
220 if (enable) 220 if (mode == DRM_MODE_DPMS_ON)
221 ch |= SIL164_8_PD; 221 ch |= SIL164_8_PD;
222 else 222 else
223 ch &= ~SIL164_8_PD; 223 ch &= ~SIL164_8_PD;
@@ -226,21 +226,6 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
226 return; 226 return;
227} 227}
228 228
229static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
230{
231 int ret;
232 unsigned char ch;
233
234 ret = sil164_readb(dvo, SIL164_REG8, &ch);
235 if (ret == false)
236 return false;
237
238 if (ch & SIL164_8_PD)
239 return true;
240 else
241 return false;
242}
243
244static void sil164_dump_regs(struct intel_dvo_device *dvo) 229static void sil164_dump_regs(struct intel_dvo_device *dvo)
245{ 230{
246 uint8_t val; 231 uint8_t val;
@@ -273,7 +258,6 @@ struct intel_dvo_dev_ops sil164_ops = {
273 .mode_valid = sil164_mode_valid, 258 .mode_valid = sil164_mode_valid,
274 .mode_set = sil164_mode_set, 259 .mode_set = sil164_mode_set,
275 .dpms = sil164_dpms, 260 .dpms = sil164_dpms,
276 .get_hw_state = sil164_get_hw_state,
277 .dump_regs = sil164_dump_regs, 261 .dump_regs = sil164_dump_regs,
278 .destroy = sil164_destroy, 262 .destroy = sil164_destroy,
279}; 263};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index e17f1b07e91..8ab2855bb54 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -56,7 +56,7 @@
56#define TFP410_CTL_2_MDI (1<<0) 56#define TFP410_CTL_2_MDI (1<<0)
57 57
58#define TFP410_CTL_3 0x0A 58#define TFP410_CTL_3 0x0A
59#define TFP410_CTL_3_DK_MASK (0x7<<5) 59#define TFP410_CTL_3_DK_MASK (0x7<<5)
60#define TFP410_CTL_3_DK (1<<5) 60#define TFP410_CTL_3_DK (1<<5)
61#define TFP410_CTL_3_DKEN (1<<4) 61#define TFP410_CTL_3_DKEN (1<<4)
62#define TFP410_CTL_3_CTL_MASK (0x7<<1) 62#define TFP410_CTL_3_CTL_MASK (0x7<<1)
@@ -225,23 +225,23 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
225 struct drm_display_mode *mode, 225 struct drm_display_mode *mode,
226 struct drm_display_mode *adjusted_mode) 226 struct drm_display_mode *adjusted_mode)
227{ 227{
228 /* As long as the basics are set up, since we don't have clock dependencies 228 /* As long as the basics are set up, since we don't have clock dependencies
229 * in the mode setup, we can just leave the registers alone and everything 229 * in the mode setup, we can just leave the registers alone and everything
230 * will work fine. 230 * will work fine.
231 */ 231 */
232 /* don't do much */ 232 /* don't do much */
233 return; 233 return;
234} 234}
235 235
236/* set the tfp410 power state */ 236/* set the tfp410 power state */
237static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) 237static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
238{ 238{
239 uint8_t ctl1; 239 uint8_t ctl1;
240 240
241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) 241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
242 return; 242 return;
243 243
244 if (enable) 244 if (mode == DRM_MODE_DPMS_ON)
245 ctl1 |= TFP410_CTL_1_PD; 245 ctl1 |= TFP410_CTL_1_PD;
246 else 246 else
247 ctl1 &= ~TFP410_CTL_1_PD; 247 ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,19 +249,6 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1); 249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
250} 250}
251 251
252static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
253{
254 uint8_t ctl1;
255
256 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
257 return false;
258
259 if (ctl1 & TFP410_CTL_1_PD)
260 return true;
261 else
262 return false;
263}
264
265static void tfp410_dump_regs(struct intel_dvo_device *dvo) 252static void tfp410_dump_regs(struct intel_dvo_device *dvo)
266{ 253{
267 uint8_t val, val2; 254 uint8_t val, val2;
@@ -312,7 +299,6 @@ struct intel_dvo_dev_ops tfp410_ops = {
312 .mode_valid = tfp410_mode_valid, 299 .mode_valid = tfp410_mode_valid,
313 .mode_set = tfp410_mode_set, 300 .mode_set = tfp410_mode_set,
314 .dpms = tfp410_dpms, 301 .dpms = tfp410_dpms,
315 .get_hw_state = tfp410_get_hw_state,
316 .dump_regs = tfp410_dump_regs, 302 .dump_regs = tfp410_dump_regs,
317 .destroy = tfp410_destroy, 303 .destroy = tfp410_destroy,
318}; 304};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6a11ca85ea..3c395a59da3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -29,11 +29,11 @@
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/export.h> 32#include "drmP.h"
33#include <drm/drmP.h> 33#include "drm.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include "intel_ringbuffer.h" 35#include "intel_ringbuffer.h"
36#include <drm/i915_drm.h> 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38 38
39#define DRM_I915_RING_DEBUG 1 39#define DRM_I915_RING_DEBUG 1
@@ -43,8 +43,10 @@
43 43
44enum { 44enum {
45 ACTIVE_LIST, 45 ACTIVE_LIST,
46 FLUSHING_LIST,
46 INACTIVE_LIST, 47 INACTIVE_LIST,
47 PINNED_LIST, 48 PINNED_LIST,
49 DEFERRED_FREE_LIST,
48}; 50};
49 51
50static const char *yesno(int v) 52static const char *yesno(int v)
@@ -59,12 +61,27 @@ static int i915_capabilities(struct seq_file *m, void *data)
59 const struct intel_device_info *info = INTEL_INFO(dev); 61 const struct intel_device_info *info = INTEL_INFO(dev);
60 62
61 seq_printf(m, "gen: %d\n", info->gen); 63 seq_printf(m, "gen: %d\n", info->gen);
62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 64#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
63#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 B(is_mobile);
64#define DEV_INFO_SEP ; 66 B(is_i85x);
65 DEV_INFO_FLAGS; 67 B(is_i915g);
66#undef DEV_INFO_FLAG 68 B(is_i945gm);
67#undef DEV_INFO_SEP 69 B(is_g33);
70 B(need_gfx_hws);
71 B(is_g4x);
72 B(is_pineview);
73 B(is_broadwater);
74 B(is_crestline);
75 B(has_fbc);
76 B(has_pipe_cxsr);
77 B(has_hotplug);
78 B(cursor_needs_physical);
79 B(has_overlay);
80 B(overlay_needs_physical);
81 B(supports_tv);
82 B(has_bsd_ring);
83 B(has_blt_ring);
84#undef B
68 85
69 return 0; 86 return 0;
70} 87}
@@ -81,12 +98,12 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
81 98
82static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 99static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
83{ 100{
84 switch (obj->tiling_mode) { 101 switch (obj->tiling_mode) {
85 default: 102 default:
86 case I915_TILING_NONE: return " "; 103 case I915_TILING_NONE: return " ";
87 case I915_TILING_X: return "X"; 104 case I915_TILING_X: return "X";
88 case I915_TILING_Y: return "Y"; 105 case I915_TILING_Y: return "Y";
89 } 106 }
90} 107}
91 108
92static const char *cache_level_str(int type) 109static const char *cache_level_str(int type)
@@ -102,23 +119,20 @@ static const char *cache_level_str(int type)
102static void 119static void
103describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 120describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
104{ 121{
105 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 122 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
106 &obj->base, 123 &obj->base,
107 get_pin_flag(obj), 124 get_pin_flag(obj),
108 get_tiling_flag(obj), 125 get_tiling_flag(obj),
109 obj->base.size / 1024, 126 obj->base.size,
110 obj->base.read_domains, 127 obj->base.read_domains,
111 obj->base.write_domain, 128 obj->base.write_domain,
112 obj->last_read_seqno, 129 obj->last_rendering_seqno,
113 obj->last_write_seqno,
114 obj->last_fenced_seqno, 130 obj->last_fenced_seqno,
115 cache_level_str(obj->cache_level), 131 cache_level_str(obj->cache_level),
116 obj->dirty ? " dirty" : "", 132 obj->dirty ? " dirty" : "",
117 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 133 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
118 if (obj->base.name) 134 if (obj->base.name)
119 seq_printf(m, " (name: %d)", obj->base.name); 135 seq_printf(m, " (name: %d)", obj->base.name);
120 if (obj->pin_count)
121 seq_printf(m, " (pinned x %d)", obj->pin_count);
122 if (obj->fence_reg != I915_FENCE_REG_NONE) 136 if (obj->fence_reg != I915_FENCE_REG_NONE)
123 seq_printf(m, " (fence: %d)", obj->fence_reg); 137 seq_printf(m, " (fence: %d)", obj->fence_reg);
124 if (obj->gtt_space != NULL) 138 if (obj->gtt_space != NULL)
@@ -161,6 +175,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
161 seq_printf(m, "Inactive:\n"); 175 seq_printf(m, "Inactive:\n");
162 head = &dev_priv->mm.inactive_list; 176 head = &dev_priv->mm.inactive_list;
163 break; 177 break;
178 case PINNED_LIST:
179 seq_printf(m, "Pinned:\n");
180 head = &dev_priv->mm.pinned_list;
181 break;
182 case FLUSHING_LIST:
183 seq_printf(m, "Flushing:\n");
184 head = &dev_priv->mm.flushing_list;
185 break;
186 case DEFERRED_FREE_LIST:
187 seq_printf(m, "Deferred free:\n");
188 head = &dev_priv->mm.deferred_free_list;
189 break;
164 default: 190 default:
165 mutex_unlock(&dev->struct_mutex); 191 mutex_unlock(&dev->struct_mutex);
166 return -EINVAL; 192 return -EINVAL;
@@ -191,15 +217,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
191 ++mappable_count; \ 217 ++mappable_count; \
192 } \ 218 } \
193 } \ 219 } \
194} while (0) 220} while(0)
195 221
196static int i915_gem_object_info(struct seq_file *m, void* data) 222static int i915_gem_object_info(struct seq_file *m, void* data)
197{ 223{
198 struct drm_info_node *node = (struct drm_info_node *) m->private; 224 struct drm_info_node *node = (struct drm_info_node *) m->private;
199 struct drm_device *dev = node->minor->dev; 225 struct drm_device *dev = node->minor->dev;
200 struct drm_i915_private *dev_priv = dev->dev_private; 226 struct drm_i915_private *dev_priv = dev->dev_private;
201 u32 count, mappable_count, purgeable_count; 227 u32 count, mappable_count;
202 size_t size, mappable_size, purgeable_size; 228 size_t size, mappable_size;
203 struct drm_i915_gem_object *obj; 229 struct drm_i915_gem_object *obj;
204 int ret; 230 int ret;
205 231
@@ -212,30 +238,33 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
212 dev_priv->mm.object_memory); 238 dev_priv->mm.object_memory);
213 239
214 size = count = mappable_size = mappable_count = 0; 240 size = count = mappable_size = mappable_count = 0;
215 count_objects(&dev_priv->mm.bound_list, gtt_list); 241 count_objects(&dev_priv->mm.gtt_list, gtt_list);
216 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 242 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
217 count, mappable_count, size, mappable_size); 243 count, mappable_count, size, mappable_size);
218 244
219 size = count = mappable_size = mappable_count = 0; 245 size = count = mappable_size = mappable_count = 0;
220 count_objects(&dev_priv->mm.active_list, mm_list); 246 count_objects(&dev_priv->mm.active_list, mm_list);
247 count_objects(&dev_priv->mm.flushing_list, mm_list);
221 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 248 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
222 count, mappable_count, size, mappable_size); 249 count, mappable_count, size, mappable_size);
223 250
224 size = count = mappable_size = mappable_count = 0; 251 size = count = mappable_size = mappable_count = 0;
252 count_objects(&dev_priv->mm.pinned_list, mm_list);
253 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
254 count, mappable_count, size, mappable_size);
255
256 size = count = mappable_size = mappable_count = 0;
225 count_objects(&dev_priv->mm.inactive_list, mm_list); 257 count_objects(&dev_priv->mm.inactive_list, mm_list);
226 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 258 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
227 count, mappable_count, size, mappable_size); 259 count, mappable_count, size, mappable_size);
228 260
229 size = count = purgeable_size = purgeable_count = 0; 261 size = count = mappable_size = mappable_count = 0;
230 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 262 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
231 size += obj->base.size, ++count; 263 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
232 if (obj->madv == I915_MADV_DONTNEED) 264 count, mappable_count, size, mappable_size);
233 purgeable_size += obj->base.size, ++purgeable_count;
234 }
235 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
236 265
237 size = count = mappable_size = mappable_count = 0; 266 size = count = mappable_size = mappable_count = 0;
238 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 267 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
239 if (obj->fault_mappable) { 268 if (obj->fault_mappable) {
240 size += obj->gtt_space->size; 269 size += obj->gtt_space->size;
241 ++count; 270 ++count;
@@ -244,13 +273,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
244 mappable_size += obj->gtt_space->size; 273 mappable_size += obj->gtt_space->size;
245 ++mappable_count; 274 ++mappable_count;
246 } 275 }
247 if (obj->madv == I915_MADV_DONTNEED) {
248 purgeable_size += obj->base.size;
249 ++purgeable_count;
250 }
251 } 276 }
252 seq_printf(m, "%u purgeable objects, %zu bytes\n",
253 purgeable_count, purgeable_size);
254 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 277 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
255 mappable_count, mappable_size); 278 mappable_count, mappable_size);
256 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 279 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -268,7 +291,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
268{ 291{
269 struct drm_info_node *node = (struct drm_info_node *) m->private; 292 struct drm_info_node *node = (struct drm_info_node *) m->private;
270 struct drm_device *dev = node->minor->dev; 293 struct drm_device *dev = node->minor->dev;
271 uintptr_t list = (uintptr_t) node->info_ent->data;
272 struct drm_i915_private *dev_priv = dev->dev_private; 294 struct drm_i915_private *dev_priv = dev->dev_private;
273 struct drm_i915_gem_object *obj; 295 struct drm_i915_gem_object *obj;
274 size_t total_obj_size, total_gtt_size; 296 size_t total_obj_size, total_gtt_size;
@@ -279,10 +301,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
279 return ret; 301 return ret;
280 302
281 total_obj_size = total_gtt_size = count = 0; 303 total_obj_size = total_gtt_size = count = 0;
282 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 304 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
283 if (list == PINNED_LIST && obj->pin_count == 0)
284 continue;
285
286 seq_printf(m, " "); 305 seq_printf(m, " ");
287 describe_obj(m, obj); 306 describe_obj(m, obj);
288 seq_printf(m, "\n"); 307 seq_printf(m, "\n");
@@ -299,6 +318,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
299 return 0; 318 return 0;
300} 319}
301 320
321
302static int i915_gem_pageflip_info(struct seq_file *m, void *data) 322static int i915_gem_pageflip_info(struct seq_file *m, void *data)
303{ 323{
304 struct drm_info_node *node = (struct drm_info_node *) m->private; 324 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -317,7 +337,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
317 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 337 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
318 pipe, plane); 338 pipe, plane);
319 } else { 339 } else {
320 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 340 if (!work->pending) {
321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 341 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
322 pipe, plane); 342 pipe, plane);
323 } else { 343 } else {
@@ -328,7 +348,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
328 seq_printf(m, "Stall check enabled, "); 348 seq_printf(m, "Stall check enabled, ");
329 else 349 else
330 seq_printf(m, "Stall check waiting for page flip ioctl, "); 350 seq_printf(m, "Stall check waiting for page flip ioctl, ");
331 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 351 seq_printf(m, "%d prepares\n", work->pending);
332 352
333 if (work->old_fb_obj) { 353 if (work->old_fb_obj) {
334 struct drm_i915_gem_object *obj = work->old_fb_obj; 354 struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -352,22 +372,40 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
352 struct drm_info_node *node = (struct drm_info_node *) m->private; 372 struct drm_info_node *node = (struct drm_info_node *) m->private;
353 struct drm_device *dev = node->minor->dev; 373 struct drm_device *dev = node->minor->dev;
354 drm_i915_private_t *dev_priv = dev->dev_private; 374 drm_i915_private_t *dev_priv = dev->dev_private;
355 struct intel_ring_buffer *ring;
356 struct drm_i915_gem_request *gem_request; 375 struct drm_i915_gem_request *gem_request;
357 int ret, count, i; 376 int ret, count;
358 377
359 ret = mutex_lock_interruptible(&dev->struct_mutex); 378 ret = mutex_lock_interruptible(&dev->struct_mutex);
360 if (ret) 379 if (ret)
361 return ret; 380 return ret;
362 381
363 count = 0; 382 count = 0;
364 for_each_ring(ring, dev_priv, i) { 383 if (!list_empty(&dev_priv->ring[RCS].request_list)) {
365 if (list_empty(&ring->request_list)) 384 seq_printf(m, "Render requests:\n");
366 continue;
367
368 seq_printf(m, "%s requests:\n", ring->name);
369 list_for_each_entry(gem_request, 385 list_for_each_entry(gem_request,
370 &ring->request_list, 386 &dev_priv->ring[RCS].request_list,
387 list) {
388 seq_printf(m, " %d @ %d\n",
389 gem_request->seqno,
390 (int) (jiffies - gem_request->emitted_jiffies));
391 }
392 count++;
393 }
394 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
395 seq_printf(m, "BSD requests:\n");
396 list_for_each_entry(gem_request,
397 &dev_priv->ring[VCS].request_list,
398 list) {
399 seq_printf(m, " %d @ %d\n",
400 gem_request->seqno,
401 (int) (jiffies - gem_request->emitted_jiffies));
402 }
403 count++;
404 }
405 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
406 seq_printf(m, "BLT requests:\n");
407 list_for_each_entry(gem_request,
408 &dev_priv->ring[BCS].request_list,
371 list) { 409 list) {
372 seq_printf(m, " %d @ %d\n", 410 seq_printf(m, " %d @ %d\n",
373 gem_request->seqno, 411 gem_request->seqno,
@@ -388,7 +426,11 @@ static void i915_ring_seqno_info(struct seq_file *m,
388{ 426{
389 if (ring->get_seqno) { 427 if (ring->get_seqno) {
390 seq_printf(m, "Current sequence (%s): %d\n", 428 seq_printf(m, "Current sequence (%s): %d\n",
391 ring->name, ring->get_seqno(ring, false)); 429 ring->name, ring->get_seqno(ring));
430 seq_printf(m, "Waiter sequence (%s): %d\n",
431 ring->name, ring->waiting_seqno);
432 seq_printf(m, "IRQ sequence (%s): %d\n",
433 ring->name, ring->irq_seqno);
392 } 434 }
393} 435}
394 436
@@ -397,15 +439,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
397 struct drm_info_node *node = (struct drm_info_node *) m->private; 439 struct drm_info_node *node = (struct drm_info_node *) m->private;
398 struct drm_device *dev = node->minor->dev; 440 struct drm_device *dev = node->minor->dev;
399 drm_i915_private_t *dev_priv = dev->dev_private; 441 drm_i915_private_t *dev_priv = dev->dev_private;
400 struct intel_ring_buffer *ring;
401 int ret, i; 442 int ret, i;
402 443
403 ret = mutex_lock_interruptible(&dev->struct_mutex); 444 ret = mutex_lock_interruptible(&dev->struct_mutex);
404 if (ret) 445 if (ret)
405 return ret; 446 return ret;
406 447
407 for_each_ring(ring, dev_priv, i) 448 for (i = 0; i < I915_NUM_RINGS; i++)
408 i915_ring_seqno_info(m, ring); 449 i915_ring_seqno_info(m, &dev_priv->ring[i]);
409 450
410 mutex_unlock(&dev->struct_mutex); 451 mutex_unlock(&dev->struct_mutex);
411 452
@@ -418,52 +459,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
418 struct drm_info_node *node = (struct drm_info_node *) m->private; 459 struct drm_info_node *node = (struct drm_info_node *) m->private;
419 struct drm_device *dev = node->minor->dev; 460 struct drm_device *dev = node->minor->dev;
420 drm_i915_private_t *dev_priv = dev->dev_private; 461 drm_i915_private_t *dev_priv = dev->dev_private;
421 struct intel_ring_buffer *ring;
422 int ret, i, pipe; 462 int ret, i, pipe;
423 463
424 ret = mutex_lock_interruptible(&dev->struct_mutex); 464 ret = mutex_lock_interruptible(&dev->struct_mutex);
425 if (ret) 465 if (ret)
426 return ret; 466 return ret;
427 467
428 if (IS_VALLEYVIEW(dev)) { 468 if (!HAS_PCH_SPLIT(dev)) {
429 seq_printf(m, "Display IER:\t%08x\n",
430 I915_READ(VLV_IER));
431 seq_printf(m, "Display IIR:\t%08x\n",
432 I915_READ(VLV_IIR));
433 seq_printf(m, "Display IIR_RW:\t%08x\n",
434 I915_READ(VLV_IIR_RW));
435 seq_printf(m, "Display IMR:\t%08x\n",
436 I915_READ(VLV_IMR));
437 for_each_pipe(pipe)
438 seq_printf(m, "Pipe %c stat:\t%08x\n",
439 pipe_name(pipe),
440 I915_READ(PIPESTAT(pipe)));
441
442 seq_printf(m, "Master IER:\t%08x\n",
443 I915_READ(VLV_MASTER_IER));
444
445 seq_printf(m, "Render IER:\t%08x\n",
446 I915_READ(GTIER));
447 seq_printf(m, "Render IIR:\t%08x\n",
448 I915_READ(GTIIR));
449 seq_printf(m, "Render IMR:\t%08x\n",
450 I915_READ(GTIMR));
451
452 seq_printf(m, "PM IER:\t\t%08x\n",
453 I915_READ(GEN6_PMIER));
454 seq_printf(m, "PM IIR:\t\t%08x\n",
455 I915_READ(GEN6_PMIIR));
456 seq_printf(m, "PM IMR:\t\t%08x\n",
457 I915_READ(GEN6_PMIMR));
458
459 seq_printf(m, "Port hotplug:\t%08x\n",
460 I915_READ(PORT_HOTPLUG_EN));
461 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
462 I915_READ(VLV_DPFLIPSTAT));
463 seq_printf(m, "DPINVGTT:\t%08x\n",
464 I915_READ(DPINVGTT));
465
466 } else if (!HAS_PCH_SPLIT(dev)) {
467 seq_printf(m, "Interrupt enable: %08x\n", 469 seq_printf(m, "Interrupt enable: %08x\n",
468 I915_READ(IER)); 470 I915_READ(IER));
469 seq_printf(m, "Interrupt identity: %08x\n", 471 seq_printf(m, "Interrupt identity: %08x\n",
@@ -496,13 +498,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
496 } 498 }
497 seq_printf(m, "Interrupts received: %d\n", 499 seq_printf(m, "Interrupts received: %d\n",
498 atomic_read(&dev_priv->irq_received)); 500 atomic_read(&dev_priv->irq_received));
499 for_each_ring(ring, dev_priv, i) { 501 for (i = 0; i < I915_NUM_RINGS; i++) {
500 if (IS_GEN6(dev) || IS_GEN7(dev)) { 502 if (IS_GEN6(dev) || IS_GEN7(dev)) {
501 seq_printf(m, 503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
502 "Graphics Interrupt mask (%s): %08x\n", 504 dev_priv->ring[i].name,
503 ring->name, I915_READ_IMR(ring)); 505 I915_READ_IMR(&dev_priv->ring[i]));
504 } 506 }
505 i915_ring_seqno_info(m, ring); 507 i915_ring_seqno_info(m, &dev_priv->ring[i]);
506 } 508 }
507 mutex_unlock(&dev->struct_mutex); 509 mutex_unlock(&dev->struct_mutex);
508 510
@@ -525,8 +527,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
525 for (i = 0; i < dev_priv->num_fence_regs; i++) { 527 for (i = 0; i < dev_priv->num_fence_regs; i++) {
526 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 528 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
527 529
528 seq_printf(m, "Fence %d, pin count = %d, object = ", 530 seq_printf(m, "Fenced object[%2d] = ", i);
529 i, dev_priv->fence_regs[i].pin_count);
530 if (obj == NULL) 531 if (obj == NULL)
531 seq_printf(m, "unused"); 532 seq_printf(m, "unused");
532 else 533 else
@@ -560,12 +561,107 @@ static int i915_hws_info(struct seq_file *m, void *data)
560 return 0; 561 return 0;
561} 562}
562 563
564static void i915_dump_object(struct seq_file *m,
565 struct io_mapping *mapping,
566 struct drm_i915_gem_object *obj)
567{
568 int page, page_count, i;
569
570 page_count = obj->base.size / PAGE_SIZE;
571 for (page = 0; page < page_count; page++) {
572 u32 *mem = io_mapping_map_wc(mapping,
573 obj->gtt_offset + page * PAGE_SIZE);
574 for (i = 0; i < PAGE_SIZE; i += 4)
575 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
576 io_mapping_unmap(mem);
577 }
578}
579
580static int i915_batchbuffer_info(struct seq_file *m, void *data)
581{
582 struct drm_info_node *node = (struct drm_info_node *) m->private;
583 struct drm_device *dev = node->minor->dev;
584 drm_i915_private_t *dev_priv = dev->dev_private;
585 struct drm_i915_gem_object *obj;
586 int ret;
587
588 ret = mutex_lock_interruptible(&dev->struct_mutex);
589 if (ret)
590 return ret;
591
592 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
593 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
594 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
595 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
596 }
597 }
598
599 mutex_unlock(&dev->struct_mutex);
600 return 0;
601}
602
603static int i915_ringbuffer_data(struct seq_file *m, void *data)
604{
605 struct drm_info_node *node = (struct drm_info_node *) m->private;
606 struct drm_device *dev = node->minor->dev;
607 drm_i915_private_t *dev_priv = dev->dev_private;
608 struct intel_ring_buffer *ring;
609 int ret;
610
611 ret = mutex_lock_interruptible(&dev->struct_mutex);
612 if (ret)
613 return ret;
614
615 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
616 if (!ring->obj) {
617 seq_printf(m, "No ringbuffer setup\n");
618 } else {
619 const u8 __iomem *virt = ring->virtual_start;
620 uint32_t off;
621
622 for (off = 0; off < ring->size; off += 4) {
623 uint32_t *ptr = (uint32_t *)(virt + off);
624 seq_printf(m, "%08x : %08x\n", off, *ptr);
625 }
626 }
627 mutex_unlock(&dev->struct_mutex);
628
629 return 0;
630}
631
632static int i915_ringbuffer_info(struct seq_file *m, void *data)
633{
634 struct drm_info_node *node = (struct drm_info_node *) m->private;
635 struct drm_device *dev = node->minor->dev;
636 drm_i915_private_t *dev_priv = dev->dev_private;
637 struct intel_ring_buffer *ring;
638
639 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
640 if (ring->size == 0)
641 return 0;
642
643 seq_printf(m, "Ring %s:\n", ring->name);
644 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
645 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
646 seq_printf(m, " Size : %08x\n", ring->size);
647 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
648 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
649 if (IS_GEN6(dev)) {
650 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
651 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
652 }
653 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
654 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
655
656 return 0;
657}
658
563static const char *ring_str(int ring) 659static const char *ring_str(int ring)
564{ 660{
565 switch (ring) { 661 switch (ring) {
566 case RCS: return "render"; 662 case RING_RENDER: return " render";
567 case VCS: return "bsd"; 663 case RING_BSD: return " bsd";
568 case BCS: return "blt"; 664 case RING_BLT: return " blt";
569 default: return ""; 665 default: return "";
570 } 666 }
571} 667}
@@ -608,17 +704,16 @@ static void print_error_buffers(struct seq_file *m,
608 seq_printf(m, "%s [%d]:\n", name, count); 704 seq_printf(m, "%s [%d]:\n", name, count);
609 705
610 while (count--) { 706 while (count--) {
611 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 707 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
612 err->gtt_offset, 708 err->gtt_offset,
613 err->size, 709 err->size,
614 err->read_domains, 710 err->read_domains,
615 err->write_domain, 711 err->write_domain,
616 err->rseqno, err->wseqno, 712 err->seqno,
617 pin_flag(err->pinned), 713 pin_flag(err->pinned),
618 tiling_flag(err->tiling), 714 tiling_flag(err->tiling),
619 dirty_flag(err->dirty), 715 dirty_flag(err->dirty),
620 purgeable_flag(err->purgeable), 716 purgeable_flag(err->purgeable),
621 err->ring != -1 ? " " : "",
622 ring_str(err->ring), 717 ring_str(err->ring),
623 cache_level_str(err->cache_level)); 718 cache_level_str(err->cache_level));
624 719
@@ -632,85 +727,57 @@ static void print_error_buffers(struct seq_file *m,
632 } 727 }
633} 728}
634 729
635static void i915_ring_error_state(struct seq_file *m,
636 struct drm_device *dev,
637 struct drm_i915_error_state *error,
638 unsigned ring)
639{
640 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
641 seq_printf(m, "%s command stream:\n", ring_str(ring));
642 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
643 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
644 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
645 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
646 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
647 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
648 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
649 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
650
651 if (INTEL_INFO(dev)->gen >= 4)
652 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
653 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
654 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
655 if (INTEL_INFO(dev)->gen >= 6) {
656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
658 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
659 error->semaphore_mboxes[ring][0],
660 error->semaphore_seqno[ring][0]);
661 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
662 error->semaphore_mboxes[ring][1],
663 error->semaphore_seqno[ring][1]);
664 }
665 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
666 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
667 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
668 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
669}
670
671struct i915_error_state_file_priv {
672 struct drm_device *dev;
673 struct drm_i915_error_state *error;
674};
675
676static int i915_error_state(struct seq_file *m, void *unused) 730static int i915_error_state(struct seq_file *m, void *unused)
677{ 731{
678 struct i915_error_state_file_priv *error_priv = m->private; 732 struct drm_info_node *node = (struct drm_info_node *) m->private;
679 struct drm_device *dev = error_priv->dev; 733 struct drm_device *dev = node->minor->dev;
680 drm_i915_private_t *dev_priv = dev->dev_private; 734 drm_i915_private_t *dev_priv = dev->dev_private;
681 struct drm_i915_error_state *error = error_priv->error; 735 struct drm_i915_error_state *error;
682 struct intel_ring_buffer *ring; 736 unsigned long flags;
683 int i, j, page, offset, elt; 737 int i, page, offset, elt;
684 738
685 if (!error) { 739 spin_lock_irqsave(&dev_priv->error_lock, flags);
740 if (!dev_priv->first_error) {
686 seq_printf(m, "no error state collected\n"); 741 seq_printf(m, "no error state collected\n");
687 return 0; 742 goto out;
688 } 743 }
689 744
745 error = dev_priv->first_error;
746
690 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 747 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
691 error->time.tv_usec); 748 error->time.tv_usec);
692 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 749 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
693 seq_printf(m, "EIR: 0x%08x\n", error->eir); 750 seq_printf(m, "EIR: 0x%08x\n", error->eir);
694 seq_printf(m, "IER: 0x%08x\n", error->ier);
695 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 751 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
696 seq_printf(m, "CCID: 0x%08x\n", error->ccid);
697
698 for (i = 0; i < dev_priv->num_fence_regs; i++)
699 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
700
701 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
702 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
703
704 if (INTEL_INFO(dev)->gen >= 6) { 752 if (INTEL_INFO(dev)->gen >= 6) {
705 seq_printf(m, "ERROR: 0x%08x\n", error->error); 753 seq_printf(m, "ERROR: 0x%08x\n", error->error);
706 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 754 seq_printf(m, "Blitter command stream:\n");
755 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
756 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
757 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
758 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
759 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
760 seq_printf(m, "Video (BSD) command stream:\n");
761 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
762 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
763 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
764 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
765 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
707 } 766 }
767 seq_printf(m, "Render command stream:\n");
768 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
769 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
770 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
771 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
772 if (INTEL_INFO(dev)->gen >= 4) {
773 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
774 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
775 }
776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
777 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
708 778
709 if (INTEL_INFO(dev)->gen == 7) 779 for (i = 0; i < dev_priv->num_fence_regs; i++)
710 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
711
712 for_each_ring(ring, dev_priv, i)
713 i915_ring_error_state(m, dev, error, i);
714 781
715 if (error->active_bo) 782 if (error->active_bo)
716 print_error_buffers(m, "Active", 783 print_error_buffers(m, "Active",
@@ -722,10 +789,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
722 error->pinned_bo, 789 error->pinned_bo,
723 error->pinned_bo_count); 790 error->pinned_bo_count);
724 791
725 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 792 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
726 struct drm_i915_error_object *obj; 793 if (error->batchbuffer[i]) {
794 struct drm_i915_error_object *obj = error->batchbuffer[i];
727 795
728 if ((obj = error->ring[i].batchbuffer)) {
729 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 796 seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
730 dev_priv->ring[i].name, 797 dev_priv->ring[i].name,
731 obj->gtt_offset); 798 obj->gtt_offset);
@@ -737,20 +804,11 @@ static int i915_error_state(struct seq_file *m, void *unused)
737 } 804 }
738 } 805 }
739 } 806 }
807 }
740 808
741 if (error->ring[i].num_requests) { 809 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
742 seq_printf(m, "%s --- %d requests\n", 810 if (error->ringbuffer[i]) {
743 dev_priv->ring[i].name, 811 struct drm_i915_error_object *obj = error->ringbuffer[i];
744 error->ring[i].num_requests);
745 for (j = 0; j < error->ring[i].num_requests; j++) {
746 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
747 error->ring[i].requests[j].seqno,
748 error->ring[i].requests[j].jiffies,
749 error->ring[i].requests[j].tail);
750 }
751 }
752
753 if ((obj = error->ring[i].ringbuffer)) {
754 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 812 seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
755 dev_priv->ring[i].name, 813 dev_priv->ring[i].name,
756 obj->gtt_offset); 814 obj->gtt_offset);
@@ -772,90 +830,18 @@ static int i915_error_state(struct seq_file *m, void *unused)
772 if (error->display) 830 if (error->display)
773 intel_display_print_error_state(m, dev, error->display); 831 intel_display_print_error_state(m, dev, error->display);
774 832
775 return 0; 833out:
776}
777
778static ssize_t
779i915_error_state_write(struct file *filp,
780 const char __user *ubuf,
781 size_t cnt,
782 loff_t *ppos)
783{
784 struct seq_file *m = filp->private_data;
785 struct i915_error_state_file_priv *error_priv = m->private;
786 struct drm_device *dev = error_priv->dev;
787 int ret;
788
789 DRM_DEBUG_DRIVER("Resetting error state\n");
790
791 ret = mutex_lock_interruptible(&dev->struct_mutex);
792 if (ret)
793 return ret;
794
795 i915_destroy_error_state(dev);
796 mutex_unlock(&dev->struct_mutex);
797
798 return cnt;
799}
800
801static int i915_error_state_open(struct inode *inode, struct file *file)
802{
803 struct drm_device *dev = inode->i_private;
804 drm_i915_private_t *dev_priv = dev->dev_private;
805 struct i915_error_state_file_priv *error_priv;
806 unsigned long flags;
807
808 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
809 if (!error_priv)
810 return -ENOMEM;
811
812 error_priv->dev = dev;
813
814 spin_lock_irqsave(&dev_priv->error_lock, flags);
815 error_priv->error = dev_priv->first_error;
816 if (error_priv->error)
817 kref_get(&error_priv->error->ref);
818 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 834 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
819 835
820 return single_open(file, i915_error_state, error_priv); 836 return 0;
821}
822
823static int i915_error_state_release(struct inode *inode, struct file *file)
824{
825 struct seq_file *m = file->private_data;
826 struct i915_error_state_file_priv *error_priv = m->private;
827
828 if (error_priv->error)
829 kref_put(&error_priv->error->ref, i915_error_state_free);
830 kfree(error_priv);
831
832 return single_release(inode, file);
833} 837}
834 838
835static const struct file_operations i915_error_state_fops = {
836 .owner = THIS_MODULE,
837 .open = i915_error_state_open,
838 .read = seq_read,
839 .write = i915_error_state_write,
840 .llseek = default_llseek,
841 .release = i915_error_state_release,
842};
843
844static int i915_rstdby_delays(struct seq_file *m, void *unused) 839static int i915_rstdby_delays(struct seq_file *m, void *unused)
845{ 840{
846 struct drm_info_node *node = (struct drm_info_node *) m->private; 841 struct drm_info_node *node = (struct drm_info_node *) m->private;
847 struct drm_device *dev = node->minor->dev; 842 struct drm_device *dev = node->minor->dev;
848 drm_i915_private_t *dev_priv = dev->dev_private; 843 drm_i915_private_t *dev_priv = dev->dev_private;
849 u16 crstanddelay; 844 u16 crstanddelay = I915_READ16(CRSTANDVID);
850 int ret;
851
852 ret = mutex_lock_interruptible(&dev->struct_mutex);
853 if (ret)
854 return ret;
855
856 crstanddelay = I915_READ16(CRSTANDVID);
857
858 mutex_unlock(&dev->struct_mutex);
859 845
860 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 846 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
861 847
@@ -915,7 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
915 seq_printf(m, "Render p-state limit: %d\n", 901 seq_printf(m, "Render p-state limit: %d\n",
916 rp_state_limits & 0xff); 902 rp_state_limits & 0xff);
917 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 903 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
918 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); 904 GEN6_CAGF_SHIFT) * 50);
919 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 905 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
920 GEN6_CURICONT_MASK); 906 GEN6_CURICONT_MASK);
921 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 907 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -931,15 +917,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
931 917
932 max_freq = (rp_state_cap & 0xff0000) >> 16; 918 max_freq = (rp_state_cap & 0xff0000) >> 16;
933 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 919 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
934 max_freq * GT_FREQUENCY_MULTIPLIER); 920 max_freq * 50);
935 921
936 max_freq = (rp_state_cap & 0xff00) >> 8; 922 max_freq = (rp_state_cap & 0xff00) >> 8;
937 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 923 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
938 max_freq * GT_FREQUENCY_MULTIPLIER); 924 max_freq * 50);
939 925
940 max_freq = rp_state_cap & 0xff; 926 max_freq = rp_state_cap & 0xff;
941 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 927 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
942 max_freq * GT_FREQUENCY_MULTIPLIER); 928 max_freq * 50);
943 } else { 929 } else {
944 seq_printf(m, "no P-state info available\n"); 930 seq_printf(m, "no P-state info available\n");
945 } 931 }
@@ -953,11 +939,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
953 struct drm_device *dev = node->minor->dev; 939 struct drm_device *dev = node->minor->dev;
954 drm_i915_private_t *dev_priv = dev->dev_private; 940 drm_i915_private_t *dev_priv = dev->dev_private;
955 u32 delayfreq; 941 u32 delayfreq;
956 int ret, i; 942 int i;
957
958 ret = mutex_lock_interruptible(&dev->struct_mutex);
959 if (ret)
960 return ret;
961 943
962 for (i = 0; i < 16; i++) { 944 for (i = 0; i < 16; i++) {
963 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 945 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -965,8 +947,6 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
965 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 947 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
966 } 948 }
967 949
968 mutex_unlock(&dev->struct_mutex);
969
970 return 0; 950 return 0;
971} 951}
972 952
@@ -981,40 +961,24 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
981 struct drm_device *dev = node->minor->dev; 961 struct drm_device *dev = node->minor->dev;
982 drm_i915_private_t *dev_priv = dev->dev_private; 962 drm_i915_private_t *dev_priv = dev->dev_private;
983 u32 inttoext; 963 u32 inttoext;
984 int ret, i; 964 int i;
985
986 ret = mutex_lock_interruptible(&dev->struct_mutex);
987 if (ret)
988 return ret;
989 965
990 for (i = 1; i <= 32; i++) { 966 for (i = 1; i <= 32; i++) {
991 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 967 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
992 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 968 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
993 } 969 }
994 970
995 mutex_unlock(&dev->struct_mutex);
996
997 return 0; 971 return 0;
998} 972}
999 973
1000static int ironlake_drpc_info(struct seq_file *m) 974static int i915_drpc_info(struct seq_file *m, void *unused)
1001{ 975{
1002 struct drm_info_node *node = (struct drm_info_node *) m->private; 976 struct drm_info_node *node = (struct drm_info_node *) m->private;
1003 struct drm_device *dev = node->minor->dev; 977 struct drm_device *dev = node->minor->dev;
1004 drm_i915_private_t *dev_priv = dev->dev_private; 978 drm_i915_private_t *dev_priv = dev->dev_private;
1005 u32 rgvmodectl, rstdbyctl; 979 u32 rgvmodectl = I915_READ(MEMMODECTL);
1006 u16 crstandvid; 980 u32 rstdbyctl = I915_READ(RSTDBYCTL);
1007 int ret; 981 u16 crstandvid = I915_READ16(CRSTANDVID);
1008
1009 ret = mutex_lock_interruptible(&dev->struct_mutex);
1010 if (ret)
1011 return ret;
1012
1013 rgvmodectl = I915_READ(MEMMODECTL);
1014 rstdbyctl = I915_READ(RSTDBYCTL);
1015 crstandvid = I915_READ16(CRSTANDVID);
1016
1017 mutex_unlock(&dev->struct_mutex);
1018 982
1019 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 983 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1020 "yes" : "no"); 984 "yes" : "no");
@@ -1064,115 +1028,6 @@ static int ironlake_drpc_info(struct seq_file *m)
1064 return 0; 1028 return 0;
1065} 1029}
1066 1030
1067static int gen6_drpc_info(struct seq_file *m)
1068{
1069
1070 struct drm_info_node *node = (struct drm_info_node *) m->private;
1071 struct drm_device *dev = node->minor->dev;
1072 struct drm_i915_private *dev_priv = dev->dev_private;
1073 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1074 unsigned forcewake_count;
1075 int count=0, ret;
1076
1077
1078 ret = mutex_lock_interruptible(&dev->struct_mutex);
1079 if (ret)
1080 return ret;
1081
1082 spin_lock_irq(&dev_priv->gt_lock);
1083 forcewake_count = dev_priv->forcewake_count;
1084 spin_unlock_irq(&dev_priv->gt_lock);
1085
1086 if (forcewake_count) {
1087 seq_printf(m, "RC information inaccurate because somebody "
1088 "holds a forcewake reference \n");
1089 } else {
1090 /* NB: we cannot use forcewake, else we read the wrong values */
1091 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1092 udelay(10);
1093 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1094 }
1095
1096 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1097 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
1098
1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1100 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1101 mutex_unlock(&dev->struct_mutex);
1102 mutex_lock(&dev_priv->rps.hw_lock);
1103 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1104 mutex_unlock(&dev_priv->rps.hw_lock);
1105
1106 seq_printf(m, "Video Turbo Mode: %s\n",
1107 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1108 seq_printf(m, "HW control enabled: %s\n",
1109 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1110 seq_printf(m, "SW control enabled: %s\n",
1111 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1112 GEN6_RP_MEDIA_SW_MODE));
1113 seq_printf(m, "RC1e Enabled: %s\n",
1114 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1115 seq_printf(m, "RC6 Enabled: %s\n",
1116 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1117 seq_printf(m, "Deep RC6 Enabled: %s\n",
1118 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1119 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1120 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1121 seq_printf(m, "Current RC state: ");
1122 switch (gt_core_status & GEN6_RCn_MASK) {
1123 case GEN6_RC0:
1124 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1125 seq_printf(m, "Core Power Down\n");
1126 else
1127 seq_printf(m, "on\n");
1128 break;
1129 case GEN6_RC3:
1130 seq_printf(m, "RC3\n");
1131 break;
1132 case GEN6_RC6:
1133 seq_printf(m, "RC6\n");
1134 break;
1135 case GEN6_RC7:
1136 seq_printf(m, "RC7\n");
1137 break;
1138 default:
1139 seq_printf(m, "Unknown\n");
1140 break;
1141 }
1142
1143 seq_printf(m, "Core Power Down: %s\n",
1144 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1145
1146 /* Not exactly sure what this is */
1147 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1148 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1149 seq_printf(m, "RC6 residency since boot: %u\n",
1150 I915_READ(GEN6_GT_GFX_RC6));
1151 seq_printf(m, "RC6+ residency since boot: %u\n",
1152 I915_READ(GEN6_GT_GFX_RC6p));
1153 seq_printf(m, "RC6++ residency since boot: %u\n",
1154 I915_READ(GEN6_GT_GFX_RC6pp));
1155
1156 seq_printf(m, "RC6 voltage: %dmV\n",
1157 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1158 seq_printf(m, "RC6+ voltage: %dmV\n",
1159 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1160 seq_printf(m, "RC6++ voltage: %dmV\n",
1161 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1162 return 0;
1163}
1164
1165static int i915_drpc_info(struct seq_file *m, void *unused)
1166{
1167 struct drm_info_node *node = (struct drm_info_node *) m->private;
1168 struct drm_device *dev = node->minor->dev;
1169
1170 if (IS_GEN6(dev) || IS_GEN7(dev))
1171 return gen6_drpc_info(m);
1172 else
1173 return ironlake_drpc_info(m);
1174}
1175
1176static int i915_fbc_status(struct seq_file *m, void *unused) 1031static int i915_fbc_status(struct seq_file *m, void *unused)
1177{ 1032{
1178 struct drm_info_node *node = (struct drm_info_node *) m->private; 1033 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1251,9 +1106,6 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1251 unsigned long temp, chipset, gfx; 1106 unsigned long temp, chipset, gfx;
1252 int ret; 1107 int ret;
1253 1108
1254 if (!IS_GEN5(dev))
1255 return -ENODEV;
1256
1257 ret = mutex_lock_interruptible(&dev->struct_mutex); 1109 ret = mutex_lock_interruptible(&dev->struct_mutex);
1258 if (ret) 1110 if (ret)
1259 return ret; 1111 return ret;
@@ -1284,23 +1136,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1284 return 0; 1136 return 0;
1285 } 1137 }
1286 1138
1287 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1139 ret = mutex_lock_interruptible(&dev->struct_mutex);
1288 if (ret) 1140 if (ret)
1289 return ret; 1141 return ret;
1290 1142
1291 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1143 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1292 1144
1293 for (gpu_freq = dev_priv->rps.min_delay; 1145 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
1294 gpu_freq <= dev_priv->rps.max_delay;
1295 gpu_freq++) { 1146 gpu_freq++) {
1296 ia_freq = gpu_freq; 1147 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1297 sandybridge_pcode_read(dev_priv, 1148 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
1298 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1149 GEN6_PCODE_READ_MIN_FREQ_TABLE);
1299 &ia_freq); 1150 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
1300 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1151 GEN6_PCODE_READY) == 0, 10)) {
1152 DRM_ERROR("pcode read of freq table timed out\n");
1153 continue;
1154 }
1155 ia_freq = I915_READ(GEN6_PCODE_DATA);
1156 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
1301 } 1157 }
1302 1158
1303 mutex_unlock(&dev_priv->rps.hw_lock); 1159 mutex_unlock(&dev->struct_mutex);
1304 1160
1305 return 0; 1161 return 0;
1306} 1162}
@@ -1310,16 +1166,9 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1310 struct drm_info_node *node = (struct drm_info_node *) m->private; 1166 struct drm_info_node *node = (struct drm_info_node *) m->private;
1311 struct drm_device *dev = node->minor->dev; 1167 struct drm_device *dev = node->minor->dev;
1312 drm_i915_private_t *dev_priv = dev->dev_private; 1168 drm_i915_private_t *dev_priv = dev->dev_private;
1313 int ret;
1314
1315 ret = mutex_lock_interruptible(&dev->struct_mutex);
1316 if (ret)
1317 return ret;
1318 1169
1319 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1170 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1320 1171
1321 mutex_unlock(&dev->struct_mutex);
1322
1323 return 0; 1172 return 0;
1324} 1173}
1325 1174
@@ -1329,25 +1178,17 @@ static int i915_opregion(struct seq_file *m, void *unused)
1329 struct drm_device *dev = node->minor->dev; 1178 struct drm_device *dev = node->minor->dev;
1330 drm_i915_private_t *dev_priv = dev->dev_private; 1179 drm_i915_private_t *dev_priv = dev->dev_private;
1331 struct intel_opregion *opregion = &dev_priv->opregion; 1180 struct intel_opregion *opregion = &dev_priv->opregion;
1332 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1333 int ret; 1181 int ret;
1334 1182
1335 if (data == NULL)
1336 return -ENOMEM;
1337
1338 ret = mutex_lock_interruptible(&dev->struct_mutex); 1183 ret = mutex_lock_interruptible(&dev->struct_mutex);
1339 if (ret) 1184 if (ret)
1340 goto out; 1185 return ret;
1341 1186
1342 if (opregion->header) { 1187 if (opregion->header)
1343 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1188 seq_write(m, opregion->header, OPREGION_SIZE);
1344 seq_write(m, data, OPREGION_SIZE);
1345 }
1346 1189
1347 mutex_unlock(&dev->struct_mutex); 1190 mutex_unlock(&dev->struct_mutex);
1348 1191
1349out:
1350 kfree(data);
1351 return 0; 1192 return 0;
1352} 1193}
1353 1194
@@ -1404,15 +1245,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1404 if (ret) 1245 if (ret)
1405 return ret; 1246 return ret;
1406 1247
1407 if (dev_priv->ips.pwrctx) { 1248 if (dev_priv->pwrctx) {
1408 seq_printf(m, "power context "); 1249 seq_printf(m, "power context ");
1409 describe_obj(m, dev_priv->ips.pwrctx); 1250 describe_obj(m, dev_priv->pwrctx);
1410 seq_printf(m, "\n"); 1251 seq_printf(m, "\n");
1411 } 1252 }
1412 1253
1413 if (dev_priv->ips.renderctx) { 1254 if (dev_priv->renderctx) {
1414 seq_printf(m, "render context "); 1255 seq_printf(m, "render context ");
1415 describe_obj(m, dev_priv->ips.renderctx); 1256 describe_obj(m, dev_priv->renderctx);
1416 seq_printf(m, "\n"); 1257 seq_printf(m, "\n");
1417 } 1258 }
1418 1259
@@ -1426,162 +1267,18 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1426 struct drm_info_node *node = (struct drm_info_node *) m->private; 1267 struct drm_info_node *node = (struct drm_info_node *) m->private;
1427 struct drm_device *dev = node->minor->dev; 1268 struct drm_device *dev = node->minor->dev;
1428 struct drm_i915_private *dev_priv = dev->dev_private; 1269 struct drm_i915_private *dev_priv = dev->dev_private;
1429 unsigned forcewake_count;
1430 1270
1431 spin_lock_irq(&dev_priv->gt_lock); 1271 seq_printf(m, "forcewake count = %d\n",
1432 forcewake_count = dev_priv->forcewake_count; 1272 atomic_read(&dev_priv->forcewake_count));
1433 spin_unlock_irq(&dev_priv->gt_lock);
1434
1435 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1436 1273
1437 return 0; 1274 return 0;
1438} 1275}
1439 1276
1440static const char *swizzle_string(unsigned swizzle) 1277static int
1441{ 1278i915_wedged_open(struct inode *inode,
1442 switch(swizzle) { 1279 struct file *filp)
1443 case I915_BIT_6_SWIZZLE_NONE:
1444 return "none";
1445 case I915_BIT_6_SWIZZLE_9:
1446 return "bit9";
1447 case I915_BIT_6_SWIZZLE_9_10:
1448 return "bit9/bit10";
1449 case I915_BIT_6_SWIZZLE_9_11:
1450 return "bit9/bit11";
1451 case I915_BIT_6_SWIZZLE_9_10_11:
1452 return "bit9/bit10/bit11";
1453 case I915_BIT_6_SWIZZLE_9_17:
1454 return "bit9/bit17";
1455 case I915_BIT_6_SWIZZLE_9_10_17:
1456 return "bit9/bit10/bit17";
1457 case I915_BIT_6_SWIZZLE_UNKNOWN:
1458 return "unkown";
1459 }
1460
1461 return "bug";
1462}
1463
1464static int i915_swizzle_info(struct seq_file *m, void *data)
1465{
1466 struct drm_info_node *node = (struct drm_info_node *) m->private;
1467 struct drm_device *dev = node->minor->dev;
1468 struct drm_i915_private *dev_priv = dev->dev_private;
1469 int ret;
1470
1471 ret = mutex_lock_interruptible(&dev->struct_mutex);
1472 if (ret)
1473 return ret;
1474
1475 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1476 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1477 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1478 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1479
1480 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1481 seq_printf(m, "DDC = 0x%08x\n",
1482 I915_READ(DCC));
1483 seq_printf(m, "C0DRB3 = 0x%04x\n",
1484 I915_READ16(C0DRB3));
1485 seq_printf(m, "C1DRB3 = 0x%04x\n",
1486 I915_READ16(C1DRB3));
1487 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
1488 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1489 I915_READ(MAD_DIMM_C0));
1490 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1491 I915_READ(MAD_DIMM_C1));
1492 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1493 I915_READ(MAD_DIMM_C2));
1494 seq_printf(m, "TILECTL = 0x%08x\n",
1495 I915_READ(TILECTL));
1496 seq_printf(m, "ARB_MODE = 0x%08x\n",
1497 I915_READ(ARB_MODE));
1498 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1499 I915_READ(DISP_ARB_CTL));
1500 }
1501 mutex_unlock(&dev->struct_mutex);
1502
1503 return 0;
1504}
1505
1506static int i915_ppgtt_info(struct seq_file *m, void *data)
1507{
1508 struct drm_info_node *node = (struct drm_info_node *) m->private;
1509 struct drm_device *dev = node->minor->dev;
1510 struct drm_i915_private *dev_priv = dev->dev_private;
1511 struct intel_ring_buffer *ring;
1512 int i, ret;
1513
1514
1515 ret = mutex_lock_interruptible(&dev->struct_mutex);
1516 if (ret)
1517 return ret;
1518 if (INTEL_INFO(dev)->gen == 6)
1519 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1520
1521 for_each_ring(ring, dev_priv, i) {
1522 seq_printf(m, "%s\n", ring->name);
1523 if (INTEL_INFO(dev)->gen == 7)
1524 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1525 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1526 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1527 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1528 }
1529 if (dev_priv->mm.aliasing_ppgtt) {
1530 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1531
1532 seq_printf(m, "aliasing PPGTT:\n");
1533 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1534 }
1535 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1536 mutex_unlock(&dev->struct_mutex);
1537
1538 return 0;
1539}
1540
1541static int i915_dpio_info(struct seq_file *m, void *data)
1542{ 1280{
1543 struct drm_info_node *node = (struct drm_info_node *) m->private; 1281 filp->private_data = inode->i_private;
1544 struct drm_device *dev = node->minor->dev;
1545 struct drm_i915_private *dev_priv = dev->dev_private;
1546 int ret;
1547
1548
1549 if (!IS_VALLEYVIEW(dev)) {
1550 seq_printf(m, "unsupported\n");
1551 return 0;
1552 }
1553
1554 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1555 if (ret)
1556 return ret;
1557
1558 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1559
1560 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1561 intel_dpio_read(dev_priv, _DPIO_DIV_A));
1562 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1563 intel_dpio_read(dev_priv, _DPIO_DIV_B));
1564
1565 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1566 intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
1567 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1568 intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
1569
1570 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1571 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
1572 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1573 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1574
1575 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
1576 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
1577 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
1578 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
1579
1580 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1581 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1582
1583 mutex_unlock(&dev->mode_config.mutex);
1584
1585 return 0; 1282 return 0;
1586} 1283}
1587 1284
@@ -1596,12 +1293,12 @@ i915_wedged_read(struct file *filp,
1596 char buf[80]; 1293 char buf[80];
1597 int len; 1294 int len;
1598 1295
1599 len = snprintf(buf, sizeof(buf), 1296 len = snprintf(buf, sizeof (buf),
1600 "wedged : %d\n", 1297 "wedged : %d\n",
1601 atomic_read(&dev_priv->mm.wedged)); 1298 atomic_read(&dev_priv->mm.wedged));
1602 1299
1603 if (len > sizeof(buf)) 1300 if (len > sizeof (buf))
1604 len = sizeof(buf); 1301 len = sizeof (buf);
1605 1302
1606 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1303 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1607} 1304}
@@ -1617,7 +1314,7 @@ i915_wedged_write(struct file *filp,
1617 int val = 1; 1314 int val = 1;
1618 1315
1619 if (cnt > 0) { 1316 if (cnt > 0) {
1620 if (cnt > sizeof(buf) - 1) 1317 if (cnt > sizeof (buf) - 1)
1621 return -EINVAL; 1318 return -EINVAL;
1622 1319
1623 if (copy_from_user(buf, ubuf, cnt)) 1320 if (copy_from_user(buf, ubuf, cnt))
@@ -1635,74 +1332,20 @@ i915_wedged_write(struct file *filp,
1635 1332
1636static const struct file_operations i915_wedged_fops = { 1333static const struct file_operations i915_wedged_fops = {
1637 .owner = THIS_MODULE, 1334 .owner = THIS_MODULE,
1638 .open = simple_open, 1335 .open = i915_wedged_open,
1639 .read = i915_wedged_read, 1336 .read = i915_wedged_read,
1640 .write = i915_wedged_write, 1337 .write = i915_wedged_write,
1641 .llseek = default_llseek, 1338 .llseek = default_llseek,
1642}; 1339};
1643 1340
1644static ssize_t 1341static int
1645i915_ring_stop_read(struct file *filp, 1342i915_max_freq_open(struct inode *inode,
1646 char __user *ubuf, 1343 struct file *filp)
1647 size_t max,
1648 loff_t *ppos)
1649{
1650 struct drm_device *dev = filp->private_data;
1651 drm_i915_private_t *dev_priv = dev->dev_private;
1652 char buf[20];
1653 int len;
1654
1655 len = snprintf(buf, sizeof(buf),
1656 "0x%08x\n", dev_priv->stop_rings);
1657
1658 if (len > sizeof(buf))
1659 len = sizeof(buf);
1660
1661 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1662}
1663
1664static ssize_t
1665i915_ring_stop_write(struct file *filp,
1666 const char __user *ubuf,
1667 size_t cnt,
1668 loff_t *ppos)
1669{ 1344{
1670 struct drm_device *dev = filp->private_data; 1345 filp->private_data = inode->i_private;
1671 struct drm_i915_private *dev_priv = dev->dev_private; 1346 return 0;
1672 char buf[20];
1673 int val = 0, ret;
1674
1675 if (cnt > 0) {
1676 if (cnt > sizeof(buf) - 1)
1677 return -EINVAL;
1678
1679 if (copy_from_user(buf, ubuf, cnt))
1680 return -EFAULT;
1681 buf[cnt] = 0;
1682
1683 val = simple_strtoul(buf, NULL, 0);
1684 }
1685
1686 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1687
1688 ret = mutex_lock_interruptible(&dev->struct_mutex);
1689 if (ret)
1690 return ret;
1691
1692 dev_priv->stop_rings = val;
1693 mutex_unlock(&dev->struct_mutex);
1694
1695 return cnt;
1696} 1347}
1697 1348
1698static const struct file_operations i915_ring_stop_fops = {
1699 .owner = THIS_MODULE,
1700 .open = simple_open,
1701 .read = i915_ring_stop_read,
1702 .write = i915_ring_stop_write,
1703 .llseek = default_llseek,
1704};
1705
1706static ssize_t 1349static ssize_t
1707i915_max_freq_read(struct file *filp, 1350i915_max_freq_read(struct file *filp,
1708 char __user *ubuf, 1351 char __user *ubuf,
@@ -1712,21 +1355,13 @@ i915_max_freq_read(struct file *filp,
1712 struct drm_device *dev = filp->private_data; 1355 struct drm_device *dev = filp->private_data;
1713 drm_i915_private_t *dev_priv = dev->dev_private; 1356 drm_i915_private_t *dev_priv = dev->dev_private;
1714 char buf[80]; 1357 char buf[80];
1715 int len, ret; 1358 int len;
1716
1717 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1718 return -ENODEV;
1719
1720 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1721 if (ret)
1722 return ret;
1723 1359
1724 len = snprintf(buf, sizeof(buf), 1360 len = snprintf(buf, sizeof (buf),
1725 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1361 "max freq: %d\n", dev_priv->max_delay * 50);
1726 mutex_unlock(&dev_priv->rps.hw_lock);
1727 1362
1728 if (len > sizeof(buf)) 1363 if (len > sizeof (buf))
1729 len = sizeof(buf); 1364 len = sizeof (buf);
1730 1365
1731 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1366 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1732} 1367}
@@ -1740,13 +1375,10 @@ i915_max_freq_write(struct file *filp,
1740 struct drm_device *dev = filp->private_data; 1375 struct drm_device *dev = filp->private_data;
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1376 struct drm_i915_private *dev_priv = dev->dev_private;
1742 char buf[20]; 1377 char buf[20];
1743 int val = 1, ret; 1378 int val = 1;
1744
1745 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1746 return -ENODEV;
1747 1379
1748 if (cnt > 0) { 1380 if (cnt > 0) {
1749 if (cnt > sizeof(buf) - 1) 1381 if (cnt > sizeof (buf) - 1)
1750 return -EINVAL; 1382 return -EINVAL;
1751 1383
1752 if (copy_from_user(buf, ubuf, cnt)) 1384 if (copy_from_user(buf, ubuf, cnt))
@@ -1758,104 +1390,33 @@ i915_max_freq_write(struct file *filp,
1758 1390
1759 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1391 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1760 1392
1761 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1762 if (ret)
1763 return ret;
1764
1765 /* 1393 /*
1766 * Turbo will still be enabled, but won't go above the set value. 1394 * Turbo will still be enabled, but won't go above the set value.
1767 */ 1395 */
1768 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1396 dev_priv->max_delay = val / 50;
1769 1397
1770 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1398 gen6_set_rps(dev, val / 50);
1771 mutex_unlock(&dev_priv->rps.hw_lock);
1772 1399
1773 return cnt; 1400 return cnt;
1774} 1401}
1775 1402
1776static const struct file_operations i915_max_freq_fops = { 1403static const struct file_operations i915_max_freq_fops = {
1777 .owner = THIS_MODULE, 1404 .owner = THIS_MODULE,
1778 .open = simple_open, 1405 .open = i915_max_freq_open,
1779 .read = i915_max_freq_read, 1406 .read = i915_max_freq_read,
1780 .write = i915_max_freq_write, 1407 .write = i915_max_freq_write,
1781 .llseek = default_llseek, 1408 .llseek = default_llseek,
1782}; 1409};
1783 1410
1784static ssize_t 1411static int
1785i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, 1412i915_cache_sharing_open(struct inode *inode,
1786 loff_t *ppos) 1413 struct file *filp)
1787{ 1414{
1788 struct drm_device *dev = filp->private_data; 1415 filp->private_data = inode->i_private;
1789 drm_i915_private_t *dev_priv = dev->dev_private; 1416 return 0;
1790 char buf[80];
1791 int len, ret;
1792
1793 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1794 return -ENODEV;
1795
1796 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1797 if (ret)
1798 return ret;
1799
1800 len = snprintf(buf, sizeof(buf),
1801 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1802 mutex_unlock(&dev_priv->rps.hw_lock);
1803
1804 if (len > sizeof(buf))
1805 len = sizeof(buf);
1806
1807 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1808} 1417}
1809 1418
1810static ssize_t 1419static ssize_t
1811i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1812 loff_t *ppos)
1813{
1814 struct drm_device *dev = filp->private_data;
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1816 char buf[20];
1817 int val = 1, ret;
1818
1819 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1820 return -ENODEV;
1821
1822 if (cnt > 0) {
1823 if (cnt > sizeof(buf) - 1)
1824 return -EINVAL;
1825
1826 if (copy_from_user(buf, ubuf, cnt))
1827 return -EFAULT;
1828 buf[cnt] = 0;
1829
1830 val = simple_strtoul(buf, NULL, 0);
1831 }
1832
1833 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1834
1835 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1836 if (ret)
1837 return ret;
1838
1839 /*
1840 * Turbo will still be enabled, but won't go below the set value.
1841 */
1842 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
1843
1844 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1845 mutex_unlock(&dev_priv->rps.hw_lock);
1846
1847 return cnt;
1848}
1849
1850static const struct file_operations i915_min_freq_fops = {
1851 .owner = THIS_MODULE,
1852 .open = simple_open,
1853 .read = i915_min_freq_read,
1854 .write = i915_min_freq_write,
1855 .llseek = default_llseek,
1856};
1857
1858static ssize_t
1859i915_cache_sharing_read(struct file *filp, 1420i915_cache_sharing_read(struct file *filp,
1860 char __user *ubuf, 1421 char __user *ubuf,
1861 size_t max, 1422 size_t max,
@@ -1865,24 +1426,18 @@ i915_cache_sharing_read(struct file *filp,
1865 drm_i915_private_t *dev_priv = dev->dev_private; 1426 drm_i915_private_t *dev_priv = dev->dev_private;
1866 char buf[80]; 1427 char buf[80];
1867 u32 snpcr; 1428 u32 snpcr;
1868 int len, ret; 1429 int len;
1869
1870 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1871 return -ENODEV;
1872
1873 ret = mutex_lock_interruptible(&dev->struct_mutex);
1874 if (ret)
1875 return ret;
1876 1430
1431 mutex_lock(&dev_priv->dev->struct_mutex);
1877 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1432 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1878 mutex_unlock(&dev_priv->dev->struct_mutex); 1433 mutex_unlock(&dev_priv->dev->struct_mutex);
1879 1434
1880 len = snprintf(buf, sizeof(buf), 1435 len = snprintf(buf, sizeof (buf),
1881 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1436 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
1882 GEN6_MBC_SNPCR_SHIFT); 1437 GEN6_MBC_SNPCR_SHIFT);
1883 1438
1884 if (len > sizeof(buf)) 1439 if (len > sizeof (buf))
1885 len = sizeof(buf); 1440 len = sizeof (buf);
1886 1441
1887 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1442 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1888} 1443}
@@ -1899,11 +1454,8 @@ i915_cache_sharing_write(struct file *filp,
1899 u32 snpcr; 1454 u32 snpcr;
1900 int val = 1; 1455 int val = 1;
1901 1456
1902 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1903 return -ENODEV;
1904
1905 if (cnt > 0) { 1457 if (cnt > 0) {
1906 if (cnt > sizeof(buf) - 1) 1458 if (cnt > sizeof (buf) - 1)
1907 return -EINVAL; 1459 return -EINVAL;
1908 1460
1909 if (copy_from_user(buf, ubuf, cnt)) 1461 if (copy_from_user(buf, ubuf, cnt))
@@ -1929,7 +1481,7 @@ i915_cache_sharing_write(struct file *filp,
1929 1481
1930static const struct file_operations i915_cache_sharing_fops = { 1482static const struct file_operations i915_cache_sharing_fops = {
1931 .owner = THIS_MODULE, 1483 .owner = THIS_MODULE,
1932 .open = simple_open, 1484 .open = i915_cache_sharing_open,
1933 .read = i915_cache_sharing_read, 1485 .read = i915_cache_sharing_read,
1934 .write = i915_cache_sharing_write, 1486 .write = i915_cache_sharing_write,
1935 .llseek = default_llseek, 1487 .llseek = default_llseek,
@@ -1953,36 +1505,62 @@ drm_add_fake_info_node(struct drm_minor *minor,
1953 node->minor = minor; 1505 node->minor = minor;
1954 node->dent = ent; 1506 node->dent = ent;
1955 node->info_ent = (void *) key; 1507 node->info_ent = (void *) key;
1956 1508 list_add(&node->list, &minor->debugfs_nodes.list);
1957 mutex_lock(&minor->debugfs_lock);
1958 list_add(&node->list, &minor->debugfs_list);
1959 mutex_unlock(&minor->debugfs_lock);
1960 1509
1961 return 0; 1510 return 0;
1962} 1511}
1963 1512
1513static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1514{
1515 struct drm_device *dev = minor->dev;
1516 struct dentry *ent;
1517
1518 ent = debugfs_create_file("i915_wedged",
1519 S_IRUGO | S_IWUSR,
1520 root, dev,
1521 &i915_wedged_fops);
1522 if (IS_ERR(ent))
1523 return PTR_ERR(ent);
1524
1525 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1526}
1527
1964static int i915_forcewake_open(struct inode *inode, struct file *file) 1528static int i915_forcewake_open(struct inode *inode, struct file *file)
1965{ 1529{
1966 struct drm_device *dev = inode->i_private; 1530 struct drm_device *dev = inode->i_private;
1967 struct drm_i915_private *dev_priv = dev->dev_private; 1531 struct drm_i915_private *dev_priv = dev->dev_private;
1532 int ret;
1968 1533
1969 if (INTEL_INFO(dev)->gen < 6) 1534 if (!IS_GEN6(dev))
1970 return 0; 1535 return 0;
1971 1536
1537 ret = mutex_lock_interruptible(&dev->struct_mutex);
1538 if (ret)
1539 return ret;
1972 gen6_gt_force_wake_get(dev_priv); 1540 gen6_gt_force_wake_get(dev_priv);
1541 mutex_unlock(&dev->struct_mutex);
1973 1542
1974 return 0; 1543 return 0;
1975} 1544}
1976 1545
1977static int i915_forcewake_release(struct inode *inode, struct file *file) 1546int i915_forcewake_release(struct inode *inode, struct file *file)
1978{ 1547{
1979 struct drm_device *dev = inode->i_private; 1548 struct drm_device *dev = inode->i_private;
1980 struct drm_i915_private *dev_priv = dev->dev_private; 1549 struct drm_i915_private *dev_priv = dev->dev_private;
1981 1550
1982 if (INTEL_INFO(dev)->gen < 6) 1551 if (!IS_GEN6(dev))
1983 return 0; 1552 return 0;
1984 1553
1554 /*
1555 * It's bad that we can potentially hang userspace if struct_mutex gets
1556 * forever stuck. However, if we cannot acquire this lock it means that
1557 * almost certainly the driver has hung, is not unload-able. Therefore
1558 * hanging here is probably a minor inconvenience not to be seen my
1559 * almost every user.
1560 */
1561 mutex_lock(&dev->struct_mutex);
1985 gen6_gt_force_wake_put(dev_priv); 1562 gen6_gt_force_wake_put(dev_priv);
1563 mutex_unlock(&dev->struct_mutex);
1986 1564
1987 return 0; 1565 return 0;
1988} 1566}
@@ -2008,31 +1586,45 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2008 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1586 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2009} 1587}
2010 1588
2011static int i915_debugfs_create(struct dentry *root, 1589static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
2012 struct drm_minor *minor,
2013 const char *name,
2014 const struct file_operations *fops)
2015{ 1590{
2016 struct drm_device *dev = minor->dev; 1591 struct drm_device *dev = minor->dev;
2017 struct dentry *ent; 1592 struct dentry *ent;
2018 1593
2019 ent = debugfs_create_file(name, 1594 ent = debugfs_create_file("i915_max_freq",
2020 S_IRUGO | S_IWUSR, 1595 S_IRUGO | S_IWUSR,
2021 root, dev, 1596 root, dev,
2022 fops); 1597 &i915_max_freq_fops);
2023 if (IS_ERR(ent)) 1598 if (IS_ERR(ent))
2024 return PTR_ERR(ent); 1599 return PTR_ERR(ent);
2025 1600
2026 return drm_add_fake_info_node(minor, ent, fops); 1601 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
1602}
1603
1604static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
1605{
1606 struct drm_device *dev = minor->dev;
1607 struct dentry *ent;
1608
1609 ent = debugfs_create_file("i915_cache_sharing",
1610 S_IRUGO | S_IWUSR,
1611 root, dev,
1612 &i915_cache_sharing_fops);
1613 if (IS_ERR(ent))
1614 return PTR_ERR(ent);
1615
1616 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
2027} 1617}
2028 1618
2029static struct drm_info_list i915_debugfs_list[] = { 1619static struct drm_info_list i915_debugfs_list[] = {
2030 {"i915_capabilities", i915_capabilities, 0}, 1620 {"i915_capabilities", i915_capabilities, 0},
2031 {"i915_gem_objects", i915_gem_object_info, 0}, 1621 {"i915_gem_objects", i915_gem_object_info, 0},
2032 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1622 {"i915_gem_gtt", i915_gem_gtt_info, 0},
2033 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2034 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1623 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1624 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
2035 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1625 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1626 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1627 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
2036 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1628 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2037 {"i915_gem_request", i915_gem_request_info, 0}, 1629 {"i915_gem_request", i915_gem_request_info, 0},
2038 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1630 {"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2041,6 +1633,14 @@ static struct drm_info_list i915_debugfs_list[] = {
2041 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1633 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2042 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1634 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2043 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1635 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1636 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1637 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1638 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1639 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1640 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1641 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1642 {"i915_batchbuffers", i915_batchbuffer_info, 0},
1643 {"i915_error_state", i915_error_state, 0},
2044 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1644 {"i915_rstdby_delays", i915_rstdby_delays, 0},
2045 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1645 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2046 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1646 {"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -2055,9 +1655,6 @@ static struct drm_info_list i915_debugfs_list[] = {
2055 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1655 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2056 {"i915_context_status", i915_context_status, 0}, 1656 {"i915_context_status", i915_context_status, 0},
2057 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1657 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
2058 {"i915_swizzle_info", i915_swizzle_info, 0},
2059 {"i915_ppgtt_info", i915_ppgtt_info, 0},
2060 {"i915_dpio", i915_dpio_info, 0},
2061}; 1658};
2062#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1659#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2063 1660
@@ -2065,43 +1662,17 @@ int i915_debugfs_init(struct drm_minor *minor)
2065{ 1662{
2066 int ret; 1663 int ret;
2067 1664
2068 ret = i915_debugfs_create(minor->debugfs_root, minor, 1665 ret = i915_wedged_create(minor->debugfs_root, minor);
2069 "i915_wedged",
2070 &i915_wedged_fops);
2071 if (ret) 1666 if (ret)
2072 return ret; 1667 return ret;
2073 1668
2074 ret = i915_forcewake_create(minor->debugfs_root, minor); 1669 ret = i915_forcewake_create(minor->debugfs_root, minor);
2075 if (ret) 1670 if (ret)
2076 return ret; 1671 return ret;
2077 1672 ret = i915_max_freq_create(minor->debugfs_root, minor);
2078 ret = i915_debugfs_create(minor->debugfs_root, minor,
2079 "i915_max_freq",
2080 &i915_max_freq_fops);
2081 if (ret)
2082 return ret;
2083
2084 ret = i915_debugfs_create(minor->debugfs_root, minor,
2085 "i915_min_freq",
2086 &i915_min_freq_fops);
2087 if (ret) 1673 if (ret)
2088 return ret; 1674 return ret;
2089 1675 ret = i915_cache_sharing_create(minor->debugfs_root, minor);
2090 ret = i915_debugfs_create(minor->debugfs_root, minor,
2091 "i915_cache_sharing",
2092 &i915_cache_sharing_fops);
2093 if (ret)
2094 return ret;
2095
2096 ret = i915_debugfs_create(minor->debugfs_root, minor,
2097 "i915_ring_stop",
2098 &i915_ring_stop_fops);
2099 if (ret)
2100 return ret;
2101
2102 ret = i915_debugfs_create(minor->debugfs_root, minor,
2103 "i915_error_state",
2104 &i915_error_state_fops);
2105 if (ret) 1676 if (ret)
2106 return ret; 1677 return ret;
2107 1678
@@ -2120,14 +1691,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
2120 1, minor); 1691 1, minor);
2121 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 1692 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
2122 1, minor); 1693 1, minor);
2123 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
2124 1, minor);
2125 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 1694 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2126 1, minor); 1695 1, minor);
2127 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2128 1, minor);
2129 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
2130 1, minor);
2131} 1696}
2132 1697
2133#endif /* CONFIG_DEBUG_FS */ 1698#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105..c72b590f7d8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,15 +26,15 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#include "drmP.h"
30 30#include "drm.h"
31#include <drm/drmP.h> 31#include "drm_crtc_helper.h"
32#include <drm/drm_crtc_helper.h> 32#include "drm_fb_helper.h"
33#include <drm/drm_fb_helper.h>
34#include "intel_drv.h" 33#include "intel_drv.h"
35#include <drm/i915_drm.h> 34#include "i915_drm.h"
36#include "i915_drv.h" 35#include "i915_drv.h"
37#include "i915_trace.h" 36#include "i915_trace.h"
37#include "../../../platform/x86/intel_ips.h"
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/vgaarb.h> 39#include <linux/vgaarb.h>
40#include <linux/acpi.h> 40#include <linux/acpi.h>
@@ -42,65 +42,42 @@
42#include <linux/vga_switcheroo.h> 42#include <linux/vga_switcheroo.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <acpi/video.h> 44#include <acpi/video.h>
45#include <asm/pat.h>
46
47#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
48 45
49#define BEGIN_LP_RING(n) \ 46static void i915_write_hws_pga(struct drm_device *dev)
50 intel_ring_begin(LP_RING(dev_priv), (n)) 47{
51 48 drm_i915_private_t *dev_priv = dev->dev_private;
52#define OUT_RING(x) \ 49 u32 addr;
53 intel_ring_emit(LP_RING(dev_priv), x)
54 50
55#define ADVANCE_LP_RING() \ 51 addr = dev_priv->status_page_dmah->busaddr;
56 intel_ring_advance(LP_RING(dev_priv)) 52 if (INTEL_INFO(dev)->gen >= 4)
53 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
54 I915_WRITE(HWS_PGA, addr);
55}
57 56
58/** 57/**
59 * Lock test for when it's just for synchronization of ring access. 58 * Sets up the hardware status page for devices that need a physical address
60 * 59 * in the register.
61 * In that case, we don't need to do it when GEM is initialized as nobody else
62 * has access to the ring.
63 */ 60 */
64#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ 61static int i915_init_phys_hws(struct drm_device *dev)
65 if (LP_RING(dev->dev_private)->obj == NULL) \
66 LOCK_TEST_WITH_RETURN(dev, file); \
67} while (0)
68
69static inline u32
70intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
71{
72 if (I915_NEED_GFX_HWS(dev_priv->dev))
73 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
74 else
75 return intel_read_status_page(LP_RING(dev_priv), reg);
76}
77
78#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
79#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
80#define I915_BREADCRUMB_INDEX 0x21
81
82void i915_update_dri1_breadcrumb(struct drm_device *dev)
83{ 62{
84 drm_i915_private_t *dev_priv = dev->dev_private; 63 drm_i915_private_t *dev_priv = dev->dev_private;
85 struct drm_i915_master_private *master_priv;
86 64
87 if (dev->primary->master) { 65 /* Program Hardware Status Page */
88 master_priv = dev->primary->master->driver_priv; 66 dev_priv->status_page_dmah =
89 if (master_priv->sarea_priv) 67 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
90 master_priv->sarea_priv->last_dispatch = 68
91 READ_BREADCRUMB(dev_priv); 69 if (!dev_priv->status_page_dmah) {
70 DRM_ERROR("Can not allocate hardware status page\n");
71 return -ENOMEM;
92 } 72 }
93}
94 73
95static void i915_write_hws_pga(struct drm_device *dev) 74 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
96{ 75 0, PAGE_SIZE);
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 u32 addr;
99 76
100 addr = dev_priv->status_page_dmah->busaddr; 77 i915_write_hws_pga(dev);
101 if (INTEL_INFO(dev)->gen >= 4) 78
102 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 79 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
103 I915_WRITE(HWS_PGA, addr); 80 return 0;
104} 81}
105 82
106/** 83/**
@@ -119,7 +96,7 @@ static void i915_free_hws(struct drm_device *dev)
119 96
120 if (ring->status_page.gfx_addr) { 97 if (ring->status_page.gfx_addr) {
121 ring->status_page.gfx_addr = 0; 98 ring->status_page.gfx_addr = 0;
122 iounmap(dev_priv->dri1.gfx_hws_cpu_addr); 99 drm_core_ioremapfree(&dev_priv->hws_map, dev);
123 } 100 }
124 101
125 /* Need to rewrite hardware status page */ 102 /* Need to rewrite hardware status page */
@@ -141,7 +118,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
141 118
142 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 119 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
143 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 120 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
144 ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE); 121 ring->space = ring->head - (ring->tail + 8);
145 if (ring->space < 0) 122 if (ring->space < 0)
146 ring->space += ring->size; 123 ring->space += ring->size;
147 124
@@ -208,16 +185,16 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
208 } 185 }
209 } 186 }
210 187
211 dev_priv->dri1.cpp = init->cpp; 188 dev_priv->cpp = init->cpp;
212 dev_priv->dri1.back_offset = init->back_offset; 189 dev_priv->back_offset = init->back_offset;
213 dev_priv->dri1.front_offset = init->front_offset; 190 dev_priv->front_offset = init->front_offset;
214 dev_priv->dri1.current_page = 0; 191 dev_priv->current_page = 0;
215 if (master_priv->sarea_priv) 192 if (master_priv->sarea_priv)
216 master_priv->sarea_priv->pf_current_page = 0; 193 master_priv->sarea_priv->pf_current_page = 0;
217 194
218 /* Allow hardware batchbuffers unless told otherwise. 195 /* Allow hardware batchbuffers unless told otherwise.
219 */ 196 */
220 dev_priv->dri1.allow_batchbuffer = 1; 197 dev_priv->allow_batchbuffer = 1;
221 198
222 return 0; 199 return 0;
223} 200}
@@ -229,7 +206,7 @@ static int i915_dma_resume(struct drm_device * dev)
229 206
230 DRM_DEBUG_DRIVER("%s\n", __func__); 207 DRM_DEBUG_DRIVER("%s\n", __func__);
231 208
232 if (ring->virtual_start == NULL) { 209 if (ring->map.handle == NULL) {
233 DRM_ERROR("can not ioremap virtual address for" 210 DRM_ERROR("can not ioremap virtual address for"
234 " ring buffer\n"); 211 " ring buffer\n");
235 return -ENOMEM; 212 return -ENOMEM;
@@ -258,9 +235,6 @@ static int i915_dma_init(struct drm_device *dev, void *data,
258 drm_i915_init_t *init = data; 235 drm_i915_init_t *init = data;
259 int retcode = 0; 236 int retcode = 0;
260 237
261 if (drm_core_check_feature(dev, DRIVER_MODESET))
262 return -ENODEV;
263
264 switch (init->func) { 238 switch (init->func) {
265 case I915_INIT_DMA: 239 case I915_INIT_DMA:
266 retcode = i915_initialize(dev, init); 240 retcode = i915_initialize(dev, init);
@@ -425,16 +399,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
425 drm_i915_private_t *dev_priv = dev->dev_private; 399 drm_i915_private_t *dev_priv = dev->dev_private;
426 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 400 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
427 401
428 dev_priv->dri1.counter++; 402 dev_priv->counter++;
429 if (dev_priv->dri1.counter > 0x7FFFFFFFUL) 403 if (dev_priv->counter > 0x7FFFFFFFUL)
430 dev_priv->dri1.counter = 0; 404 dev_priv->counter = 0;
431 if (master_priv->sarea_priv) 405 if (master_priv->sarea_priv)
432 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; 406 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
433 407
434 if (BEGIN_LP_RING(4) == 0) { 408 if (BEGIN_LP_RING(4) == 0) {
435 OUT_RING(MI_STORE_DWORD_INDEX); 409 OUT_RING(MI_STORE_DWORD_INDEX);
436 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 410 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
437 OUT_RING(dev_priv->dri1.counter); 411 OUT_RING(dev_priv->counter);
438 OUT_RING(0); 412 OUT_RING(0);
439 ADVANCE_LP_RING(); 413 ADVANCE_LP_RING();
440 } 414 }
@@ -548,7 +522,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
548 522
549 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 523 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
550 __func__, 524 __func__,
551 dev_priv->dri1.current_page, 525 dev_priv->current_page,
552 master_priv->sarea_priv->pf_current_page); 526 master_priv->sarea_priv->pf_current_page);
553 527
554 i915_kernel_lost_context(dev); 528 i915_kernel_lost_context(dev);
@@ -562,12 +536,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
562 536
563 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 537 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
564 OUT_RING(0); 538 OUT_RING(0);
565 if (dev_priv->dri1.current_page == 0) { 539 if (dev_priv->current_page == 0) {
566 OUT_RING(dev_priv->dri1.back_offset); 540 OUT_RING(dev_priv->back_offset);
567 dev_priv->dri1.current_page = 1; 541 dev_priv->current_page = 1;
568 } else { 542 } else {
569 OUT_RING(dev_priv->dri1.front_offset); 543 OUT_RING(dev_priv->front_offset);
570 dev_priv->dri1.current_page = 0; 544 dev_priv->current_page = 0;
571 } 545 }
572 OUT_RING(0); 546 OUT_RING(0);
573 547
@@ -576,24 +550,26 @@ static int i915_dispatch_flip(struct drm_device * dev)
576 550
577 ADVANCE_LP_RING(); 551 ADVANCE_LP_RING();
578 552
579 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; 553 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
580 554
581 if (BEGIN_LP_RING(4) == 0) { 555 if (BEGIN_LP_RING(4) == 0) {
582 OUT_RING(MI_STORE_DWORD_INDEX); 556 OUT_RING(MI_STORE_DWORD_INDEX);
583 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 557 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
584 OUT_RING(dev_priv->dri1.counter); 558 OUT_RING(dev_priv->counter);
585 OUT_RING(0); 559 OUT_RING(0);
586 ADVANCE_LP_RING(); 560 ADVANCE_LP_RING();
587 } 561 }
588 562
589 master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; 563 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
590 return 0; 564 return 0;
591} 565}
592 566
593static int i915_quiescent(struct drm_device *dev) 567static int i915_quiescent(struct drm_device *dev)
594{ 568{
569 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
570
595 i915_kernel_lost_context(dev); 571 i915_kernel_lost_context(dev);
596 return intel_ring_idle(LP_RING(dev->dev_private)); 572 return intel_wait_ring_idle(ring);
597} 573}
598 574
599static int i915_flush_ioctl(struct drm_device *dev, void *data, 575static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -601,9 +577,6 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
601{ 577{
602 int ret; 578 int ret;
603 579
604 if (drm_core_check_feature(dev, DRIVER_MODESET))
605 return -ENODEV;
606
607 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 580 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
608 581
609 mutex_lock(&dev->struct_mutex); 582 mutex_lock(&dev->struct_mutex);
@@ -624,10 +597,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
624 int ret; 597 int ret;
625 struct drm_clip_rect *cliprects = NULL; 598 struct drm_clip_rect *cliprects = NULL;
626 599
627 if (drm_core_check_feature(dev, DRIVER_MODESET)) 600 if (!dev_priv->allow_batchbuffer) {
628 return -ENODEV;
629
630 if (!dev_priv->dri1.allow_batchbuffer) {
631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 601 DRM_ERROR("Batchbuffer ioctl disabled\n");
632 return -EINVAL; 602 return -EINVAL;
633 } 603 }
@@ -684,9 +654,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
684 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 654 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
685 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 655 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
686 656
687 if (drm_core_check_feature(dev, DRIVER_MODESET))
688 return -ENODEV;
689
690 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 657 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
691 658
692 if (cmdbuf->num_cliprects < 0) 659 if (cmdbuf->num_cliprects < 0)
@@ -738,166 +705,11 @@ fail_batch_free:
738 return ret; 705 return ret;
739} 706}
740 707
741static int i915_emit_irq(struct drm_device * dev)
742{
743 drm_i915_private_t *dev_priv = dev->dev_private;
744 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
745
746 i915_kernel_lost_context(dev);
747
748 DRM_DEBUG_DRIVER("\n");
749
750 dev_priv->dri1.counter++;
751 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
752 dev_priv->dri1.counter = 1;
753 if (master_priv->sarea_priv)
754 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
755
756 if (BEGIN_LP_RING(4) == 0) {
757 OUT_RING(MI_STORE_DWORD_INDEX);
758 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
759 OUT_RING(dev_priv->dri1.counter);
760 OUT_RING(MI_USER_INTERRUPT);
761 ADVANCE_LP_RING();
762 }
763
764 return dev_priv->dri1.counter;
765}
766
767static int i915_wait_irq(struct drm_device * dev, int irq_nr)
768{
769 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
770 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
771 int ret = 0;
772 struct intel_ring_buffer *ring = LP_RING(dev_priv);
773
774 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
775 READ_BREADCRUMB(dev_priv));
776
777 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
778 if (master_priv->sarea_priv)
779 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
780 return 0;
781 }
782
783 if (master_priv->sarea_priv)
784 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
785
786 if (ring->irq_get(ring)) {
787 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
788 READ_BREADCRUMB(dev_priv) >= irq_nr);
789 ring->irq_put(ring);
790 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
791 ret = -EBUSY;
792
793 if (ret == -EBUSY) {
794 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
795 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
796 }
797
798 return ret;
799}
800
801/* Needs the lock as it touches the ring.
802 */
803static int i915_irq_emit(struct drm_device *dev, void *data,
804 struct drm_file *file_priv)
805{
806 drm_i915_private_t *dev_priv = dev->dev_private;
807 drm_i915_irq_emit_t *emit = data;
808 int result;
809
810 if (drm_core_check_feature(dev, DRIVER_MODESET))
811 return -ENODEV;
812
813 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
814 DRM_ERROR("called with no initialization\n");
815 return -EINVAL;
816 }
817
818 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
819
820 mutex_lock(&dev->struct_mutex);
821 result = i915_emit_irq(dev);
822 mutex_unlock(&dev->struct_mutex);
823
824 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
825 DRM_ERROR("copy_to_user\n");
826 return -EFAULT;
827 }
828
829 return 0;
830}
831
832/* Doesn't need the hardware lock.
833 */
834static int i915_irq_wait(struct drm_device *dev, void *data,
835 struct drm_file *file_priv)
836{
837 drm_i915_private_t *dev_priv = dev->dev_private;
838 drm_i915_irq_wait_t *irqwait = data;
839
840 if (drm_core_check_feature(dev, DRIVER_MODESET))
841 return -ENODEV;
842
843 if (!dev_priv) {
844 DRM_ERROR("called with no initialization\n");
845 return -EINVAL;
846 }
847
848 return i915_wait_irq(dev, irqwait->irq_seq);
849}
850
851static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
852 struct drm_file *file_priv)
853{
854 drm_i915_private_t *dev_priv = dev->dev_private;
855 drm_i915_vblank_pipe_t *pipe = data;
856
857 if (drm_core_check_feature(dev, DRIVER_MODESET))
858 return -ENODEV;
859
860 if (!dev_priv) {
861 DRM_ERROR("called with no initialization\n");
862 return -EINVAL;
863 }
864
865 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
866
867 return 0;
868}
869
870/**
871 * Schedule buffer swap at given vertical blank.
872 */
873static int i915_vblank_swap(struct drm_device *dev, void *data,
874 struct drm_file *file_priv)
875{
876 /* The delayed swap mechanism was fundamentally racy, and has been
877 * removed. The model was that the client requested a delayed flip/swap
878 * from the kernel, then waited for vblank before continuing to perform
879 * rendering. The problem was that the kernel might wake the client
880 * up before it dispatched the vblank swap (since the lock has to be
881 * held while touching the ringbuffer), in which case the client would
882 * clear and start the next frame before the swap occurred, and
883 * flicker would occur in addition to likely missing the vblank.
884 *
885 * In the absence of this ioctl, userland falls back to a correct path
886 * of waiting for a vblank, then dispatching the swap on its own.
887 * Context switching to userland and back is plenty fast enough for
888 * meeting the requirements of vblank swapping.
889 */
890 return -EINVAL;
891}
892
893static int i915_flip_bufs(struct drm_device *dev, void *data, 708static int i915_flip_bufs(struct drm_device *dev, void *data,
894 struct drm_file *file_priv) 709 struct drm_file *file_priv)
895{ 710{
896 int ret; 711 int ret;
897 712
898 if (drm_core_check_feature(dev, DRIVER_MODESET))
899 return -ENODEV;
900
901 DRM_DEBUG_DRIVER("%s\n", __func__); 713 DRM_DEBUG_DRIVER("%s\n", __func__);
902 714
903 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 715 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -926,7 +738,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
926 value = dev->pdev->irq ? 1 : 0; 738 value = dev->pdev->irq ? 1 : 0;
927 break; 739 break;
928 case I915_PARAM_ALLOW_BATCHBUFFER: 740 case I915_PARAM_ALLOW_BATCHBUFFER:
929 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; 741 value = dev_priv->allow_batchbuffer ? 1 : 0;
930 break; 742 break;
931 case I915_PARAM_LAST_DISPATCH: 743 case I915_PARAM_LAST_DISPATCH:
932 value = READ_BREADCRUMB(dev_priv); 744 value = READ_BREADCRUMB(dev_priv);
@@ -935,7 +747,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
935 value = dev->pci_device; 747 value = dev->pci_device;
936 break; 748 break;
937 case I915_PARAM_HAS_GEM: 749 case I915_PARAM_HAS_GEM:
938 value = 1; 750 value = dev_priv->has_gem;
939 break; 751 break;
940 case I915_PARAM_NUM_FENCES_AVAIL: 752 case I915_PARAM_NUM_FENCES_AVAIL:
941 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 753 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -948,13 +760,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
948 break; 760 break;
949 case I915_PARAM_HAS_EXECBUF2: 761 case I915_PARAM_HAS_EXECBUF2:
950 /* depends on GEM */ 762 /* depends on GEM */
951 value = 1; 763 value = dev_priv->has_gem;
952 break; 764 break;
953 case I915_PARAM_HAS_BSD: 765 case I915_PARAM_HAS_BSD:
954 value = intel_ring_initialized(&dev_priv->ring[VCS]); 766 value = HAS_BSD(dev);
955 break; 767 break;
956 case I915_PARAM_HAS_BLT: 768 case I915_PARAM_HAS_BLT:
957 value = intel_ring_initialized(&dev_priv->ring[BCS]); 769 value = HAS_BLT(dev);
958 break; 770 break;
959 case I915_PARAM_HAS_RELAXED_FENCING: 771 case I915_PARAM_HAS_RELAXED_FENCING:
960 value = 1; 772 value = 1;
@@ -968,30 +780,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
968 case I915_PARAM_HAS_RELAXED_DELTA: 780 case I915_PARAM_HAS_RELAXED_DELTA:
969 value = 1; 781 value = 1;
970 break; 782 break;
971 case I915_PARAM_HAS_GEN7_SOL_RESET:
972 value = 1;
973 break;
974 case I915_PARAM_HAS_LLC:
975 value = HAS_LLC(dev);
976 break;
977 case I915_PARAM_HAS_ALIASING_PPGTT:
978 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
979 break;
980 case I915_PARAM_HAS_WAIT_TIMEOUT:
981 value = 1;
982 break;
983 case I915_PARAM_HAS_SEMAPHORES:
984 value = i915_semaphore_is_enabled(dev);
985 break;
986 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
987 value = 1;
988 break;
989 case I915_PARAM_HAS_SECURE_BATCHES:
990 value = capable(CAP_SYS_ADMIN);
991 break;
992 case I915_PARAM_HAS_PINNED_BATCHES:
993 value = 1;
994 break;
995 default: 783 default:
996 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 784 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
997 param->param); 785 param->param);
@@ -1021,9 +809,10 @@ static int i915_setparam(struct drm_device *dev, void *data,
1021 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 809 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1022 break; 810 break;
1023 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 811 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
812 dev_priv->tex_lru_log_granularity = param->value;
1024 break; 813 break;
1025 case I915_SETPARAM_ALLOW_BATCHBUFFER: 814 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1026 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; 815 dev_priv->allow_batchbuffer = param->value;
1027 break; 816 break;
1028 case I915_SETPARAM_NUM_USED_FENCES: 817 case I915_SETPARAM_NUM_USED_FENCES:
1029 if (param->value > dev_priv->num_fence_regs || 818 if (param->value > dev_priv->num_fence_regs ||
@@ -1046,10 +835,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1046{ 835{
1047 drm_i915_private_t *dev_priv = dev->dev_private; 836 drm_i915_private_t *dev_priv = dev->dev_private;
1048 drm_i915_hws_addr_t *hws = data; 837 drm_i915_hws_addr_t *hws = data;
1049 struct intel_ring_buffer *ring; 838 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1050
1051 if (drm_core_check_feature(dev, DRIVER_MODESET))
1052 return -ENODEV;
1053 839
1054 if (!I915_NEED_GFX_HWS(dev)) 840 if (!I915_NEED_GFX_HWS(dev))
1055 return -EINVAL; 841 return -EINVAL;
@@ -1066,20 +852,25 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1066 852
1067 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 853 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1068 854
1069 ring = LP_RING(dev_priv);
1070 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 855 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1071 856
1072 dev_priv->dri1.gfx_hws_cpu_addr = 857 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
1073 ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); 858 dev_priv->hws_map.size = 4*1024;
1074 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { 859 dev_priv->hws_map.type = 0;
860 dev_priv->hws_map.flags = 0;
861 dev_priv->hws_map.mtrr = 0;
862
863 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
864 if (dev_priv->hws_map.handle == NULL) {
1075 i915_dma_cleanup(dev); 865 i915_dma_cleanup(dev);
1076 ring->status_page.gfx_addr = 0; 866 ring->status_page.gfx_addr = 0;
1077 DRM_ERROR("can not ioremap virtual address for" 867 DRM_ERROR("can not ioremap virtual address for"
1078 " G33 hw status page\n"); 868 " G33 hw status page\n");
1079 return -ENOMEM; 869 return -ENOMEM;
1080 } 870 }
1081 871 ring->status_page.page_addr =
1082 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); 872 (void __force __iomem *)dev_priv->hws_map.handle;
873 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
1083 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 874 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1084 875
1085 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 876 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1093,7 +884,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
1093{ 884{
1094 struct drm_i915_private *dev_priv = dev->dev_private; 885 struct drm_i915_private *dev_priv = dev->dev_private;
1095 886
1096 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 887 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
1097 if (!dev_priv->bridge_dev) { 888 if (!dev_priv->bridge_dev) {
1098 DRM_ERROR("bridge device not found\n"); 889 DRM_ERROR("bridge device not found\n");
1099 return -1; 890 return -1;
@@ -1215,6 +1006,133 @@ intel_teardown_mchbar(struct drm_device *dev)
1215 release_resource(&dev_priv->mch_res); 1006 release_resource(&dev_priv->mch_res);
1216} 1007}
1217 1008
1009#define PTE_ADDRESS_MASK 0xfffff000
1010#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1011#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
1012#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
1013#define PTE_MAPPING_TYPE_CACHED (3 << 1)
1014#define PTE_MAPPING_TYPE_MASK (3 << 1)
1015#define PTE_VALID (1 << 0)
1016
1017/**
1018 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
1019 * a physical one
1020 * @dev: drm device
1021 * @offset: address to translate
1022 *
1023 * Some chip functions require allocations from stolen space and need the
1024 * physical address of the memory in question.
1025 */
1026static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
1027{
1028 struct drm_i915_private *dev_priv = dev->dev_private;
1029 struct pci_dev *pdev = dev_priv->bridge_dev;
1030 u32 base;
1031
1032#if 0
1033 /* On the machines I have tested the Graphics Base of Stolen Memory
1034 * is unreliable, so compute the base by subtracting the stolen memory
1035 * from the Top of Low Usable DRAM which is where the BIOS places
1036 * the graphics stolen memory.
1037 */
1038 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1039 /* top 32bits are reserved = 0 */
1040 pci_read_config_dword(pdev, 0xA4, &base);
1041 } else {
1042 /* XXX presume 8xx is the same as i915 */
1043 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
1044 }
1045#else
1046 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1047 u16 val;
1048 pci_read_config_word(pdev, 0xb0, &val);
1049 base = val >> 4 << 20;
1050 } else {
1051 u8 val;
1052 pci_read_config_byte(pdev, 0x9c, &val);
1053 base = val >> 3 << 27;
1054 }
1055 base -= dev_priv->mm.gtt->stolen_size;
1056#endif
1057
1058 return base + offset;
1059}
1060
1061static void i915_warn_stolen(struct drm_device *dev)
1062{
1063 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
1064 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
1065}
1066
1067static void i915_setup_compression(struct drm_device *dev, int size)
1068{
1069 struct drm_i915_private *dev_priv = dev->dev_private;
1070 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1071 unsigned long cfb_base;
1072 unsigned long ll_base = 0;
1073
1074 /* Just in case the BIOS is doing something questionable. */
1075 intel_disable_fbc(dev);
1076
1077 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1078 if (compressed_fb)
1079 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1080 if (!compressed_fb)
1081 goto err;
1082
1083 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1084 if (!cfb_base)
1085 goto err_fb;
1086
1087 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1088 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1089 4096, 4096, 0);
1090 if (compressed_llb)
1091 compressed_llb = drm_mm_get_block(compressed_llb,
1092 4096, 4096);
1093 if (!compressed_llb)
1094 goto err_fb;
1095
1096 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1097 if (!ll_base)
1098 goto err_llb;
1099 }
1100
1101 dev_priv->cfb_size = size;
1102
1103 dev_priv->compressed_fb = compressed_fb;
1104 if (HAS_PCH_SPLIT(dev))
1105 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1106 else if (IS_GM45(dev)) {
1107 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1108 } else {
1109 I915_WRITE(FBC_CFB_BASE, cfb_base);
1110 I915_WRITE(FBC_LL_BASE, ll_base);
1111 dev_priv->compressed_llb = compressed_llb;
1112 }
1113
1114 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1115 cfb_base, ll_base, size >> 20);
1116 return;
1117
1118err_llb:
1119 drm_mm_put_block(compressed_llb);
1120err_fb:
1121 drm_mm_put_block(compressed_fb);
1122err:
1123 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1124 i915_warn_stolen(dev);
1125}
1126
1127static void i915_cleanup_compression(struct drm_device *dev)
1128{
1129 struct drm_i915_private *dev_priv = dev->dev_private;
1130
1131 drm_mm_put_block(dev_priv->compressed_fb);
1132 if (dev_priv->compressed_llb)
1133 drm_mm_put_block(dev_priv->compressed_llb);
1134}
1135
1218/* true = enable decode, false = disable decoder */ 1136/* true = enable decode, false = disable decoder */
1219static unsigned int i915_vga_set_decode(void *cookie, bool state) 1137static unsigned int i915_vga_set_decode(void *cookie, bool state)
1220{ 1138{
@@ -1233,14 +1151,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1233 struct drm_device *dev = pci_get_drvdata(pdev); 1151 struct drm_device *dev = pci_get_drvdata(pdev);
1234 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1152 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1235 if (state == VGA_SWITCHEROO_ON) { 1153 if (state == VGA_SWITCHEROO_ON) {
1236 pr_info("switched on\n"); 1154 printk(KERN_INFO "i915: switched on\n");
1237 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1155 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1238 /* i915 resume handler doesn't set to D0 */ 1156 /* i915 resume handler doesn't set to D0 */
1239 pci_set_power_state(dev->pdev, PCI_D0); 1157 pci_set_power_state(dev->pdev, PCI_D0);
1240 i915_resume(dev); 1158 i915_resume(dev);
1241 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1159 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1242 } else { 1160 } else {
1243 pr_err("switched off\n"); 1161 printk(KERN_ERR "i915: switched off\n");
1244 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1162 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1245 i915_suspend(dev, pmm); 1163 i915_suspend(dev, pmm);
1246 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1164 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1258,11 +1176,54 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1258 return can_switch; 1176 return can_switch;
1259} 1177}
1260 1178
1261static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { 1179static int i915_load_gem_init(struct drm_device *dev)
1262 .set_gpu_state = i915_switcheroo_set_state, 1180{
1263 .reprobe = NULL, 1181 struct drm_i915_private *dev_priv = dev->dev_private;
1264 .can_switch = i915_switcheroo_can_switch, 1182 unsigned long prealloc_size, gtt_size, mappable_size;
1265}; 1183 int ret;
1184
1185 prealloc_size = dev_priv->mm.gtt->stolen_size;
1186 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1187 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1188
1189 /* Basic memrange allocator for stolen space */
1190 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1191
1192 /* Let GEM Manage all of the aperture.
1193 *
1194 * However, leave one page at the end still bound to the scratch page.
1195 * There are a number of places where the hardware apparently
1196 * prefetches past the end of the object, and we've seen multiple
1197 * hangs with the GPU head pointer stuck in a batchbuffer bound
1198 * at the last page of the aperture. One page should be enough to
1199 * keep any prefetching inside of the aperture.
1200 */
1201 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1202
1203 mutex_lock(&dev->struct_mutex);
1204 ret = i915_gem_init_ringbuffer(dev);
1205 mutex_unlock(&dev->struct_mutex);
1206 if (ret)
1207 return ret;
1208
1209 /* Try to set up FBC with a reasonable compressed buffer size */
1210 if (I915_HAS_FBC(dev) && i915_powersave) {
1211 int cfb_size;
1212
1213 /* Leave 1M for line length buffer & misc. */
1214
1215 /* Try to get a 32M buffer... */
1216 if (prealloc_size > (36*1024*1024))
1217 cfb_size = 32*1024*1024;
1218 else /* fall back to 7/8 of the stolen space */
1219 cfb_size = prealloc_size * 7 / 8;
1220 i915_setup_compression(dev, cfb_size);
1221 }
1222
1223 /* Allow hardware batchbuffers unless told otherwise. */
1224 dev_priv->allow_batchbuffer = 1;
1225 return 0;
1226}
1266 1227
1267static int i915_load_modeset_init(struct drm_device *dev) 1228static int i915_load_modeset_init(struct drm_device *dev)
1268{ 1229{
@@ -1286,27 +1247,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
1286 1247
1287 intel_register_dsm_handler(); 1248 intel_register_dsm_handler();
1288 1249
1289 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); 1250 ret = vga_switcheroo_register_client(dev->pdev,
1251 i915_switcheroo_set_state,
1252 NULL,
1253 i915_switcheroo_can_switch);
1290 if (ret) 1254 if (ret)
1291 goto cleanup_vga_client; 1255 goto cleanup_vga_client;
1292 1256
1293 /* Initialise stolen first so that we may reserve preallocated 1257 /* IIR "flip pending" bit means done if this bit is set */
1294 * objects for the BIOS to KMS transition. 1258 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1295 */ 1259 dev_priv->flip_pending_is_done = true;
1296 ret = i915_gem_init_stolen(dev);
1297 if (ret)
1298 goto cleanup_vga_switcheroo;
1299 1260
1300 intel_modeset_init(dev); 1261 intel_modeset_init(dev);
1301 1262
1302 ret = i915_gem_init(dev); 1263 ret = i915_load_gem_init(dev);
1303 if (ret) 1264 if (ret)
1304 goto cleanup_gem_stolen; 1265 goto cleanup_vga_switcheroo;
1305 1266
1306 intel_modeset_gem_init(dev); 1267 intel_modeset_gem_init(dev);
1307 1268
1308 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1309
1310 ret = drm_irq_install(dev); 1269 ret = drm_irq_install(dev);
1311 if (ret) 1270 if (ret)
1312 goto cleanup_gem; 1271 goto cleanup_gem;
@@ -1332,9 +1291,6 @@ cleanup_gem:
1332 mutex_lock(&dev->struct_mutex); 1291 mutex_lock(&dev->struct_mutex);
1333 i915_gem_cleanup_ringbuffer(dev); 1292 i915_gem_cleanup_ringbuffer(dev);
1334 mutex_unlock(&dev->struct_mutex); 1293 mutex_unlock(&dev->struct_mutex);
1335 i915_gem_cleanup_aliasing_ppgtt(dev);
1336cleanup_gem_stolen:
1337 i915_gem_cleanup_stolen(dev);
1338cleanup_vga_switcheroo: 1294cleanup_vga_switcheroo:
1339 vga_switcheroo_unregister_client(dev->pdev); 1295 vga_switcheroo_unregister_client(dev->pdev);
1340cleanup_vga_client: 1296cleanup_vga_client:
@@ -1367,63 +1323,570 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1367 master->driver_priv = NULL; 1323 master->driver_priv = NULL;
1368} 1324}
1369 1325
1370static void 1326static void i915_pineview_get_mem_freq(struct drm_device *dev)
1371i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1372 unsigned long size)
1373{ 1327{
1374 dev_priv->mm.gtt_mtrr = -1; 1328 drm_i915_private_t *dev_priv = dev->dev_private;
1329 u32 tmp;
1375 1330
1376#if defined(CONFIG_X86_PAT) 1331 tmp = I915_READ(CLKCFG);
1377 if (cpu_has_pat)
1378 return;
1379#endif
1380 1332
1381 /* Set up a WC MTRR for non-PAT systems. This is more common than 1333 switch (tmp & CLKCFG_FSB_MASK) {
1382 * one would think, because the kernel disables PAT on first 1334 case CLKCFG_FSB_533:
1383 * generation Core chips because WC PAT gets overridden by a UC 1335 dev_priv->fsb_freq = 533; /* 133*4 */
1384 * MTRR if present. Even if a UC MTRR isn't present. 1336 break;
1337 case CLKCFG_FSB_800:
1338 dev_priv->fsb_freq = 800; /* 200*4 */
1339 break;
1340 case CLKCFG_FSB_667:
1341 dev_priv->fsb_freq = 667; /* 167*4 */
1342 break;
1343 case CLKCFG_FSB_400:
1344 dev_priv->fsb_freq = 400; /* 100*4 */
1345 break;
1346 }
1347
1348 switch (tmp & CLKCFG_MEM_MASK) {
1349 case CLKCFG_MEM_533:
1350 dev_priv->mem_freq = 533;
1351 break;
1352 case CLKCFG_MEM_667:
1353 dev_priv->mem_freq = 667;
1354 break;
1355 case CLKCFG_MEM_800:
1356 dev_priv->mem_freq = 800;
1357 break;
1358 }
1359
1360 /* detect pineview DDR3 setting */
1361 tmp = I915_READ(CSHRDDR3CTL);
1362 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
1363}
1364
1365static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1366{
1367 drm_i915_private_t *dev_priv = dev->dev_private;
1368 u16 ddrpll, csipll;
1369
1370 ddrpll = I915_READ16(DDRMPLL1);
1371 csipll = I915_READ16(CSIPLL0);
1372
1373 switch (ddrpll & 0xff) {
1374 case 0xc:
1375 dev_priv->mem_freq = 800;
1376 break;
1377 case 0x10:
1378 dev_priv->mem_freq = 1066;
1379 break;
1380 case 0x14:
1381 dev_priv->mem_freq = 1333;
1382 break;
1383 case 0x18:
1384 dev_priv->mem_freq = 1600;
1385 break;
1386 default:
1387 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
1388 ddrpll & 0xff);
1389 dev_priv->mem_freq = 0;
1390 break;
1391 }
1392
1393 dev_priv->r_t = dev_priv->mem_freq;
1394
1395 switch (csipll & 0x3ff) {
1396 case 0x00c:
1397 dev_priv->fsb_freq = 3200;
1398 break;
1399 case 0x00e:
1400 dev_priv->fsb_freq = 3733;
1401 break;
1402 case 0x010:
1403 dev_priv->fsb_freq = 4266;
1404 break;
1405 case 0x012:
1406 dev_priv->fsb_freq = 4800;
1407 break;
1408 case 0x014:
1409 dev_priv->fsb_freq = 5333;
1410 break;
1411 case 0x016:
1412 dev_priv->fsb_freq = 5866;
1413 break;
1414 case 0x018:
1415 dev_priv->fsb_freq = 6400;
1416 break;
1417 default:
1418 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
1419 csipll & 0x3ff);
1420 dev_priv->fsb_freq = 0;
1421 break;
1422 }
1423
1424 if (dev_priv->fsb_freq == 3200) {
1425 dev_priv->c_m = 0;
1426 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
1427 dev_priv->c_m = 1;
1428 } else {
1429 dev_priv->c_m = 2;
1430 }
1431}
1432
1433static const struct cparams {
1434 u16 i;
1435 u16 t;
1436 u16 m;
1437 u16 c;
1438} cparams[] = {
1439 { 1, 1333, 301, 28664 },
1440 { 1, 1066, 294, 24460 },
1441 { 1, 800, 294, 25192 },
1442 { 0, 1333, 276, 27605 },
1443 { 0, 1066, 276, 27605 },
1444 { 0, 800, 231, 23784 },
1445};
1446
1447unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1448{
1449 u64 total_count, diff, ret;
1450 u32 count1, count2, count3, m = 0, c = 0;
1451 unsigned long now = jiffies_to_msecs(jiffies), diff1;
1452 int i;
1453
1454 diff1 = now - dev_priv->last_time1;
1455
1456 /* Prevent division-by-zero if we are asking too fast.
1457 * Also, we don't get interesting results if we are polling
1458 * faster than once in 10ms, so just return the saved value
1459 * in such cases.
1385 */ 1460 */
1386 dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); 1461 if (diff1 <= 10)
1387 if (dev_priv->mm.gtt_mtrr < 0) { 1462 return dev_priv->chipset_power;
1388 DRM_INFO("MTRR allocation failed. Graphics " 1463
1389 "performance may suffer.\n"); 1464 count1 = I915_READ(DMIEC);
1465 count2 = I915_READ(DDREC);
1466 count3 = I915_READ(CSIEC);
1467
1468 total_count = count1 + count2 + count3;
1469
1470 /* FIXME: handle per-counter overflow */
1471 if (total_count < dev_priv->last_count1) {
1472 diff = ~0UL - dev_priv->last_count1;
1473 diff += total_count;
1474 } else {
1475 diff = total_count - dev_priv->last_count1;
1390 } 1476 }
1477
1478 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
1479 if (cparams[i].i == dev_priv->c_m &&
1480 cparams[i].t == dev_priv->r_t) {
1481 m = cparams[i].m;
1482 c = cparams[i].c;
1483 break;
1484 }
1485 }
1486
1487 diff = div_u64(diff, diff1);
1488 ret = ((m * diff) + c);
1489 ret = div_u64(ret, 10);
1490
1491 dev_priv->last_count1 = total_count;
1492 dev_priv->last_time1 = now;
1493
1494 dev_priv->chipset_power = ret;
1495
1496 return ret;
1391} 1497}
1392 1498
1393static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1499unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1394{ 1500{
1395 struct apertures_struct *ap; 1501 unsigned long m, x, b;
1396 struct pci_dev *pdev = dev_priv->dev->pdev; 1502 u32 tsfs;
1397 bool primary; 1503
1504 tsfs = I915_READ(TSFS);
1398 1505
1399 ap = alloc_apertures(1); 1506 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
1400 if (!ap) 1507 x = I915_READ8(TR1);
1508
1509 b = tsfs & TSFS_INTR_MASK;
1510
1511 return ((m * x) / 127) - b;
1512}
1513
1514static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1515{
1516 static const struct v_table {
1517 u16 vd; /* in .1 mil */
1518 u16 vm; /* in .1 mil */
1519 } v_table[] = {
1520 { 0, 0, },
1521 { 375, 0, },
1522 { 500, 0, },
1523 { 625, 0, },
1524 { 750, 0, },
1525 { 875, 0, },
1526 { 1000, 0, },
1527 { 1125, 0, },
1528 { 4125, 3000, },
1529 { 4125, 3000, },
1530 { 4125, 3000, },
1531 { 4125, 3000, },
1532 { 4125, 3000, },
1533 { 4125, 3000, },
1534 { 4125, 3000, },
1535 { 4125, 3000, },
1536 { 4125, 3000, },
1537 { 4125, 3000, },
1538 { 4125, 3000, },
1539 { 4125, 3000, },
1540 { 4125, 3000, },
1541 { 4125, 3000, },
1542 { 4125, 3000, },
1543 { 4125, 3000, },
1544 { 4125, 3000, },
1545 { 4125, 3000, },
1546 { 4125, 3000, },
1547 { 4125, 3000, },
1548 { 4125, 3000, },
1549 { 4125, 3000, },
1550 { 4125, 3000, },
1551 { 4125, 3000, },
1552 { 4250, 3125, },
1553 { 4375, 3250, },
1554 { 4500, 3375, },
1555 { 4625, 3500, },
1556 { 4750, 3625, },
1557 { 4875, 3750, },
1558 { 5000, 3875, },
1559 { 5125, 4000, },
1560 { 5250, 4125, },
1561 { 5375, 4250, },
1562 { 5500, 4375, },
1563 { 5625, 4500, },
1564 { 5750, 4625, },
1565 { 5875, 4750, },
1566 { 6000, 4875, },
1567 { 6125, 5000, },
1568 { 6250, 5125, },
1569 { 6375, 5250, },
1570 { 6500, 5375, },
1571 { 6625, 5500, },
1572 { 6750, 5625, },
1573 { 6875, 5750, },
1574 { 7000, 5875, },
1575 { 7125, 6000, },
1576 { 7250, 6125, },
1577 { 7375, 6250, },
1578 { 7500, 6375, },
1579 { 7625, 6500, },
1580 { 7750, 6625, },
1581 { 7875, 6750, },
1582 { 8000, 6875, },
1583 { 8125, 7000, },
1584 { 8250, 7125, },
1585 { 8375, 7250, },
1586 { 8500, 7375, },
1587 { 8625, 7500, },
1588 { 8750, 7625, },
1589 { 8875, 7750, },
1590 { 9000, 7875, },
1591 { 9125, 8000, },
1592 { 9250, 8125, },
1593 { 9375, 8250, },
1594 { 9500, 8375, },
1595 { 9625, 8500, },
1596 { 9750, 8625, },
1597 { 9875, 8750, },
1598 { 10000, 8875, },
1599 { 10125, 9000, },
1600 { 10250, 9125, },
1601 { 10375, 9250, },
1602 { 10500, 9375, },
1603 { 10625, 9500, },
1604 { 10750, 9625, },
1605 { 10875, 9750, },
1606 { 11000, 9875, },
1607 { 11125, 10000, },
1608 { 11250, 10125, },
1609 { 11375, 10250, },
1610 { 11500, 10375, },
1611 { 11625, 10500, },
1612 { 11750, 10625, },
1613 { 11875, 10750, },
1614 { 12000, 10875, },
1615 { 12125, 11000, },
1616 { 12250, 11125, },
1617 { 12375, 11250, },
1618 { 12500, 11375, },
1619 { 12625, 11500, },
1620 { 12750, 11625, },
1621 { 12875, 11750, },
1622 { 13000, 11875, },
1623 { 13125, 12000, },
1624 { 13250, 12125, },
1625 { 13375, 12250, },
1626 { 13500, 12375, },
1627 { 13625, 12500, },
1628 { 13750, 12625, },
1629 { 13875, 12750, },
1630 { 14000, 12875, },
1631 { 14125, 13000, },
1632 { 14250, 13125, },
1633 { 14375, 13250, },
1634 { 14500, 13375, },
1635 { 14625, 13500, },
1636 { 14750, 13625, },
1637 { 14875, 13750, },
1638 { 15000, 13875, },
1639 { 15125, 14000, },
1640 { 15250, 14125, },
1641 { 15375, 14250, },
1642 { 15500, 14375, },
1643 { 15625, 14500, },
1644 { 15750, 14625, },
1645 { 15875, 14750, },
1646 { 16000, 14875, },
1647 { 16125, 15000, },
1648 };
1649 if (dev_priv->info->is_mobile)
1650 return v_table[pxvid].vm;
1651 else
1652 return v_table[pxvid].vd;
1653}
1654
1655void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1656{
1657 struct timespec now, diff1;
1658 u64 diff;
1659 unsigned long diffms;
1660 u32 count;
1661
1662 getrawmonotonic(&now);
1663 diff1 = timespec_sub(now, dev_priv->last_time2);
1664
1665 /* Don't divide by 0 */
1666 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
1667 if (!diffms)
1401 return; 1668 return;
1402 1669
1403 ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; 1670 count = I915_READ(GFXEC);
1404 ap->ranges[0].size =
1405 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1406 primary =
1407 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1408 1671
1409 remove_conflicting_framebuffers(ap, "inteldrmfb", primary); 1672 if (count < dev_priv->last_count2) {
1673 diff = ~0UL - dev_priv->last_count2;
1674 diff += count;
1675 } else {
1676 diff = count - dev_priv->last_count2;
1677 }
1410 1678
1411 kfree(ap); 1679 dev_priv->last_count2 = count;
1680 dev_priv->last_time2 = now;
1681
1682 /* More magic constants... */
1683 diff = diff * 1181;
1684 diff = div_u64(diff, diffms * 10);
1685 dev_priv->gfx_power = diff;
1412} 1686}
1413 1687
1414static void i915_dump_device_info(struct drm_i915_private *dev_priv) 1688unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
1415{ 1689{
1416 const struct intel_device_info *info = dev_priv->info; 1690 unsigned long t, corr, state1, corr2, state2;
1417 1691 u32 pxvid, ext_v;
1418#define DEV_INFO_FLAG(name) info->name ? #name "," : "" 1692
1419#define DEV_INFO_SEP , 1693 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
1420 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" 1694 pxvid = (pxvid >> 24) & 0x7f;
1421 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 1695 ext_v = pvid_to_extvid(dev_priv, pxvid);
1422 info->gen, 1696
1423 dev_priv->dev->pdev->device, 1697 state1 = ext_v;
1424 DEV_INFO_FLAGS); 1698
1425#undef DEV_INFO_FLAG 1699 t = i915_mch_val(dev_priv);
1426#undef DEV_INFO_SEP 1700
1701 /* Revel in the empirically derived constants */
1702
1703 /* Correction factor in 1/100000 units */
1704 if (t > 80)
1705 corr = ((t * 2349) + 135940);
1706 else if (t >= 50)
1707 corr = ((t * 964) + 29317);
1708 else /* < 50 */
1709 corr = ((t * 301) + 1004);
1710
1711 corr = corr * ((150142 * state1) / 10000 - 78642);
1712 corr /= 100000;
1713 corr2 = (corr * dev_priv->corr);
1714
1715 state2 = (corr2 * state1) / 10000;
1716 state2 /= 100; /* convert to mW */
1717
1718 i915_update_gfx_val(dev_priv);
1719
1720 return dev_priv->gfx_power + state2;
1721}
1722
1723/* Global for IPS driver to get at the current i915 device */
1724static struct drm_i915_private *i915_mch_dev;
1725/*
1726 * Lock protecting IPS related data structures
1727 * - i915_mch_dev
1728 * - dev_priv->max_delay
1729 * - dev_priv->min_delay
1730 * - dev_priv->fmax
1731 * - dev_priv->gpu_busy
1732 */
1733static DEFINE_SPINLOCK(mchdev_lock);
1734
1735/**
1736 * i915_read_mch_val - return value for IPS use
1737 *
1738 * Calculate and return a value for the IPS driver to use when deciding whether
1739 * we have thermal and power headroom to increase CPU or GPU power budget.
1740 */
1741unsigned long i915_read_mch_val(void)
1742{
1743 struct drm_i915_private *dev_priv;
1744 unsigned long chipset_val, graphics_val, ret = 0;
1745
1746 spin_lock(&mchdev_lock);
1747 if (!i915_mch_dev)
1748 goto out_unlock;
1749 dev_priv = i915_mch_dev;
1750
1751 chipset_val = i915_chipset_val(dev_priv);
1752 graphics_val = i915_gfx_val(dev_priv);
1753
1754 ret = chipset_val + graphics_val;
1755
1756out_unlock:
1757 spin_unlock(&mchdev_lock);
1758
1759 return ret;
1760}
1761EXPORT_SYMBOL_GPL(i915_read_mch_val);
1762
1763/**
1764 * i915_gpu_raise - raise GPU frequency limit
1765 *
1766 * Raise the limit; IPS indicates we have thermal headroom.
1767 */
1768bool i915_gpu_raise(void)
1769{
1770 struct drm_i915_private *dev_priv;
1771 bool ret = true;
1772
1773 spin_lock(&mchdev_lock);
1774 if (!i915_mch_dev) {
1775 ret = false;
1776 goto out_unlock;
1777 }
1778 dev_priv = i915_mch_dev;
1779
1780 if (dev_priv->max_delay > dev_priv->fmax)
1781 dev_priv->max_delay--;
1782
1783out_unlock:
1784 spin_unlock(&mchdev_lock);
1785
1786 return ret;
1787}
1788EXPORT_SYMBOL_GPL(i915_gpu_raise);
1789
1790/**
1791 * i915_gpu_lower - lower GPU frequency limit
1792 *
1793 * IPS indicates we're close to a thermal limit, so throttle back the GPU
1794 * frequency maximum.
1795 */
1796bool i915_gpu_lower(void)
1797{
1798 struct drm_i915_private *dev_priv;
1799 bool ret = true;
1800
1801 spin_lock(&mchdev_lock);
1802 if (!i915_mch_dev) {
1803 ret = false;
1804 goto out_unlock;
1805 }
1806 dev_priv = i915_mch_dev;
1807
1808 if (dev_priv->max_delay < dev_priv->min_delay)
1809 dev_priv->max_delay++;
1810
1811out_unlock:
1812 spin_unlock(&mchdev_lock);
1813
1814 return ret;
1815}
1816EXPORT_SYMBOL_GPL(i915_gpu_lower);
1817
1818/**
1819 * i915_gpu_busy - indicate GPU business to IPS
1820 *
1821 * Tell the IPS driver whether or not the GPU is busy.
1822 */
1823bool i915_gpu_busy(void)
1824{
1825 struct drm_i915_private *dev_priv;
1826 bool ret = false;
1827
1828 spin_lock(&mchdev_lock);
1829 if (!i915_mch_dev)
1830 goto out_unlock;
1831 dev_priv = i915_mch_dev;
1832
1833 ret = dev_priv->busy;
1834
1835out_unlock:
1836 spin_unlock(&mchdev_lock);
1837
1838 return ret;
1839}
1840EXPORT_SYMBOL_GPL(i915_gpu_busy);
1841
1842/**
1843 * i915_gpu_turbo_disable - disable graphics turbo
1844 *
1845 * Disable graphics turbo by resetting the max frequency and setting the
1846 * current frequency to the default.
1847 */
1848bool i915_gpu_turbo_disable(void)
1849{
1850 struct drm_i915_private *dev_priv;
1851 bool ret = true;
1852
1853 spin_lock(&mchdev_lock);
1854 if (!i915_mch_dev) {
1855 ret = false;
1856 goto out_unlock;
1857 }
1858 dev_priv = i915_mch_dev;
1859
1860 dev_priv->max_delay = dev_priv->fstart;
1861
1862 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
1863 ret = false;
1864
1865out_unlock:
1866 spin_unlock(&mchdev_lock);
1867
1868 return ret;
1869}
1870EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
1871
1872/**
1873 * Tells the intel_ips driver that the i915 driver is now loaded, if
1874 * IPS got loaded first.
1875 *
1876 * This awkward dance is so that neither module has to depend on the
1877 * other in order for IPS to do the appropriate communication of
1878 * GPU turbo limits to i915.
1879 */
1880static void
1881ips_ping_for_i915_load(void)
1882{
1883 void (*link)(void);
1884
1885 link = symbol_get(ips_link_to_i915_driver);
1886 if (link) {
1887 link();
1888 symbol_put(ips_link_to_i915_driver);
1889 }
1427} 1890}
1428 1891
1429/** 1892/**
@@ -1440,15 +1903,8 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1440int i915_driver_load(struct drm_device *dev, unsigned long flags) 1903int i915_driver_load(struct drm_device *dev, unsigned long flags)
1441{ 1904{
1442 struct drm_i915_private *dev_priv; 1905 struct drm_i915_private *dev_priv;
1443 struct intel_device_info *info; 1906 int ret = 0, mmio_bar;
1444 int ret = 0, mmio_bar, mmio_size; 1907 uint32_t agp_size;
1445 uint32_t aperture_size;
1446
1447 info = (struct intel_device_info *) flags;
1448
1449 /* Refuse to load on gen6+ without kms enabled. */
1450 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1451 return -ENODEV;
1452 1908
1453 /* i915 has 4 more counters */ 1909 /* i915 has 4 more counters */
1454 dev->counters += 4; 1910 dev->counters += 4;
@@ -1463,24 +1919,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1463 1919
1464 dev->dev_private = (void *)dev_priv; 1920 dev->dev_private = (void *)dev_priv;
1465 dev_priv->dev = dev; 1921 dev_priv->dev = dev;
1466 dev_priv->info = info; 1922 dev_priv->info = (struct intel_device_info *) flags;
1467
1468 i915_dump_device_info(dev_priv);
1469 1923
1470 if (i915_get_bridge_dev(dev)) { 1924 if (i915_get_bridge_dev(dev)) {
1471 ret = -EIO; 1925 ret = -EIO;
1472 goto free_priv; 1926 goto free_priv;
1473 } 1927 }
1474 1928
1475 ret = i915_gem_gtt_init(dev);
1476 if (ret)
1477 goto put_bridge;
1478
1479 if (drm_core_check_feature(dev, DRIVER_MODESET))
1480 i915_kick_out_firmware_fb(dev_priv);
1481
1482 pci_set_master(dev->pdev);
1483
1484 /* overlay on gen2 is broken and can't address above 1G */ 1929 /* overlay on gen2 is broken and can't address above 1G */
1485 if (IS_GEN2(dev)) 1930 if (IS_GEN2(dev))
1486 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1931 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
@@ -1497,38 +1942,41 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1497 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1942 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1498 1943
1499 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1944 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1500 /* Before gen4, the registers and the GTT are behind different BARs. 1945 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1501 * However, from gen4 onwards, the registers and the GTT are shared
1502 * in the same BAR, so we want to restrict this ioremap from
1503 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1504 * the register BAR remains the same size for all the earlier
1505 * generations up to Ironlake.
1506 */
1507 if (info->gen < 5)
1508 mmio_size = 512*1024;
1509 else
1510 mmio_size = 2*1024*1024;
1511
1512 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1513 if (!dev_priv->regs) { 1946 if (!dev_priv->regs) {
1514 DRM_ERROR("failed to map registers\n"); 1947 DRM_ERROR("failed to map registers\n");
1515 ret = -EIO; 1948 ret = -EIO;
1516 goto put_gmch; 1949 goto put_bridge;
1950 }
1951
1952 dev_priv->mm.gtt = intel_gtt_get();
1953 if (!dev_priv->mm.gtt) {
1954 DRM_ERROR("Failed to initialize GTT\n");
1955 ret = -ENODEV;
1956 goto out_rmmap;
1517 } 1957 }
1518 1958
1519 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1959 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1520 dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
1521 1960
1522 dev_priv->mm.gtt_mapping = 1961 dev_priv->mm.gtt_mapping =
1523 io_mapping_create_wc(dev_priv->mm.gtt_base_addr, 1962 io_mapping_create_wc(dev->agp->base, agp_size);
1524 aperture_size);
1525 if (dev_priv->mm.gtt_mapping == NULL) { 1963 if (dev_priv->mm.gtt_mapping == NULL) {
1526 ret = -EIO; 1964 ret = -EIO;
1527 goto out_rmmap; 1965 goto out_rmmap;
1528 } 1966 }
1529 1967
1530 i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, 1968 /* Set up a WC MTRR for non-PAT systems. This is more common than
1531 aperture_size); 1969 * one would think, because the kernel disables PAT on first
1970 * generation Core chips because WC PAT gets overridden by a UC
1971 * MTRR if present. Even if a UC MTRR isn't present.
1972 */
1973 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1974 agp_size,
1975 MTRR_TYPE_WRCOMB, 1);
1976 if (dev_priv->mm.gtt_mtrr < 0) {
1977 DRM_INFO("MTRR allocation failed. Graphics "
1978 "performance may suffer.\n");
1979 }
1532 1980
1533 /* The i915 workqueue is primarily used for batched retirement of 1981 /* The i915 workqueue is primarily used for batched retirement of
1534 * requests (and thus managing bo) once the task has been completed 1982 * requests (and thus managing bo) once the task has been completed
@@ -1541,30 +1989,44 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1541 * 1989 *
1542 * All tasks on the workqueue are expected to acquire the dev mutex 1990 * All tasks on the workqueue are expected to acquire the dev mutex
1543 * so there is no point in running more than one instance of the 1991 * so there is no point in running more than one instance of the
1544 * workqueue at any time. Use an ordered one. 1992 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1545 */ 1993 */
1546 dev_priv->wq = alloc_ordered_workqueue("i915", 0); 1994 dev_priv->wq = alloc_workqueue("i915",
1995 WQ_UNBOUND | WQ_NON_REENTRANT,
1996 1);
1547 if (dev_priv->wq == NULL) { 1997 if (dev_priv->wq == NULL) {
1548 DRM_ERROR("Failed to create our workqueue.\n"); 1998 DRM_ERROR("Failed to create our workqueue.\n");
1549 ret = -ENOMEM; 1999 ret = -ENOMEM;
1550 goto out_mtrrfree; 2000 goto out_mtrrfree;
1551 } 2001 }
1552 2002
1553 /* This must be called before any calls to HAS_PCH_* */ 2003 /* enable GEM by default */
1554 intel_detect_pch(dev); 2004 dev_priv->has_gem = 1;
1555 2005
1556 intel_irq_init(dev); 2006 intel_irq_init(dev);
1557 intel_gt_init(dev);
1558 2007
1559 /* Try to make sure MCHBAR is enabled before poking at it */ 2008 /* Try to make sure MCHBAR is enabled before poking at it */
1560 intel_setup_mchbar(dev); 2009 intel_setup_mchbar(dev);
1561 intel_setup_gmbus(dev); 2010 intel_setup_gmbus(dev);
1562 intel_opregion_setup(dev); 2011 intel_opregion_setup(dev);
1563 2012
2013 /* Make sure the bios did its job and set up vital registers */
1564 intel_setup_bios(dev); 2014 intel_setup_bios(dev);
1565 2015
1566 i915_gem_load(dev); 2016 i915_gem_load(dev);
1567 2017
2018 /* Init HWS */
2019 if (!I915_NEED_GFX_HWS(dev)) {
2020 ret = i915_init_phys_hws(dev);
2021 if (ret)
2022 goto out_gem_unload;
2023 }
2024
2025 if (IS_PINEVIEW(dev))
2026 i915_pineview_get_mem_freq(dev);
2027 else if (IS_GEN5(dev))
2028 i915_ironlake_get_mem_freq(dev);
2029
1568 /* On the 945G/GM, the chipset reports the MSI capability on the 2030 /* On the 945G/GM, the chipset reports the MSI capability on the
1569 * integrated graphics even though the support isn't actually there 2031 * integrated graphics even though the support isn't actually there
1570 * according to the published specs. It doesn't appear to function 2032 * according to the published specs. It doesn't appear to function
@@ -1581,14 +2043,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1581 2043
1582 spin_lock_init(&dev_priv->irq_lock); 2044 spin_lock_init(&dev_priv->irq_lock);
1583 spin_lock_init(&dev_priv->error_lock); 2045 spin_lock_init(&dev_priv->error_lock);
1584 spin_lock_init(&dev_priv->rps.lock); 2046 spin_lock_init(&dev_priv->rps_lock);
1585 spin_lock_init(&dev_priv->dpio_lock);
1586
1587 mutex_init(&dev_priv->rps.hw_lock);
1588 2047
1589 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2048 if (IS_MOBILE(dev) || !IS_GEN2(dev))
1590 dev_priv->num_pipe = 3;
1591 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1592 dev_priv->num_pipe = 2; 2049 dev_priv->num_pipe = 2;
1593 else 2050 else
1594 dev_priv->num_pipe = 1; 2051 dev_priv->num_pipe = 1;
@@ -1600,6 +2057,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1600 /* Start out suspended */ 2057 /* Start out suspended */
1601 dev_priv->mm.suspended = 1; 2058 dev_priv->mm.suspended = 1;
1602 2059
2060 intel_detect_pch(dev);
2061
1603 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2062 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1604 ret = i915_load_modeset_init(dev); 2063 ret = i915_load_modeset_init(dev);
1605 if (ret < 0) { 2064 if (ret < 0) {
@@ -1608,8 +2067,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1608 } 2067 }
1609 } 2068 }
1610 2069
1611 i915_setup_sysfs(dev);
1612
1613 /* Must be done after probing outputs */ 2070 /* Must be done after probing outputs */
1614 intel_opregion_init(dev); 2071 intel_opregion_init(dev);
1615 acpi_video_register(); 2072 acpi_video_register();
@@ -1617,8 +2074,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1617 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2074 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1618 (unsigned long) dev); 2075 (unsigned long) dev);
1619 2076
1620 if (IS_GEN5(dev)) 2077 spin_lock(&mchdev_lock);
1621 intel_gpu_ips_init(dev_priv); 2078 i915_mch_dev = dev_priv;
2079 dev_priv->mchdev_lock = &mchdev_lock;
2080 spin_unlock(&mchdev_lock);
2081
2082 ips_ping_for_i915_load();
1622 2083
1623 return 0; 2084 return 0;
1624 2085
@@ -1634,16 +2095,13 @@ out_gem_unload:
1634 destroy_workqueue(dev_priv->wq); 2095 destroy_workqueue(dev_priv->wq);
1635out_mtrrfree: 2096out_mtrrfree:
1636 if (dev_priv->mm.gtt_mtrr >= 0) { 2097 if (dev_priv->mm.gtt_mtrr >= 0) {
1637 mtrr_del(dev_priv->mm.gtt_mtrr, 2098 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1638 dev_priv->mm.gtt_base_addr, 2099 dev->agp->agp_info.aper_size * 1024 * 1024);
1639 aperture_size);
1640 dev_priv->mm.gtt_mtrr = -1; 2100 dev_priv->mm.gtt_mtrr = -1;
1641 } 2101 }
1642 io_mapping_free(dev_priv->mm.gtt_mapping); 2102 io_mapping_free(dev_priv->mm.gtt_mapping);
1643out_rmmap: 2103out_rmmap:
1644 pci_iounmap(dev->pdev, dev_priv->regs); 2104 pci_iounmap(dev->pdev, dev_priv->regs);
1645put_gmch:
1646 i915_gem_gtt_fini(dev);
1647put_bridge: 2105put_bridge:
1648 pci_dev_put(dev_priv->bridge_dev); 2106 pci_dev_put(dev_priv->bridge_dev);
1649free_priv: 2107free_priv:
@@ -1656,9 +2114,9 @@ int i915_driver_unload(struct drm_device *dev)
1656 struct drm_i915_private *dev_priv = dev->dev_private; 2114 struct drm_i915_private *dev_priv = dev->dev_private;
1657 int ret; 2115 int ret;
1658 2116
1659 intel_gpu_ips_teardown(); 2117 spin_lock(&mchdev_lock);
1660 2118 i915_mch_dev = NULL;
1661 i915_teardown_sysfs(dev); 2119 spin_unlock(&mchdev_lock);
1662 2120
1663 if (dev_priv->mm.inactive_shrinker.shrink) 2121 if (dev_priv->mm.inactive_shrinker.shrink)
1664 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2122 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1667,7 +2125,6 @@ int i915_driver_unload(struct drm_device *dev)
1667 ret = i915_gpu_idle(dev); 2125 ret = i915_gpu_idle(dev);
1668 if (ret) 2126 if (ret)
1669 DRM_ERROR("failed to idle hardware: %d\n", ret); 2127 DRM_ERROR("failed to idle hardware: %d\n", ret);
1670 i915_gem_retire_requests(dev);
1671 mutex_unlock(&dev->struct_mutex); 2128 mutex_unlock(&dev->struct_mutex);
1672 2129
1673 /* Cancel the retire work handler, which should be idle now. */ 2130 /* Cancel the retire work handler, which should be idle now. */
@@ -1675,9 +2132,8 @@ int i915_driver_unload(struct drm_device *dev)
1675 2132
1676 io_mapping_free(dev_priv->mm.gtt_mapping); 2133 io_mapping_free(dev_priv->mm.gtt_mapping);
1677 if (dev_priv->mm.gtt_mtrr >= 0) { 2134 if (dev_priv->mm.gtt_mtrr >= 0) {
1678 mtrr_del(dev_priv->mm.gtt_mtrr, 2135 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1679 dev_priv->mm.gtt_base_addr, 2136 dev->agp->agp_info.aper_size * 1024 * 1024);
1680 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
1681 dev_priv->mm.gtt_mtrr = -1; 2137 dev_priv->mm.gtt_mtrr = -1;
1682 } 2138 }
1683 2139
@@ -1686,7 +2142,6 @@ int i915_driver_unload(struct drm_device *dev)
1686 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2142 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1687 intel_fbdev_fini(dev); 2143 intel_fbdev_fini(dev);
1688 intel_modeset_cleanup(dev); 2144 intel_modeset_cleanup(dev);
1689 cancel_work_sync(&dev_priv->console_resume_work);
1690 2145
1691 /* 2146 /*
1692 * free the memory space allocated for the child device 2147 * free the memory space allocated for the child device
@@ -1719,10 +2174,9 @@ int i915_driver_unload(struct drm_device *dev)
1719 mutex_lock(&dev->struct_mutex); 2174 mutex_lock(&dev->struct_mutex);
1720 i915_gem_free_all_phys_object(dev); 2175 i915_gem_free_all_phys_object(dev);
1721 i915_gem_cleanup_ringbuffer(dev); 2176 i915_gem_cleanup_ringbuffer(dev);
1722 i915_gem_context_fini(dev);
1723 mutex_unlock(&dev->struct_mutex); 2177 mutex_unlock(&dev->struct_mutex);
1724 i915_gem_cleanup_aliasing_ppgtt(dev); 2178 if (I915_HAS_FBC(dev) && i915_powersave)
1725 i915_gem_cleanup_stolen(dev); 2179 i915_cleanup_compression(dev);
1726 drm_mm_takedown(&dev_priv->mm.stolen); 2180 drm_mm_takedown(&dev_priv->mm.stolen);
1727 2181
1728 intel_cleanup_overlay(dev); 2182 intel_cleanup_overlay(dev);
@@ -1759,8 +2213,6 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1759 spin_lock_init(&file_priv->mm.lock); 2213 spin_lock_init(&file_priv->mm.lock);
1760 INIT_LIST_HEAD(&file_priv->mm.request_list); 2214 INIT_LIST_HEAD(&file_priv->mm.request_list);
1761 2215
1762 idr_init(&file_priv->context_idr);
1763
1764 return 0; 2216 return 0;
1765} 2217}
1766 2218
@@ -1772,7 +2224,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1772 * mode setting case, we want to restore the kernel's initial mode (just 2224 * mode setting case, we want to restore the kernel's initial mode (just
1773 * in case the last client left us in a bad state). 2225 * in case the last client left us in a bad state).
1774 * 2226 *
1775 * Additionally, in the non-mode setting case, we'll tear down the GTT 2227 * Additionally, in the non-mode setting case, we'll tear down the AGP
1776 * and DMA structures, since the kernel won't be using them, and clea 2228 * and DMA structures, since the kernel won't be using them, and clea
1777 * up any GEM state. 2229 * up any GEM state.
1778 */ 2230 */
@@ -1780,13 +2232,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1780{ 2232{
1781 drm_i915_private_t *dev_priv = dev->dev_private; 2233 drm_i915_private_t *dev_priv = dev->dev_private;
1782 2234
1783 /* On gen6+ we refuse to init without kms enabled, but then the drm core 2235 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1784 * goes right around and calls lastclose. Check for this and don't clean
1785 * up anything. */
1786 if (!dev_priv)
1787 return;
1788
1789 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1790 intel_fb_restore_mode(dev); 2236 intel_fb_restore_mode(dev);
1791 vga_switcheroo_process_delayed_switch(); 2237 vga_switcheroo_process_delayed_switch();
1792 return; 2238 return;
@@ -1794,13 +2240,18 @@ void i915_driver_lastclose(struct drm_device * dev)
1794 2240
1795 i915_gem_lastclose(dev); 2241 i915_gem_lastclose(dev);
1796 2242
2243 if (dev_priv->agp_heap)
2244 i915_mem_takedown(&(dev_priv->agp_heap));
2245
1797 i915_dma_cleanup(dev); 2246 i915_dma_cleanup(dev);
1798} 2247}
1799 2248
1800void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 2249void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1801{ 2250{
1802 i915_gem_context_close(dev, file_priv); 2251 drm_i915_private_t *dev_priv = dev->dev_private;
1803 i915_gem_release(dev, file_priv); 2252 i915_gem_release(dev, file_priv);
2253 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2254 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1804} 2255}
1805 2256
1806void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 2257void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1819,12 +2270,12 @@ struct drm_ioctl_desc i915_ioctls[] = {
1819 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 2270 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1820 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), 2271 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
1821 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2272 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1822 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2273 DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
1823 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2274 DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
1824 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2275 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1825 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 2276 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1826 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2277 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1827 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2278 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1828 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 2279 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1829 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 2280 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1830 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2281 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -1834,8 +2285,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
1834 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2285 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1835 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2286 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1836 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 2287 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1837 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
1838 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
1839 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 2288 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1840 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2289 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1841 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2290 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1853,20 +2302,20 @@ struct drm_ioctl_desc i915_ioctls[] = {
1853 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2302 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1854 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2303 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1855 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2304 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1856 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1857 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1858 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
1859 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1860 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1861 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
1862}; 2305};
1863 2306
1864int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2307int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1865 2308
1866/* 2309/**
1867 * This is really ugly: Because old userspace abused the linux agp interface to 2310 * Determine if the device really is AGP or not.
1868 * manage the gtt, we need to claim that all intel devices are agp. For 2311 *
1869 * otherwise the drm core refuses to initialize the agp support code. 2312 * All Intel graphics chipsets are treated as AGP, even if they are really
2313 * PCI-e.
2314 *
2315 * \param dev The device to be tested.
2316 *
2317 * \returns
2318 * A value of 1 is always retured to indictate every i9x5 is AGP.
1870 */ 2319 */
1871int i915_driver_device_is_agp(struct drm_device * dev) 2320int i915_driver_device_is_agp(struct drm_device * dev)
1872{ 2321{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1..f07e4252b70 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,15 +28,14 @@
28 */ 28 */
29 29
30#include <linux/device.h> 30#include <linux/device.h>
31#include <drm/drmP.h> 31#include "drmP.h"
32#include <drm/i915_drm.h> 32#include "drm.h"
33#include "i915_drm.h"
33#include "i915_drv.h" 34#include "i915_drv.h"
34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37#include <linux/console.h> 37#include <linux/console.h>
38#include <linux/module.h> 38#include "drm_crtc_helper.h"
39#include <drm/drm_crtc_helper.h>
40 39
41static int i915_modeset __read_mostly = -1; 40static int i915_modeset __read_mostly = -1;
42module_param_named(modeset, i915_modeset, int, 0400); 41module_param_named(modeset, i915_modeset, int, 0400);
@@ -47,32 +46,28 @@ MODULE_PARM_DESC(modeset,
47unsigned int i915_fbpercrtc __always_unused = 0; 46unsigned int i915_fbpercrtc __always_unused = 0;
48module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 47module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
49 48
50int i915_panel_ignore_lid __read_mostly = 1; 49int i915_panel_ignore_lid __read_mostly = 0;
51module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); 50module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
52MODULE_PARM_DESC(panel_ignore_lid, 51MODULE_PARM_DESC(panel_ignore_lid,
53 "Override lid status (0=autodetect, 1=autodetect disabled [default], " 52 "Override lid status (0=autodetect [default], 1=lid open, "
54 "-1=force lid closed, -2=force lid open)"); 53 "-1=lid closed)");
55 54
56unsigned int i915_powersave __read_mostly = 1; 55unsigned int i915_powersave __read_mostly = 1;
57module_param_named(powersave, i915_powersave, int, 0600); 56module_param_named(powersave, i915_powersave, int, 0600);
58MODULE_PARM_DESC(powersave, 57MODULE_PARM_DESC(powersave,
59 "Enable powersavings, fbc, downclocking, etc. (default: true)"); 58 "Enable powersavings, fbc, downclocking, etc. (default: true)");
60 59
61int i915_semaphores __read_mostly = -1; 60unsigned int i915_semaphores __read_mostly = 0;
62module_param_named(semaphores, i915_semaphores, int, 0600); 61module_param_named(semaphores, i915_semaphores, int, 0600);
63MODULE_PARM_DESC(semaphores, 62MODULE_PARM_DESC(semaphores,
64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); 63 "Use semaphores for inter-ring sync (default: false)");
65 64
66int i915_enable_rc6 __read_mostly = -1; 65unsigned int i915_enable_rc6 __read_mostly = 0;
67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); 66module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68MODULE_PARM_DESC(i915_enable_rc6, 67MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6. " 68 "Enable power-saving render C-state 6 (default: true)");
70 "Different stages can be selected via bitmask values "
71 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
72 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
73 "default: -1 (use per-chip default)");
74 69
75int i915_enable_fbc __read_mostly = -1; 70unsigned int i915_enable_fbc __read_mostly = -1;
76module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
77MODULE_PARM_DESC(i915_enable_fbc, 72MODULE_PARM_DESC(i915_enable_fbc,
78 "Enable frame buffer compression for power savings " 73 "Enable frame buffer compression for power savings "
@@ -84,23 +79,17 @@ MODULE_PARM_DESC(lvds_downclock,
84 "Use panel (LVDS/eDP) downclocking for power savings " 79 "Use panel (LVDS/eDP) downclocking for power savings "
85 "(default: false)"); 80 "(default: false)");
86 81
87int i915_lvds_channel_mode __read_mostly; 82unsigned int i915_panel_use_ssc __read_mostly = 1;
88module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
89MODULE_PARM_DESC(lvds_channel_mode,
90 "Specify LVDS channel mode "
91 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
92
93int i915_panel_use_ssc __read_mostly = -1;
94module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 83module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
95MODULE_PARM_DESC(lvds_use_ssc, 84MODULE_PARM_DESC(lvds_use_ssc,
96 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 85 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
97 "(default: auto from VBT)"); 86 "(default: true)");
98 87
99int i915_vbt_sdvo_panel_type __read_mostly = -1; 88int i915_vbt_sdvo_panel_type __read_mostly = -1;
100module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); 89module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
101MODULE_PARM_DESC(vbt_sdvo_panel_type, 90MODULE_PARM_DESC(vbt_sdvo_panel_type,
102 "Override/Ignore selection of SDVO panel mode in the VBT " 91 "Override selection of SDVO panel mode in the VBT "
103 "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 92 "(default: auto)");
104 93
105static bool i915_try_reset __read_mostly = true; 94static bool i915_try_reset __read_mostly = true;
106module_param_named(reset, i915_try_reset, bool, 0600); 95module_param_named(reset, i915_try_reset, bool, 0600);
@@ -113,23 +102,11 @@ MODULE_PARM_DESC(enable_hangcheck,
113 "WARNING: Disabling this can cause system wide hangs. " 102 "WARNING: Disabling this can cause system wide hangs. "
114 "(default: true)"); 103 "(default: true)");
115 104
116int i915_enable_ppgtt __read_mostly = -1;
117module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
118MODULE_PARM_DESC(i915_enable_ppgtt,
119 "Enable PPGTT (default: true)");
120
121unsigned int i915_preliminary_hw_support __read_mostly = 0;
122module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
123MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. "
125 "Enable Haswell and ValleyView Support. "
126 "(default: false)");
127
128static struct drm_driver driver; 105static struct drm_driver driver;
129extern int intel_agp_enabled; 106extern int intel_agp_enabled;
130 107
131#define INTEL_VGA_DEVICE(id, info) { \ 108#define INTEL_VGA_DEVICE(id, info) { \
132 .class = PCI_BASE_CLASS_DISPLAY << 16, \ 109 .class = PCI_CLASS_DISPLAY_VGA << 8, \
133 .class_mask = 0xff0000, \ 110 .class_mask = 0xff0000, \
134 .vendor = 0x8086, \ 111 .vendor = 0x8086, \
135 .device = id, \ 112 .device = id, \
@@ -220,7 +197,7 @@ static const struct intel_device_info intel_pineview_info = {
220 197
221static const struct intel_device_info intel_ironlake_d_info = { 198static const struct intel_device_info intel_ironlake_d_info = {
222 .gen = 5, 199 .gen = 5,
223 .need_gfx_hws = 1, .has_hotplug = 1, 200 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
224 .has_bsd_ring = 1, 201 .has_bsd_ring = 1,
225}; 202};
226 203
@@ -236,8 +213,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
236 .need_gfx_hws = 1, .has_hotplug = 1, 213 .need_gfx_hws = 1, .has_hotplug = 1,
237 .has_bsd_ring = 1, 214 .has_bsd_ring = 1,
238 .has_blt_ring = 1, 215 .has_blt_ring = 1,
239 .has_llc = 1,
240 .has_force_wake = 1,
241}; 216};
242 217
243static const struct intel_device_info intel_sandybridge_m_info = { 218static const struct intel_device_info intel_sandybridge_m_info = {
@@ -246,8 +221,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
246 .has_fbc = 1, 221 .has_fbc = 1,
247 .has_bsd_ring = 1, 222 .has_bsd_ring = 1,
248 .has_blt_ring = 1, 223 .has_blt_ring = 1,
249 .has_llc = 1,
250 .has_force_wake = 1,
251}; 224};
252 225
253static const struct intel_device_info intel_ivybridge_d_info = { 226static const struct intel_device_info intel_ivybridge_d_info = {
@@ -255,8 +228,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
255 .need_gfx_hws = 1, .has_hotplug = 1, 228 .need_gfx_hws = 1, .has_hotplug = 1,
256 .has_bsd_ring = 1, 229 .has_bsd_ring = 1,
257 .has_blt_ring = 1, 230 .has_blt_ring = 1,
258 .has_llc = 1,
259 .has_force_wake = 1,
260}; 231};
261 232
262static const struct intel_device_info intel_ivybridge_m_info = { 233static const struct intel_device_info intel_ivybridge_m_info = {
@@ -265,44 +236,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
265 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ 236 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
266 .has_bsd_ring = 1, 237 .has_bsd_ring = 1,
267 .has_blt_ring = 1, 238 .has_blt_ring = 1,
268 .has_llc = 1,
269 .has_force_wake = 1,
270};
271
272static const struct intel_device_info intel_valleyview_m_info = {
273 .gen = 7, .is_mobile = 1,
274 .need_gfx_hws = 1, .has_hotplug = 1,
275 .has_fbc = 0,
276 .has_bsd_ring = 1,
277 .has_blt_ring = 1,
278 .is_valleyview = 1,
279};
280
281static const struct intel_device_info intel_valleyview_d_info = {
282 .gen = 7,
283 .need_gfx_hws = 1, .has_hotplug = 1,
284 .has_fbc = 0,
285 .has_bsd_ring = 1,
286 .has_blt_ring = 1,
287 .is_valleyview = 1,
288};
289
290static const struct intel_device_info intel_haswell_d_info = {
291 .is_haswell = 1, .gen = 7,
292 .need_gfx_hws = 1, .has_hotplug = 1,
293 .has_bsd_ring = 1,
294 .has_blt_ring = 1,
295 .has_llc = 1,
296 .has_force_wake = 1,
297};
298
299static const struct intel_device_info intel_haswell_m_info = {
300 .is_haswell = 1, .gen = 7, .is_mobile = 1,
301 .need_gfx_hws = 1, .has_hotplug = 1,
302 .has_bsd_ring = 1,
303 .has_blt_ring = 1,
304 .has_llc = 1,
305 .has_force_wake = 1,
306}; 239};
307 240
308static const struct pci_device_id pciidlist[] = { /* aka */ 241static const struct pci_device_id pciidlist[] = { /* aka */
@@ -349,46 +282,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
349 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 282 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
350 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 283 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
351 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 284 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
352 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
353 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
354 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
355 INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
356 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
357 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
358 INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
359 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
360 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
361 INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
362 INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
363 INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
364 INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
365 INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
366 INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
367 INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
368 INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
369 INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
370 INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
371 INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
372 INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
373 INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
374 INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
375 INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
376 INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
377 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
378 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
379 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
380 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
381 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
382 INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
383 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
384 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
385 INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
386 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
387 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
388 INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
389 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
390 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
391 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
392 {0, 0, 0} 285 {0, 0, 0}
393}; 286};
394 287
@@ -396,7 +289,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
396MODULE_DEVICE_TABLE(pci, pciidlist); 289MODULE_DEVICE_TABLE(pci, pciidlist);
397#endif 290#endif
398 291
399void intel_detect_pch(struct drm_device *dev) 292#define INTEL_PCH_DEVICE_ID_MASK 0xff00
293#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
294#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
295#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
296
297void intel_detect_pch (struct drm_device *dev)
400{ 298{
401 struct drm_i915_private *dev_priv = dev->dev_private; 299 struct drm_i915_private *dev_priv = dev->dev_private;
402 struct pci_dev *pch; 300 struct pci_dev *pch;
@@ -410,58 +308,86 @@ void intel_detect_pch(struct drm_device *dev)
410 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 308 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
411 if (pch) { 309 if (pch) {
412 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 310 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
413 unsigned short id; 311 int id;
414 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 312 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
415 dev_priv->pch_id = id;
416 313
417 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 314 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
418 dev_priv->pch_type = PCH_IBX; 315 dev_priv->pch_type = PCH_IBX;
419 dev_priv->num_pch_pll = 2;
420 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 316 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
421 WARN_ON(!IS_GEN5(dev));
422 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 317 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
423 dev_priv->pch_type = PCH_CPT; 318 dev_priv->pch_type = PCH_CPT;
424 dev_priv->num_pch_pll = 2;
425 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 319 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
426 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
427 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 320 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
428 /* PantherPoint is CPT compatible */ 321 /* PantherPoint is CPT compatible */
429 dev_priv->pch_type = PCH_CPT; 322 dev_priv->pch_type = PCH_CPT;
430 dev_priv->num_pch_pll = 2;
431 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 323 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
432 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
433 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
434 dev_priv->pch_type = PCH_LPT;
435 dev_priv->num_pch_pll = 0;
436 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
437 WARN_ON(!IS_HASWELL(dev));
438 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
439 dev_priv->pch_type = PCH_LPT;
440 dev_priv->num_pch_pll = 0;
441 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
442 WARN_ON(!IS_HASWELL(dev));
443 } 324 }
444 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
445 } 325 }
446 pci_dev_put(pch); 326 pci_dev_put(pch);
447 } 327 }
448} 328}
449 329
450bool i915_semaphore_is_enabled(struct drm_device *dev) 330static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
451{ 331{
452 if (INTEL_INFO(dev)->gen < 6) 332 int count;
453 return 0;
454 333
455 if (i915_semaphores >= 0) 334 count = 0;
456 return i915_semaphores; 335 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
336 udelay(10);
457 337
458#ifdef CONFIG_INTEL_IOMMU 338 I915_WRITE_NOTRACE(FORCEWAKE, 1);
459 /* Enable semaphores on SNB when IO remapping is off */ 339 POSTING_READ(FORCEWAKE);
460 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 340
461 return false; 341 count = 0;
462#endif 342 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
343 udelay(10);
344}
345
346/*
347 * Generally this is called implicitly by the register read function. However,
348 * if some sequence requires the GT to not power down then this function should
349 * be called at the beginning of the sequence followed by a call to
350 * gen6_gt_force_wake_put() at the end of the sequence.
351 */
352void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
353{
354 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
355
356 /* Forcewake is atomic in case we get in here without the lock */
357 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
358 __gen6_gt_force_wake_get(dev_priv);
359}
360
361static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
362{
363 I915_WRITE_NOTRACE(FORCEWAKE, 0);
364 POSTING_READ(FORCEWAKE);
365}
463 366
464 return 1; 367/*
368 * see gen6_gt_force_wake_get()
369 */
370void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
371{
372 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
373
374 if (atomic_dec_and_test(&dev_priv->forcewake_count))
375 __gen6_gt_force_wake_put(dev_priv);
376}
377
378void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
379{
380 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
381 int loop = 500;
382 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
383 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
384 udelay(10);
385 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
386 }
387 WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
388 dev_priv->gt_fifo_count = fifo;
389 }
390 dev_priv->gt_fifo_count--;
465} 391}
466 392
467static int i915_drm_freeze(struct drm_device *dev) 393static int i915_drm_freeze(struct drm_device *dev)
@@ -480,11 +406,6 @@ static int i915_drm_freeze(struct drm_device *dev)
480 "GEM idle failed, resume might fail\n"); 406 "GEM idle failed, resume might fail\n");
481 return error; 407 return error;
482 } 408 }
483
484 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
485
486 intel_modeset_disable(dev);
487
488 drm_irq_uninstall(dev); 409 drm_irq_uninstall(dev);
489 } 410 }
490 411
@@ -495,10 +416,6 @@ static int i915_drm_freeze(struct drm_device *dev)
495 /* Modeset on resume, not lid events */ 416 /* Modeset on resume, not lid events */
496 dev_priv->modeset_on_lid = 0; 417 dev_priv->modeset_on_lid = 0;
497 418
498 console_lock();
499 intel_fbdev_set_suspend(dev, 1);
500 console_unlock();
501
502 return 0; 419 return 0;
503} 420}
504 421
@@ -532,80 +449,47 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
532 return 0; 449 return 0;
533} 450}
534 451
535void intel_console_resume(struct work_struct *work) 452static int i915_drm_thaw(struct drm_device *dev)
536{
537 struct drm_i915_private *dev_priv =
538 container_of(work, struct drm_i915_private,
539 console_resume_work);
540 struct drm_device *dev = dev_priv->dev;
541
542 console_lock();
543 intel_fbdev_set_suspend(dev, 0);
544 console_unlock();
545}
546
547static int __i915_drm_thaw(struct drm_device *dev)
548{ 453{
549 struct drm_i915_private *dev_priv = dev->dev_private; 454 struct drm_i915_private *dev_priv = dev->dev_private;
550 int error = 0; 455 int error = 0;
551 456
457 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
458 mutex_lock(&dev->struct_mutex);
459 i915_gem_restore_gtt_mappings(dev);
460 mutex_unlock(&dev->struct_mutex);
461 }
462
552 i915_restore_state(dev); 463 i915_restore_state(dev);
553 intel_opregion_setup(dev); 464 intel_opregion_setup(dev);
554 465
555 /* KMS EnterVT equivalent */ 466 /* KMS EnterVT equivalent */
556 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 467 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
557 intel_init_pch_refclk(dev);
558
559 mutex_lock(&dev->struct_mutex); 468 mutex_lock(&dev->struct_mutex);
560 dev_priv->mm.suspended = 0; 469 dev_priv->mm.suspended = 0;
561 470
562 error = i915_gem_init_hw(dev); 471 error = i915_gem_init_ringbuffer(dev);
563 mutex_unlock(&dev->struct_mutex); 472 mutex_unlock(&dev->struct_mutex);
564 473
565 intel_modeset_init_hw(dev); 474 drm_mode_config_reset(dev);
566 intel_modeset_setup_hw_state(dev, false);
567 drm_irq_install(dev); 475 drm_irq_install(dev);
568 }
569 476
570 intel_opregion_init(dev); 477 /* Resume the modeset for every activated CRTC */
478 drm_helper_resume_force_mode(dev);
571 479
572 dev_priv->modeset_on_lid = 0; 480 if (IS_IRONLAKE_M(dev))
573 481 ironlake_enable_rc6(dev);
574 /*
575 * The console lock can be pretty contented on resume due
576 * to all the printk activity. Try to keep it out of the hot
577 * path of resume if possible.
578 */
579 if (console_trylock()) {
580 intel_fbdev_set_suspend(dev, 0);
581 console_unlock();
582 } else {
583 schedule_work(&dev_priv->console_resume_work);
584 } 482 }
585 483
586 return error; 484 intel_opregion_init(dev);
587}
588
589static int i915_drm_thaw(struct drm_device *dev)
590{
591 int error = 0;
592
593 intel_gt_reset(dev);
594
595 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
596 mutex_lock(&dev->struct_mutex);
597 i915_gem_restore_gtt_mappings(dev);
598 mutex_unlock(&dev->struct_mutex);
599 }
600 485
601 __i915_drm_thaw(dev); 486 dev_priv->modeset_on_lid = 0;
602 487
603 return error; 488 return error;
604} 489}
605 490
606int i915_resume(struct drm_device *dev) 491int i915_resume(struct drm_device *dev)
607{ 492{
608 struct drm_i915_private *dev_priv = dev->dev_private;
609 int ret; 493 int ret;
610 494
611 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 495 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -616,20 +500,7 @@ int i915_resume(struct drm_device *dev)
616 500
617 pci_set_master(dev->pdev); 501 pci_set_master(dev->pdev);
618 502
619 intel_gt_reset(dev); 503 ret = i915_drm_thaw(dev);
620
621 /*
622 * Platforms with opregion should have sane BIOS, older ones (gen3 and
623 * earlier) need this since the BIOS might clear all our scratch PTEs.
624 */
625 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
626 !dev_priv->opregion.header) {
627 mutex_lock(&dev->struct_mutex);
628 i915_gem_restore_gtt_mappings(dev);
629 mutex_unlock(&dev->struct_mutex);
630 }
631
632 ret = __i915_drm_thaw(dev);
633 if (ret) 504 if (ret)
634 return ret; 505 return ret;
635 506
@@ -637,7 +508,7 @@ int i915_resume(struct drm_device *dev)
637 return 0; 508 return 0;
638} 509}
639 510
640static int i8xx_do_reset(struct drm_device *dev) 511static int i8xx_do_reset(struct drm_device *dev, u8 flags)
641{ 512{
642 struct drm_i915_private *dev_priv = dev->dev_private; 513 struct drm_i915_private *dev_priv = dev->dev_private;
643 514
@@ -671,12 +542,11 @@ static int i965_reset_complete(struct drm_device *dev)
671{ 542{
672 u8 gdrst; 543 u8 gdrst;
673 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 544 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
674 return (gdrst & GRDOM_RESET_ENABLE) == 0; 545 return gdrst & 0x1;
675} 546}
676 547
677static int i965_do_reset(struct drm_device *dev) 548static int i965_do_reset(struct drm_device *dev, u8 flags)
678{ 549{
679 int ret;
680 u8 gdrst; 550 u8 gdrst;
681 551
682 /* 552 /*
@@ -685,115 +555,31 @@ static int i965_do_reset(struct drm_device *dev)
685 * triggers the reset; when done, the hardware will clear it. 555 * triggers the reset; when done, the hardware will clear it.
686 */ 556 */
687 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 557 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
688 pci_write_config_byte(dev->pdev, I965_GDRST, 558 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
689 gdrst | GRDOM_RENDER |
690 GRDOM_RESET_ENABLE);
691 ret = wait_for(i965_reset_complete(dev), 500);
692 if (ret)
693 return ret;
694
695 /* We can't reset render&media without also resetting display ... */
696 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
697 pci_write_config_byte(dev->pdev, I965_GDRST,
698 gdrst | GRDOM_MEDIA |
699 GRDOM_RESET_ENABLE);
700 559
701 return wait_for(i965_reset_complete(dev), 500); 560 return wait_for(i965_reset_complete(dev), 500);
702} 561}
703 562
704static int ironlake_do_reset(struct drm_device *dev) 563static int ironlake_do_reset(struct drm_device *dev, u8 flags)
705{ 564{
706 struct drm_i915_private *dev_priv = dev->dev_private; 565 struct drm_i915_private *dev_priv = dev->dev_private;
707 u32 gdrst; 566 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
708 int ret; 567 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
709
710 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
711 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
712 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
713 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
714 if (ret)
715 return ret;
716
717 /* We can't reset render&media without also resetting display ... */
718 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
719 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
720 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
721 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 568 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
722} 569}
723 570
724static int gen6_do_reset(struct drm_device *dev) 571static int gen6_do_reset(struct drm_device *dev, u8 flags)
725{ 572{
726 struct drm_i915_private *dev_priv = dev->dev_private; 573 struct drm_i915_private *dev_priv = dev->dev_private;
727 int ret;
728 unsigned long irqflags;
729
730 /* Hold gt_lock across reset to prevent any register access
731 * with forcewake not set correctly
732 */
733 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
734 574
735 /* Reset the chip */ 575 I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
736 576 return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
737 /* GEN6_GDRST is not in the gt power well, no need to check
738 * for fifo space for the write or forcewake the chip for
739 * the read
740 */
741 I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
742
743 /* Spin waiting for the device to ack the reset request */
744 ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
745
746 /* If reset with a user forcewake, try to restore, otherwise turn it off */
747 if (dev_priv->forcewake_count)
748 dev_priv->gt.force_wake_get(dev_priv);
749 else
750 dev_priv->gt.force_wake_put(dev_priv);
751
752 /* Restore fifo count */
753 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
754
755 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
756 return ret;
757}
758
759int intel_gpu_reset(struct drm_device *dev)
760{
761 struct drm_i915_private *dev_priv = dev->dev_private;
762 int ret = -ENODEV;
763
764 switch (INTEL_INFO(dev)->gen) {
765 case 7:
766 case 6:
767 ret = gen6_do_reset(dev);
768 break;
769 case 5:
770 ret = ironlake_do_reset(dev);
771 break;
772 case 4:
773 ret = i965_do_reset(dev);
774 break;
775 case 2:
776 ret = i8xx_do_reset(dev);
777 break;
778 }
779
780 /* Also reset the gpu hangman. */
781 if (dev_priv->stop_rings) {
782 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
783 dev_priv->stop_rings = 0;
784 if (ret == -ENODEV) {
785 DRM_ERROR("Reset not implemented, but ignoring "
786 "error for simulated gpu hangs\n");
787 ret = 0;
788 }
789 }
790
791 return ret;
792} 577}
793 578
794/** 579/**
795 * i915_reset - reset chip after a hang 580 * i965_reset - reset chip after a hang
796 * @dev: drm device to reset 581 * @dev: drm device to reset
582 * @flags: reset domains
797 * 583 *
798 * Reset the chip. Useful if a hang is detected. Returns zero on successful 584 * Reset the chip. Useful if a hang is detected. Returns zero on successful
799 * reset or otherwise an error code. 585 * reset or otherwise an error code.
@@ -806,24 +592,45 @@ int intel_gpu_reset(struct drm_device *dev)
806 * - re-init interrupt state 592 * - re-init interrupt state
807 * - re-init display 593 * - re-init display
808 */ 594 */
809int i915_reset(struct drm_device *dev) 595int i915_reset(struct drm_device *dev, u8 flags)
810{ 596{
811 drm_i915_private_t *dev_priv = dev->dev_private; 597 drm_i915_private_t *dev_priv = dev->dev_private;
598 /*
599 * We really should only reset the display subsystem if we actually
600 * need to
601 */
602 bool need_display = true;
812 int ret; 603 int ret;
813 604
814 if (!i915_try_reset) 605 if (!i915_try_reset)
815 return 0; 606 return 0;
816 607
817 mutex_lock(&dev->struct_mutex); 608 if (!mutex_trylock(&dev->struct_mutex))
609 return -EBUSY;
818 610
819 i915_gem_reset(dev); 611 i915_gem_reset(dev);
820 612
821 ret = -ENODEV; 613 ret = -ENODEV;
822 if (get_seconds() - dev_priv->last_gpu_reset < 5) 614 if (get_seconds() - dev_priv->last_gpu_reset < 5) {
823 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 615 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
824 else 616 } else switch (INTEL_INFO(dev)->gen) {
825 ret = intel_gpu_reset(dev); 617 case 7:
826 618 case 6:
619 ret = gen6_do_reset(dev, flags);
620 /* If reset with a user forcewake, try to restore */
621 if (atomic_read(&dev_priv->forcewake_count))
622 __gen6_gt_force_wake_get(dev_priv);
623 break;
624 case 5:
625 ret = ironlake_do_reset(dev, flags);
626 break;
627 case 4:
628 ret = i965_do_reset(dev, flags);
629 break;
630 case 2:
631 ret = i8xx_do_reset(dev, flags);
632 break;
633 }
827 dev_priv->last_gpu_reset = get_seconds(); 634 dev_priv->last_gpu_reset = get_seconds();
828 if (ret) { 635 if (ret) {
829 DRM_ERROR("Failed to reset chip.\n"); 636 DRM_ERROR("Failed to reset chip.\n");
@@ -847,47 +654,41 @@ int i915_reset(struct drm_device *dev)
847 */ 654 */
848 if (drm_core_check_feature(dev, DRIVER_MODESET) || 655 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
849 !dev_priv->mm.suspended) { 656 !dev_priv->mm.suspended) {
850 struct intel_ring_buffer *ring;
851 int i;
852
853 dev_priv->mm.suspended = 0; 657 dev_priv->mm.suspended = 0;
854 658
855 i915_gem_init_swizzling(dev); 659 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
856 660 if (HAS_BSD(dev))
857 for_each_ring(ring, dev_priv, i) 661 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
858 ring->init(ring); 662 if (HAS_BLT(dev))
859 663 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
860 i915_gem_context_init(dev);
861 i915_gem_init_ppgtt(dev);
862
863 /*
864 * It would make sense to re-init all the other hw state, at
865 * least the rps/rc6/emon init done within modeset_init_hw. For
866 * some unknown reason, this blows up my ilk, so don't.
867 */
868 664
869 mutex_unlock(&dev->struct_mutex); 665 mutex_unlock(&dev->struct_mutex);
870
871 drm_irq_uninstall(dev); 666 drm_irq_uninstall(dev);
667 drm_mode_config_reset(dev);
872 drm_irq_install(dev); 668 drm_irq_install(dev);
873 } else { 669 mutex_lock(&dev->struct_mutex);
874 mutex_unlock(&dev->struct_mutex); 670 }
671
672 mutex_unlock(&dev->struct_mutex);
673
674 /*
675 * Perform a full modeset as on later generations, e.g. Ironlake, we may
676 * need to retrain the display link and cannot just restore the register
677 * values.
678 */
679 if (need_display) {
680 mutex_lock(&dev->mode_config.mutex);
681 drm_helper_resume_force_mode(dev);
682 mutex_unlock(&dev->mode_config.mutex);
875 } 683 }
876 684
877 return 0; 685 return 0;
878} 686}
879 687
880static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
881{
882 struct intel_device_info *intel_info =
883 (struct intel_device_info *) ent->driver_data;
884
885 if (intel_info->is_valleyview)
886 if(!i915_preliminary_hw_support) {
887 DRM_ERROR("Preliminary hardware support disabled\n");
888 return -ENODEV;
889 }
890 688
689static int __devinit
690i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
691{
891 /* Only bind to function 0 of the device. Early generations 692 /* Only bind to function 0 of the device. Early generations
892 * used function 1 as a placeholder for multi-head. This causes 693 * used function 1 as a placeholder for multi-head. This causes
893 * us confusion instead, especially on the systems where both 694 * us confusion instead, especially on the systems where both
@@ -896,18 +697,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
896 if (PCI_FUNC(pdev->devfn)) 697 if (PCI_FUNC(pdev->devfn))
897 return -ENODEV; 698 return -ENODEV;
898 699
899 /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
900 * implementation for gen3 (and only gen3) that used legacy drm maps
901 * (gasp!) to share buffers between X and the client. Hence we need to
902 * keep around the fake agp stuff for gen3, even when kms is enabled. */
903 if (intel_info->gen != 3) {
904 driver.driver_features &=
905 ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
906 } else if (!intel_agp_enabled) {
907 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
908 return -ENODEV;
909 }
910
911 return drm_get_pci_dev(pdev, ent, &driver); 700 return drm_get_pci_dev(pdev, ent, &driver);
912} 701}
913 702
@@ -981,42 +770,27 @@ static int i915_pm_poweroff(struct device *dev)
981} 770}
982 771
983static const struct dev_pm_ops i915_pm_ops = { 772static const struct dev_pm_ops i915_pm_ops = {
984 .suspend = i915_pm_suspend, 773 .suspend = i915_pm_suspend,
985 .resume = i915_pm_resume, 774 .resume = i915_pm_resume,
986 .freeze = i915_pm_freeze, 775 .freeze = i915_pm_freeze,
987 .thaw = i915_pm_thaw, 776 .thaw = i915_pm_thaw,
988 .poweroff = i915_pm_poweroff, 777 .poweroff = i915_pm_poweroff,
989 .restore = i915_pm_resume, 778 .restore = i915_pm_resume,
990}; 779};
991 780
992static const struct vm_operations_struct i915_gem_vm_ops = { 781static struct vm_operations_struct i915_gem_vm_ops = {
993 .fault = i915_gem_fault, 782 .fault = i915_gem_fault,
994 .open = drm_gem_vm_open, 783 .open = drm_gem_vm_open,
995 .close = drm_gem_vm_close, 784 .close = drm_gem_vm_close,
996}; 785};
997 786
998static const struct file_operations i915_driver_fops = {
999 .owner = THIS_MODULE,
1000 .open = drm_open,
1001 .release = drm_release,
1002 .unlocked_ioctl = drm_ioctl,
1003 .mmap = drm_gem_mmap,
1004 .poll = drm_poll,
1005 .fasync = drm_fasync,
1006 .read = drm_read,
1007#ifdef CONFIG_COMPAT
1008 .compat_ioctl = i915_compat_ioctl,
1009#endif
1010 .llseek = noop_llseek,
1011};
1012
1013static struct drm_driver driver = { 787static struct drm_driver driver = {
1014 /* Don't use MTRRs here; the Xserver or userspace app should 788 /* don't use mtrr's here, the Xserver or user space app should
1015 * deal with them for Intel hardware. 789 * deal with them for intel hardware.
1016 */ 790 */
1017 .driver_features = 791 .driver_features =
1018 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 792 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
1019 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, 793 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
1020 .load = i915_driver_load, 794 .load = i915_driver_load,
1021 .unload = i915_driver_unload, 795 .unload = i915_driver_unload,
1022 .open = i915_driver_open, 796 .open = i915_driver_open,
@@ -1029,6 +803,7 @@ static struct drm_driver driver = {
1029 .resume = i915_resume, 803 .resume = i915_resume,
1030 804
1031 .device_is_agp = i915_driver_device_is_agp, 805 .device_is_agp = i915_driver_device_is_agp,
806 .reclaim_buffers = drm_core_reclaim_buffers,
1032 .master_create = i915_master_create, 807 .master_create = i915_master_create,
1033 .master_destroy = i915_master_destroy, 808 .master_destroy = i915_master_destroy,
1034#if defined(CONFIG_DEBUG_FS) 809#if defined(CONFIG_DEBUG_FS)
@@ -1038,17 +813,25 @@ static struct drm_driver driver = {
1038 .gem_init_object = i915_gem_init_object, 813 .gem_init_object = i915_gem_init_object,
1039 .gem_free_object = i915_gem_free_object, 814 .gem_free_object = i915_gem_free_object,
1040 .gem_vm_ops = &i915_gem_vm_ops, 815 .gem_vm_ops = &i915_gem_vm_ops,
1041
1042 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1043 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1044 .gem_prime_export = i915_gem_prime_export,
1045 .gem_prime_import = i915_gem_prime_import,
1046
1047 .dumb_create = i915_gem_dumb_create, 816 .dumb_create = i915_gem_dumb_create,
1048 .dumb_map_offset = i915_gem_mmap_gtt, 817 .dumb_map_offset = i915_gem_mmap_gtt,
1049 .dumb_destroy = i915_gem_dumb_destroy, 818 .dumb_destroy = i915_gem_dumb_destroy,
1050 .ioctls = i915_ioctls, 819 .ioctls = i915_ioctls,
1051 .fops = &i915_driver_fops, 820 .fops = {
821 .owner = THIS_MODULE,
822 .open = drm_open,
823 .release = drm_release,
824 .unlocked_ioctl = drm_ioctl,
825 .mmap = drm_gem_mmap,
826 .poll = drm_poll,
827 .fasync = drm_fasync,
828 .read = drm_read,
829#ifdef CONFIG_COMPAT
830 .compat_ioctl = i915_compat_ioctl,
831#endif
832 .llseek = noop_llseek,
833 },
834
1052 .name = DRIVER_NAME, 835 .name = DRIVER_NAME,
1053 .desc = DRIVER_DESC, 836 .desc = DRIVER_DESC,
1054 .date = DRIVER_DATE, 837 .date = DRIVER_DATE,
@@ -1067,6 +850,11 @@ static struct pci_driver i915_pci_driver = {
1067 850
1068static int __init i915_init(void) 851static int __init i915_init(void)
1069{ 852{
853 if (!intel_agp_enabled) {
854 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
855 return -ENODEV;
856 }
857
1070 driver.num_ioctls = i915_max_ioctl; 858 driver.num_ioctls = i915_max_ioctl;
1071 859
1072 /* 860 /*
@@ -1107,220 +895,3 @@ module_exit(i915_exit);
1107MODULE_AUTHOR(DRIVER_AUTHOR); 895MODULE_AUTHOR(DRIVER_AUTHOR);
1108MODULE_DESCRIPTION(DRIVER_DESC); 896MODULE_DESCRIPTION(DRIVER_DESC);
1109MODULE_LICENSE("GPL and additional rights"); 897MODULE_LICENSE("GPL and additional rights");
1110
1111/* We give fast paths for the really cool registers */
1112#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1113 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1114 ((reg) < 0x40000) && \
1115 ((reg) != FORCEWAKE))
1116
1117static bool IS_DISPLAYREG(u32 reg)
1118{
1119 /*
1120 * This should make it easier to transition modules over to the
1121 * new register block scheme, since we can do it incrementally.
1122 */
1123 if (reg >= VLV_DISPLAY_BASE)
1124 return false;
1125
1126 if (reg >= RENDER_RING_BASE &&
1127 reg < RENDER_RING_BASE + 0xff)
1128 return false;
1129 if (reg >= GEN6_BSD_RING_BASE &&
1130 reg < GEN6_BSD_RING_BASE + 0xff)
1131 return false;
1132 if (reg >= BLT_RING_BASE &&
1133 reg < BLT_RING_BASE + 0xff)
1134 return false;
1135
1136 if (reg == PGTBL_ER)
1137 return false;
1138
1139 if (reg >= IPEIR_I965 &&
1140 reg < HWSTAM)
1141 return false;
1142
1143 if (reg == MI_MODE)
1144 return false;
1145
1146 if (reg == GFX_MODE_GEN7)
1147 return false;
1148
1149 if (reg == RENDER_HWS_PGA_GEN7 ||
1150 reg == BSD_HWS_PGA_GEN7 ||
1151 reg == BLT_HWS_PGA_GEN7)
1152 return false;
1153
1154 if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
1155 reg == GEN6_BSD_RNCID)
1156 return false;
1157
1158 if (reg == GEN6_BLITTER_ECOSKPD)
1159 return false;
1160
1161 if (reg >= 0x4000c &&
1162 reg <= 0x4002c)
1163 return false;
1164
1165 if (reg >= 0x4f000 &&
1166 reg <= 0x4f08f)
1167 return false;
1168
1169 if (reg >= 0x4f100 &&
1170 reg <= 0x4f11f)
1171 return false;
1172
1173 if (reg >= VLV_MASTER_IER &&
1174 reg <= GEN6_PMIER)
1175 return false;
1176
1177 if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
1178 reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
1179 return false;
1180
1181 if (reg >= VLV_IIR_RW &&
1182 reg <= VLV_ISR)
1183 return false;
1184
1185 if (reg == FORCEWAKE_VLV ||
1186 reg == FORCEWAKE_ACK_VLV)
1187 return false;
1188
1189 if (reg == GEN6_GDRST)
1190 return false;
1191
1192 switch (reg) {
1193 case _3D_CHICKEN3:
1194 case IVB_CHICKEN3:
1195 case GEN7_COMMON_SLICE_CHICKEN1:
1196 case GEN7_L3CNTLREG1:
1197 case GEN7_L3_CHICKEN_MODE_REGISTER:
1198 case GEN7_ROW_CHICKEN2:
1199 case GEN7_L3SQCREG4:
1200 case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
1201 case GEN7_HALF_SLICE_CHICKEN1:
1202 case GEN6_MBCTL:
1203 case GEN6_UCGCTL2:
1204 return false;
1205 default:
1206 break;
1207 }
1208
1209 return true;
1210}
1211
1212static void
1213ilk_dummy_write(struct drm_i915_private *dev_priv)
1214{
1215 /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
1216 * chip from rc6 before touching it for real. MI_MODE is masked, hence
1217 * harmless to write 0 into. */
1218 I915_WRITE_NOTRACE(MI_MODE, 0);
1219}
1220
1221#define __i915_read(x, y) \
1222u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1223 u##x val = 0; \
1224 if (IS_GEN5(dev_priv->dev)) \
1225 ilk_dummy_write(dev_priv); \
1226 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1227 unsigned long irqflags; \
1228 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1229 if (dev_priv->forcewake_count == 0) \
1230 dev_priv->gt.force_wake_get(dev_priv); \
1231 val = read##y(dev_priv->regs + reg); \
1232 if (dev_priv->forcewake_count == 0) \
1233 dev_priv->gt.force_wake_put(dev_priv); \
1234 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1235 } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1236 val = read##y(dev_priv->regs + reg + 0x180000); \
1237 } else { \
1238 val = read##y(dev_priv->regs + reg); \
1239 } \
1240 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1241 return val; \
1242}
1243
1244__i915_read(8, b)
1245__i915_read(16, w)
1246__i915_read(32, l)
1247__i915_read(64, q)
1248#undef __i915_read
1249
1250#define __i915_write(x, y) \
1251void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1252 u32 __fifo_ret = 0; \
1253 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1254 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1255 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1256 } \
1257 if (IS_GEN5(dev_priv->dev)) \
1258 ilk_dummy_write(dev_priv); \
1259 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
1260 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
1261 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
1262 } \
1263 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1264 write##y(val, dev_priv->regs + reg + 0x180000); \
1265 } else { \
1266 write##y(val, dev_priv->regs + reg); \
1267 } \
1268 if (unlikely(__fifo_ret)) { \
1269 gen6_gt_check_fifodbg(dev_priv); \
1270 } \
1271 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
1272 DRM_ERROR("Unclaimed write to %x\n", reg); \
1273 writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
1274 } \
1275}
1276__i915_write(8, b)
1277__i915_write(16, w)
1278__i915_write(32, l)
1279__i915_write(64, q)
1280#undef __i915_write
1281
1282static const struct register_whitelist {
1283 uint64_t offset;
1284 uint32_t size;
1285 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1286} whitelist[] = {
1287 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
1288};
1289
1290int i915_reg_read_ioctl(struct drm_device *dev,
1291 void *data, struct drm_file *file)
1292{
1293 struct drm_i915_private *dev_priv = dev->dev_private;
1294 struct drm_i915_reg_read *reg = data;
1295 struct register_whitelist const *entry = whitelist;
1296 int i;
1297
1298 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1299 if (entry->offset == reg->offset &&
1300 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1301 break;
1302 }
1303
1304 if (i == ARRAY_SIZE(whitelist))
1305 return -EINVAL;
1306
1307 switch (entry->size) {
1308 case 8:
1309 reg->val = I915_READ64(reg->offset);
1310 break;
1311 case 4:
1312 reg->val = I915_READ(reg->offset);
1313 break;
1314 case 2:
1315 reg->val = I915_READ16(reg->offset);
1316 break;
1317 case 1:
1318 reg->val = I915_READ8(reg->offset);
1319 break;
1320 default:
1321 WARN_ON(1);
1322 return -EINVAL;
1323 }
1324
1325 return 0;
1326}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed305957557..1a2a2d1790b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,11 +35,8 @@
35#include "intel_ringbuffer.h" 35#include "intel_ringbuffer.h"
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <linux/i2c-algo-bit.h>
39#include <drm/intel-gtt.h> 38#include <drm/intel-gtt.h>
40#include <linux/backlight.h> 39#include <linux/backlight.h>
41#include <linux/intel-iommu.h>
42#include <linux/kref.h>
43 40
44/* General customization: 41/* General customization:
45 */ 42 */
@@ -58,14 +55,6 @@ enum pipe {
58}; 55};
59#define pipe_name(p) ((p) + 'A') 56#define pipe_name(p) ((p) + 'A')
60 57
61enum transcoder {
62 TRANSCODER_A = 0,
63 TRANSCODER_B,
64 TRANSCODER_C,
65 TRANSCODER_EDP = 0xF,
66};
67#define transcoder_name(t) ((t) + 'A')
68
69enum plane { 58enum plane {
70 PLANE_A = 0, 59 PLANE_A = 0,
71 PLANE_B, 60 PLANE_B,
@@ -73,40 +62,10 @@ enum plane {
73}; 62};
74#define plane_name(p) ((p) + 'A') 63#define plane_name(p) ((p) + 'A')
75 64
76enum port {
77 PORT_A = 0,
78 PORT_B,
79 PORT_C,
80 PORT_D,
81 PORT_E,
82 I915_MAX_PORTS
83};
84#define port_name(p) ((p) + 'A')
85
86#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 65#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
87 66
88#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 67#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
89 68
90#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
91 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
92 if ((intel_encoder)->base.crtc == (__crtc))
93
94struct intel_pch_pll {
95 int refcount; /* count of number of CRTCs sharing this PLL */
96 int active; /* count of number of active CRTCs (i.e. DPMS on) */
97 bool on; /* is the PLL actually active? Disabled during modeset */
98 int pll_reg;
99 int fp0_reg;
100 int fp1_reg;
101};
102#define I915_NUM_PLLS 2
103
104struct intel_ddi_plls {
105 int spll_refcount;
106 int wrpll1_refcount;
107 int wrpll2_refcount;
108};
109
110/* Interface history: 69/* Interface history:
111 * 70 *
112 * 1.1: Original. 71 * 1.1: Original.
@@ -123,7 +82,6 @@ struct intel_ddi_plls {
123 82
124#define WATCH_COHERENCY 0 83#define WATCH_COHERENCY 0
125#define WATCH_LISTS 0 84#define WATCH_LISTS 0
126#define WATCH_GTT 0
127 85
128#define I915_GEM_PHYS_CURSOR_0 1 86#define I915_GEM_PHYS_CURSOR_0 1
129#define I915_GEM_PHYS_CURSOR_1 2 87#define I915_GEM_PHYS_CURSOR_1 2
@@ -137,18 +95,25 @@ struct drm_i915_gem_phys_object {
137 struct drm_i915_gem_object *cur_obj; 95 struct drm_i915_gem_object *cur_obj;
138}; 96};
139 97
98struct mem_block {
99 struct mem_block *next;
100 struct mem_block *prev;
101 int start;
102 int size;
103 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
104};
105
140struct opregion_header; 106struct opregion_header;
141struct opregion_acpi; 107struct opregion_acpi;
142struct opregion_swsci; 108struct opregion_swsci;
143struct opregion_asle; 109struct opregion_asle;
144struct drm_i915_private;
145 110
146struct intel_opregion { 111struct intel_opregion {
147 struct opregion_header __iomem *header; 112 struct opregion_header *header;
148 struct opregion_acpi __iomem *acpi; 113 struct opregion_acpi *acpi;
149 struct opregion_swsci __iomem *swsci; 114 struct opregion_swsci *swsci;
150 struct opregion_asle __iomem *asle; 115 struct opregion_asle *asle;
151 void __iomem *vbt; 116 void *vbt;
152 u32 __iomem *lid_state; 117 u32 __iomem *lid_state;
153}; 118};
154#define OPREGION_SIZE (8*1024) 119#define OPREGION_SIZE (8*1024)
@@ -161,14 +126,11 @@ struct drm_i915_master_private {
161 struct _drm_i915_sarea *sarea_priv; 126 struct _drm_i915_sarea *sarea_priv;
162}; 127};
163#define I915_FENCE_REG_NONE -1 128#define I915_FENCE_REG_NONE -1
164#define I915_MAX_NUM_FENCES 16
165/* 16 fences + sign bit for FENCE_REG_NONE */
166#define I915_MAX_NUM_FENCE_BITS 5
167 129
168struct drm_i915_fence_reg { 130struct drm_i915_fence_reg {
169 struct list_head lru_list; 131 struct list_head lru_list;
170 struct drm_i915_gem_object *obj; 132 struct drm_i915_gem_object *obj;
171 int pin_count; 133 uint32_t setup_seqno;
172}; 134};
173 135
174struct sdvo_device_mapping { 136struct sdvo_device_mapping {
@@ -177,69 +139,56 @@ struct sdvo_device_mapping {
177 u8 slave_addr; 139 u8 slave_addr;
178 u8 dvo_wiring; 140 u8 dvo_wiring;
179 u8 i2c_pin; 141 u8 i2c_pin;
142 u8 i2c_speed;
180 u8 ddc_pin; 143 u8 ddc_pin;
181}; 144};
182 145
183struct intel_display_error_state; 146struct intel_display_error_state;
184 147
185struct drm_i915_error_state { 148struct drm_i915_error_state {
186 struct kref ref;
187 u32 eir; 149 u32 eir;
188 u32 pgtbl_er; 150 u32 pgtbl_er;
189 u32 ier;
190 u32 ccid;
191 bool waiting[I915_NUM_RINGS];
192 u32 pipestat[I915_MAX_PIPES]; 151 u32 pipestat[I915_MAX_PIPES];
193 u32 tail[I915_NUM_RINGS]; 152 u32 ipeir;
194 u32 head[I915_NUM_RINGS]; 153 u32 ipehr;
195 u32 ipeir[I915_NUM_RINGS]; 154 u32 instdone;
196 u32 ipehr[I915_NUM_RINGS]; 155 u32 acthd;
197 u32 instdone[I915_NUM_RINGS];
198 u32 acthd[I915_NUM_RINGS];
199 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
200 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
201 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
202 /* our own tracking of ring head and tail */
203 u32 cpu_ring_head[I915_NUM_RINGS];
204 u32 cpu_ring_tail[I915_NUM_RINGS];
205 u32 error; /* gen6+ */ 156 u32 error; /* gen6+ */
206 u32 err_int; /* gen7 */ 157 u32 bcs_acthd; /* gen6+ blt engine */
207 u32 instpm[I915_NUM_RINGS]; 158 u32 bcs_ipehr;
208 u32 instps[I915_NUM_RINGS]; 159 u32 bcs_ipeir;
209 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 160 u32 bcs_instdone;
210 u32 seqno[I915_NUM_RINGS]; 161 u32 bcs_seqno;
162 u32 vcs_acthd; /* gen6+ bsd engine */
163 u32 vcs_ipehr;
164 u32 vcs_ipeir;
165 u32 vcs_instdone;
166 u32 vcs_seqno;
167 u32 instpm;
168 u32 instps;
169 u32 instdone1;
170 u32 seqno;
211 u64 bbaddr; 171 u64 bbaddr;
212 u32 fault_reg[I915_NUM_RINGS]; 172 u64 fence[16];
213 u32 done_reg;
214 u32 faddr[I915_NUM_RINGS];
215 u64 fence[I915_MAX_NUM_FENCES];
216 struct timeval time; 173 struct timeval time;
217 struct drm_i915_error_ring { 174 struct drm_i915_error_object {
218 struct drm_i915_error_object { 175 int page_count;
219 int page_count; 176 u32 gtt_offset;
220 u32 gtt_offset; 177 u32 *pages[0];
221 u32 *pages[0]; 178 } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
222 } *ringbuffer, *batchbuffer;
223 struct drm_i915_error_request {
224 long jiffies;
225 u32 seqno;
226 u32 tail;
227 } *requests;
228 int num_requests;
229 } ring[I915_NUM_RINGS];
230 struct drm_i915_error_buffer { 179 struct drm_i915_error_buffer {
231 u32 size; 180 u32 size;
232 u32 name; 181 u32 name;
233 u32 rseqno, wseqno; 182 u32 seqno;
234 u32 gtt_offset; 183 u32 gtt_offset;
235 u32 read_domains; 184 u32 read_domains;
236 u32 write_domain; 185 u32 write_domain;
237 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 186 s32 fence_reg:5;
238 s32 pinned:2; 187 s32 pinned:2;
239 u32 tiling:2; 188 u32 tiling:2;
240 u32 dirty:1; 189 u32 dirty:1;
241 u32 purgeable:1; 190 u32 purgeable:1;
242 s32 ring:4; 191 u32 ring:4;
243 u32 cache_level:2; 192 u32 cache_level:2;
244 } *active_bo, *pinned_bo; 193 } *active_bo, *pinned_bo;
245 u32 active_bo_count, pinned_bo_count; 194 u32 active_bo_count, pinned_bo_count;
@@ -248,29 +197,21 @@ struct drm_i915_error_state {
248}; 197};
249 198
250struct drm_i915_display_funcs { 199struct drm_i915_display_funcs {
200 void (*dpms)(struct drm_crtc *crtc, int mode);
251 bool (*fbc_enabled)(struct drm_device *dev); 201 bool (*fbc_enabled)(struct drm_device *dev);
252 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 202 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
253 void (*disable_fbc)(struct drm_device *dev); 203 void (*disable_fbc)(struct drm_device *dev);
254 int (*get_display_clock_speed)(struct drm_device *dev); 204 int (*get_display_clock_speed)(struct drm_device *dev);
255 int (*get_fifo_size)(struct drm_device *dev, int plane); 205 int (*get_fifo_size)(struct drm_device *dev, int plane);
256 void (*update_wm)(struct drm_device *dev); 206 void (*update_wm)(struct drm_device *dev);
257 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
258 uint32_t sprite_width, int pixel_size);
259 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
260 struct drm_display_mode *mode);
261 void (*modeset_global_resources)(struct drm_device *dev);
262 int (*crtc_mode_set)(struct drm_crtc *crtc, 207 int (*crtc_mode_set)(struct drm_crtc *crtc,
263 struct drm_display_mode *mode, 208 struct drm_display_mode *mode,
264 struct drm_display_mode *adjusted_mode, 209 struct drm_display_mode *adjusted_mode,
265 int x, int y, 210 int x, int y,
266 struct drm_framebuffer *old_fb); 211 struct drm_framebuffer *old_fb);
267 void (*crtc_enable)(struct drm_crtc *crtc);
268 void (*crtc_disable)(struct drm_crtc *crtc);
269 void (*off)(struct drm_crtc *crtc);
270 void (*write_eld)(struct drm_connector *connector,
271 struct drm_crtc *crtc);
272 void (*fdi_link_train)(struct drm_crtc *crtc); 212 void (*fdi_link_train)(struct drm_crtc *crtc);
273 void (*init_clock_gating)(struct drm_device *dev); 213 void (*init_clock_gating)(struct drm_device *dev);
214 void (*init_pch_clock_gating)(struct drm_device *dev);
274 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 215 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
275 struct drm_framebuffer *fb, 216 struct drm_framebuffer *fb,
276 struct drm_i915_gem_object *obj); 217 struct drm_i915_gem_object *obj);
@@ -283,85 +224,28 @@ struct drm_i915_display_funcs {
283 /* pll clock increase/decrease */ 224 /* pll clock increase/decrease */
284}; 225};
285 226
286struct drm_i915_gt_funcs {
287 void (*force_wake_get)(struct drm_i915_private *dev_priv);
288 void (*force_wake_put)(struct drm_i915_private *dev_priv);
289};
290
291#define DEV_INFO_FLAGS \
292 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
293 DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
294 DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
295 DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
296 DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
297 DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
298 DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
299 DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
300 DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
301 DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
302 DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
303 DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
304 DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
305 DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
306 DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
307 DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
308 DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
309 DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
310 DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
311 DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
312 DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
313 DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
314 DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
315 DEV_INFO_FLAG(has_llc)
316
317struct intel_device_info { 227struct intel_device_info {
318 u8 gen; 228 u8 gen;
319 u8 is_mobile:1; 229 u8 is_mobile : 1;
320 u8 is_i85x:1; 230 u8 is_i85x : 1;
321 u8 is_i915g:1; 231 u8 is_i915g : 1;
322 u8 is_i945gm:1; 232 u8 is_i945gm : 1;
323 u8 is_g33:1; 233 u8 is_g33 : 1;
324 u8 need_gfx_hws:1; 234 u8 need_gfx_hws : 1;
325 u8 is_g4x:1; 235 u8 is_g4x : 1;
326 u8 is_pineview:1; 236 u8 is_pineview : 1;
327 u8 is_broadwater:1; 237 u8 is_broadwater : 1;
328 u8 is_crestline:1; 238 u8 is_crestline : 1;
329 u8 is_ivybridge:1; 239 u8 is_ivybridge : 1;
330 u8 is_valleyview:1; 240 u8 has_fbc : 1;
331 u8 has_force_wake:1; 241 u8 has_pipe_cxsr : 1;
332 u8 is_haswell:1; 242 u8 has_hotplug : 1;
333 u8 has_fbc:1; 243 u8 cursor_needs_physical : 1;
334 u8 has_pipe_cxsr:1; 244 u8 has_overlay : 1;
335 u8 has_hotplug:1; 245 u8 overlay_needs_physical : 1;
336 u8 cursor_needs_physical:1; 246 u8 supports_tv : 1;
337 u8 has_overlay:1; 247 u8 has_bsd_ring : 1;
338 u8 overlay_needs_physical:1; 248 u8 has_blt_ring : 1;
339 u8 supports_tv:1;
340 u8 has_bsd_ring:1;
341 u8 has_blt_ring:1;
342 u8 has_llc:1;
343};
344
345#define I915_PPGTT_PD_ENTRIES 512
346#define I915_PPGTT_PT_ENTRIES 1024
347struct i915_hw_ppgtt {
348 struct drm_device *dev;
349 unsigned num_pd_entries;
350 struct page **pt_pages;
351 uint32_t pd_offset;
352 dma_addr_t *pt_dma_addr;
353 dma_addr_t scratch_page_dma_addr;
354};
355
356
357/* This must match up with the value previously used for execbuf2.rsvd1. */
358#define DEFAULT_CONTEXT_ID 0
359struct i915_hw_context {
360 int id;
361 bool is_initialized;
362 struct drm_i915_file_private *file_priv;
363 struct intel_ring_buffer *ring;
364 struct drm_i915_gem_object *obj;
365}; 249};
366 250
367enum no_fbc_reason { 251enum no_fbc_reason {
@@ -376,38 +260,147 @@ enum no_fbc_reason {
376}; 260};
377 261
378enum intel_pch { 262enum intel_pch {
379 PCH_NONE = 0, /* No PCH present */
380 PCH_IBX, /* Ibexpeak PCH */ 263 PCH_IBX, /* Ibexpeak PCH */
381 PCH_CPT, /* Cougarpoint PCH */ 264 PCH_CPT, /* Cougarpoint PCH */
382 PCH_LPT, /* Lynxpoint PCH */
383};
384
385enum intel_sbi_destination {
386 SBI_ICLK,
387 SBI_MPHY,
388}; 265};
389 266
390#define QUIRK_PIPEA_FORCE (1<<0) 267#define QUIRK_PIPEA_FORCE (1<<0)
391#define QUIRK_LVDS_SSC_DISABLE (1<<1) 268#define QUIRK_LVDS_SSC_DISABLE (1<<1)
392#define QUIRK_INVERT_BRIGHTNESS (1<<2)
393 269
394struct intel_fbdev; 270struct intel_fbdev;
395struct intel_fbc_work; 271struct intel_fbc_work;
396 272
397struct intel_gmbus { 273typedef struct drm_i915_private {
398 struct i2c_adapter adapter; 274 struct drm_device *dev;
399 u32 force_bit; 275
400 u32 reg0; 276 const struct intel_device_info *info;
401 u32 gpio_reg; 277
402 struct i2c_algo_bit_data bit_algo; 278 int has_gem;
403 struct drm_i915_private *dev_priv; 279 int relative_constants_mode;
404}; 280
281 void __iomem *regs;
282 u32 gt_fifo_count;
283
284 struct intel_gmbus {
285 struct i2c_adapter adapter;
286 struct i2c_adapter *force_bit;
287 u32 reg0;
288 } *gmbus;
289
290 struct pci_dev *bridge_dev;
291 struct intel_ring_buffer ring[I915_NUM_RINGS];
292 uint32_t next_seqno;
293
294 drm_dma_handle_t *status_page_dmah;
295 uint32_t counter;
296 drm_local_map_t hws_map;
297 struct drm_i915_gem_object *pwrctx;
298 struct drm_i915_gem_object *renderctx;
299
300 struct resource mch_res;
301
302 unsigned int cpp;
303 int back_offset;
304 int front_offset;
305 int current_page;
306 int page_flipping;
307
308 atomic_t irq_received;
309
310 /* protects the irq masks */
311 spinlock_t irq_lock;
312 /** Cached value of IMR to avoid reads in updating the bitfield */
313 u32 pipestat[2];
314 u32 irq_mask;
315 u32 gt_irq_mask;
316 u32 pch_irq_mask;
317
318 u32 hotplug_supported_mask;
319 struct work_struct hotplug_work;
320
321 int tex_lru_log_granularity;
322 int allow_batchbuffer;
323 struct mem_block *agp_heap;
324 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
325 int vblank_pipe;
326 int num_pipe;
327
328 /* For hangcheck timer */
329#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
330 struct timer_list hangcheck_timer;
331 int hangcheck_count;
332 uint32_t last_acthd;
333 uint32_t last_instdone;
334 uint32_t last_instdone1;
335
336 unsigned long cfb_size;
337 unsigned int cfb_fb;
338 enum plane cfb_plane;
339 int cfb_y;
340 struct intel_fbc_work *fbc_work;
341
342 struct intel_opregion opregion;
343
344 /* overlay */
345 struct intel_overlay *overlay;
346
347 /* LVDS info */
348 int backlight_level; /* restore backlight to this value */
349 bool backlight_enabled;
350 struct drm_display_mode *panel_fixed_mode;
351 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
352 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
353
354 /* Feature bits from the VBIOS */
355 unsigned int int_tv_support:1;
356 unsigned int lvds_dither:1;
357 unsigned int lvds_vbt:1;
358 unsigned int int_crt_support:1;
359 unsigned int lvds_use_ssc:1;
360 int lvds_ssc_freq;
361 struct {
362 int rate;
363 int lanes;
364 int preemphasis;
365 int vswing;
405 366
406struct i915_suspend_saved_registers { 367 bool initialized;
368 bool support;
369 int bpp;
370 struct edp_power_seq pps;
371 } edp;
372 bool no_aux_handshake;
373
374 struct notifier_block lid_notifier;
375
376 int crt_ddc_pin;
377 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
378 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
379 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
380
381 unsigned int fsb_freq, mem_freq, is_ddr3;
382
383 spinlock_t error_lock;
384 struct drm_i915_error_state *first_error;
385 struct work_struct error_work;
386 struct completion error_completion;
387 struct workqueue_struct *wq;
388
389 /* Display functions */
390 struct drm_i915_display_funcs display;
391
392 /* PCH chipset type */
393 enum intel_pch pch_type;
394
395 unsigned long quirks;
396
397 /* Register state */
398 bool modeset_on_lid;
407 u8 saveLBB; 399 u8 saveLBB;
408 u32 saveDSPACNTR; 400 u32 saveDSPACNTR;
409 u32 saveDSPBCNTR; 401 u32 saveDSPBCNTR;
410 u32 saveDSPARB; 402 u32 saveDSPARB;
403 u32 saveHWS;
411 u32 savePIPEACONF; 404 u32 savePIPEACONF;
412 u32 savePIPEBCONF; 405 u32 savePIPEBCONF;
413 u32 savePIPEASRC; 406 u32 savePIPEASRC;
@@ -512,7 +505,7 @@ struct i915_suspend_saved_registers {
512 u8 saveAR[21]; 505 u8 saveAR[21];
513 u8 saveDACMASK; 506 u8 saveDACMASK;
514 u8 saveCR[37]; 507 u8 saveCR[37];
515 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 508 uint64_t saveFENCE[16];
516 u32 saveCURACNTR; 509 u32 saveCURACNTR;
517 u32 saveCURAPOS; 510 u32 saveCURAPOS;
518 u32 saveCURABASE; 511 u32 saveCURABASE;
@@ -553,219 +546,17 @@ struct i915_suspend_saved_registers {
553 u32 savePIPEB_LINK_N1; 546 u32 savePIPEB_LINK_N1;
554 u32 saveMCHBAR_RENDER_STANDBY; 547 u32 saveMCHBAR_RENDER_STANDBY;
555 u32 savePCH_PORT_HOTPLUG; 548 u32 savePCH_PORT_HOTPLUG;
556};
557
558struct intel_gen6_power_mgmt {
559 struct work_struct work;
560 u32 pm_iir;
561 /* lock - irqsave spinlock that protectects the work_struct and
562 * pm_iir. */
563 spinlock_t lock;
564
565 /* The below variables an all the rps hw state are protected by
566 * dev->struct mutext. */
567 u8 cur_delay;
568 u8 min_delay;
569 u8 max_delay;
570
571 struct delayed_work delayed_resume_work;
572
573 /*
574 * Protects RPS/RC6 register access and PCU communication.
575 * Must be taken after struct_mutex if nested.
576 */
577 struct mutex hw_lock;
578};
579
580struct intel_ilk_power_mgmt {
581 u8 cur_delay;
582 u8 min_delay;
583 u8 max_delay;
584 u8 fmax;
585 u8 fstart;
586
587 u64 last_count1;
588 unsigned long last_time1;
589 unsigned long chipset_power;
590 u64 last_count2;
591 struct timespec last_time2;
592 unsigned long gfx_power;
593 u8 corr;
594
595 int c_m;
596 int r_t;
597
598 struct drm_i915_gem_object *pwrctx;
599 struct drm_i915_gem_object *renderctx;
600};
601
602struct i915_dri1_state {
603 unsigned allow_batchbuffer : 1;
604 u32 __iomem *gfx_hws_cpu_addr;
605
606 unsigned int cpp;
607 int back_offset;
608 int front_offset;
609 int current_page;
610 int page_flipping;
611
612 uint32_t counter;
613};
614
615struct intel_l3_parity {
616 u32 *remap_info;
617 struct work_struct error_work;
618};
619
620typedef struct drm_i915_private {
621 struct drm_device *dev;
622
623 const struct intel_device_info *info;
624
625 int relative_constants_mode;
626
627 void __iomem *regs;
628
629 struct drm_i915_gt_funcs gt;
630 /** gt_fifo_count and the subsequent register write are synchronized
631 * with dev->struct_mutex. */
632 unsigned gt_fifo_count;
633 /** forcewake_count is protected by gt_lock */
634 unsigned forcewake_count;
635 /** gt_lock is also taken in irq contexts. */
636 struct spinlock gt_lock;
637
638 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
639
640 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
641 * controller on different i2c buses. */
642 struct mutex gmbus_mutex;
643
644 /**
645 * Base address of the gmbus and gpio block.
646 */
647 uint32_t gpio_mmio_base;
648
649 struct pci_dev *bridge_dev;
650 struct intel_ring_buffer ring[I915_NUM_RINGS];
651 uint32_t next_seqno;
652
653 drm_dma_handle_t *status_page_dmah;
654 struct resource mch_res;
655
656 atomic_t irq_received;
657
658 /* protects the irq masks */
659 spinlock_t irq_lock;
660
661 /* DPIO indirect register protection */
662 spinlock_t dpio_lock;
663
664 /** Cached value of IMR to avoid reads in updating the bitfield */
665 u32 pipestat[2];
666 u32 irq_mask;
667 u32 gt_irq_mask;
668 u32 pch_irq_mask;
669
670 u32 hotplug_supported_mask;
671 struct work_struct hotplug_work;
672
673 int num_pipe;
674 int num_pch_pll;
675
676 /* For hangcheck timer */
677#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
678#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
679 struct timer_list hangcheck_timer;
680 int hangcheck_count;
681 uint32_t last_acthd[I915_NUM_RINGS];
682 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
683
684 unsigned int stop_rings;
685
686 unsigned long cfb_size;
687 unsigned int cfb_fb;
688 enum plane cfb_plane;
689 int cfb_y;
690 struct intel_fbc_work *fbc_work;
691
692 struct intel_opregion opregion;
693
694 /* overlay */
695 struct intel_overlay *overlay;
696 bool sprite_scaling_enabled;
697
698 /* LVDS info */
699 int backlight_level; /* restore backlight to this value */
700 bool backlight_enabled;
701 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
702 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
703
704 /* Feature bits from the VBIOS */
705 unsigned int int_tv_support:1;
706 unsigned int lvds_dither:1;
707 unsigned int lvds_vbt:1;
708 unsigned int int_crt_support:1;
709 unsigned int lvds_use_ssc:1;
710 unsigned int display_clock_mode:1;
711 int lvds_ssc_freq;
712 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
713 unsigned int lvds_val; /* used for checking LVDS channel mode */
714 struct {
715 int rate;
716 int lanes;
717 int preemphasis;
718 int vswing;
719
720 bool initialized;
721 bool support;
722 int bpp;
723 struct edp_power_seq pps;
724 } edp;
725 bool no_aux_handshake;
726
727 int crt_ddc_pin;
728 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
729 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
730 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
731
732 unsigned int fsb_freq, mem_freq, is_ddr3;
733
734 spinlock_t error_lock;
735 /* Protected by dev->error_lock. */
736 struct drm_i915_error_state *first_error;
737 struct work_struct error_work;
738 struct completion error_completion;
739 struct workqueue_struct *wq;
740
741 /* Display functions */
742 struct drm_i915_display_funcs display;
743
744 /* PCH chipset type */
745 enum intel_pch pch_type;
746 unsigned short pch_id;
747
748 unsigned long quirks;
749
750 /* Register state */
751 bool modeset_on_lid;
752 549
753 struct { 550 struct {
754 /** Bridge to intel-gtt-ko */ 551 /** Bridge to intel-gtt-ko */
755 struct intel_gtt *gtt; 552 const struct intel_gtt *gtt;
756 /** Memory allocator for GTT stolen memory */ 553 /** Memory allocator for GTT stolen memory */
757 struct drm_mm stolen; 554 struct drm_mm stolen;
758 /** Memory allocator for GTT */ 555 /** Memory allocator for GTT */
759 struct drm_mm gtt_space; 556 struct drm_mm gtt_space;
760 /** List of all objects in gtt_space. Used to restore gtt 557 /** List of all objects in gtt_space. Used to restore gtt
761 * mappings on resume */ 558 * mappings on resume */
762 struct list_head bound_list; 559 struct list_head gtt_list;
763 /**
764 * List of objects which are not bound to the GTT (thus
765 * are idle and not used by the GPU) but still have
766 * (presumably uncached) pages still attached.
767 */
768 struct list_head unbound_list;
769 560
770 /** Usable portion of the GTT for GEM */ 561 /** Usable portion of the GTT for GEM */
771 unsigned long gtt_start; 562 unsigned long gtt_start;
@@ -773,14 +564,9 @@ typedef struct drm_i915_private {
773 unsigned long gtt_end; 564 unsigned long gtt_end;
774 565
775 struct io_mapping *gtt_mapping; 566 struct io_mapping *gtt_mapping;
776 phys_addr_t gtt_base_addr;
777 int gtt_mtrr; 567 int gtt_mtrr;
778 568
779 /** PPGTT used for aliasing the PPGTT with the GTT */
780 struct i915_hw_ppgtt *aliasing_ppgtt;
781
782 struct shrinker inactive_shrinker; 569 struct shrinker inactive_shrinker;
783 bool shrinker_no_lock_stealing;
784 570
785 /** 571 /**
786 * List of objects currently involved in rendering. 572 * List of objects currently involved in rendering.
@@ -794,6 +580,17 @@ typedef struct drm_i915_private {
794 struct list_head active_list; 580 struct list_head active_list;
795 581
796 /** 582 /**
583 * List of objects which are not in the ringbuffer but which
584 * still have a write_domain which needs to be flushed before
585 * unbinding.
586 *
587 * last_rendering_seqno is 0 while an object is in this list.
588 *
589 * A reference is held on the buffer while on this list.
590 */
591 struct list_head flushing_list;
592
593 /**
797 * LRU list of objects which are not in the ringbuffer and 594 * LRU list of objects which are not in the ringbuffer and
798 * are ready to unbind, but are still in the GTT. 595 * are ready to unbind, but are still in the GTT.
799 * 596 *
@@ -805,10 +602,24 @@ typedef struct drm_i915_private {
805 */ 602 */
806 struct list_head inactive_list; 603 struct list_head inactive_list;
807 604
605 /**
606 * LRU list of objects which are not in the ringbuffer but
607 * are still pinned in the GTT.
608 */
609 struct list_head pinned_list;
610
808 /** LRU list of objects with fence regs on them. */ 611 /** LRU list of objects with fence regs on them. */
809 struct list_head fence_list; 612 struct list_head fence_list;
810 613
811 /** 614 /**
615 * List of objects currently pending being freed.
616 *
617 * These objects are no longer in use, but due to a signal
618 * we were prevented from freeing them at the appointed time.
619 */
620 struct list_head deferred_free_list;
621
622 /**
812 * We leave the user IRQ off as much as possible, 623 * We leave the user IRQ off as much as possible,
813 * but this means that requests will finish and never 624 * but this means that requests will finish and never
814 * be retired once the system goes idle. Set a timer to 625 * be retired once the system goes idle. Set a timer to
@@ -856,41 +667,54 @@ typedef struct drm_i915_private {
856 size_t object_memory; 667 size_t object_memory;
857 u32 object_count; 668 u32 object_count;
858 } mm; 669 } mm;
859
860 /* Kernel Modesetting */
861
862 struct sdvo_device_mapping sdvo_mappings[2]; 670 struct sdvo_device_mapping sdvo_mappings[2];
863 /* indicate whether the LVDS_BORDER should be enabled or not */ 671 /* indicate whether the LVDS_BORDER should be enabled or not */
864 unsigned int lvds_border_bits; 672 unsigned int lvds_border_bits;
865 /* Panel fitter placement and size for Ironlake+ */ 673 /* Panel fitter placement and size for Ironlake+ */
866 u32 pch_pf_pos, pch_pf_size; 674 u32 pch_pf_pos, pch_pf_size;
675 int panel_t3, panel_t12;
867 676
868 struct drm_crtc *plane_to_crtc_mapping[3]; 677 struct drm_crtc *plane_to_crtc_mapping[2];
869 struct drm_crtc *pipe_to_crtc_mapping[3]; 678 struct drm_crtc *pipe_to_crtc_mapping[2];
870 wait_queue_head_t pending_flip_queue; 679 wait_queue_head_t pending_flip_queue;
871 680 bool flip_pending_is_done;
872 struct intel_pch_pll pch_plls[I915_NUM_PLLS];
873 struct intel_ddi_plls ddi_plls;
874 681
875 /* Reclocking support */ 682 /* Reclocking support */
876 bool render_reclock_avail; 683 bool render_reclock_avail;
877 bool lvds_downclock_avail; 684 bool lvds_downclock_avail;
878 /* indicates the reduced downclock for LVDS*/ 685 /* indicates the reduced downclock for LVDS*/
879 int lvds_downclock; 686 int lvds_downclock;
687 struct work_struct idle_work;
688 struct timer_list idle_timer;
689 bool busy;
880 u16 orig_clock; 690 u16 orig_clock;
881 int child_dev_num; 691 int child_dev_num;
882 struct child_device_config *child_dev; 692 struct child_device_config *child_dev;
693 struct drm_connector *int_lvds_connector;
694 struct drm_connector *int_edp_connector;
883 695
884 bool mchbar_need_disable; 696 bool mchbar_need_disable;
885 697
886 struct intel_l3_parity l3_parity; 698 struct work_struct rps_work;
699 spinlock_t rps_lock;
700 u32 pm_iir;
887 701
888 /* gen6+ rps state */ 702 u8 cur_delay;
889 struct intel_gen6_power_mgmt rps; 703 u8 min_delay;
704 u8 max_delay;
705 u8 fmax;
706 u8 fstart;
890 707
891 /* ilk-only ips/rps state. Everything in here is protected by the global 708 u64 last_count1;
892 * mchdev_lock in intel_pm.c */ 709 unsigned long last_time1;
893 struct intel_ilk_power_mgmt ips; 710 unsigned long chipset_power;
711 u64 last_count2;
712 struct timespec last_time2;
713 unsigned long gfx_power;
714 int c_m;
715 int r_t;
716 u8 corr;
717 spinlock_t *mchdev_lock;
894 718
895 enum no_fbc_reason no_fbc_reason; 719 enum no_fbc_reason no_fbc_reason;
896 720
@@ -902,117 +726,73 @@ typedef struct drm_i915_private {
902 /* list of fbdev register on this device */ 726 /* list of fbdev register on this device */
903 struct intel_fbdev *fbdev; 727 struct intel_fbdev *fbdev;
904 728
905 /*
906 * The console may be contended at resume, but we don't
907 * want it to block on it.
908 */
909 struct work_struct console_resume_work;
910
911 struct backlight_device *backlight; 729 struct backlight_device *backlight;
912 730
913 struct drm_property *broadcast_rgb_property; 731 struct drm_property *broadcast_rgb_property;
914 struct drm_property *force_audio_property; 732 struct drm_property *force_audio_property;
915 733
916 bool hw_contexts_disabled; 734 atomic_t forcewake_count;
917 uint32_t hw_context_size;
918
919 bool fdi_rx_polarity_reversed;
920
921 struct i915_suspend_saved_registers regfile;
922
923 /* Old dri1 support infrastructure, beware the dragons ya fools entering
924 * here! */
925 struct i915_dri1_state dri1;
926} drm_i915_private_t; 735} drm_i915_private_t;
927 736
928/* Iterate over initialised rings */
929#define for_each_ring(ring__, dev_priv__, i__) \
930 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
931 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
932
933enum hdmi_force_audio {
934 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
935 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
936 HDMI_AUDIO_AUTO, /* trust EDID */
937 HDMI_AUDIO_ON, /* force turn on HDMI audio */
938};
939
940enum i915_cache_level { 737enum i915_cache_level {
941 I915_CACHE_NONE = 0, 738 I915_CACHE_NONE,
942 I915_CACHE_LLC, 739 I915_CACHE_LLC,
943 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ 740 I915_CACHE_LLC_MLC, /* gen6+ */
944};
945
946struct drm_i915_gem_object_ops {
947 /* Interface between the GEM object and its backing storage.
948 * get_pages() is called once prior to the use of the associated set
949 * of pages before to binding them into the GTT, and put_pages() is
950 * called after we no longer need them. As we expect there to be
951 * associated cost with migrating pages between the backing storage
952 * and making them available for the GPU (e.g. clflush), we may hold
953 * onto the pages after they are no longer referenced by the GPU
954 * in case they may be used again shortly (for example migrating the
955 * pages to a different memory domain within the GTT). put_pages()
956 * will therefore most likely be called when the object itself is
957 * being released or under memory pressure (where we attempt to
958 * reap pages for the shrinker).
959 */
960 int (*get_pages)(struct drm_i915_gem_object *);
961 void (*put_pages)(struct drm_i915_gem_object *);
962}; 741};
963 742
964struct drm_i915_gem_object { 743struct drm_i915_gem_object {
965 struct drm_gem_object base; 744 struct drm_gem_object base;
966 745
967 const struct drm_i915_gem_object_ops *ops;
968
969 /** Current space allocated to this object in the GTT, if any. */ 746 /** Current space allocated to this object in the GTT, if any. */
970 struct drm_mm_node *gtt_space; 747 struct drm_mm_node *gtt_space;
971 struct list_head gtt_list; 748 struct list_head gtt_list;
972 749
973 /** This object's place on the active/inactive lists */ 750 /** This object's place on the active/flushing/inactive lists */
974 struct list_head ring_list; 751 struct list_head ring_list;
975 struct list_head mm_list; 752 struct list_head mm_list;
753 /** This object's place on GPU write list */
754 struct list_head gpu_write_list;
976 /** This object's place in the batchbuffer or on the eviction list */ 755 /** This object's place in the batchbuffer or on the eviction list */
977 struct list_head exec_list; 756 struct list_head exec_list;
978 757
979 /** 758 /**
980 * This is set if the object is on the active lists (has pending 759 * This is set if the object is on the active or flushing lists
981 * rendering and so a non-zero seqno), and is not set if it i s on 760 * (has pending rendering), and is not set if it's on inactive (ready
982 * inactive (ready to be unbound) list. 761 * to be unbound).
983 */ 762 */
984 unsigned int active:1; 763 unsigned int active : 1;
985 764
986 /** 765 /**
987 * This is set if the object has been written to since last bound 766 * This is set if the object has been written to since last bound
988 * to the GTT 767 * to the GTT
989 */ 768 */
990 unsigned int dirty:1; 769 unsigned int dirty : 1;
770
771 /**
772 * This is set if the object has been written to since the last
773 * GPU flush.
774 */
775 unsigned int pending_gpu_write : 1;
991 776
992 /** 777 /**
993 * Fence register bits (if any) for this object. Will be set 778 * Fence register bits (if any) for this object. Will be set
994 * as needed when mapped into the GTT. 779 * as needed when mapped into the GTT.
995 * Protected by dev->struct_mutex. 780 * Protected by dev->struct_mutex.
781 *
782 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
996 */ 783 */
997 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 784 signed int fence_reg : 5;
998 785
999 /** 786 /**
1000 * Advice: are the backing pages purgeable? 787 * Advice: are the backing pages purgeable?
1001 */ 788 */
1002 unsigned int madv:2; 789 unsigned int madv : 2;
1003 790
1004 /** 791 /**
1005 * Current tiling mode for the object. 792 * Current tiling mode for the object.
1006 */ 793 */
1007 unsigned int tiling_mode:2; 794 unsigned int tiling_mode : 2;
1008 /** 795 unsigned int tiling_changed : 1;
1009 * Whether the tiling parameters for the currently associated fence
1010 * register have changed. Note that for the purposes of tracking
1011 * tiling changes we also treat the unfenced register, the register
1012 * slot that the object occupies whilst it executes a fenced
1013 * command (such as BLT on gen2/3), as a "fence".
1014 */
1015 unsigned int fence_dirty:1;
1016 796
1017 /** How many users have pinned this object in GTT space. The following 797 /** How many users have pinned this object in GTT space. The following
1018 * users can each hold at most one reference: pwrite/pread, pin_ioctl 798 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -1023,22 +803,22 @@ struct drm_i915_gem_object {
1023 * 803 *
1024 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 804 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1025 * bits with absolutely no headroom. So use 4 bits. */ 805 * bits with absolutely no headroom. So use 4 bits. */
1026 unsigned int pin_count:4; 806 unsigned int pin_count : 4;
1027#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 807#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
1028 808
1029 /** 809 /**
1030 * Is the object at the current location in the gtt mappable and 810 * Is the object at the current location in the gtt mappable and
1031 * fenceable? Used to avoid costly recalculations. 811 * fenceable? Used to avoid costly recalculations.
1032 */ 812 */
1033 unsigned int map_and_fenceable:1; 813 unsigned int map_and_fenceable : 1;
1034 814
1035 /** 815 /**
1036 * Whether the current gtt mapping needs to be mappable (and isn't just 816 * Whether the current gtt mapping needs to be mappable (and isn't just
1037 * mappable by accident). Track pin and fault separate for a more 817 * mappable by accident). Track pin and fault separate for a more
1038 * accurate mappable working set. 818 * accurate mappable working set.
1039 */ 819 */
1040 unsigned int fault_mappable:1; 820 unsigned int fault_mappable : 1;
1041 unsigned int pin_mappable:1; 821 unsigned int pin_mappable : 1;
1042 822
1043 /* 823 /*
1044 * Is the GPU currently using a fence to access this buffer, 824 * Is the GPU currently using a fence to access this buffer,
@@ -1048,16 +828,13 @@ struct drm_i915_gem_object {
1048 828
1049 unsigned int cache_level:2; 829 unsigned int cache_level:2;
1050 830
1051 unsigned int has_aliasing_ppgtt_mapping:1; 831 struct page **pages;
1052 unsigned int has_global_gtt_mapping:1;
1053 unsigned int has_dma_mapping:1;
1054 832
1055 struct sg_table *pages; 833 /**
1056 int pages_pin_count; 834 * DMAR support
1057 835 */
1058 /* prime dma-buf support */ 836 struct scatterlist *sg_list;
1059 void *dma_buf_vmapping; 837 int num_sg;
1060 int vmapping_count;
1061 838
1062 /** 839 /**
1063 * Used for performing relocations during execbuffer insertion. 840 * Used for performing relocations during execbuffer insertion.
@@ -1073,13 +850,13 @@ struct drm_i915_gem_object {
1073 */ 850 */
1074 uint32_t gtt_offset; 851 uint32_t gtt_offset;
1075 852
853 /** Breadcrumb of last rendering to the buffer. */
854 uint32_t last_rendering_seqno;
1076 struct intel_ring_buffer *ring; 855 struct intel_ring_buffer *ring;
1077 856
1078 /** Breadcrumb of last rendering to the buffer. */
1079 uint32_t last_read_seqno;
1080 uint32_t last_write_seqno;
1081 /** Breadcrumb of last fenced GPU access to the buffer. */ 857 /** Breadcrumb of last fenced GPU access to the buffer. */
1082 uint32_t last_fenced_seqno; 858 uint32_t last_fenced_seqno;
859 struct intel_ring_buffer *last_fenced_ring;
1083 860
1084 /** Current tiling stride for the object, if it's tiled. */ 861 /** Current tiling stride for the object, if it's tiled. */
1085 uint32_t stride; 862 uint32_t stride;
@@ -1087,6 +864,13 @@ struct drm_i915_gem_object {
1087 /** Record of address bit 17 of each page at last unbind. */ 864 /** Record of address bit 17 of each page at last unbind. */
1088 unsigned long *bit_17; 865 unsigned long *bit_17;
1089 866
867
868 /**
869 * If present, while GEM_DOMAIN_CPU is in the read domain this array
870 * flags which individual pages are valid.
871 */
872 uint8_t *page_cpu_valid;
873
1090 /** User space pin count and filp owning the pin */ 874 /** User space pin count and filp owning the pin */
1091 uint32_t user_pin_count; 875 uint32_t user_pin_count;
1092 struct drm_file *pin_filp; 876 struct drm_file *pin_filp;
@@ -1101,7 +885,6 @@ struct drm_i915_gem_object {
1101 */ 885 */
1102 atomic_t pending_flip; 886 atomic_t pending_flip;
1103}; 887};
1104#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1105 888
1106#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 889#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1107 890
@@ -1122,9 +905,6 @@ struct drm_i915_gem_request {
1122 /** GEM sequence number associated with this request. */ 905 /** GEM sequence number associated with this request. */
1123 uint32_t seqno; 906 uint32_t seqno;
1124 907
1125 /** Postion in the ringbuffer of the end of the request */
1126 u32 tail;
1127
1128 /** Time at which this request was emitted, in jiffies. */ 908 /** Time at which this request was emitted, in jiffies. */
1129 unsigned long emitted_jiffies; 909 unsigned long emitted_jiffies;
1130 910
@@ -1141,7 +921,6 @@ struct drm_i915_file_private {
1141 struct spinlock lock; 921 struct spinlock lock;
1142 struct list_head request_list; 922 struct list_head request_list;
1143 } mm; 923 } mm;
1144 struct idr context_idr;
1145}; 924};
1146 925
1147#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 926#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1165,17 +944,7 @@ struct drm_i915_file_private {
1165#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 944#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1166#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 945#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1167#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 946#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1168#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1169 (dev)->pci_device == 0x0152 || \
1170 (dev)->pci_device == 0x015a)
1171#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1172 (dev)->pci_device == 0x0106 || \
1173 (dev)->pci_device == 0x010A)
1174#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1175#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1176#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 947#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1177#define IS_ULT(dev) (IS_HASWELL(dev) && \
1178 ((dev)->pci_device & 0xFF00) == 0x0A00)
1179 948
1180/* 949/*
1181 * The genX designation typically refers to the render engine, so render 950 * The genX designation typically refers to the render engine, so render
@@ -1192,18 +961,11 @@ struct drm_i915_file_private {
1192 961
1193#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 962#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1194#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 963#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1195#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1196#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 964#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1197 965
1198#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1199#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
1200
1201#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 966#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1202#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 967#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1203 968
1204/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1205#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1206
1207/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 969/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1208 * rows, which changed the alignment requirements and fence programming. 970 * rows, which changed the alignment requirements and fence programming.
1209 */ 971 */
@@ -1222,65 +984,27 @@ struct drm_i915_file_private {
1222#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 984#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1223#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 985#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1224 986
987#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1225#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 988#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1226 989
1227#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1228#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1229#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1230#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1231#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1232#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1233
1234#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 990#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1235#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1236#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 991#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1237#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 992#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1238#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
1239
1240#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1241
1242#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1243
1244#define GT_FREQUENCY_MULTIPLIER 50
1245 993
1246#include "i915_trace.h" 994#include "i915_trace.h"
1247 995
1248/**
1249 * RC6 is a special power stage which allows the GPU to enter an very
1250 * low-voltage mode when idle, using down to 0V while at this stage. This
1251 * stage is entered automatically when the GPU is idle when RC6 support is
1252 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
1253 *
1254 * There are different RC6 modes available in Intel GPU, which differentiate
1255 * among each other with the latency required to enter and leave RC6 and
1256 * voltage consumed by the GPU in different states.
1257 *
1258 * The combination of the following flags define which states GPU is allowed
1259 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
1260 * RC6pp is deepest RC6. Their support by hardware varies according to the
1261 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1262 * which brings the most power savings; deeper states save more power, but
1263 * require higher latency to switch to and wake up.
1264 */
1265#define INTEL_RC6_ENABLE (1<<0)
1266#define INTEL_RC6p_ENABLE (1<<1)
1267#define INTEL_RC6pp_ENABLE (1<<2)
1268
1269extern struct drm_ioctl_desc i915_ioctls[]; 996extern struct drm_ioctl_desc i915_ioctls[];
1270extern int i915_max_ioctl; 997extern int i915_max_ioctl;
1271extern unsigned int i915_fbpercrtc __always_unused; 998extern unsigned int i915_fbpercrtc __always_unused;
1272extern int i915_panel_ignore_lid __read_mostly; 999extern int i915_panel_ignore_lid __read_mostly;
1273extern unsigned int i915_powersave __read_mostly; 1000extern unsigned int i915_powersave __read_mostly;
1274extern int i915_semaphores __read_mostly; 1001extern unsigned int i915_semaphores __read_mostly;
1275extern unsigned int i915_lvds_downclock __read_mostly; 1002extern unsigned int i915_lvds_downclock __read_mostly;
1276extern int i915_lvds_channel_mode __read_mostly; 1003extern unsigned int i915_panel_use_ssc __read_mostly;
1277extern int i915_panel_use_ssc __read_mostly;
1278extern int i915_vbt_sdvo_panel_type __read_mostly; 1004extern int i915_vbt_sdvo_panel_type __read_mostly;
1279extern int i915_enable_rc6 __read_mostly; 1005extern unsigned int i915_enable_rc6 __read_mostly;
1280extern int i915_enable_fbc __read_mostly; 1006extern unsigned int i915_enable_fbc __read_mostly;
1281extern bool i915_enable_hangcheck __read_mostly; 1007extern bool i915_enable_hangcheck __read_mostly;
1282extern int i915_enable_ppgtt __read_mostly;
1283extern unsigned int i915_preliminary_hw_support __read_mostly;
1284 1008
1285extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1009extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1286extern int i915_resume(struct drm_device *dev); 1010extern int i915_resume(struct drm_device *dev);
@@ -1288,7 +1012,6 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
1288extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 1012extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1289 1013
1290 /* i915_dma.c */ 1014 /* i915_dma.c */
1291void i915_update_dri1_breadcrumb(struct drm_device *dev);
1292extern void i915_kernel_lost_context(struct drm_device * dev); 1015extern void i915_kernel_lost_context(struct drm_device * dev);
1293extern int i915_driver_load(struct drm_device *, unsigned long flags); 1016extern int i915_driver_load(struct drm_device *, unsigned long flags);
1294extern int i915_driver_unload(struct drm_device *); 1017extern int i915_driver_unload(struct drm_device *);
@@ -1299,31 +1022,34 @@ extern void i915_driver_preclose(struct drm_device *dev,
1299extern void i915_driver_postclose(struct drm_device *dev, 1022extern void i915_driver_postclose(struct drm_device *dev,
1300 struct drm_file *file_priv); 1023 struct drm_file *file_priv);
1301extern int i915_driver_device_is_agp(struct drm_device * dev); 1024extern int i915_driver_device_is_agp(struct drm_device * dev);
1302#ifdef CONFIG_COMPAT
1303extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 1025extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1304 unsigned long arg); 1026 unsigned long arg);
1305#endif
1306extern int i915_emit_box(struct drm_device *dev, 1027extern int i915_emit_box(struct drm_device *dev,
1307 struct drm_clip_rect *box, 1028 struct drm_clip_rect *box,
1308 int DR1, int DR4); 1029 int DR1, int DR4);
1309extern int intel_gpu_reset(struct drm_device *dev); 1030extern int i915_reset(struct drm_device *dev, u8 flags);
1310extern int i915_reset(struct drm_device *dev);
1311extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 1031extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1312extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1032extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1313extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1033extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1314extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 1034extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1315 1035
1316extern void intel_console_resume(struct work_struct *work);
1317 1036
1318/* i915_irq.c */ 1037/* i915_irq.c */
1319void i915_hangcheck_elapsed(unsigned long data); 1038void i915_hangcheck_elapsed(unsigned long data);
1320void i915_handle_error(struct drm_device *dev, bool wedged); 1039void i915_handle_error(struct drm_device *dev, bool wedged);
1040extern int i915_irq_emit(struct drm_device *dev, void *data,
1041 struct drm_file *file_priv);
1042extern int i915_irq_wait(struct drm_device *dev, void *data,
1043 struct drm_file *file_priv);
1321 1044
1322extern void intel_irq_init(struct drm_device *dev); 1045extern void intel_irq_init(struct drm_device *dev);
1323extern void intel_gt_init(struct drm_device *dev);
1324extern void intel_gt_reset(struct drm_device *dev);
1325 1046
1326void i915_error_state_free(struct kref *error_ref); 1047extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1048 struct drm_file *file_priv);
1049extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1050 struct drm_file *file_priv);
1051extern int i915_vblank_swap(struct drm_device *dev, void *data,
1052 struct drm_file *file_priv);
1327 1053
1328void 1054void
1329i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1055i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1331,7 +1057,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1331void 1057void
1332i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1058i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1333 1059
1334void intel_enable_asle(struct drm_device *dev); 1060void intel_enable_asle (struct drm_device *dev);
1335 1061
1336#ifdef CONFIG_DEBUG_FS 1062#ifdef CONFIG_DEBUG_FS
1337extern void i915_destroy_error_state(struct drm_device *dev); 1063extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1340,6 +1066,18 @@ extern void i915_destroy_error_state(struct drm_device *dev);
1340#endif 1066#endif
1341 1067
1342 1068
1069/* i915_mem.c */
1070extern int i915_mem_alloc(struct drm_device *dev, void *data,
1071 struct drm_file *file_priv);
1072extern int i915_mem_free(struct drm_device *dev, void *data,
1073 struct drm_file *file_priv);
1074extern int i915_mem_init_heap(struct drm_device *dev, void *data,
1075 struct drm_file *file_priv);
1076extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
1077 struct drm_file *file_priv);
1078extern void i915_mem_takedown(struct mem_block **heap);
1079extern void i915_mem_release(struct drm_device * dev,
1080 struct drm_file *file_priv, struct mem_block *heap);
1343/* i915_gem.c */ 1081/* i915_gem.c */
1344int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1082int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1345 struct drm_file *file_priv); 1083 struct drm_file *file_priv);
@@ -1367,10 +1105,6 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1367 struct drm_file *file_priv); 1105 struct drm_file *file_priv);
1368int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1106int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1369 struct drm_file *file_priv); 1107 struct drm_file *file_priv);
1370int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1371 struct drm_file *file);
1372int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1373 struct drm_file *file);
1374int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1108int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1375 struct drm_file *file_priv); 1109 struct drm_file *file_priv);
1376int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1110int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1385,55 +1119,27 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
1385 struct drm_file *file_priv); 1119 struct drm_file *file_priv);
1386int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 1120int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1387 struct drm_file *file_priv); 1121 struct drm_file *file_priv);
1388int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1389 struct drm_file *file_priv);
1390void i915_gem_load(struct drm_device *dev); 1122void i915_gem_load(struct drm_device *dev);
1391int i915_gem_init_object(struct drm_gem_object *obj); 1123int i915_gem_init_object(struct drm_gem_object *obj);
1392void i915_gem_object_init(struct drm_i915_gem_object *obj, 1124int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
1393 const struct drm_i915_gem_object_ops *ops); 1125 uint32_t invalidate_domains,
1126 uint32_t flush_domains);
1394struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1127struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1395 size_t size); 1128 size_t size);
1396void i915_gem_free_object(struct drm_gem_object *obj); 1129void i915_gem_free_object(struct drm_gem_object *obj);
1397int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1130int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1398 uint32_t alignment, 1131 uint32_t alignment,
1399 bool map_and_fenceable, 1132 bool map_and_fenceable);
1400 bool nonblocking);
1401void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1133void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1402int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1134int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1403void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1135void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1404void i915_gem_lastclose(struct drm_device *dev); 1136void i915_gem_lastclose(struct drm_device *dev);
1405 1137
1406int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
1407static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1408{
1409 struct scatterlist *sg = obj->pages->sgl;
1410 int nents = obj->pages->nents;
1411 while (nents > SG_MAX_SINGLE_ALLOC) {
1412 if (n < SG_MAX_SINGLE_ALLOC - 1)
1413 break;
1414
1415 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
1416 n -= SG_MAX_SINGLE_ALLOC - 1;
1417 nents -= SG_MAX_SINGLE_ALLOC - 1;
1418 }
1419 return sg_page(sg+n);
1420}
1421static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1422{
1423 BUG_ON(obj->pages == NULL);
1424 obj->pages_pin_count++;
1425}
1426static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1427{
1428 BUG_ON(obj->pages_pin_count == 0);
1429 obj->pages_pin_count--;
1430}
1431
1432int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1138int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1433int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1139int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1434 struct intel_ring_buffer *to);
1435void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1140void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1436 struct intel_ring_buffer *ring); 1141 struct intel_ring_buffer *ring,
1142 u32 seqno);
1437 1143
1438int i915_gem_dumb_create(struct drm_file *file_priv, 1144int i915_gem_dumb_create(struct drm_file *file_priv,
1439 struct drm_device *dev, 1145 struct drm_device *dev,
@@ -1441,7 +1147,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
1441int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1147int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1442 uint32_t handle, uint64_t *offset); 1148 uint32_t handle, uint64_t *offset);
1443int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, 1149int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
1444 uint32_t handle); 1150 uint32_t handle);
1445/** 1151/**
1446 * Returns true if seq1 is later than seq2. 1152 * Returns true if seq1 is later than seq2.
1447 */ 1153 */
@@ -1451,62 +1157,42 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1451 return (int32_t)(seq1 - seq2) >= 0; 1157 return (int32_t)(seq1 - seq2) >= 0;
1452} 1158}
1453 1159
1454extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1160static inline u32
1455 1161i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1456int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1457int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1458
1459static inline bool
1460i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1461{ 1162{
1462 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1163 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1463 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1164 return ring->outstanding_lazy_request = dev_priv->next_seqno;
1464 dev_priv->fence_regs[obj->fence_reg].pin_count++;
1465 return true;
1466 } else
1467 return false;
1468} 1165}
1469 1166
1470static inline void 1167int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
1471i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 1168 struct intel_ring_buffer *pipelined);
1472{ 1169int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1473 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1474 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1475 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1476 }
1477}
1478 1170
1479void i915_gem_retire_requests(struct drm_device *dev); 1171void i915_gem_retire_requests(struct drm_device *dev);
1480void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1481int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1482 bool interruptible);
1483
1484void i915_gem_reset(struct drm_device *dev); 1172void i915_gem_reset(struct drm_device *dev);
1485void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1173void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1486int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, 1174int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1487 uint32_t read_domains, 1175 uint32_t read_domains,
1488 uint32_t write_domain); 1176 uint32_t write_domain);
1489int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1177int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1490int __must_check i915_gem_init(struct drm_device *dev); 1178int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
1491int __must_check i915_gem_init_hw(struct drm_device *dev);
1492void i915_gem_l3_remap(struct drm_device *dev);
1493void i915_gem_init_swizzling(struct drm_device *dev);
1494void i915_gem_init_ppgtt(struct drm_device *dev);
1495void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1179void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1180void i915_gem_do_init(struct drm_device *dev,
1181 unsigned long start,
1182 unsigned long mappable_end,
1183 unsigned long end);
1496int __must_check i915_gpu_idle(struct drm_device *dev); 1184int __must_check i915_gpu_idle(struct drm_device *dev);
1497int __must_check i915_gem_idle(struct drm_device *dev); 1185int __must_check i915_gem_idle(struct drm_device *dev);
1498int i915_add_request(struct intel_ring_buffer *ring, 1186int __must_check i915_add_request(struct intel_ring_buffer *ring,
1499 struct drm_file *file, 1187 struct drm_file *file,
1500 u32 *seqno); 1188 struct drm_i915_gem_request *request);
1501int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1189int __must_check i915_wait_request(struct intel_ring_buffer *ring,
1502 uint32_t seqno); 1190 uint32_t seqno);
1503int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1191int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1504int __must_check 1192int __must_check
1505i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1193i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1506 bool write); 1194 bool write);
1507int __must_check 1195int __must_check
1508i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1509int __must_check
1510i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1196i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1511 u32 alignment, 1197 u32 alignment,
1512 struct intel_ring_buffer *pipelined); 1198 struct intel_ring_buffer *pipelined);
@@ -1527,62 +1213,20 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1527int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1213int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1528 enum i915_cache_level cache_level); 1214 enum i915_cache_level cache_level);
1529 1215
1530struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1531 struct dma_buf *dma_buf);
1532
1533struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1534 struct drm_gem_object *gem_obj, int flags);
1535
1536/* i915_gem_context.c */
1537void i915_gem_context_init(struct drm_device *dev);
1538void i915_gem_context_fini(struct drm_device *dev);
1539void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
1540int i915_switch_context(struct intel_ring_buffer *ring,
1541 struct drm_file *file, int to_id);
1542int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1543 struct drm_file *file);
1544int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1545 struct drm_file *file);
1546
1547/* i915_gem_gtt.c */ 1216/* i915_gem_gtt.c */
1548int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
1549void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
1550void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
1551 struct drm_i915_gem_object *obj,
1552 enum i915_cache_level cache_level);
1553void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
1554 struct drm_i915_gem_object *obj);
1555
1556void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1217void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1557int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 1218int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1558void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 1219void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
1559 enum i915_cache_level cache_level); 1220 enum i915_cache_level cache_level);
1560void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1221void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1561void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
1562void i915_gem_init_global_gtt(struct drm_device *dev,
1563 unsigned long start,
1564 unsigned long mappable_end,
1565 unsigned long end);
1566int i915_gem_gtt_init(struct drm_device *dev);
1567void i915_gem_gtt_fini(struct drm_device *dev);
1568static inline void i915_gem_chipset_flush(struct drm_device *dev)
1569{
1570 if (INTEL_INFO(dev)->gen < 6)
1571 intel_gtt_chipset_flush();
1572}
1573
1574 1222
1575/* i915_gem_evict.c */ 1223/* i915_gem_evict.c */
1576int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1224int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1577 unsigned alignment, 1225 unsigned alignment, bool mappable);
1578 unsigned cache_level, 1226int __must_check i915_gem_evict_everything(struct drm_device *dev,
1579 bool mappable, 1227 bool purgeable_only);
1580 bool nonblock); 1228int __must_check i915_gem_evict_inactive(struct drm_device *dev,
1581int i915_gem_evict_everything(struct drm_device *dev); 1229 bool purgeable_only);
1582
1583/* i915_gem_stolen.c */
1584int i915_gem_init_stolen(struct drm_device *dev);
1585void i915_gem_cleanup_stolen(struct drm_device *dev);
1586 1230
1587/* i915_gem_tiling.c */ 1231/* i915_gem_tiling.c */
1588void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1232void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1614,20 +1258,9 @@ extern int i915_restore_state(struct drm_device *dev);
1614extern int i915_save_state(struct drm_device *dev); 1258extern int i915_save_state(struct drm_device *dev);
1615extern int i915_restore_state(struct drm_device *dev); 1259extern int i915_restore_state(struct drm_device *dev);
1616 1260
1617/* i915_sysfs.c */
1618void i915_setup_sysfs(struct drm_device *dev_priv);
1619void i915_teardown_sysfs(struct drm_device *dev_priv);
1620
1621/* intel_i2c.c */ 1261/* intel_i2c.c */
1622extern int intel_setup_gmbus(struct drm_device *dev); 1262extern int intel_setup_gmbus(struct drm_device *dev);
1623extern void intel_teardown_gmbus(struct drm_device *dev); 1263extern void intel_teardown_gmbus(struct drm_device *dev);
1624extern inline bool intel_gmbus_is_port_valid(unsigned port)
1625{
1626 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
1627}
1628
1629extern struct i2c_adapter *intel_gmbus_get_adapter(
1630 struct drm_i915_private *dev_priv, unsigned port);
1631extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 1264extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1632extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 1265extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
1633extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 1266extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1662,25 +1295,17 @@ static inline void intel_unregister_dsm_handler(void) { return; }
1662#endif /* CONFIG_ACPI */ 1295#endif /* CONFIG_ACPI */
1663 1296
1664/* modesetting */ 1297/* modesetting */
1665extern void intel_modeset_init_hw(struct drm_device *dev);
1666extern void intel_modeset_init(struct drm_device *dev); 1298extern void intel_modeset_init(struct drm_device *dev);
1667extern void intel_modeset_gem_init(struct drm_device *dev); 1299extern void intel_modeset_gem_init(struct drm_device *dev);
1668extern void intel_modeset_cleanup(struct drm_device *dev); 1300extern void intel_modeset_cleanup(struct drm_device *dev);
1669extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1301extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1670extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1671 bool force_restore);
1672extern bool intel_fbc_enabled(struct drm_device *dev); 1302extern bool intel_fbc_enabled(struct drm_device *dev);
1673extern void intel_disable_fbc(struct drm_device *dev); 1303extern void intel_disable_fbc(struct drm_device *dev);
1674extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1304extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1675extern void intel_init_pch_refclk(struct drm_device *dev); 1305extern void ironlake_enable_rc6(struct drm_device *dev);
1676extern void gen6_set_rps(struct drm_device *dev, u8 val); 1306extern void gen6_set_rps(struct drm_device *dev, u8 val);
1677extern void intel_detect_pch(struct drm_device *dev); 1307extern void intel_detect_pch (struct drm_device *dev);
1678extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1308extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1679extern int intel_enable_rc6(const struct drm_device *dev);
1680
1681extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1682int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1683 struct drm_file *file);
1684 1309
1685/* overlay */ 1310/* overlay */
1686#ifdef CONFIG_DEBUG_FS 1311#ifdef CONFIG_DEBUG_FS
@@ -1693,19 +1318,55 @@ extern void intel_display_print_error_state(struct seq_file *m,
1693 struct intel_display_error_state *error); 1318 struct intel_display_error_state *error);
1694#endif 1319#endif
1695 1320
1321#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
1322
1323#define BEGIN_LP_RING(n) \
1324 intel_ring_begin(LP_RING(dev_priv), (n))
1325
1326#define OUT_RING(x) \
1327 intel_ring_emit(LP_RING(dev_priv), x)
1328
1329#define ADVANCE_LP_RING() \
1330 intel_ring_advance(LP_RING(dev_priv))
1331
1332/**
1333 * Lock test for when it's just for synchronization of ring access.
1334 *
1335 * In that case, we don't need to do it when GEM is initialized as nobody else
1336 * has access to the ring.
1337 */
1338#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
1339 if (LP_RING(dev->dev_private)->obj == NULL) \
1340 LOCK_TEST_WITH_RETURN(dev, file); \
1341} while (0)
1342
1696/* On SNB platform, before reading ring registers forcewake bit 1343/* On SNB platform, before reading ring registers forcewake bit
1697 * must be set to prevent GT core from power down and stale values being 1344 * must be set to prevent GT core from power down and stale values being
1698 * returned. 1345 * returned.
1699 */ 1346 */
1700void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1347void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1701void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1348void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1702int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1349void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1703 1350
1704int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 1351/* We give fast paths for the really cool registers */
1705int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 1352#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1353 (((dev_priv)->info->gen >= 6) && \
1354 ((reg) < 0x40000) && \
1355 ((reg) != FORCEWAKE))
1706 1356
1707#define __i915_read(x, y) \ 1357#define __i915_read(x, y) \
1708 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1358static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1359 u##x val = 0; \
1360 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1361 gen6_gt_force_wake_get(dev_priv); \
1362 val = read##y(dev_priv->regs + reg); \
1363 gen6_gt_force_wake_put(dev_priv); \
1364 } else { \
1365 val = read##y(dev_priv->regs + reg); \
1366 } \
1367 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1368 return val; \
1369}
1709 1370
1710__i915_read(8, b) 1371__i915_read(8, b)
1711__i915_read(16, w) 1372__i915_read(16, w)
@@ -1714,8 +1375,13 @@ __i915_read(64, q)
1714#undef __i915_read 1375#undef __i915_read
1715 1376
1716#define __i915_write(x, y) \ 1377#define __i915_write(x, y) \
1717 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 1378static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1718 1379 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1380 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1381 __gen6_gt_wait_for_fifo(dev_priv); \
1382 } \
1383 write##y(val, dev_priv->regs + reg); \
1384}
1719__i915_write(8, b) 1385__i915_write(8, b)
1720__i915_write(16, w) 1386__i915_write(16, w)
1721__i915_write(32, l) 1387__i915_write(32, l)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8febea6daa0..346d5574f0a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,8 +25,9 @@
25 * 25 *
26 */ 26 */
27 27
28#include <drm/drmP.h> 28#include "drmP.h"
29#include <drm/i915_drm.h> 29#include "drm.h"
30#include "i915_drm.h"
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include "i915_trace.h" 32#include "i915_trace.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
@@ -34,42 +35,29 @@
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/swap.h> 36#include <linux/swap.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <linux/dma-buf.h>
38 38
39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43 bool write);
44static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45 uint64_t offset,
46 uint64_t size);
47static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 48static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
42 unsigned alignment, 49 unsigned alignment,
43 bool map_and_fenceable, 50 bool map_and_fenceable);
44 bool nonblocking); 51static void i915_gem_clear_fence_reg(struct drm_device *dev,
52 struct drm_i915_fence_reg *reg);
45static int i915_gem_phys_pwrite(struct drm_device *dev, 53static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 54 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 55 struct drm_i915_gem_pwrite *args,
48 struct drm_file *file); 56 struct drm_file *file);
49 57static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
50static void i915_gem_write_fence(struct drm_device *dev, int reg,
51 struct drm_i915_gem_object *obj);
52static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53 struct drm_i915_fence_reg *fence,
54 bool enable);
55 58
56static int i915_gem_inactive_shrink(struct shrinker *shrinker, 59static int i915_gem_inactive_shrink(struct shrinker *shrinker,
57 struct shrink_control *sc); 60 struct shrink_control *sc);
58static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61
62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63{
64 if (obj->tiling_mode)
65 i915_gem_release_mmap(obj);
66
67 /* As we do not have an associated fence register, we will force
68 * a tiling change if we ever need to acquire one.
69 */
70 obj->fence_dirty = false;
71 obj->fence_reg = I915_FENCE_REG_NONE;
72}
73 61
74/* some bookkeeping */ 62/* some bookkeeping */
75static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 63static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -97,18 +85,9 @@ i915_gem_wait_for_error(struct drm_device *dev)
97 if (!atomic_read(&dev_priv->mm.wedged)) 85 if (!atomic_read(&dev_priv->mm.wedged))
98 return 0; 86 return 0;
99 87
100 /* 88 ret = wait_for_completion_interruptible(x);
101 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 89 if (ret)
102 * userspace. If it takes that long something really bad is going on and
103 * we should simply try to bail out and fail as gracefully as possible.
104 */
105 ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
106 if (ret == 0) {
107 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
108 return -EIO;
109 } else if (ret < 0) {
110 return ret; 90 return ret;
111 }
112 91
113 if (atomic_read(&dev_priv->mm.wedged)) { 92 if (atomic_read(&dev_priv->mm.wedged)) {
114 /* GPU is hung, bump the completion count to account for 93 /* GPU is hung, bump the completion count to account for
@@ -142,7 +121,26 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
142static inline bool 121static inline bool
143i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 122i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
144{ 123{
145 return obj->gtt_space && !obj->active; 124 return obj->gtt_space && !obj->active && obj->pin_count == 0;
125}
126
127void i915_gem_do_init(struct drm_device *dev,
128 unsigned long start,
129 unsigned long mappable_end,
130 unsigned long end)
131{
132 drm_i915_private_t *dev_priv = dev->dev_private;
133
134 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135
136 dev_priv->mm.gtt_start = start;
137 dev_priv->mm.gtt_mappable_end = mappable_end;
138 dev_priv->mm.gtt_end = end;
139 dev_priv->mm.gtt_total = end - start;
140 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141
142 /* Take over this portion of the GTT */
143 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
146} 144}
147 145
148int 146int
@@ -151,20 +149,12 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
151{ 149{
152 struct drm_i915_gem_init *args = data; 150 struct drm_i915_gem_init *args = data;
153 151
154 if (drm_core_check_feature(dev, DRIVER_MODESET))
155 return -ENODEV;
156
157 if (args->gtt_start >= args->gtt_end || 152 if (args->gtt_start >= args->gtt_end ||
158 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) 153 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
159 return -EINVAL; 154 return -EINVAL;
160 155
161 /* GEM with user mode setting was never supported on ilk and later. */
162 if (INTEL_INFO(dev)->gen >= 5)
163 return -ENODEV;
164
165 mutex_lock(&dev->struct_mutex); 156 mutex_lock(&dev->struct_mutex);
166 i915_gem_init_global_gtt(dev, args->gtt_start, 157 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
167 args->gtt_end, args->gtt_end);
168 mutex_unlock(&dev->struct_mutex); 158 mutex_unlock(&dev->struct_mutex);
169 159
170 return 0; 160 return 0;
@@ -179,15 +169,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
179 struct drm_i915_gem_object *obj; 169 struct drm_i915_gem_object *obj;
180 size_t pinned; 170 size_t pinned;
181 171
172 if (!(dev->driver->driver_features & DRIVER_GEM))
173 return -ENODEV;
174
182 pinned = 0; 175 pinned = 0;
183 mutex_lock(&dev->struct_mutex); 176 mutex_lock(&dev->struct_mutex);
184 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 177 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
185 if (obj->pin_count) 178 pinned += obj->gtt_space->size;
186 pinned += obj->gtt_space->size;
187 mutex_unlock(&dev->struct_mutex); 179 mutex_unlock(&dev->struct_mutex);
188 180
189 args->aper_size = dev_priv->mm.gtt_total; 181 args->aper_size = dev_priv->mm.gtt_total;
190 args->aper_available_size = args->aper_size - pinned; 182 args->aper_available_size = args->aper_size -pinned;
191 183
192 return 0; 184 return 0;
193} 185}
@@ -203,8 +195,6 @@ i915_gem_create(struct drm_file *file,
203 u32 handle; 195 u32 handle;
204 196
205 size = roundup(size, PAGE_SIZE); 197 size = roundup(size, PAGE_SIZE);
206 if (size == 0)
207 return -EINVAL;
208 198
209 /* Allocate the new object */ 199 /* Allocate the new object */
210 obj = i915_gem_alloc_object(dev, size); 200 obj = i915_gem_alloc_object(dev, size);
@@ -254,7 +244,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
254 struct drm_file *file) 244 struct drm_file *file)
255{ 245{
256 struct drm_i915_gem_create *args = data; 246 struct drm_i915_gem_create *args = data;
257
258 return i915_gem_create(file, dev, 247 return i915_gem_create(file, dev,
259 args->size, &args->handle); 248 args->size, &args->handle);
260} 249}
@@ -267,246 +256,249 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
267 obj->tiling_mode != I915_TILING_NONE; 256 obj->tiling_mode != I915_TILING_NONE;
268} 257}
269 258
270static inline int 259static inline void
271__copy_to_user_swizzled(char __user *cpu_vaddr, 260slow_shmem_copy(struct page *dst_page,
272 const char *gpu_vaddr, int gpu_offset, 261 int dst_offset,
273 int length) 262 struct page *src_page,
263 int src_offset,
264 int length)
274{ 265{
275 int ret, cpu_offset = 0; 266 char *dst_vaddr, *src_vaddr;
276 267
277 while (length > 0) { 268 dst_vaddr = kmap(dst_page);
278 int cacheline_end = ALIGN(gpu_offset + 1, 64); 269 src_vaddr = kmap(src_page);
279 int this_length = min(cacheline_end - gpu_offset, length);
280 int swizzled_gpu_offset = gpu_offset ^ 64;
281 270
282 ret = __copy_to_user(cpu_vaddr + cpu_offset, 271 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
283 gpu_vaddr + swizzled_gpu_offset,
284 this_length);
285 if (ret)
286 return ret + length;
287 272
288 cpu_offset += this_length; 273 kunmap(src_page);
289 gpu_offset += this_length; 274 kunmap(dst_page);
290 length -= this_length;
291 }
292
293 return 0;
294} 275}
295 276
296static inline int 277static inline void
297__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 278slow_shmem_bit17_copy(struct page *gpu_page,
298 const char __user *cpu_vaddr, 279 int gpu_offset,
299 int length) 280 struct page *cpu_page,
300{ 281 int cpu_offset,
301 int ret, cpu_offset = 0; 282 int length,
283 int is_read)
284{
285 char *gpu_vaddr, *cpu_vaddr;
286
287 /* Use the unswizzled path if this page isn't affected. */
288 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
289 if (is_read)
290 return slow_shmem_copy(cpu_page, cpu_offset,
291 gpu_page, gpu_offset, length);
292 else
293 return slow_shmem_copy(gpu_page, gpu_offset,
294 cpu_page, cpu_offset, length);
295 }
296
297 gpu_vaddr = kmap(gpu_page);
298 cpu_vaddr = kmap(cpu_page);
302 299
300 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
301 * XORing with the other bits (A9 for Y, A9 and A10 for X)
302 */
303 while (length > 0) { 303 while (length > 0) {
304 int cacheline_end = ALIGN(gpu_offset + 1, 64); 304 int cacheline_end = ALIGN(gpu_offset + 1, 64);
305 int this_length = min(cacheline_end - gpu_offset, length); 305 int this_length = min(cacheline_end - gpu_offset, length);
306 int swizzled_gpu_offset = gpu_offset ^ 64; 306 int swizzled_gpu_offset = gpu_offset ^ 64;
307 307
308 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 308 if (is_read) {
309 cpu_vaddr + cpu_offset, 309 memcpy(cpu_vaddr + cpu_offset,
310 this_length); 310 gpu_vaddr + swizzled_gpu_offset,
311 if (ret) 311 this_length);
312 return ret + length; 312 } else {
313 313 memcpy(gpu_vaddr + swizzled_gpu_offset,
314 cpu_vaddr + cpu_offset,
315 this_length);
316 }
314 cpu_offset += this_length; 317 cpu_offset += this_length;
315 gpu_offset += this_length; 318 gpu_offset += this_length;
316 length -= this_length; 319 length -= this_length;
317 } 320 }
318 321
319 return 0; 322 kunmap(cpu_page);
323 kunmap(gpu_page);
320} 324}
321 325
322/* Per-page copy function for the shmem pread fastpath. 326/**
323 * Flushes invalid cachelines before reading the target if 327 * This is the fast shmem pread path, which attempts to copy_from_user directly
324 * needs_clflush is set. */ 328 * from the backing pages of the object to the user's address space. On a
329 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
330 */
325static int 331static int
326shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, 332i915_gem_shmem_pread_fast(struct drm_device *dev,
327 char __user *user_data, 333 struct drm_i915_gem_object *obj,
328 bool page_do_bit17_swizzling, bool needs_clflush) 334 struct drm_i915_gem_pread *args,
335 struct drm_file *file)
329{ 336{
330 char *vaddr; 337 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
331 int ret; 338 ssize_t remain;
339 loff_t offset;
340 char __user *user_data;
341 int page_offset, page_length;
332 342
333 if (unlikely(page_do_bit17_swizzling)) 343 user_data = (char __user *) (uintptr_t) args->data_ptr;
334 return -EINVAL; 344 remain = args->size;
335 345
336 vaddr = kmap_atomic(page); 346 offset = args->offset;
337 if (needs_clflush)
338 drm_clflush_virt_range(vaddr + shmem_page_offset,
339 page_length);
340 ret = __copy_to_user_inatomic(user_data,
341 vaddr + shmem_page_offset,
342 page_length);
343 kunmap_atomic(vaddr);
344 347
345 return ret ? -EFAULT : 0; 348 while (remain > 0) {
346} 349 struct page *page;
350 char *vaddr;
351 int ret;
347 352
348static void 353 /* Operation in this page
349shmem_clflush_swizzled_range(char *addr, unsigned long length, 354 *
350 bool swizzled) 355 * page_offset = offset within page
351{ 356 * page_length = bytes to copy for this page
352 if (unlikely(swizzled)) { 357 */
353 unsigned long start = (unsigned long) addr; 358 page_offset = offset_in_page(offset);
354 unsigned long end = (unsigned long) addr + length; 359 page_length = remain;
355 360 if ((page_offset + remain) > PAGE_SIZE)
356 /* For swizzling simply ensure that we always flush both 361 page_length = PAGE_SIZE - page_offset;
357 * channels. Lame, but simple and it works. Swizzled
358 * pwrite/pread is far from a hotpath - current userspace
359 * doesn't use it at all. */
360 start = round_down(start, 128);
361 end = round_up(end, 128);
362
363 drm_clflush_virt_range((void *)start, end - start);
364 } else {
365 drm_clflush_virt_range(addr, length);
366 }
367 362
368} 363 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
364 if (IS_ERR(page))
365 return PTR_ERR(page);
369 366
370/* Only difference to the fast-path function is that this can handle bit17 367 vaddr = kmap_atomic(page);
371 * and uses non-atomic copy and kmap functions. */ 368 ret = __copy_to_user_inatomic(user_data,
372static int 369 vaddr + page_offset,
373shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, 370 page_length);
374 char __user *user_data, 371 kunmap_atomic(vaddr);
375 bool page_do_bit17_swizzling, bool needs_clflush)
376{
377 char *vaddr;
378 int ret;
379 372
380 vaddr = kmap(page); 373 mark_page_accessed(page);
381 if (needs_clflush) 374 page_cache_release(page);
382 shmem_clflush_swizzled_range(vaddr + shmem_page_offset, 375 if (ret)
383 page_length, 376 return -EFAULT;
384 page_do_bit17_swizzling);
385 377
386 if (page_do_bit17_swizzling) 378 remain -= page_length;
387 ret = __copy_to_user_swizzled(user_data, 379 user_data += page_length;
388 vaddr, shmem_page_offset, 380 offset += page_length;
389 page_length); 381 }
390 else
391 ret = __copy_to_user(user_data,
392 vaddr + shmem_page_offset,
393 page_length);
394 kunmap(page);
395 382
396 return ret ? - EFAULT : 0; 383 return 0;
397} 384}
398 385
386/**
387 * This is the fallback shmem pread path, which allocates temporary storage
388 * in kernel space to copy_to_user into outside of the struct_mutex, so we
389 * can copy out of the object's backing pages while holding the struct mutex
390 * and not take page faults.
391 */
399static int 392static int
400i915_gem_shmem_pread(struct drm_device *dev, 393i915_gem_shmem_pread_slow(struct drm_device *dev,
401 struct drm_i915_gem_object *obj, 394 struct drm_i915_gem_object *obj,
402 struct drm_i915_gem_pread *args, 395 struct drm_i915_gem_pread *args,
403 struct drm_file *file) 396 struct drm_file *file)
404{ 397{
405 char __user *user_data; 398 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
399 struct mm_struct *mm = current->mm;
400 struct page **user_pages;
406 ssize_t remain; 401 ssize_t remain;
407 loff_t offset; 402 loff_t offset, pinned_pages, i;
408 int shmem_page_offset, page_length, ret = 0; 403 loff_t first_data_page, last_data_page, num_pages;
409 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 404 int shmem_page_offset;
410 int hit_slowpath = 0; 405 int data_page_index, data_page_offset;
411 int prefaulted = 0; 406 int page_length;
412 int needs_clflush = 0; 407 int ret;
413 struct scatterlist *sg; 408 uint64_t data_ptr = args->data_ptr;
414 int i; 409 int do_bit17_swizzling;
415 410
416 user_data = (char __user *) (uintptr_t) args->data_ptr;
417 remain = args->size; 411 remain = args->size;
418 412
419 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 413 /* Pin the user pages containing the data. We can't fault while
420 414 * holding the struct mutex, yet we want to hold it while
421 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { 415 * dereferencing the user data.
422 /* If we're not in the cpu read domain, set ourself into the gtt 416 */
423 * read domain and manually flush cachelines (if required). This 417 first_data_page = data_ptr / PAGE_SIZE;
424 * optimizes for the case when the gpu will dirty the data 418 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
425 * anyway again before the next pread happens. */ 419 num_pages = last_data_page - first_data_page + 1;
426 if (obj->cache_level == I915_CACHE_NONE) 420
427 needs_clflush = 1; 421 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
428 if (obj->gtt_space) { 422 if (user_pages == NULL)
429 ret = i915_gem_object_set_to_gtt_domain(obj, false); 423 return -ENOMEM;
430 if (ret) 424
431 return ret; 425 mutex_unlock(&dev->struct_mutex);
432 } 426 down_read(&mm->mmap_sem);
427 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
428 num_pages, 1, 0, user_pages, NULL);
429 up_read(&mm->mmap_sem);
430 mutex_lock(&dev->struct_mutex);
431 if (pinned_pages < num_pages) {
432 ret = -EFAULT;
433 goto out;
433 } 434 }
434 435
435 ret = i915_gem_object_get_pages(obj); 436 ret = i915_gem_object_set_cpu_read_domain_range(obj,
437 args->offset,
438 args->size);
436 if (ret) 439 if (ret)
437 return ret; 440 goto out;
438 441
439 i915_gem_object_pin_pages(obj); 442 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
440 443
441 offset = args->offset; 444 offset = args->offset;
442 445
443 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 446 while (remain > 0) {
444 struct page *page; 447 struct page *page;
445 448
446 if (i < offset >> PAGE_SHIFT)
447 continue;
448
449 if (remain <= 0)
450 break;
451
452 /* Operation in this page 449 /* Operation in this page
453 * 450 *
454 * shmem_page_offset = offset within page in shmem file 451 * shmem_page_offset = offset within page in shmem file
452 * data_page_index = page number in get_user_pages return
453 * data_page_offset = offset with data_page_index page.
455 * page_length = bytes to copy for this page 454 * page_length = bytes to copy for this page
456 */ 455 */
457 shmem_page_offset = offset_in_page(offset); 456 shmem_page_offset = offset_in_page(offset);
457 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
458 data_page_offset = offset_in_page(data_ptr);
459
458 page_length = remain; 460 page_length = remain;
459 if ((shmem_page_offset + page_length) > PAGE_SIZE) 461 if ((shmem_page_offset + page_length) > PAGE_SIZE)
460 page_length = PAGE_SIZE - shmem_page_offset; 462 page_length = PAGE_SIZE - shmem_page_offset;
463 if ((data_page_offset + page_length) > PAGE_SIZE)
464 page_length = PAGE_SIZE - data_page_offset;
461 465
462 page = sg_page(sg); 466 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
463 page_do_bit17_swizzling = obj_do_bit17_swizzling && 467 if (IS_ERR(page)) {
464 (page_to_phys(page) & (1 << 17)) != 0; 468 ret = PTR_ERR(page);
465 469 goto out;
466 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
467 user_data, page_do_bit17_swizzling,
468 needs_clflush);
469 if (ret == 0)
470 goto next_page;
471
472 hit_slowpath = 1;
473 mutex_unlock(&dev->struct_mutex);
474
475 if (!prefaulted) {
476 ret = fault_in_multipages_writeable(user_data, remain);
477 /* Userspace is tricking us, but we've already clobbered
478 * its pages with the prefault and promised to write the
479 * data up to the first fault. Hence ignore any errors
480 * and just continue. */
481 (void)ret;
482 prefaulted = 1;
483 } 470 }
484 471
485 ret = shmem_pread_slow(page, shmem_page_offset, page_length, 472 if (do_bit17_swizzling) {
486 user_data, page_do_bit17_swizzling, 473 slow_shmem_bit17_copy(page,
487 needs_clflush); 474 shmem_page_offset,
488 475 user_pages[data_page_index],
489 mutex_lock(&dev->struct_mutex); 476 data_page_offset,
477 page_length,
478 1);
479 } else {
480 slow_shmem_copy(user_pages[data_page_index],
481 data_page_offset,
482 page,
483 shmem_page_offset,
484 page_length);
485 }
490 486
491next_page:
492 mark_page_accessed(page); 487 mark_page_accessed(page);
493 488 page_cache_release(page);
494 if (ret)
495 goto out;
496 489
497 remain -= page_length; 490 remain -= page_length;
498 user_data += page_length; 491 data_ptr += page_length;
499 offset += page_length; 492 offset += page_length;
500 } 493 }
501 494
502out: 495out:
503 i915_gem_object_unpin_pages(obj); 496 for (i = 0; i < pinned_pages; i++) {
504 497 SetPageDirty(user_pages[i]);
505 if (hit_slowpath) { 498 mark_page_accessed(user_pages[i]);
506 /* Fixup: Kill any reinstated backing storage pages */ 499 page_cache_release(user_pages[i]);
507 if (obj->madv == __I915_MADV_PURGED)
508 i915_gem_object_truncate(obj);
509 } 500 }
501 drm_free_large(user_pages);
510 502
511 return ret; 503 return ret;
512} 504}
@@ -532,6 +524,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
532 args->size)) 524 args->size))
533 return -EFAULT; 525 return -EFAULT;
534 526
527 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
528 args->size);
529 if (ret)
530 return -EFAULT;
531
535 ret = i915_mutex_lock_interruptible(dev); 532 ret = i915_mutex_lock_interruptible(dev);
536 if (ret) 533 if (ret)
537 return ret; 534 return ret;
@@ -549,17 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
549 goto out; 546 goto out;
550 } 547 }
551 548
552 /* prime objects have no backing filp to GEM pread/pwrite
553 * pages from.
554 */
555 if (!obj->base.filp) {
556 ret = -EINVAL;
557 goto out;
558 }
559
560 trace_i915_gem_object_pread(obj, args->offset, args->size); 549 trace_i915_gem_object_pread(obj, args->offset, args->size);
561 550
562 ret = i915_gem_shmem_pread(dev, obj, args, file); 551 ret = i915_gem_object_set_cpu_read_domain_range(obj,
552 args->offset,
553 args->size);
554 if (ret)
555 goto out;
556
557 ret = -EFAULT;
558 if (!i915_gem_object_needs_bit17_swizzle(obj))
559 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
560 if (ret == -EFAULT)
561 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
563 562
564out: 563out:
565 drm_gem_object_unreference(&obj->base); 564 drm_gem_object_unreference(&obj->base);
@@ -578,19 +577,40 @@ fast_user_write(struct io_mapping *mapping,
578 char __user *user_data, 577 char __user *user_data,
579 int length) 578 int length)
580{ 579{
581 void __iomem *vaddr_atomic; 580 char *vaddr_atomic;
582 void *vaddr;
583 unsigned long unwritten; 581 unsigned long unwritten;
584 582
585 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 583 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586 /* We can use the cpu mem copy function because this is X86. */ 584 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587 vaddr = (void __force*)vaddr_atomic + page_offset;
588 unwritten = __copy_from_user_inatomic_nocache(vaddr,
589 user_data, length); 585 user_data, length);
590 io_mapping_unmap_atomic(vaddr_atomic); 586 io_mapping_unmap_atomic(vaddr_atomic);
591 return unwritten; 587 return unwritten;
592} 588}
593 589
590/* Here's the write path which can sleep for
591 * page faults
592 */
593
594static inline void
595slow_kernel_write(struct io_mapping *mapping,
596 loff_t gtt_base, int gtt_offset,
597 struct page *user_page, int user_offset,
598 int length)
599{
600 char __iomem *dst_vaddr;
601 char *src_vaddr;
602
603 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
604 src_vaddr = kmap(user_page);
605
606 memcpy_toio(dst_vaddr + gtt_offset,
607 src_vaddr + user_offset,
608 length);
609
610 kunmap(user_page);
611 io_mapping_unmap(dst_vaddr);
612}
613
594/** 614/**
595 * This is the fast pwrite path, where we copy the data directly from the 615 * This is the fast pwrite path, where we copy the data directly from the
596 * user into the GTT, uncached. 616 * user into the GTT, uncached.
@@ -605,19 +625,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
605 ssize_t remain; 625 ssize_t remain;
606 loff_t offset, page_base; 626 loff_t offset, page_base;
607 char __user *user_data; 627 char __user *user_data;
608 int page_offset, page_length, ret; 628 int page_offset, page_length;
609
610 ret = i915_gem_object_pin(obj, 0, true, true);
611 if (ret)
612 goto out;
613
614 ret = i915_gem_object_set_to_gtt_domain(obj, true);
615 if (ret)
616 goto out_unpin;
617
618 ret = i915_gem_object_put_fence(obj);
619 if (ret)
620 goto out_unpin;
621 629
622 user_data = (char __user *) (uintptr_t) args->data_ptr; 630 user_data = (char __user *) (uintptr_t) args->data_ptr;
623 remain = args->size; 631 remain = args->size;
@@ -642,215 +650,295 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
642 * retry in the slow path. 650 * retry in the slow path.
643 */ 651 */
644 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 652 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
645 page_offset, user_data, page_length)) { 653 page_offset, user_data, page_length))
646 ret = -EFAULT; 654 return -EFAULT;
647 goto out_unpin;
648 }
649 655
650 remain -= page_length; 656 remain -= page_length;
651 user_data += page_length; 657 user_data += page_length;
652 offset += page_length; 658 offset += page_length;
653 } 659 }
654 660
655out_unpin: 661 return 0;
656 i915_gem_object_unpin(obj);
657out:
658 return ret;
659} 662}
660 663
661/* Per-page copy function for the shmem pwrite fastpath. 664/**
662 * Flushes invalid cachelines before writing to the target if 665 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
663 * needs_clflush_before is set and flushes out any written cachelines after 666 * the memory and maps it using kmap_atomic for copying.
664 * writing if needs_clflush is set. */ 667 *
668 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
669 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
670 */
665static int 671static int
666shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, 672i915_gem_gtt_pwrite_slow(struct drm_device *dev,
667 char __user *user_data, 673 struct drm_i915_gem_object *obj,
668 bool page_do_bit17_swizzling, 674 struct drm_i915_gem_pwrite *args,
669 bool needs_clflush_before, 675 struct drm_file *file)
670 bool needs_clflush_after)
671{ 676{
672 char *vaddr; 677 drm_i915_private_t *dev_priv = dev->dev_private;
678 ssize_t remain;
679 loff_t gtt_page_base, offset;
680 loff_t first_data_page, last_data_page, num_pages;
681 loff_t pinned_pages, i;
682 struct page **user_pages;
683 struct mm_struct *mm = current->mm;
684 int gtt_page_offset, data_page_offset, data_page_index, page_length;
673 int ret; 685 int ret;
686 uint64_t data_ptr = args->data_ptr;
674 687
675 if (unlikely(page_do_bit17_swizzling)) 688 remain = args->size;
676 return -EINVAL;
677 689
678 vaddr = kmap_atomic(page); 690 /* Pin the user pages containing the data. We can't fault while
679 if (needs_clflush_before) 691 * holding the struct mutex, and all of the pwrite implementations
680 drm_clflush_virt_range(vaddr + shmem_page_offset, 692 * want to hold it while dereferencing the user data.
681 page_length); 693 */
682 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, 694 first_data_page = data_ptr / PAGE_SIZE;
683 user_data, 695 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
684 page_length); 696 num_pages = last_data_page - first_data_page + 1;
685 if (needs_clflush_after) 697
686 drm_clflush_virt_range(vaddr + shmem_page_offset, 698 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
687 page_length); 699 if (user_pages == NULL)
688 kunmap_atomic(vaddr); 700 return -ENOMEM;
701
702 mutex_unlock(&dev->struct_mutex);
703 down_read(&mm->mmap_sem);
704 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
705 num_pages, 0, 0, user_pages, NULL);
706 up_read(&mm->mmap_sem);
707 mutex_lock(&dev->struct_mutex);
708 if (pinned_pages < num_pages) {
709 ret = -EFAULT;
710 goto out_unpin_pages;
711 }
712
713 ret = i915_gem_object_set_to_gtt_domain(obj, true);
714 if (ret)
715 goto out_unpin_pages;
689 716
690 return ret ? -EFAULT : 0; 717 ret = i915_gem_object_put_fence(obj);
718 if (ret)
719 goto out_unpin_pages;
720
721 offset = obj->gtt_offset + args->offset;
722
723 while (remain > 0) {
724 /* Operation in this page
725 *
726 * gtt_page_base = page offset within aperture
727 * gtt_page_offset = offset within page in aperture
728 * data_page_index = page number in get_user_pages return
729 * data_page_offset = offset with data_page_index page.
730 * page_length = bytes to copy for this page
731 */
732 gtt_page_base = offset & PAGE_MASK;
733 gtt_page_offset = offset_in_page(offset);
734 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
735 data_page_offset = offset_in_page(data_ptr);
736
737 page_length = remain;
738 if ((gtt_page_offset + page_length) > PAGE_SIZE)
739 page_length = PAGE_SIZE - gtt_page_offset;
740 if ((data_page_offset + page_length) > PAGE_SIZE)
741 page_length = PAGE_SIZE - data_page_offset;
742
743 slow_kernel_write(dev_priv->mm.gtt_mapping,
744 gtt_page_base, gtt_page_offset,
745 user_pages[data_page_index],
746 data_page_offset,
747 page_length);
748
749 remain -= page_length;
750 offset += page_length;
751 data_ptr += page_length;
752 }
753
754out_unpin_pages:
755 for (i = 0; i < pinned_pages; i++)
756 page_cache_release(user_pages[i]);
757 drm_free_large(user_pages);
758
759 return ret;
691} 760}
692 761
693/* Only difference to the fast-path function is that this can handle bit17 762/**
694 * and uses non-atomic copy and kmap functions. */ 763 * This is the fast shmem pwrite path, which attempts to directly
764 * copy_from_user into the kmapped pages backing the object.
765 */
695static int 766static int
696shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, 767i915_gem_shmem_pwrite_fast(struct drm_device *dev,
697 char __user *user_data, 768 struct drm_i915_gem_object *obj,
698 bool page_do_bit17_swizzling, 769 struct drm_i915_gem_pwrite *args,
699 bool needs_clflush_before, 770 struct drm_file *file)
700 bool needs_clflush_after)
701{ 771{
702 char *vaddr; 772 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
703 int ret; 773 ssize_t remain;
774 loff_t offset;
775 char __user *user_data;
776 int page_offset, page_length;
777
778 user_data = (char __user *) (uintptr_t) args->data_ptr;
779 remain = args->size;
780
781 offset = args->offset;
782 obj->dirty = 1;
783
784 while (remain > 0) {
785 struct page *page;
786 char *vaddr;
787 int ret;
788
789 /* Operation in this page
790 *
791 * page_offset = offset within page
792 * page_length = bytes to copy for this page
793 */
794 page_offset = offset_in_page(offset);
795 page_length = remain;
796 if ((page_offset + remain) > PAGE_SIZE)
797 page_length = PAGE_SIZE - page_offset;
798
799 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
800 if (IS_ERR(page))
801 return PTR_ERR(page);
704 802
705 vaddr = kmap(page); 803 vaddr = kmap_atomic(page, KM_USER0);
706 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 804 ret = __copy_from_user_inatomic(vaddr + page_offset,
707 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
708 page_length,
709 page_do_bit17_swizzling);
710 if (page_do_bit17_swizzling)
711 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
712 user_data, 805 user_data,
713 page_length); 806 page_length);
714 else 807 kunmap_atomic(vaddr, KM_USER0);
715 ret = __copy_from_user(vaddr + shmem_page_offset, 808
716 user_data, 809 set_page_dirty(page);
717 page_length); 810 mark_page_accessed(page);
718 if (needs_clflush_after) 811 page_cache_release(page);
719 shmem_clflush_swizzled_range(vaddr + shmem_page_offset, 812
720 page_length, 813 /* If we get a fault while copying data, then (presumably) our
721 page_do_bit17_swizzling); 814 * source page isn't available. Return the error and we'll
722 kunmap(page); 815 * retry in the slow path.
723 816 */
724 return ret ? -EFAULT : 0; 817 if (ret)
818 return -EFAULT;
819
820 remain -= page_length;
821 user_data += page_length;
822 offset += page_length;
823 }
824
825 return 0;
725} 826}
726 827
828/**
829 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
830 * the memory and maps it using kmap_atomic for copying.
831 *
832 * This avoids taking mmap_sem for faulting on the user's address while the
833 * struct_mutex is held.
834 */
727static int 835static int
728i915_gem_shmem_pwrite(struct drm_device *dev, 836i915_gem_shmem_pwrite_slow(struct drm_device *dev,
729 struct drm_i915_gem_object *obj, 837 struct drm_i915_gem_object *obj,
730 struct drm_i915_gem_pwrite *args, 838 struct drm_i915_gem_pwrite *args,
731 struct drm_file *file) 839 struct drm_file *file)
732{ 840{
841 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
842 struct mm_struct *mm = current->mm;
843 struct page **user_pages;
733 ssize_t remain; 844 ssize_t remain;
734 loff_t offset; 845 loff_t offset, pinned_pages, i;
735 char __user *user_data; 846 loff_t first_data_page, last_data_page, num_pages;
736 int shmem_page_offset, page_length, ret = 0; 847 int shmem_page_offset;
737 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 848 int data_page_index, data_page_offset;
738 int hit_slowpath = 0; 849 int page_length;
739 int needs_clflush_after = 0; 850 int ret;
740 int needs_clflush_before = 0; 851 uint64_t data_ptr = args->data_ptr;
741 int i; 852 int do_bit17_swizzling;
742 struct scatterlist *sg;
743 853
744 user_data = (char __user *) (uintptr_t) args->data_ptr;
745 remain = args->size; 854 remain = args->size;
746 855
747 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 856 /* Pin the user pages containing the data. We can't fault while
748 857 * holding the struct mutex, and all of the pwrite implementations
749 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 858 * want to hold it while dereferencing the user data.
750 /* If we're not in the cpu write domain, set ourself into the gtt 859 */
751 * write domain and manually flush cachelines (if required). This 860 first_data_page = data_ptr / PAGE_SIZE;
752 * optimizes for the case when the gpu will use the data 861 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
753 * right away and we therefore have to clflush anyway. */ 862 num_pages = last_data_page - first_data_page + 1;
754 if (obj->cache_level == I915_CACHE_NONE) 863
755 needs_clflush_after = 1; 864 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
756 if (obj->gtt_space) { 865 if (user_pages == NULL)
757 ret = i915_gem_object_set_to_gtt_domain(obj, true); 866 return -ENOMEM;
758 if (ret) 867
759 return ret; 868 mutex_unlock(&dev->struct_mutex);
760 } 869 down_read(&mm->mmap_sem);
870 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
871 num_pages, 0, 0, user_pages, NULL);
872 up_read(&mm->mmap_sem);
873 mutex_lock(&dev->struct_mutex);
874 if (pinned_pages < num_pages) {
875 ret = -EFAULT;
876 goto out;
761 } 877 }
762 /* Same trick applies for invalidate partially written cachelines before
763 * writing. */
764 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
765 && obj->cache_level == I915_CACHE_NONE)
766 needs_clflush_before = 1;
767 878
768 ret = i915_gem_object_get_pages(obj); 879 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
769 if (ret) 880 if (ret)
770 return ret; 881 goto out;
771 882
772 i915_gem_object_pin_pages(obj); 883 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
773 884
774 offset = args->offset; 885 offset = args->offset;
775 obj->dirty = 1; 886 obj->dirty = 1;
776 887
777 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 888 while (remain > 0) {
778 struct page *page; 889 struct page *page;
779 int partial_cacheline_write;
780
781 if (i < offset >> PAGE_SHIFT)
782 continue;
783
784 if (remain <= 0)
785 break;
786 890
787 /* Operation in this page 891 /* Operation in this page
788 * 892 *
789 * shmem_page_offset = offset within page in shmem file 893 * shmem_page_offset = offset within page in shmem file
894 * data_page_index = page number in get_user_pages return
895 * data_page_offset = offset with data_page_index page.
790 * page_length = bytes to copy for this page 896 * page_length = bytes to copy for this page
791 */ 897 */
792 shmem_page_offset = offset_in_page(offset); 898 shmem_page_offset = offset_in_page(offset);
899 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
900 data_page_offset = offset_in_page(data_ptr);
793 901
794 page_length = remain; 902 page_length = remain;
795 if ((shmem_page_offset + page_length) > PAGE_SIZE) 903 if ((shmem_page_offset + page_length) > PAGE_SIZE)
796 page_length = PAGE_SIZE - shmem_page_offset; 904 page_length = PAGE_SIZE - shmem_page_offset;
905 if ((data_page_offset + page_length) > PAGE_SIZE)
906 page_length = PAGE_SIZE - data_page_offset;
797 907
798 /* If we don't overwrite a cacheline completely we need to be 908 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
799 * careful to have up-to-date data by first clflushing. Don't 909 if (IS_ERR(page)) {
800 * overcomplicate things and flush the entire patch. */ 910 ret = PTR_ERR(page);
801 partial_cacheline_write = needs_clflush_before && 911 goto out;
802 ((shmem_page_offset | page_length) 912 }
803 & (boot_cpu_data.x86_clflush_size - 1));
804
805 page = sg_page(sg);
806 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
807 (page_to_phys(page) & (1 << 17)) != 0;
808
809 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
810 user_data, page_do_bit17_swizzling,
811 partial_cacheline_write,
812 needs_clflush_after);
813 if (ret == 0)
814 goto next_page;
815
816 hit_slowpath = 1;
817 mutex_unlock(&dev->struct_mutex);
818 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
819 user_data, page_do_bit17_swizzling,
820 partial_cacheline_write,
821 needs_clflush_after);
822 913
823 mutex_lock(&dev->struct_mutex); 914 if (do_bit17_swizzling) {
915 slow_shmem_bit17_copy(page,
916 shmem_page_offset,
917 user_pages[data_page_index],
918 data_page_offset,
919 page_length,
920 0);
921 } else {
922 slow_shmem_copy(page,
923 shmem_page_offset,
924 user_pages[data_page_index],
925 data_page_offset,
926 page_length);
927 }
824 928
825next_page:
826 set_page_dirty(page); 929 set_page_dirty(page);
827 mark_page_accessed(page); 930 mark_page_accessed(page);
828 931 page_cache_release(page);
829 if (ret)
830 goto out;
831 932
832 remain -= page_length; 933 remain -= page_length;
833 user_data += page_length; 934 data_ptr += page_length;
834 offset += page_length; 935 offset += page_length;
835 } 936 }
836 937
837out: 938out:
838 i915_gem_object_unpin_pages(obj); 939 for (i = 0; i < pinned_pages; i++)
839 940 page_cache_release(user_pages[i]);
840 if (hit_slowpath) { 941 drm_free_large(user_pages);
841 /* Fixup: Kill any reinstated backing storage pages */
842 if (obj->madv == __I915_MADV_PURGED)
843 i915_gem_object_truncate(obj);
844 /* and flush dirty cachelines in case the object isn't in the cpu write
845 * domain anymore. */
846 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
847 i915_gem_clflush_object(obj);
848 i915_gem_chipset_flush(dev);
849 }
850 }
851
852 if (needs_clflush_after)
853 i915_gem_chipset_flush(dev);
854 942
855 return ret; 943 return ret;
856} 944}
@@ -876,8 +964,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
876 args->size)) 964 args->size))
877 return -EFAULT; 965 return -EFAULT;
878 966
879 ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, 967 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
880 args->size); 968 args->size);
881 if (ret) 969 if (ret)
882 return -EFAULT; 970 return -EFAULT;
883 971
@@ -898,278 +986,52 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
898 goto out; 986 goto out;
899 } 987 }
900 988
901 /* prime objects have no backing filp to GEM pread/pwrite
902 * pages from.
903 */
904 if (!obj->base.filp) {
905 ret = -EINVAL;
906 goto out;
907 }
908
909 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 989 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
910 990
911 ret = -EFAULT;
912 /* We can only do the GTT pwrite on untiled buffers, as otherwise 991 /* We can only do the GTT pwrite on untiled buffers, as otherwise
913 * it would end up going through the fenced access, and we'll get 992 * it would end up going through the fenced access, and we'll get
914 * different detiling behavior between reading and writing. 993 * different detiling behavior between reading and writing.
915 * pread/pwrite currently are reading and writing from the CPU 994 * pread/pwrite currently are reading and writing from the CPU
916 * perspective, requiring manual detiling by the client. 995 * perspective, requiring manual detiling by the client.
917 */ 996 */
918 if (obj->phys_obj) { 997 if (obj->phys_obj)
919 ret = i915_gem_phys_pwrite(dev, obj, args, file); 998 ret = i915_gem_phys_pwrite(dev, obj, args, file);
920 goto out; 999 else if (obj->gtt_space &&
921 } 1000 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
922 1001 ret = i915_gem_object_pin(obj, 0, true);
923 if (obj->cache_level == I915_CACHE_NONE &&
924 obj->tiling_mode == I915_TILING_NONE &&
925 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
926 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
927 /* Note that the gtt paths might fail with non-page-backed user
928 * pointers (e.g. gtt mappings when moving data between
929 * textures). Fallback to the shmem path in that case. */
930 }
931
932 if (ret == -EFAULT || ret == -ENOSPC)
933 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
934
935out:
936 drm_gem_object_unreference(&obj->base);
937unlock:
938 mutex_unlock(&dev->struct_mutex);
939 return ret;
940}
941
942int
943i915_gem_check_wedge(struct drm_i915_private *dev_priv,
944 bool interruptible)
945{
946 if (atomic_read(&dev_priv->mm.wedged)) {
947 struct completion *x = &dev_priv->error_completion;
948 bool recovery_complete;
949 unsigned long flags;
950
951 /* Give the error handler a chance to run. */
952 spin_lock_irqsave(&x->wait.lock, flags);
953 recovery_complete = x->done > 0;
954 spin_unlock_irqrestore(&x->wait.lock, flags);
955
956 /* Non-interruptible callers can't handle -EAGAIN, hence return
957 * -EIO unconditionally for these. */
958 if (!interruptible)
959 return -EIO;
960
961 /* Recovery complete, but still wedged means reset failure. */
962 if (recovery_complete)
963 return -EIO;
964
965 return -EAGAIN;
966 }
967
968 return 0;
969}
970
971/*
972 * Compare seqno against outstanding lazy request. Emit a request if they are
973 * equal.
974 */
975static int
976i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
977{
978 int ret;
979
980 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
981
982 ret = 0;
983 if (seqno == ring->outstanding_lazy_request)
984 ret = i915_add_request(ring, NULL, NULL);
985
986 return ret;
987}
988
989/**
990 * __wait_seqno - wait until execution of seqno has finished
991 * @ring: the ring expected to report seqno
992 * @seqno: duh!
993 * @interruptible: do an interruptible wait (normally yes)
994 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
995 *
996 * Returns 0 if the seqno was found within the alloted time. Else returns the
997 * errno with remaining time filled in timeout argument.
998 */
999static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1000 bool interruptible, struct timespec *timeout)
1001{
1002 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1003 struct timespec before, now, wait_time={1,0};
1004 unsigned long timeout_jiffies;
1005 long end;
1006 bool wait_forever = true;
1007 int ret;
1008
1009 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1010 return 0;
1011
1012 trace_i915_gem_request_wait_begin(ring, seqno);
1013
1014 if (timeout != NULL) {
1015 wait_time = *timeout;
1016 wait_forever = false;
1017 }
1018
1019 timeout_jiffies = timespec_to_jiffies(&wait_time);
1020
1021 if (WARN_ON(!ring->irq_get(ring)))
1022 return -ENODEV;
1023
1024 /* Record current time in case interrupted by signal, or wedged * */
1025 getrawmonotonic(&before);
1026
1027#define EXIT_COND \
1028 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1029 atomic_read(&dev_priv->mm.wedged))
1030 do {
1031 if (interruptible)
1032 end = wait_event_interruptible_timeout(ring->irq_queue,
1033 EXIT_COND,
1034 timeout_jiffies);
1035 else
1036 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1037 timeout_jiffies);
1038
1039 ret = i915_gem_check_wedge(dev_priv, interruptible);
1040 if (ret) 1002 if (ret)
1041 end = ret; 1003 goto out;
1042 } while (end == 0 && wait_forever);
1043
1044 getrawmonotonic(&now);
1045
1046 ring->irq_put(ring);
1047 trace_i915_gem_request_wait_end(ring, seqno);
1048#undef EXIT_COND
1049
1050 if (timeout) {
1051 struct timespec sleep_time = timespec_sub(now, before);
1052 *timeout = timespec_sub(*timeout, sleep_time);
1053 }
1054
1055 switch (end) {
1056 case -EIO:
1057 case -EAGAIN: /* Wedged */
1058 case -ERESTARTSYS: /* Signal */
1059 return (int)end;
1060 case 0: /* Timeout */
1061 if (timeout)
1062 set_normalized_timespec(timeout, 0, 0);
1063 return -ETIME;
1064 default: /* Completed */
1065 WARN_ON(end < 0); /* We're not aware of other errors */
1066 return 0;
1067 }
1068}
1069
1070/**
1071 * Waits for a sequence number to be signaled, and cleans up the
1072 * request and object lists appropriately for that event.
1073 */
1074int
1075i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1076{
1077 struct drm_device *dev = ring->dev;
1078 struct drm_i915_private *dev_priv = dev->dev_private;
1079 bool interruptible = dev_priv->mm.interruptible;
1080 int ret;
1081
1082 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1083 BUG_ON(seqno == 0);
1084
1085 ret = i915_gem_check_wedge(dev_priv, interruptible);
1086 if (ret)
1087 return ret;
1088
1089 ret = i915_gem_check_olr(ring, seqno);
1090 if (ret)
1091 return ret;
1092
1093 return __wait_seqno(ring, seqno, interruptible, NULL);
1094}
1095 1004
1096/** 1005 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1097 * Ensures that all rendering to the object has completed and the object is 1006 if (ret)
1098 * safe to unbind from the GTT or access from the CPU. 1007 goto out_unpin;
1099 */
1100static __must_check int
1101i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1102 bool readonly)
1103{
1104 struct intel_ring_buffer *ring = obj->ring;
1105 u32 seqno;
1106 int ret;
1107 1008
1108 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; 1009 ret = i915_gem_object_put_fence(obj);
1109 if (seqno == 0) 1010 if (ret)
1110 return 0; 1011 goto out_unpin;
1111 1012
1112 ret = i915_wait_seqno(ring, seqno); 1013 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1113 if (ret) 1014 if (ret == -EFAULT)
1114 return ret; 1015 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1115 1016
1116 i915_gem_retire_requests_ring(ring); 1017out_unpin:
1018 i915_gem_object_unpin(obj);
1019 } else {
1020 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1021 if (ret)
1022 goto out;
1117 1023
1118 /* Manually manage the write flush as we may have not yet 1024 ret = -EFAULT;
1119 * retired the buffer. 1025 if (!i915_gem_object_needs_bit17_swizzle(obj))
1120 */ 1026 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1121 if (obj->last_write_seqno && 1027 if (ret == -EFAULT)
1122 i915_seqno_passed(seqno, obj->last_write_seqno)) { 1028 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1123 obj->last_write_seqno = 0;
1124 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1125 } 1029 }
1126 1030
1127 return 0; 1031out:
1128} 1032 drm_gem_object_unreference(&obj->base);
1129 1033unlock:
1130/* A nonblocking variant of the above wait. This is a highly dangerous routine
1131 * as the object state may change during this call.
1132 */
1133static __must_check int
1134i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1135 bool readonly)
1136{
1137 struct drm_device *dev = obj->base.dev;
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139 struct intel_ring_buffer *ring = obj->ring;
1140 u32 seqno;
1141 int ret;
1142
1143 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1144 BUG_ON(!dev_priv->mm.interruptible);
1145
1146 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1147 if (seqno == 0)
1148 return 0;
1149
1150 ret = i915_gem_check_wedge(dev_priv, true);
1151 if (ret)
1152 return ret;
1153
1154 ret = i915_gem_check_olr(ring, seqno);
1155 if (ret)
1156 return ret;
1157
1158 mutex_unlock(&dev->struct_mutex); 1034 mutex_unlock(&dev->struct_mutex);
1159 ret = __wait_seqno(ring, seqno, true, NULL);
1160 mutex_lock(&dev->struct_mutex);
1161
1162 i915_gem_retire_requests_ring(ring);
1163
1164 /* Manually manage the write flush as we may have not yet
1165 * retired the buffer.
1166 */
1167 if (obj->last_write_seqno &&
1168 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1169 obj->last_write_seqno = 0;
1170 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1171 }
1172
1173 return ret; 1035 return ret;
1174} 1036}
1175 1037
@@ -1187,6 +1049,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1187 uint32_t write_domain = args->write_domain; 1049 uint32_t write_domain = args->write_domain;
1188 int ret; 1050 int ret;
1189 1051
1052 if (!(dev->driver->driver_features & DRIVER_GEM))
1053 return -ENODEV;
1054
1190 /* Only handle setting domains to types used by the CPU. */ 1055 /* Only handle setting domains to types used by the CPU. */
1191 if (write_domain & I915_GEM_GPU_DOMAINS) 1056 if (write_domain & I915_GEM_GPU_DOMAINS)
1192 return -EINVAL; 1057 return -EINVAL;
@@ -1210,14 +1075,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1210 goto unlock; 1075 goto unlock;
1211 } 1076 }
1212 1077
1213 /* Try to flush the object off the GPU without holding the lock.
1214 * We will repeat the flush holding the lock in the normal manner
1215 * to catch cases where we are gazumped.
1216 */
1217 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1218 if (ret)
1219 goto unref;
1220
1221 if (read_domains & I915_GEM_DOMAIN_GTT) { 1078 if (read_domains & I915_GEM_DOMAIN_GTT) {
1222 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1079 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1223 1080
@@ -1231,7 +1088,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1231 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1088 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1232 } 1089 }
1233 1090
1234unref:
1235 drm_gem_object_unreference(&obj->base); 1091 drm_gem_object_unreference(&obj->base);
1236unlock: 1092unlock:
1237 mutex_unlock(&dev->struct_mutex); 1093 mutex_unlock(&dev->struct_mutex);
@@ -1249,6 +1105,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1249 struct drm_i915_gem_object *obj; 1105 struct drm_i915_gem_object *obj;
1250 int ret = 0; 1106 int ret = 0;
1251 1107
1108 if (!(dev->driver->driver_features & DRIVER_GEM))
1109 return -ENODEV;
1110
1252 ret = i915_mutex_lock_interruptible(dev); 1111 ret = i915_mutex_lock_interruptible(dev);
1253 if (ret) 1112 if (ret)
1254 return ret; 1113 return ret;
@@ -1280,25 +1139,28 @@ int
1280i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1139i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1281 struct drm_file *file) 1140 struct drm_file *file)
1282{ 1141{
1142 struct drm_i915_private *dev_priv = dev->dev_private;
1283 struct drm_i915_gem_mmap *args = data; 1143 struct drm_i915_gem_mmap *args = data;
1284 struct drm_gem_object *obj; 1144 struct drm_gem_object *obj;
1285 unsigned long addr; 1145 unsigned long addr;
1286 1146
1147 if (!(dev->driver->driver_features & DRIVER_GEM))
1148 return -ENODEV;
1149
1287 obj = drm_gem_object_lookup(dev, file, args->handle); 1150 obj = drm_gem_object_lookup(dev, file, args->handle);
1288 if (obj == NULL) 1151 if (obj == NULL)
1289 return -ENOENT; 1152 return -ENOENT;
1290 1153
1291 /* prime objects have no backing filp to GEM mmap 1154 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1292 * pages from.
1293 */
1294 if (!obj->filp) {
1295 drm_gem_object_unreference_unlocked(obj); 1155 drm_gem_object_unreference_unlocked(obj);
1296 return -EINVAL; 1156 return -E2BIG;
1297 } 1157 }
1298 1158
1299 addr = vm_mmap(obj->filp, 0, args->size, 1159 down_write(&current->mm->mmap_sem);
1160 addr = do_mmap(obj->filp, 0, args->size,
1300 PROT_READ | PROT_WRITE, MAP_SHARED, 1161 PROT_READ | PROT_WRITE, MAP_SHARED,
1301 args->offset); 1162 args->offset);
1163 up_write(&current->mm->mmap_sem);
1302 drm_gem_object_unreference_unlocked(obj); 1164 drm_gem_object_unreference_unlocked(obj);
1303 if (IS_ERR((void *)addr)) 1165 if (IS_ERR((void *)addr))
1304 return addr; 1166 return addr;
@@ -1345,37 +1207,43 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1345 trace_i915_gem_object_fault(obj, page_offset, true, write); 1207 trace_i915_gem_object_fault(obj, page_offset, true, write);
1346 1208
1347 /* Now bind it into the GTT if needed */ 1209 /* Now bind it into the GTT if needed */
1348 ret = i915_gem_object_pin(obj, 0, true, false); 1210 if (!obj->map_and_fenceable) {
1349 if (ret) 1211 ret = i915_gem_object_unbind(obj);
1350 goto unlock; 1212 if (ret)
1213 goto unlock;
1214 }
1215 if (!obj->gtt_space) {
1216 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1217 if (ret)
1218 goto unlock;
1351 1219
1352 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1220 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1353 if (ret) 1221 if (ret)
1354 goto unpin; 1222 goto unlock;
1223 }
1355 1224
1356 ret = i915_gem_object_get_fence(obj); 1225 if (obj->tiling_mode == I915_TILING_NONE)
1226 ret = i915_gem_object_put_fence(obj);
1227 else
1228 ret = i915_gem_object_get_fence(obj, NULL);
1357 if (ret) 1229 if (ret)
1358 goto unpin; 1230 goto unlock;
1231
1232 if (i915_gem_object_is_inactive(obj))
1233 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1359 1234
1360 obj->fault_mappable = true; 1235 obj->fault_mappable = true;
1361 1236
1362 pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + 1237 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1363 page_offset; 1238 page_offset;
1364 1239
1365 /* Finally, remap it using the new GTT offset */ 1240 /* Finally, remap it using the new GTT offset */
1366 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1241 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1367unpin:
1368 i915_gem_object_unpin(obj);
1369unlock: 1242unlock:
1370 mutex_unlock(&dev->struct_mutex); 1243 mutex_unlock(&dev->struct_mutex);
1371out: 1244out:
1372 switch (ret) { 1245 switch (ret) {
1373 case -EIO: 1246 case -EIO:
1374 /* If this -EIO is due to a gpu hang, give the reset code a
1375 * chance to clean up the mess. Otherwise return the proper
1376 * SIGBUS. */
1377 if (!atomic_read(&dev_priv->mm.wedged))
1378 return VM_FAULT_SIGBUS;
1379 case -EAGAIN: 1247 case -EAGAIN:
1380 /* Give the error handler a chance to run and move the 1248 /* Give the error handler a chance to run and move the
1381 * objects off the GPU active list. Next time we service the 1249 * objects off the GPU active list. Next time we service the
@@ -1388,23 +1256,83 @@ out:
1388 case 0: 1256 case 0:
1389 case -ERESTARTSYS: 1257 case -ERESTARTSYS:
1390 case -EINTR: 1258 case -EINTR:
1391 case -EBUSY:
1392 /*
1393 * EBUSY is ok: this just means that another thread
1394 * already did the job.
1395 */
1396 return VM_FAULT_NOPAGE; 1259 return VM_FAULT_NOPAGE;
1397 case -ENOMEM: 1260 case -ENOMEM:
1398 return VM_FAULT_OOM; 1261 return VM_FAULT_OOM;
1399 case -ENOSPC:
1400 return VM_FAULT_SIGBUS;
1401 default: 1262 default:
1402 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1403 return VM_FAULT_SIGBUS; 1263 return VM_FAULT_SIGBUS;
1404 } 1264 }
1405} 1265}
1406 1266
1407/** 1267/**
1268 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1269 * @obj: obj in question
1270 *
1271 * GEM memory mapping works by handing back to userspace a fake mmap offset
1272 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1273 * up the object based on the offset and sets up the various memory mapping
1274 * structures.
1275 *
1276 * This routine allocates and attaches a fake offset for @obj.
1277 */
1278static int
1279i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1280{
1281 struct drm_device *dev = obj->base.dev;
1282 struct drm_gem_mm *mm = dev->mm_private;
1283 struct drm_map_list *list;
1284 struct drm_local_map *map;
1285 int ret = 0;
1286
1287 /* Set the object up for mmap'ing */
1288 list = &obj->base.map_list;
1289 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1290 if (!list->map)
1291 return -ENOMEM;
1292
1293 map = list->map;
1294 map->type = _DRM_GEM;
1295 map->size = obj->base.size;
1296 map->handle = obj;
1297
1298 /* Get a DRM GEM mmap offset allocated... */
1299 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1300 obj->base.size / PAGE_SIZE,
1301 0, 0);
1302 if (!list->file_offset_node) {
1303 DRM_ERROR("failed to allocate offset for bo %d\n",
1304 obj->base.name);
1305 ret = -ENOSPC;
1306 goto out_free_list;
1307 }
1308
1309 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1310 obj->base.size / PAGE_SIZE,
1311 0);
1312 if (!list->file_offset_node) {
1313 ret = -ENOMEM;
1314 goto out_free_list;
1315 }
1316
1317 list->hash.key = list->file_offset_node->start;
1318 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1319 if (ret) {
1320 DRM_ERROR("failed to add to map hash\n");
1321 goto out_free_mm;
1322 }
1323
1324 return 0;
1325
1326out_free_mm:
1327 drm_mm_put_block(list->file_offset_node);
1328out_free_list:
1329 kfree(list->map);
1330 list->map = NULL;
1331
1332 return ret;
1333}
1334
1335/**
1408 * i915_gem_release_mmap - remove physical page mappings 1336 * i915_gem_release_mmap - remove physical page mappings
1409 * @obj: obj in question 1337 * @obj: obj in question
1410 * 1338 *
@@ -1432,6 +1360,19 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1432 obj->fault_mappable = false; 1360 obj->fault_mappable = false;
1433} 1361}
1434 1362
1363static void
1364i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1365{
1366 struct drm_device *dev = obj->base.dev;
1367 struct drm_gem_mm *mm = dev->mm_private;
1368 struct drm_map_list *list = &obj->base.map_list;
1369
1370 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1371 drm_mm_put_block(list->file_offset_node);
1372 kfree(list->map);
1373 list->map = NULL;
1374}
1375
1435static uint32_t 1376static uint32_t
1436i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1377i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1437{ 1378{
@@ -1509,48 +1450,6 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1509 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1450 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1510} 1451}
1511 1452
1512static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1513{
1514 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1515 int ret;
1516
1517 if (obj->base.map_list.map)
1518 return 0;
1519
1520 dev_priv->mm.shrinker_no_lock_stealing = true;
1521
1522 ret = drm_gem_create_mmap_offset(&obj->base);
1523 if (ret != -ENOSPC)
1524 goto out;
1525
1526 /* Badly fragmented mmap space? The only way we can recover
1527 * space is by destroying unwanted objects. We can't randomly release
1528 * mmap_offsets as userspace expects them to be persistent for the
1529 * lifetime of the objects. The closest we can is to release the
1530 * offsets on purgeable objects by truncating it and marking it purged,
1531 * which prevents userspace from ever using that object again.
1532 */
1533 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1534 ret = drm_gem_create_mmap_offset(&obj->base);
1535 if (ret != -ENOSPC)
1536 goto out;
1537
1538 i915_gem_shrink_all(dev_priv);
1539 ret = drm_gem_create_mmap_offset(&obj->base);
1540out:
1541 dev_priv->mm.shrinker_no_lock_stealing = false;
1542
1543 return ret;
1544}
1545
1546static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1547{
1548 if (!obj->base.map_list.map)
1549 return;
1550
1551 drm_gem_free_mmap_offset(&obj->base);
1552}
1553
1554int 1453int
1555i915_gem_mmap_gtt(struct drm_file *file, 1454i915_gem_mmap_gtt(struct drm_file *file,
1556 struct drm_device *dev, 1455 struct drm_device *dev,
@@ -1561,6 +1460,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
1561 struct drm_i915_gem_object *obj; 1460 struct drm_i915_gem_object *obj;
1562 int ret; 1461 int ret;
1563 1462
1463 if (!(dev->driver->driver_features & DRIVER_GEM))
1464 return -ENODEV;
1465
1564 ret = i915_mutex_lock_interruptible(dev); 1466 ret = i915_mutex_lock_interruptible(dev);
1565 if (ret) 1467 if (ret)
1566 return ret; 1468 return ret;
@@ -1582,9 +1484,11 @@ i915_gem_mmap_gtt(struct drm_file *file,
1582 goto out; 1484 goto out;
1583 } 1485 }
1584 1486
1585 ret = i915_gem_object_create_mmap_offset(obj); 1487 if (!obj->base.map_list.map) {
1586 if (ret) 1488 ret = i915_gem_create_mmap_offset(obj);
1587 goto out; 1489 if (ret)
1490 goto out;
1491 }
1588 1492
1589 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1493 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1590 1494
@@ -1616,269 +1520,93 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1616{ 1520{
1617 struct drm_i915_gem_mmap_gtt *args = data; 1521 struct drm_i915_gem_mmap_gtt *args = data;
1618 1522
1523 if (!(dev->driver->driver_features & DRIVER_GEM))
1524 return -ENODEV;
1525
1619 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1526 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1620} 1527}
1621 1528
1622/* Immediately discard the backing storage */ 1529
1623static void 1530static int
1624i915_gem_object_truncate(struct drm_i915_gem_object *obj) 1531i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1532 gfp_t gfpmask)
1625{ 1533{
1534 int page_count, i;
1535 struct address_space *mapping;
1626 struct inode *inode; 1536 struct inode *inode;
1537 struct page *page;
1627 1538
1628 i915_gem_object_free_mmap_offset(obj); 1539 /* Get the list of pages out of our struct file. They'll be pinned
1629 1540 * at this point until we release them.
1630 if (obj->base.filp == NULL)
1631 return;
1632
1633 /* Our goal here is to return as much of the memory as
1634 * is possible back to the system as we are called from OOM.
1635 * To do this we must instruct the shmfs to drop all of its
1636 * backing pages, *now*.
1637 */ 1541 */
1542 page_count = obj->base.size / PAGE_SIZE;
1543 BUG_ON(obj->pages != NULL);
1544 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1545 if (obj->pages == NULL)
1546 return -ENOMEM;
1547
1638 inode = obj->base.filp->f_path.dentry->d_inode; 1548 inode = obj->base.filp->f_path.dentry->d_inode;
1639 shmem_truncate_range(inode, 0, (loff_t)-1); 1549 mapping = inode->i_mapping;
1550 gfpmask |= mapping_gfp_mask(mapping);
1640 1551
1641 obj->madv = __I915_MADV_PURGED; 1552 for (i = 0; i < page_count; i++) {
1642} 1553 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1554 if (IS_ERR(page))
1555 goto err_pages;
1643 1556
1644static inline int 1557 obj->pages[i] = page;
1645i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) 1558 }
1646{ 1559
1647 return obj->madv == I915_MADV_DONTNEED; 1560 if (obj->tiling_mode != I915_TILING_NONE)
1561 i915_gem_object_do_bit_17_swizzle(obj);
1562
1563 return 0;
1564
1565err_pages:
1566 while (i--)
1567 page_cache_release(obj->pages[i]);
1568
1569 drm_free_large(obj->pages);
1570 obj->pages = NULL;
1571 return PTR_ERR(page);
1648} 1572}
1649 1573
1650static void 1574static void
1651i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1575i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1652{ 1576{
1653 int page_count = obj->base.size / PAGE_SIZE; 1577 int page_count = obj->base.size / PAGE_SIZE;
1654 struct scatterlist *sg; 1578 int i;
1655 int ret, i;
1656 1579
1657 BUG_ON(obj->madv == __I915_MADV_PURGED); 1580 BUG_ON(obj->madv == __I915_MADV_PURGED);
1658 1581
1659 ret = i915_gem_object_set_to_cpu_domain(obj, true); 1582 if (obj->tiling_mode != I915_TILING_NONE)
1660 if (ret) {
1661 /* In the event of a disaster, abandon all caches and
1662 * hope for the best.
1663 */
1664 WARN_ON(ret != -EIO);
1665 i915_gem_clflush_object(obj);
1666 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1667 }
1668
1669 if (i915_gem_object_needs_bit17_swizzle(obj))
1670 i915_gem_object_save_bit_17_swizzle(obj); 1583 i915_gem_object_save_bit_17_swizzle(obj);
1671 1584
1672 if (obj->madv == I915_MADV_DONTNEED) 1585 if (obj->madv == I915_MADV_DONTNEED)
1673 obj->dirty = 0; 1586 obj->dirty = 0;
1674 1587
1675 for_each_sg(obj->pages->sgl, sg, page_count, i) { 1588 for (i = 0; i < page_count; i++) {
1676 struct page *page = sg_page(sg);
1677
1678 if (obj->dirty) 1589 if (obj->dirty)
1679 set_page_dirty(page); 1590 set_page_dirty(obj->pages[i]);
1680 1591
1681 if (obj->madv == I915_MADV_WILLNEED) 1592 if (obj->madv == I915_MADV_WILLNEED)
1682 mark_page_accessed(page); 1593 mark_page_accessed(obj->pages[i]);
1683 1594
1684 page_cache_release(page); 1595 page_cache_release(obj->pages[i]);
1685 } 1596 }
1686 obj->dirty = 0; 1597 obj->dirty = 0;
1687 1598
1688 sg_free_table(obj->pages); 1599 drm_free_large(obj->pages);
1689 kfree(obj->pages);
1690}
1691
1692static int
1693i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1694{
1695 const struct drm_i915_gem_object_ops *ops = obj->ops;
1696
1697 if (obj->pages == NULL)
1698 return 0;
1699
1700 BUG_ON(obj->gtt_space);
1701
1702 if (obj->pages_pin_count)
1703 return -EBUSY;
1704
1705 /* ->put_pages might need to allocate memory for the bit17 swizzle
1706 * array, hence protect them from being reaped by removing them from gtt
1707 * lists early. */
1708 list_del(&obj->gtt_list);
1709
1710 ops->put_pages(obj);
1711 obj->pages = NULL; 1600 obj->pages = NULL;
1712
1713 if (i915_gem_object_is_purgeable(obj))
1714 i915_gem_object_truncate(obj);
1715
1716 return 0;
1717}
1718
1719static long
1720__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1721 bool purgeable_only)
1722{
1723 struct drm_i915_gem_object *obj, *next;
1724 long count = 0;
1725
1726 list_for_each_entry_safe(obj, next,
1727 &dev_priv->mm.unbound_list,
1728 gtt_list) {
1729 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1730 i915_gem_object_put_pages(obj) == 0) {
1731 count += obj->base.size >> PAGE_SHIFT;
1732 if (count >= target)
1733 return count;
1734 }
1735 }
1736
1737 list_for_each_entry_safe(obj, next,
1738 &dev_priv->mm.inactive_list,
1739 mm_list) {
1740 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1741 i915_gem_object_unbind(obj) == 0 &&
1742 i915_gem_object_put_pages(obj) == 0) {
1743 count += obj->base.size >> PAGE_SHIFT;
1744 if (count >= target)
1745 return count;
1746 }
1747 }
1748
1749 return count;
1750}
1751
1752static long
1753i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1754{
1755 return __i915_gem_shrink(dev_priv, target, true);
1756}
1757
1758static void
1759i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1760{
1761 struct drm_i915_gem_object *obj, *next;
1762
1763 i915_gem_evict_everything(dev_priv->dev);
1764
1765 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1766 i915_gem_object_put_pages(obj);
1767}
1768
1769static int
1770i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1771{
1772 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1773 int page_count, i;
1774 struct address_space *mapping;
1775 struct sg_table *st;
1776 struct scatterlist *sg;
1777 struct page *page;
1778 gfp_t gfp;
1779
1780 /* Assert that the object is not currently in any GPU domain. As it
1781 * wasn't in the GTT, there shouldn't be any way it could have been in
1782 * a GPU cache
1783 */
1784 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1785 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1786
1787 st = kmalloc(sizeof(*st), GFP_KERNEL);
1788 if (st == NULL)
1789 return -ENOMEM;
1790
1791 page_count = obj->base.size / PAGE_SIZE;
1792 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1793 sg_free_table(st);
1794 kfree(st);
1795 return -ENOMEM;
1796 }
1797
1798 /* Get the list of pages out of our struct file. They'll be pinned
1799 * at this point until we release them.
1800 *
1801 * Fail silently without starting the shrinker
1802 */
1803 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1804 gfp = mapping_gfp_mask(mapping);
1805 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1806 gfp &= ~(__GFP_IO | __GFP_WAIT);
1807 for_each_sg(st->sgl, sg, page_count, i) {
1808 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1809 if (IS_ERR(page)) {
1810 i915_gem_purge(dev_priv, page_count);
1811 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1812 }
1813 if (IS_ERR(page)) {
1814 /* We've tried hard to allocate the memory by reaping
1815 * our own buffer, now let the real VM do its job and
1816 * go down in flames if truly OOM.
1817 */
1818 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1819 gfp |= __GFP_IO | __GFP_WAIT;
1820
1821 i915_gem_shrink_all(dev_priv);
1822 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1823 if (IS_ERR(page))
1824 goto err_pages;
1825
1826 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1827 gfp &= ~(__GFP_IO | __GFP_WAIT);
1828 }
1829
1830 sg_set_page(sg, page, PAGE_SIZE, 0);
1831 }
1832
1833 obj->pages = st;
1834
1835 if (i915_gem_object_needs_bit17_swizzle(obj))
1836 i915_gem_object_do_bit_17_swizzle(obj);
1837
1838 return 0;
1839
1840err_pages:
1841 for_each_sg(st->sgl, sg, i, page_count)
1842 page_cache_release(sg_page(sg));
1843 sg_free_table(st);
1844 kfree(st);
1845 return PTR_ERR(page);
1846}
1847
1848/* Ensure that the associated pages are gathered from the backing storage
1849 * and pinned into our object. i915_gem_object_get_pages() may be called
1850 * multiple times before they are released by a single call to
1851 * i915_gem_object_put_pages() - once the pages are no longer referenced
1852 * either as a result of memory pressure (reaping pages under the shrinker)
1853 * or as the object is itself released.
1854 */
1855int
1856i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1857{
1858 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1859 const struct drm_i915_gem_object_ops *ops = obj->ops;
1860 int ret;
1861
1862 if (obj->pages)
1863 return 0;
1864
1865 BUG_ON(obj->pages_pin_count);
1866
1867 ret = ops->get_pages(obj);
1868 if (ret)
1869 return ret;
1870
1871 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1872 return 0;
1873} 1601}
1874 1602
1875void 1603void
1876i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1604i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1877 struct intel_ring_buffer *ring) 1605 struct intel_ring_buffer *ring,
1606 u32 seqno)
1878{ 1607{
1879 struct drm_device *dev = obj->base.dev; 1608 struct drm_device *dev = obj->base.dev;
1880 struct drm_i915_private *dev_priv = dev->dev_private; 1609 struct drm_i915_private *dev_priv = dev->dev_private;
1881 u32 seqno = intel_ring_get_seqno(ring);
1882 1610
1883 BUG_ON(ring == NULL); 1611 BUG_ON(ring == NULL);
1884 obj->ring = ring; 1612 obj->ring = ring;
@@ -1893,149 +1621,134 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1893 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); 1621 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1894 list_move_tail(&obj->ring_list, &ring->active_list); 1622 list_move_tail(&obj->ring_list, &ring->active_list);
1895 1623
1896 obj->last_read_seqno = seqno; 1624 obj->last_rendering_seqno = seqno;
1897
1898 if (obj->fenced_gpu_access) { 1625 if (obj->fenced_gpu_access) {
1899 obj->last_fenced_seqno = seqno; 1626 struct drm_i915_fence_reg *reg;
1900 1627
1901 /* Bump MRU to take account of the delayed flush */ 1628 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1902 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1903 struct drm_i915_fence_reg *reg;
1904 1629
1905 reg = &dev_priv->fence_regs[obj->fence_reg]; 1630 obj->last_fenced_seqno = seqno;
1906 list_move_tail(&reg->lru_list, 1631 obj->last_fenced_ring = ring;
1907 &dev_priv->mm.fence_list); 1632
1908 } 1633 reg = &dev_priv->fence_regs[obj->fence_reg];
1634 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1909 } 1635 }
1910} 1636}
1911 1637
1912static void 1638static void
1913i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1639i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1640{
1641 list_del_init(&obj->ring_list);
1642 obj->last_rendering_seqno = 0;
1643}
1644
1645static void
1646i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1914{ 1647{
1915 struct drm_device *dev = obj->base.dev; 1648 struct drm_device *dev = obj->base.dev;
1916 struct drm_i915_private *dev_priv = dev->dev_private; 1649 drm_i915_private_t *dev_priv = dev->dev_private;
1917 1650
1918 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1919 BUG_ON(!obj->active); 1651 BUG_ON(!obj->active);
1652 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1920 1653
1921 if (obj->pin_count) /* are we a framebuffer? */ 1654 i915_gem_object_move_off_active(obj);
1922 intel_mark_fb_idle(obj); 1655}
1923 1656
1924 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1657static void
1658i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1659{
1660 struct drm_device *dev = obj->base.dev;
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1925 1662
1926 list_del_init(&obj->ring_list); 1663 if (obj->pin_count != 0)
1927 obj->ring = NULL; 1664 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1665 else
1666 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1928 1667
1929 obj->last_read_seqno = 0; 1668 BUG_ON(!list_empty(&obj->gpu_write_list));
1930 obj->last_write_seqno = 0; 1669 BUG_ON(!obj->active);
1931 obj->base.write_domain = 0; 1670 obj->ring = NULL;
1932 1671
1933 obj->last_fenced_seqno = 0; 1672 i915_gem_object_move_off_active(obj);
1934 obj->fenced_gpu_access = false; 1673 obj->fenced_gpu_access = false;
1935 1674
1936 obj->active = 0; 1675 obj->active = 0;
1676 obj->pending_gpu_write = false;
1937 drm_gem_object_unreference(&obj->base); 1677 drm_gem_object_unreference(&obj->base);
1938 1678
1939 WARN_ON(i915_verify_lists(dev)); 1679 WARN_ON(i915_verify_lists(dev));
1940} 1680}
1941 1681
1942static int 1682/* Immediately discard the backing storage */
1943i915_gem_handle_seqno_wrap(struct drm_device *dev) 1683static void
1684i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1944{ 1685{
1945 struct drm_i915_private *dev_priv = dev->dev_private; 1686 struct inode *inode;
1946 struct intel_ring_buffer *ring;
1947 int ret, i, j;
1948 1687
1949 /* The hardware uses various monotonic 32-bit counters, if we 1688 /* Our goal here is to return as much of the memory as
1950 * detect that they will wraparound we need to idle the GPU 1689 * is possible back to the system as we are called from OOM.
1951 * and reset those counters. 1690 * To do this we must instruct the shmfs to drop all of its
1691 * backing pages, *now*.
1952 */ 1692 */
1953 ret = 0; 1693 inode = obj->base.filp->f_path.dentry->d_inode;
1954 for_each_ring(ring, dev_priv, i) { 1694 shmem_truncate_range(inode, 0, (loff_t)-1);
1955 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1956 ret |= ring->sync_seqno[j] != 0;
1957 }
1958 if (ret == 0)
1959 return ret;
1960
1961 ret = i915_gpu_idle(dev);
1962 if (ret)
1963 return ret;
1964
1965 i915_gem_retire_requests(dev);
1966 for_each_ring(ring, dev_priv, i) {
1967 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1968 ring->sync_seqno[j] = 0;
1969 }
1970 1695
1971 return 0; 1696 obj->madv = __I915_MADV_PURGED;
1972} 1697}
1973 1698
1974int 1699static inline int
1975i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 1700i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1976{ 1701{
1977 struct drm_i915_private *dev_priv = dev->dev_private; 1702 return obj->madv == I915_MADV_DONTNEED;
1703}
1978 1704
1979 /* reserve 0 for non-seqno */ 1705static void
1980 if (dev_priv->next_seqno == 0) { 1706i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1981 int ret = i915_gem_handle_seqno_wrap(dev); 1707 uint32_t flush_domains)
1982 if (ret) 1708{
1983 return ret; 1709 struct drm_i915_gem_object *obj, *next;
1984 1710
1985 dev_priv->next_seqno = 1; 1711 list_for_each_entry_safe(obj, next,
1712 &ring->gpu_write_list,
1713 gpu_write_list) {
1714 if (obj->base.write_domain & flush_domains) {
1715 uint32_t old_write_domain = obj->base.write_domain;
1716
1717 obj->base.write_domain = 0;
1718 list_del_init(&obj->gpu_write_list);
1719 i915_gem_object_move_to_active(obj, ring,
1720 i915_gem_next_request_seqno(ring));
1721
1722 trace_i915_gem_object_change_domain(obj,
1723 obj->base.read_domains,
1724 old_write_domain);
1725 }
1986 } 1726 }
1987
1988 *seqno = dev_priv->next_seqno++;
1989 return 0;
1990} 1727}
1991 1728
1992int 1729int
1993i915_add_request(struct intel_ring_buffer *ring, 1730i915_add_request(struct intel_ring_buffer *ring,
1994 struct drm_file *file, 1731 struct drm_file *file,
1995 u32 *out_seqno) 1732 struct drm_i915_gem_request *request)
1996{ 1733{
1997 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1734 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1998 struct drm_i915_gem_request *request; 1735 uint32_t seqno;
1999 u32 request_ring_position;
2000 int was_empty; 1736 int was_empty;
2001 int ret; 1737 int ret;
2002 1738
2003 /* 1739 BUG_ON(request == NULL);
2004 * Emit any outstanding flushes - execbuf can fail to emit the flush
2005 * after having emitted the batchbuffer command. Hence we need to fix
2006 * things up similar to emitting the lazy request. The difference here
2007 * is that the flush _must_ happen before the next request, no matter
2008 * what.
2009 */
2010 ret = intel_ring_flush_all_caches(ring);
2011 if (ret)
2012 return ret;
2013
2014 request = kmalloc(sizeof(*request), GFP_KERNEL);
2015 if (request == NULL)
2016 return -ENOMEM;
2017 1740
1741 ret = ring->add_request(ring, &seqno);
1742 if (ret)
1743 return ret;
2018 1744
2019 /* Record the position of the start of the request so that 1745 trace_i915_gem_request_add(ring, seqno);
2020 * should we detect the updated seqno part-way through the
2021 * GPU processing the request, we never over-estimate the
2022 * position of the head.
2023 */
2024 request_ring_position = intel_ring_get_tail(ring);
2025
2026 ret = ring->add_request(ring);
2027 if (ret) {
2028 kfree(request);
2029 return ret;
2030 }
2031 1746
2032 request->seqno = intel_ring_get_seqno(ring); 1747 request->seqno = seqno;
2033 request->ring = ring; 1748 request->ring = ring;
2034 request->tail = request_ring_position;
2035 request->emitted_jiffies = jiffies; 1749 request->emitted_jiffies = jiffies;
2036 was_empty = list_empty(&ring->request_list); 1750 was_empty = list_empty(&ring->request_list);
2037 list_add_tail(&request->list, &ring->request_list); 1751 list_add_tail(&request->list, &ring->request_list);
2038 request->file_priv = NULL;
2039 1752
2040 if (file) { 1753 if (file) {
2041 struct drm_i915_file_private *file_priv = file->driver_priv; 1754 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2047,24 +1760,18 @@ i915_add_request(struct intel_ring_buffer *ring,
2047 spin_unlock(&file_priv->mm.lock); 1760 spin_unlock(&file_priv->mm.lock);
2048 } 1761 }
2049 1762
2050 trace_i915_gem_request_add(ring, request->seqno); 1763 ring->outstanding_lazy_request = false;
2051 ring->outstanding_lazy_request = 0;
2052 1764
2053 if (!dev_priv->mm.suspended) { 1765 if (!dev_priv->mm.suspended) {
2054 if (i915_enable_hangcheck) { 1766 if (i915_enable_hangcheck) {
2055 mod_timer(&dev_priv->hangcheck_timer, 1767 mod_timer(&dev_priv->hangcheck_timer,
2056 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1768 jiffies +
1769 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
2057 } 1770 }
2058 if (was_empty) { 1771 if (was_empty)
2059 queue_delayed_work(dev_priv->wq, 1772 queue_delayed_work(dev_priv->wq,
2060 &dev_priv->mm.retire_work, 1773 &dev_priv->mm.retire_work, HZ);
2061 round_jiffies_up_relative(HZ));
2062 intel_mark_busy(dev_priv->dev);
2063 }
2064 } 1774 }
2065
2066 if (out_seqno)
2067 *out_seqno = request->seqno;
2068 return 0; 1775 return 0;
2069} 1776}
2070 1777
@@ -2106,6 +1813,8 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2106 struct drm_i915_gem_object, 1813 struct drm_i915_gem_object,
2107 ring_list); 1814 ring_list);
2108 1815
1816 obj->base.write_domain = 0;
1817 list_del_init(&obj->gpu_write_list);
2109 i915_gem_object_move_to_inactive(obj); 1818 i915_gem_object_move_to_inactive(obj);
2110 } 1819 }
2111} 1820}
@@ -2115,31 +1824,46 @@ static void i915_gem_reset_fences(struct drm_device *dev)
2115 struct drm_i915_private *dev_priv = dev->dev_private; 1824 struct drm_i915_private *dev_priv = dev->dev_private;
2116 int i; 1825 int i;
2117 1826
2118 for (i = 0; i < dev_priv->num_fence_regs; i++) { 1827 for (i = 0; i < 16; i++) {
2119 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1828 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1829 struct drm_i915_gem_object *obj = reg->obj;
2120 1830
2121 i915_gem_write_fence(dev, i, NULL); 1831 if (!obj)
1832 continue;
2122 1833
2123 if (reg->obj) 1834 if (obj->tiling_mode)
2124 i915_gem_object_fence_lost(reg->obj); 1835 i915_gem_release_mmap(obj);
2125 1836
2126 reg->pin_count = 0; 1837 reg->obj->fence_reg = I915_FENCE_REG_NONE;
2127 reg->obj = NULL; 1838 reg->obj->fenced_gpu_access = false;
2128 INIT_LIST_HEAD(&reg->lru_list); 1839 reg->obj->last_fenced_seqno = 0;
1840 reg->obj->last_fenced_ring = NULL;
1841 i915_gem_clear_fence_reg(dev, reg);
2129 } 1842 }
2130
2131 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2132} 1843}
2133 1844
2134void i915_gem_reset(struct drm_device *dev) 1845void i915_gem_reset(struct drm_device *dev)
2135{ 1846{
2136 struct drm_i915_private *dev_priv = dev->dev_private; 1847 struct drm_i915_private *dev_priv = dev->dev_private;
2137 struct drm_i915_gem_object *obj; 1848 struct drm_i915_gem_object *obj;
2138 struct intel_ring_buffer *ring;
2139 int i; 1849 int i;
2140 1850
2141 for_each_ring(ring, dev_priv, i) 1851 for (i = 0; i < I915_NUM_RINGS; i++)
2142 i915_gem_reset_ring_lists(dev_priv, ring); 1852 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1853
1854 /* Remove anything from the flushing lists. The GPU cache is likely
1855 * to be lost on reset along with the data, so simply move the
1856 * lost bo to the inactive list.
1857 */
1858 while (!list_empty(&dev_priv->mm.flushing_list)) {
1859 obj= list_first_entry(&dev_priv->mm.flushing_list,
1860 struct drm_i915_gem_object,
1861 mm_list);
1862
1863 obj->base.write_domain = 0;
1864 list_del_init(&obj->gpu_write_list);
1865 i915_gem_object_move_to_inactive(obj);
1866 }
2143 1867
2144 /* Move everything out of the GPU domains to ensure we do any 1868 /* Move everything out of the GPU domains to ensure we do any
2145 * necessary invalidation upon reuse. 1869 * necessary invalidation upon reuse.
@@ -2158,17 +1882,22 @@ void i915_gem_reset(struct drm_device *dev)
2158/** 1882/**
2159 * This function clears the request list as sequence numbers are passed. 1883 * This function clears the request list as sequence numbers are passed.
2160 */ 1884 */
2161void 1885static void
2162i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 1886i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2163{ 1887{
2164 uint32_t seqno; 1888 uint32_t seqno;
1889 int i;
2165 1890
2166 if (list_empty(&ring->request_list)) 1891 if (list_empty(&ring->request_list))
2167 return; 1892 return;
2168 1893
2169 WARN_ON(i915_verify_lists(ring->dev)); 1894 WARN_ON(i915_verify_lists(ring->dev));
2170 1895
2171 seqno = ring->get_seqno(ring, true); 1896 seqno = ring->get_seqno(ring);
1897
1898 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1899 if (seqno >= ring->sync_seqno[i])
1900 ring->sync_seqno[i] = 0;
2172 1901
2173 while (!list_empty(&ring->request_list)) { 1902 while (!list_empty(&ring->request_list)) {
2174 struct drm_i915_gem_request *request; 1903 struct drm_i915_gem_request *request;
@@ -2181,12 +1910,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2181 break; 1910 break;
2182 1911
2183 trace_i915_gem_request_retire(ring, request->seqno); 1912 trace_i915_gem_request_retire(ring, request->seqno);
2184 /* We know the GPU must have read the request to have
2185 * sent us the seqno + interrupt, so use the position
2186 * of tail of the request to update the last known position
2187 * of the GPU head.
2188 */
2189 ring->last_retired_head = request->tail;
2190 1913
2191 list_del(&request->list); 1914 list_del(&request->list);
2192 i915_gem_request_remove_from_client(request); 1915 i915_gem_request_remove_from_client(request);
@@ -2199,14 +1922,17 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2199 while (!list_empty(&ring->active_list)) { 1922 while (!list_empty(&ring->active_list)) {
2200 struct drm_i915_gem_object *obj; 1923 struct drm_i915_gem_object *obj;
2201 1924
2202 obj = list_first_entry(&ring->active_list, 1925 obj= list_first_entry(&ring->active_list,
2203 struct drm_i915_gem_object, 1926 struct drm_i915_gem_object,
2204 ring_list); 1927 ring_list);
2205 1928
2206 if (!i915_seqno_passed(seqno, obj->last_read_seqno)) 1929 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
2207 break; 1930 break;
2208 1931
2209 i915_gem_object_move_to_inactive(obj); 1932 if (obj->base.write_domain != 0)
1933 i915_gem_object_move_to_flushing(obj);
1934 else
1935 i915_gem_object_move_to_inactive(obj);
2210 } 1936 }
2211 1937
2212 if (unlikely(ring->trace_irq_seqno && 1938 if (unlikely(ring->trace_irq_seqno &&
@@ -2222,11 +1948,24 @@ void
2222i915_gem_retire_requests(struct drm_device *dev) 1948i915_gem_retire_requests(struct drm_device *dev)
2223{ 1949{
2224 drm_i915_private_t *dev_priv = dev->dev_private; 1950 drm_i915_private_t *dev_priv = dev->dev_private;
2225 struct intel_ring_buffer *ring;
2226 int i; 1951 int i;
2227 1952
2228 for_each_ring(ring, dev_priv, i) 1953 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2229 i915_gem_retire_requests_ring(ring); 1954 struct drm_i915_gem_object *obj, *next;
1955
1956 /* We must be careful that during unbind() we do not
1957 * accidentally infinitely recurse into retire requests.
1958 * Currently:
1959 * retire -> free -> unbind -> wait -> retire_ring
1960 */
1961 list_for_each_entry_safe(obj, next,
1962 &dev_priv->mm.deferred_free_list,
1963 mm_list)
1964 i915_gem_free_object_tail(obj);
1965 }
1966
1967 for (i = 0; i < I915_NUM_RINGS; i++)
1968 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
2230} 1969}
2231 1970
2232static void 1971static void
@@ -2234,7 +1973,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
2234{ 1973{
2235 drm_i915_private_t *dev_priv; 1974 drm_i915_private_t *dev_priv;
2236 struct drm_device *dev; 1975 struct drm_device *dev;
2237 struct intel_ring_buffer *ring;
2238 bool idle; 1976 bool idle;
2239 int i; 1977 int i;
2240 1978
@@ -2244,8 +1982,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
2244 1982
2245 /* Come back later if the device is busy... */ 1983 /* Come back later if the device is busy... */
2246 if (!mutex_trylock(&dev->struct_mutex)) { 1984 if (!mutex_trylock(&dev->struct_mutex)) {
2247 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 1985 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2248 round_jiffies_up_relative(HZ));
2249 return; 1986 return;
2250 } 1987 }
2251 1988
@@ -2255,172 +1992,150 @@ i915_gem_retire_work_handler(struct work_struct *work)
2255 * objects indefinitely. 1992 * objects indefinitely.
2256 */ 1993 */
2257 idle = true; 1994 idle = true;
2258 for_each_ring(ring, dev_priv, i) { 1995 for (i = 0; i < I915_NUM_RINGS; i++) {
2259 if (ring->gpu_caches_dirty) 1996 struct intel_ring_buffer *ring = &dev_priv->ring[i];
2260 i915_add_request(ring, NULL, NULL); 1997
1998 if (!list_empty(&ring->gpu_write_list)) {
1999 struct drm_i915_gem_request *request;
2000 int ret;
2001
2002 ret = i915_gem_flush_ring(ring,
2003 0, I915_GEM_GPU_DOMAINS);
2004 request = kzalloc(sizeof(*request), GFP_KERNEL);
2005 if (ret || request == NULL ||
2006 i915_add_request(ring, NULL, request))
2007 kfree(request);
2008 }
2261 2009
2262 idle &= list_empty(&ring->request_list); 2010 idle &= list_empty(&ring->request_list);
2263 } 2011 }
2264 2012
2265 if (!dev_priv->mm.suspended && !idle) 2013 if (!dev_priv->mm.suspended && !idle)
2266 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2014 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2267 round_jiffies_up_relative(HZ));
2268 if (idle)
2269 intel_mark_idle(dev);
2270 2015
2271 mutex_unlock(&dev->struct_mutex); 2016 mutex_unlock(&dev->struct_mutex);
2272} 2017}
2273 2018
2274/** 2019/**
2275 * Ensures that an object will eventually get non-busy by flushing any required 2020 * Waits for a sequence number to be signaled, and cleans up the
2276 * write domains, emitting any outstanding lazy request and retiring and 2021 * request and object lists appropriately for that event.
2277 * completed requests.
2278 */ 2022 */
2279static int 2023int
2280i915_gem_object_flush_active(struct drm_i915_gem_object *obj) 2024i915_wait_request(struct intel_ring_buffer *ring,
2025 uint32_t seqno)
2281{ 2026{
2282 int ret; 2027 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2283 2028 u32 ier;
2284 if (obj->active) { 2029 int ret = 0;
2285 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2286 if (ret)
2287 return ret;
2288 2030
2289 i915_gem_retire_requests_ring(obj->ring); 2031 BUG_ON(seqno == 0);
2290 }
2291 2032
2292 return 0; 2033 if (atomic_read(&dev_priv->mm.wedged)) {
2293} 2034 struct completion *x = &dev_priv->error_completion;
2035 bool recovery_complete;
2036 unsigned long flags;
2294 2037
2295/** 2038 /* Give the error handler a chance to run. */
2296 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 2039 spin_lock_irqsave(&x->wait.lock, flags);
2297 * @DRM_IOCTL_ARGS: standard ioctl arguments 2040 recovery_complete = x->done > 0;
2298 * 2041 spin_unlock_irqrestore(&x->wait.lock, flags);
2299 * Returns 0 if successful, else an error is returned with the remaining time in
2300 * the timeout parameter.
2301 * -ETIME: object is still busy after timeout
2302 * -ERESTARTSYS: signal interrupted the wait
2303 * -ENONENT: object doesn't exist
2304 * Also possible, but rare:
2305 * -EAGAIN: GPU wedged
2306 * -ENOMEM: damn
2307 * -ENODEV: Internal IRQ fail
2308 * -E?: The add request failed
2309 *
2310 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2311 * non-zero timeout parameter the wait ioctl will wait for the given number of
2312 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2313 * without holding struct_mutex the object may become re-busied before this
2314 * function completes. A similar but shorter * race condition exists in the busy
2315 * ioctl
2316 */
2317int
2318i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2319{
2320 struct drm_i915_gem_wait *args = data;
2321 struct drm_i915_gem_object *obj;
2322 struct intel_ring_buffer *ring = NULL;
2323 struct timespec timeout_stack, *timeout = NULL;
2324 u32 seqno = 0;
2325 int ret = 0;
2326 2042
2327 if (args->timeout_ns >= 0) { 2043 return recovery_complete ? -EIO : -EAGAIN;
2328 timeout_stack = ns_to_timespec(args->timeout_ns);
2329 timeout = &timeout_stack;
2330 } 2044 }
2331 2045
2332 ret = i915_mutex_lock_interruptible(dev); 2046 if (seqno == ring->outstanding_lazy_request) {
2333 if (ret) 2047 struct drm_i915_gem_request *request;
2334 return ret;
2335 2048
2336 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); 2049 request = kzalloc(sizeof(*request), GFP_KERNEL);
2337 if (&obj->base == NULL) { 2050 if (request == NULL)
2338 mutex_unlock(&dev->struct_mutex); 2051 return -ENOMEM;
2339 return -ENOENT;
2340 }
2341 2052
2342 /* Need to make sure the object gets inactive eventually. */ 2053 ret = i915_add_request(ring, NULL, request);
2343 ret = i915_gem_object_flush_active(obj); 2054 if (ret) {
2344 if (ret) 2055 kfree(request);
2345 goto out; 2056 return ret;
2057 }
2346 2058
2347 if (obj->active) { 2059 seqno = request->seqno;
2348 seqno = obj->last_read_seqno;
2349 ring = obj->ring;
2350 } 2060 }
2351 2061
2352 if (seqno == 0) 2062 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2353 goto out; 2063 if (HAS_PCH_SPLIT(ring->dev))
2064 ier = I915_READ(DEIER) | I915_READ(GTIER);
2065 else
2066 ier = I915_READ(IER);
2067 if (!ier) {
2068 DRM_ERROR("something (likely vbetool) disabled "
2069 "interrupts, re-enabling\n");
2070 ring->dev->driver->irq_preinstall(ring->dev);
2071 ring->dev->driver->irq_postinstall(ring->dev);
2072 }
2354 2073
2355 /* Do this after OLR check to make sure we make forward progress polling 2074 trace_i915_gem_request_wait_begin(ring, seqno);
2356 * on this IOCTL with a 0 timeout (like busy ioctl)
2357 */
2358 if (!args->timeout_ns) {
2359 ret = -ETIME;
2360 goto out;
2361 }
2362 2075
2363 drm_gem_object_unreference(&obj->base); 2076 ring->waiting_seqno = seqno;
2364 mutex_unlock(&dev->struct_mutex); 2077 if (ring->irq_get(ring)) {
2078 if (dev_priv->mm.interruptible)
2079 ret = wait_event_interruptible(ring->irq_queue,
2080 i915_seqno_passed(ring->get_seqno(ring), seqno)
2081 || atomic_read(&dev_priv->mm.wedged));
2082 else
2083 wait_event(ring->irq_queue,
2084 i915_seqno_passed(ring->get_seqno(ring), seqno)
2085 || atomic_read(&dev_priv->mm.wedged));
2086
2087 ring->irq_put(ring);
2088 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2089 seqno) ||
2090 atomic_read(&dev_priv->mm.wedged), 3000))
2091 ret = -EBUSY;
2092 ring->waiting_seqno = 0;
2365 2093
2366 ret = __wait_seqno(ring, seqno, true, timeout); 2094 trace_i915_gem_request_wait_end(ring, seqno);
2367 if (timeout) {
2368 WARN_ON(!timespec_valid(timeout));
2369 args->timeout_ns = timespec_to_ns(timeout);
2370 } 2095 }
2371 return ret; 2096 if (atomic_read(&dev_priv->mm.wedged))
2097 ret = -EAGAIN;
2098
2099 if (ret && ret != -ERESTARTSYS)
2100 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2101 __func__, ret, seqno, ring->get_seqno(ring),
2102 dev_priv->next_seqno);
2103
2104 /* Directly dispatch request retiring. While we have the work queue
2105 * to handle this, the waiter on a request often wants an associated
2106 * buffer to have made it to the inactive list, and we would need
2107 * a separate wait queue to handle that.
2108 */
2109 if (ret == 0)
2110 i915_gem_retire_requests_ring(ring);
2372 2111
2373out:
2374 drm_gem_object_unreference(&obj->base);
2375 mutex_unlock(&dev->struct_mutex);
2376 return ret; 2112 return ret;
2377} 2113}
2378 2114
2379/** 2115/**
2380 * i915_gem_object_sync - sync an object to a ring. 2116 * Ensures that all rendering to the object has completed and the object is
2381 * 2117 * safe to unbind from the GTT or access from the CPU.
2382 * @obj: object which may be in use on another ring.
2383 * @to: ring we wish to use the object on. May be NULL.
2384 *
2385 * This code is meant to abstract object synchronization with the GPU.
2386 * Calling with NULL implies synchronizing the object with the CPU
2387 * rather than a particular GPU ring.
2388 *
2389 * Returns 0 if successful, else propagates up the lower layer error.
2390 */ 2118 */
2391int 2119int
2392i915_gem_object_sync(struct drm_i915_gem_object *obj, 2120i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2393 struct intel_ring_buffer *to)
2394{ 2121{
2395 struct intel_ring_buffer *from = obj->ring; 2122 int ret;
2396 u32 seqno;
2397 int ret, idx;
2398
2399 if (from == NULL || to == from)
2400 return 0;
2401
2402 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2403 return i915_gem_object_wait_rendering(obj, false);
2404
2405 idx = intel_ring_sync_index(from, to);
2406
2407 seqno = obj->last_read_seqno;
2408 if (seqno <= from->sync_seqno[idx])
2409 return 0;
2410 2123
2411 ret = i915_gem_check_olr(obj->ring, seqno); 2124 /* This function only exists to support waiting for existing rendering,
2412 if (ret) 2125 * not for emitting required flushes.
2413 return ret; 2126 */
2127 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2414 2128
2415 ret = to->sync_to(to, from, seqno); 2129 /* If there is rendering queued on the buffer being evicted, wait for
2416 if (!ret) 2130 * it.
2417 /* We use last_read_seqno because sync_to() 2131 */
2418 * might have just caused seqno wrap under 2132 if (obj->active) {
2419 * the radar. 2133 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2420 */ 2134 if (ret)
2421 from->sync_seqno[idx] = obj->last_read_seqno; 2135 return ret;
2136 }
2422 2137
2423 return ret; 2138 return 0;
2424} 2139}
2425 2140
2426static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2141static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
@@ -2453,19 +2168,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2453int 2168int
2454i915_gem_object_unbind(struct drm_i915_gem_object *obj) 2169i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2455{ 2170{
2456 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2457 int ret = 0; 2171 int ret = 0;
2458 2172
2459 if (obj->gtt_space == NULL) 2173 if (obj->gtt_space == NULL)
2460 return 0; 2174 return 0;
2461 2175
2462 if (obj->pin_count) 2176 if (obj->pin_count != 0) {
2463 return -EBUSY; 2177 DRM_ERROR("Attempting to unbind pinned buffer\n");
2464 2178 return -EINVAL;
2465 BUG_ON(obj->pages == NULL); 2179 }
2466 2180
2467 ret = i915_gem_object_finish_gpu(obj); 2181 ret = i915_gem_object_finish_gpu(obj);
2468 if (ret) 2182 if (ret == -ERESTARTSYS)
2469 return ret; 2183 return ret;
2470 /* Continue on if we fail due to EIO, the GPU is hung so we 2184 /* Continue on if we fail due to EIO, the GPU is hung so we
2471 * should be safe and we need to cleanup or else we might 2185 * should be safe and we need to cleanup or else we might
@@ -2474,23 +2188,34 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2474 2188
2475 i915_gem_object_finish_gtt(obj); 2189 i915_gem_object_finish_gtt(obj);
2476 2190
2191 /* Move the object to the CPU domain to ensure that
2192 * any possible CPU writes while it's not in the GTT
2193 * are flushed when we go to remap it.
2194 */
2195 if (ret == 0)
2196 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2197 if (ret == -ERESTARTSYS)
2198 return ret;
2199 if (ret) {
2200 /* In the event of a disaster, abandon all caches and
2201 * hope for the best.
2202 */
2203 i915_gem_clflush_object(obj);
2204 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2205 }
2206
2477 /* release the fence reg _after_ flushing */ 2207 /* release the fence reg _after_ flushing */
2478 ret = i915_gem_object_put_fence(obj); 2208 ret = i915_gem_object_put_fence(obj);
2479 if (ret) 2209 if (ret == -ERESTARTSYS)
2480 return ret; 2210 return ret;
2481 2211
2482 trace_i915_gem_object_unbind(obj); 2212 trace_i915_gem_object_unbind(obj);
2483 2213
2484 if (obj->has_global_gtt_mapping) 2214 i915_gem_gtt_unbind_object(obj);
2485 i915_gem_gtt_unbind_object(obj); 2215 i915_gem_object_put_pages_gtt(obj);
2486 if (obj->has_aliasing_ppgtt_mapping) {
2487 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2488 obj->has_aliasing_ppgtt_mapping = 0;
2489 }
2490 i915_gem_gtt_finish_object(obj);
2491 2216
2492 list_del(&obj->mm_list); 2217 list_del_init(&obj->gtt_list);
2493 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); 2218 list_del_init(&obj->mm_list);
2494 /* Avoid an unnecessary call to unbind on rebind. */ 2219 /* Avoid an unnecessary call to unbind on rebind. */
2495 obj->map_and_fenceable = true; 2220 obj->map_and_fenceable = true;
2496 2221
@@ -2498,22 +2223,66 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2498 obj->gtt_space = NULL; 2223 obj->gtt_space = NULL;
2499 obj->gtt_offset = 0; 2224 obj->gtt_offset = 0;
2500 2225
2226 if (i915_gem_object_is_purgeable(obj))
2227 i915_gem_object_truncate(obj);
2228
2229 return ret;
2230}
2231
2232int
2233i915_gem_flush_ring(struct intel_ring_buffer *ring,
2234 uint32_t invalidate_domains,
2235 uint32_t flush_domains)
2236{
2237 int ret;
2238
2239 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2240 return 0;
2241
2242 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2243
2244 ret = ring->flush(ring, invalidate_domains, flush_domains);
2245 if (ret)
2246 return ret;
2247
2248 if (flush_domains & I915_GEM_GPU_DOMAINS)
2249 i915_gem_process_flushing_list(ring, flush_domains);
2250
2501 return 0; 2251 return 0;
2502} 2252}
2503 2253
2504int i915_gpu_idle(struct drm_device *dev) 2254static int i915_ring_idle(struct intel_ring_buffer *ring)
2505{ 2255{
2506 drm_i915_private_t *dev_priv = dev->dev_private; 2256 int ret;
2507 struct intel_ring_buffer *ring;
2508 int ret, i;
2509 2257
2510 /* Flush everything onto the inactive list. */ 2258 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2511 for_each_ring(ring, dev_priv, i) { 2259 return 0;
2512 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); 2260
2261 if (!list_empty(&ring->gpu_write_list)) {
2262 ret = i915_gem_flush_ring(ring,
2263 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2513 if (ret) 2264 if (ret)
2514 return ret; 2265 return ret;
2266 }
2267
2268 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2269}
2270
2271int
2272i915_gpu_idle(struct drm_device *dev)
2273{
2274 drm_i915_private_t *dev_priv = dev->dev_private;
2275 bool lists_empty;
2276 int ret, i;
2515 2277
2516 ret = intel_ring_idle(ring); 2278 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2279 list_empty(&dev_priv->mm.active_list));
2280 if (lists_empty)
2281 return 0;
2282
2283 /* Flush everything onto the inactive list. */
2284 for (i = 0; i < I915_NUM_RINGS; i++) {
2285 ret = i915_ring_idle(&dev_priv->ring[i]);
2517 if (ret) 2286 if (ret)
2518 return ret; 2287 return ret;
2519 } 2288 }
@@ -2521,179 +2290,208 @@ int i915_gpu_idle(struct drm_device *dev)
2521 return 0; 2290 return 0;
2522} 2291}
2523 2292
2524static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, 2293static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2525 struct drm_i915_gem_object *obj) 2294 struct intel_ring_buffer *pipelined)
2526{ 2295{
2296 struct drm_device *dev = obj->base.dev;
2527 drm_i915_private_t *dev_priv = dev->dev_private; 2297 drm_i915_private_t *dev_priv = dev->dev_private;
2298 u32 size = obj->gtt_space->size;
2299 int regnum = obj->fence_reg;
2528 uint64_t val; 2300 uint64_t val;
2529 2301
2530 if (obj) { 2302 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2531 u32 size = obj->gtt_space->size; 2303 0xfffff000) << 32;
2304 val |= obj->gtt_offset & 0xfffff000;
2305 val |= (uint64_t)((obj->stride / 128) - 1) <<
2306 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2532 2307
2533 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2308 if (obj->tiling_mode == I915_TILING_Y)
2534 0xfffff000) << 32; 2309 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2535 val |= obj->gtt_offset & 0xfffff000; 2310 val |= I965_FENCE_REG_VALID;
2536 val |= (uint64_t)((obj->stride / 128) - 1) <<
2537 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2538 2311
2539 if (obj->tiling_mode == I915_TILING_Y) 2312 if (pipelined) {
2540 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2313 int ret = intel_ring_begin(pipelined, 6);
2541 val |= I965_FENCE_REG_VALID; 2314 if (ret)
2315 return ret;
2316
2317 intel_ring_emit(pipelined, MI_NOOP);
2318 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2319 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2320 intel_ring_emit(pipelined, (u32)val);
2321 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2322 intel_ring_emit(pipelined, (u32)(val >> 32));
2323 intel_ring_advance(pipelined);
2542 } else 2324 } else
2543 val = 0; 2325 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2544 2326
2545 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); 2327 return 0;
2546 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2547} 2328}
2548 2329
2549static void i965_write_fence_reg(struct drm_device *dev, int reg, 2330static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2550 struct drm_i915_gem_object *obj) 2331 struct intel_ring_buffer *pipelined)
2551{ 2332{
2333 struct drm_device *dev = obj->base.dev;
2552 drm_i915_private_t *dev_priv = dev->dev_private; 2334 drm_i915_private_t *dev_priv = dev->dev_private;
2335 u32 size = obj->gtt_space->size;
2336 int regnum = obj->fence_reg;
2553 uint64_t val; 2337 uint64_t val;
2554 2338
2555 if (obj) { 2339 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2556 u32 size = obj->gtt_space->size; 2340 0xfffff000) << 32;
2341 val |= obj->gtt_offset & 0xfffff000;
2342 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2343 if (obj->tiling_mode == I915_TILING_Y)
2344 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2345 val |= I965_FENCE_REG_VALID;
2557 2346
2558 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2347 if (pipelined) {
2559 0xfffff000) << 32; 2348 int ret = intel_ring_begin(pipelined, 6);
2560 val |= obj->gtt_offset & 0xfffff000; 2349 if (ret)
2561 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; 2350 return ret;
2562 if (obj->tiling_mode == I915_TILING_Y) 2351
2563 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2352 intel_ring_emit(pipelined, MI_NOOP);
2564 val |= I965_FENCE_REG_VALID; 2353 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2354 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2355 intel_ring_emit(pipelined, (u32)val);
2356 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2357 intel_ring_emit(pipelined, (u32)(val >> 32));
2358 intel_ring_advance(pipelined);
2565 } else 2359 } else
2566 val = 0; 2360 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2567 2361
2568 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); 2362 return 0;
2569 POSTING_READ(FENCE_REG_965_0 + reg * 8);
2570} 2363}
2571 2364
2572static void i915_write_fence_reg(struct drm_device *dev, int reg, 2365static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2573 struct drm_i915_gem_object *obj) 2366 struct intel_ring_buffer *pipelined)
2574{ 2367{
2368 struct drm_device *dev = obj->base.dev;
2575 drm_i915_private_t *dev_priv = dev->dev_private; 2369 drm_i915_private_t *dev_priv = dev->dev_private;
2576 u32 val; 2370 u32 size = obj->gtt_space->size;
2371 u32 fence_reg, val, pitch_val;
2372 int tile_width;
2373
2374 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2375 (size & -size) != size ||
2376 (obj->gtt_offset & (size - 1)),
2377 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2378 obj->gtt_offset, obj->map_and_fenceable, size))
2379 return -EINVAL;
2577 2380
2578 if (obj) { 2381 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2579 u32 size = obj->gtt_space->size; 2382 tile_width = 128;
2580 int pitch_val; 2383 else
2581 int tile_width; 2384 tile_width = 512;
2385
2386 /* Note: pitch better be a power of two tile widths */
2387 pitch_val = obj->stride / tile_width;
2388 pitch_val = ffs(pitch_val) - 1;
2389
2390 val = obj->gtt_offset;
2391 if (obj->tiling_mode == I915_TILING_Y)
2392 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2393 val |= I915_FENCE_SIZE_BITS(size);
2394 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2395 val |= I830_FENCE_REG_VALID;
2396
2397 fence_reg = obj->fence_reg;
2398 if (fence_reg < 8)
2399 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2400 else
2401 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2582 2402
2583 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || 2403 if (pipelined) {
2584 (size & -size) != size || 2404 int ret = intel_ring_begin(pipelined, 4);
2585 (obj->gtt_offset & (size - 1)), 2405 if (ret)
2586 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 2406 return ret;
2587 obj->gtt_offset, obj->map_and_fenceable, size);
2588 2407
2589 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2408 intel_ring_emit(pipelined, MI_NOOP);
2590 tile_width = 128; 2409 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2591 else 2410 intel_ring_emit(pipelined, fence_reg);
2592 tile_width = 512; 2411 intel_ring_emit(pipelined, val);
2593 2412 intel_ring_advance(pipelined);
2594 /* Note: pitch better be a power of two tile widths */
2595 pitch_val = obj->stride / tile_width;
2596 pitch_val = ffs(pitch_val) - 1;
2597
2598 val = obj->gtt_offset;
2599 if (obj->tiling_mode == I915_TILING_Y)
2600 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2601 val |= I915_FENCE_SIZE_BITS(size);
2602 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2603 val |= I830_FENCE_REG_VALID;
2604 } else 2413 } else
2605 val = 0; 2414 I915_WRITE(fence_reg, val);
2606 2415
2607 if (reg < 8) 2416 return 0;
2608 reg = FENCE_REG_830_0 + reg * 4;
2609 else
2610 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2611
2612 I915_WRITE(reg, val);
2613 POSTING_READ(reg);
2614} 2417}
2615 2418
2616static void i830_write_fence_reg(struct drm_device *dev, int reg, 2419static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2617 struct drm_i915_gem_object *obj) 2420 struct intel_ring_buffer *pipelined)
2618{ 2421{
2422 struct drm_device *dev = obj->base.dev;
2619 drm_i915_private_t *dev_priv = dev->dev_private; 2423 drm_i915_private_t *dev_priv = dev->dev_private;
2424 u32 size = obj->gtt_space->size;
2425 int regnum = obj->fence_reg;
2620 uint32_t val; 2426 uint32_t val;
2427 uint32_t pitch_val;
2621 2428
2622 if (obj) { 2429 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2623 u32 size = obj->gtt_space->size; 2430 (size & -size) != size ||
2624 uint32_t pitch_val; 2431 (obj->gtt_offset & (size - 1)),
2625 2432 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2626 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2433 obj->gtt_offset, size))
2627 (size & -size) != size || 2434 return -EINVAL;
2628 (obj->gtt_offset & (size - 1)),
2629 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2630 obj->gtt_offset, size);
2631
2632 pitch_val = obj->stride / 128;
2633 pitch_val = ffs(pitch_val) - 1;
2634
2635 val = obj->gtt_offset;
2636 if (obj->tiling_mode == I915_TILING_Y)
2637 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2638 val |= I830_FENCE_SIZE_BITS(size);
2639 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2640 val |= I830_FENCE_REG_VALID;
2641 } else
2642 val = 0;
2643 2435
2644 I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 2436 pitch_val = obj->stride / 128;
2645 POSTING_READ(FENCE_REG_830_0 + reg * 4); 2437 pitch_val = ffs(pitch_val) - 1;
2646}
2647 2438
2648static void i915_gem_write_fence(struct drm_device *dev, int reg, 2439 val = obj->gtt_offset;
2649 struct drm_i915_gem_object *obj) 2440 if (obj->tiling_mode == I915_TILING_Y)
2650{ 2441 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2651 switch (INTEL_INFO(dev)->gen) { 2442 val |= I830_FENCE_SIZE_BITS(size);
2652 case 7: 2443 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2653 case 6: sandybridge_write_fence_reg(dev, reg, obj); break; 2444 val |= I830_FENCE_REG_VALID;
2654 case 5: 2445
2655 case 4: i965_write_fence_reg(dev, reg, obj); break; 2446 if (pipelined) {
2656 case 3: i915_write_fence_reg(dev, reg, obj); break; 2447 int ret = intel_ring_begin(pipelined, 4);
2657 case 2: i830_write_fence_reg(dev, reg, obj); break; 2448 if (ret)
2658 default: break; 2449 return ret;
2659 } 2450
2451 intel_ring_emit(pipelined, MI_NOOP);
2452 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2453 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2454 intel_ring_emit(pipelined, val);
2455 intel_ring_advance(pipelined);
2456 } else
2457 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2458
2459 return 0;
2660} 2460}
2661 2461
2662static inline int fence_number(struct drm_i915_private *dev_priv, 2462static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2663 struct drm_i915_fence_reg *fence)
2664{ 2463{
2665 return fence - dev_priv->fence_regs; 2464 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2666} 2465}
2667 2466
2668static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 2467static int
2669 struct drm_i915_fence_reg *fence, 2468i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2670 bool enable) 2469 struct intel_ring_buffer *pipelined)
2671{ 2470{
2672 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2471 int ret;
2673 int reg = fence_number(dev_priv, fence);
2674 2472
2675 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 2473 if (obj->fenced_gpu_access) {
2474 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2475 ret = i915_gem_flush_ring(obj->last_fenced_ring,
2476 0, obj->base.write_domain);
2477 if (ret)
2478 return ret;
2479 }
2676 2480
2677 if (enable) { 2481 obj->fenced_gpu_access = false;
2678 obj->fence_reg = reg;
2679 fence->obj = obj;
2680 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2681 } else {
2682 obj->fence_reg = I915_FENCE_REG_NONE;
2683 fence->obj = NULL;
2684 list_del_init(&fence->lru_list);
2685 } 2482 }
2686}
2687 2483
2688static int 2484 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2689i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) 2485 if (!ring_passed_seqno(obj->last_fenced_ring,
2690{ 2486 obj->last_fenced_seqno)) {
2691 if (obj->last_fenced_seqno) { 2487 ret = i915_wait_request(obj->last_fenced_ring,
2692 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); 2488 obj->last_fenced_seqno);
2693 if (ret) 2489 if (ret)
2694 return ret; 2490 return ret;
2491 }
2695 2492
2696 obj->last_fenced_seqno = 0; 2493 obj->last_fenced_seqno = 0;
2494 obj->last_fenced_ring = NULL;
2697 } 2495 }
2698 2496
2699 /* Ensure that all CPU reads are completed before installing a fence 2497 /* Ensure that all CPU reads are completed before installing a fence
@@ -2702,36 +2500,38 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2702 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) 2500 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2703 mb(); 2501 mb();
2704 2502
2705 obj->fenced_gpu_access = false;
2706 return 0; 2503 return 0;
2707} 2504}
2708 2505
2709int 2506int
2710i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 2507i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2711{ 2508{
2712 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2713 int ret; 2509 int ret;
2714 2510
2715 ret = i915_gem_object_flush_fence(obj); 2511 if (obj->tiling_mode)
2512 i915_gem_release_mmap(obj);
2513
2514 ret = i915_gem_object_flush_fence(obj, NULL);
2716 if (ret) 2515 if (ret)
2717 return ret; 2516 return ret;
2718 2517
2719 if (obj->fence_reg == I915_FENCE_REG_NONE) 2518 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2720 return 0; 2519 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2520 i915_gem_clear_fence_reg(obj->base.dev,
2521 &dev_priv->fence_regs[obj->fence_reg]);
2721 2522
2722 i915_gem_object_update_fence(obj, 2523 obj->fence_reg = I915_FENCE_REG_NONE;
2723 &dev_priv->fence_regs[obj->fence_reg], 2524 }
2724 false);
2725 i915_gem_object_fence_lost(obj);
2726 2525
2727 return 0; 2526 return 0;
2728} 2527}
2729 2528
2730static struct drm_i915_fence_reg * 2529static struct drm_i915_fence_reg *
2731i915_find_fence_reg(struct drm_device *dev) 2530i915_find_fence_reg(struct drm_device *dev,
2531 struct intel_ring_buffer *pipelined)
2732{ 2532{
2733 struct drm_i915_private *dev_priv = dev->dev_private; 2533 struct drm_i915_private *dev_priv = dev->dev_private;
2734 struct drm_i915_fence_reg *reg, *avail; 2534 struct drm_i915_fence_reg *reg, *first, *avail;
2735 int i; 2535 int i;
2736 2536
2737 /* First try to find a free reg */ 2537 /* First try to find a free reg */
@@ -2741,7 +2541,7 @@ i915_find_fence_reg(struct drm_device *dev)
2741 if (!reg->obj) 2541 if (!reg->obj)
2742 return reg; 2542 return reg;
2743 2543
2744 if (!reg->pin_count) 2544 if (!reg->obj->pin_count)
2745 avail = reg; 2545 avail = reg;
2746 } 2546 }
2747 2547
@@ -2749,147 +2549,202 @@ i915_find_fence_reg(struct drm_device *dev)
2749 return NULL; 2549 return NULL;
2750 2550
2751 /* None available, try to steal one or wait for a user to finish */ 2551 /* None available, try to steal one or wait for a user to finish */
2552 avail = first = NULL;
2752 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 2553 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2753 if (reg->pin_count) 2554 if (reg->obj->pin_count)
2754 continue; 2555 continue;
2755 2556
2756 return reg; 2557 if (first == NULL)
2558 first = reg;
2559
2560 if (!pipelined ||
2561 !reg->obj->last_fenced_ring ||
2562 reg->obj->last_fenced_ring == pipelined) {
2563 avail = reg;
2564 break;
2565 }
2757 } 2566 }
2758 2567
2759 return NULL; 2568 if (avail == NULL)
2569 avail = first;
2570
2571 return avail;
2760} 2572}
2761 2573
2762/** 2574/**
2763 * i915_gem_object_get_fence - set up fencing for an object 2575 * i915_gem_object_get_fence - set up a fence reg for an object
2764 * @obj: object to map through a fence reg 2576 * @obj: object to map through a fence reg
2577 * @pipelined: ring on which to queue the change, or NULL for CPU access
2578 * @interruptible: must we wait uninterruptibly for the register to retire?
2765 * 2579 *
2766 * When mapping objects through the GTT, userspace wants to be able to write 2580 * When mapping objects through the GTT, userspace wants to be able to write
2767 * to them without having to worry about swizzling if the object is tiled. 2581 * to them without having to worry about swizzling if the object is tiled.
2582 *
2768 * This function walks the fence regs looking for a free one for @obj, 2583 * This function walks the fence regs looking for a free one for @obj,
2769 * stealing one if it can't find any. 2584 * stealing one if it can't find any.
2770 * 2585 *
2771 * It then sets up the reg based on the object's properties: address, pitch 2586 * It then sets up the reg based on the object's properties: address, pitch
2772 * and tiling format. 2587 * and tiling format.
2773 *
2774 * For an untiled surface, this removes any existing fence.
2775 */ 2588 */
2776int 2589int
2777i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 2590i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2591 struct intel_ring_buffer *pipelined)
2778{ 2592{
2779 struct drm_device *dev = obj->base.dev; 2593 struct drm_device *dev = obj->base.dev;
2780 struct drm_i915_private *dev_priv = dev->dev_private; 2594 struct drm_i915_private *dev_priv = dev->dev_private;
2781 bool enable = obj->tiling_mode != I915_TILING_NONE;
2782 struct drm_i915_fence_reg *reg; 2595 struct drm_i915_fence_reg *reg;
2783 int ret; 2596 int ret;
2784 2597
2785 /* Have we updated the tiling parameters upon the object and so 2598 /* XXX disable pipelining. There are bugs. Shocking. */
2786 * will need to serialise the write to the associated fence register? 2599 pipelined = NULL;
2787 */
2788 if (obj->fence_dirty) {
2789 ret = i915_gem_object_flush_fence(obj);
2790 if (ret)
2791 return ret;
2792 }
2793 2600
2794 /* Just update our place in the LRU if our fence is getting reused. */ 2601 /* Just update our place in the LRU if our fence is getting reused. */
2795 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2602 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2796 reg = &dev_priv->fence_regs[obj->fence_reg]; 2603 reg = &dev_priv->fence_regs[obj->fence_reg];
2797 if (!obj->fence_dirty) { 2604 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2798 list_move_tail(&reg->lru_list,
2799 &dev_priv->mm.fence_list);
2800 return 0;
2801 }
2802 } else if (enable) {
2803 reg = i915_find_fence_reg(dev);
2804 if (reg == NULL)
2805 return -EDEADLK;
2806
2807 if (reg->obj) {
2808 struct drm_i915_gem_object *old = reg->obj;
2809 2605
2810 ret = i915_gem_object_flush_fence(old); 2606 if (obj->tiling_changed) {
2607 ret = i915_gem_object_flush_fence(obj, pipelined);
2811 if (ret) 2608 if (ret)
2812 return ret; 2609 return ret;
2813 2610
2814 i915_gem_object_fence_lost(old); 2611 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2612 pipelined = NULL;
2613
2614 if (pipelined) {
2615 reg->setup_seqno =
2616 i915_gem_next_request_seqno(pipelined);
2617 obj->last_fenced_seqno = reg->setup_seqno;
2618 obj->last_fenced_ring = pipelined;
2619 }
2620
2621 goto update;
2815 } 2622 }
2816 } else 2623
2624 if (!pipelined) {
2625 if (reg->setup_seqno) {
2626 if (!ring_passed_seqno(obj->last_fenced_ring,
2627 reg->setup_seqno)) {
2628 ret = i915_wait_request(obj->last_fenced_ring,
2629 reg->setup_seqno);
2630 if (ret)
2631 return ret;
2632 }
2633
2634 reg->setup_seqno = 0;
2635 }
2636 } else if (obj->last_fenced_ring &&
2637 obj->last_fenced_ring != pipelined) {
2638 ret = i915_gem_object_flush_fence(obj, pipelined);
2639 if (ret)
2640 return ret;
2641 }
2642
2817 return 0; 2643 return 0;
2644 }
2818 2645
2819 i915_gem_object_update_fence(obj, reg, enable); 2646 reg = i915_find_fence_reg(dev, pipelined);
2820 obj->fence_dirty = false; 2647 if (reg == NULL)
2648 return -ENOSPC;
2821 2649
2822 return 0; 2650 ret = i915_gem_object_flush_fence(obj, pipelined);
2823} 2651 if (ret)
2652 return ret;
2824 2653
2825static bool i915_gem_valid_gtt_space(struct drm_device *dev, 2654 if (reg->obj) {
2826 struct drm_mm_node *gtt_space, 2655 struct drm_i915_gem_object *old = reg->obj;
2827 unsigned long cache_level)
2828{
2829 struct drm_mm_node *other;
2830 2656
2831 /* On non-LLC machines we have to be careful when putting differing 2657 drm_gem_object_reference(&old->base);
2832 * types of snoopable memory together to avoid the prefetcher 2658
2833 * crossing memory domains and dieing. 2659 if (old->tiling_mode)
2834 */ 2660 i915_gem_release_mmap(old);
2835 if (HAS_LLC(dev)) 2661
2836 return true; 2662 ret = i915_gem_object_flush_fence(old, pipelined);
2663 if (ret) {
2664 drm_gem_object_unreference(&old->base);
2665 return ret;
2666 }
2667
2668 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2669 pipelined = NULL;
2837 2670
2838 if (gtt_space == NULL) 2671 old->fence_reg = I915_FENCE_REG_NONE;
2839 return true; 2672 old->last_fenced_ring = pipelined;
2673 old->last_fenced_seqno =
2674 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2840 2675
2841 if (list_empty(&gtt_space->node_list)) 2676 drm_gem_object_unreference(&old->base);
2842 return true; 2677 } else if (obj->last_fenced_seqno == 0)
2678 pipelined = NULL;
2843 2679
2844 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); 2680 reg->obj = obj;
2845 if (other->allocated && !other->hole_follows && other->color != cache_level) 2681 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2846 return false; 2682 obj->fence_reg = reg - dev_priv->fence_regs;
2683 obj->last_fenced_ring = pipelined;
2847 2684
2848 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); 2685 reg->setup_seqno =
2849 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) 2686 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2850 return false; 2687 obj->last_fenced_seqno = reg->setup_seqno;
2851 2688
2852 return true; 2689update:
2690 obj->tiling_changed = false;
2691 switch (INTEL_INFO(dev)->gen) {
2692 case 7:
2693 case 6:
2694 ret = sandybridge_write_fence_reg(obj, pipelined);
2695 break;
2696 case 5:
2697 case 4:
2698 ret = i965_write_fence_reg(obj, pipelined);
2699 break;
2700 case 3:
2701 ret = i915_write_fence_reg(obj, pipelined);
2702 break;
2703 case 2:
2704 ret = i830_write_fence_reg(obj, pipelined);
2705 break;
2706 }
2707
2708 return ret;
2853} 2709}
2854 2710
2855static void i915_gem_verify_gtt(struct drm_device *dev) 2711/**
2712 * i915_gem_clear_fence_reg - clear out fence register info
2713 * @obj: object to clear
2714 *
2715 * Zeroes out the fence register itself and clears out the associated
2716 * data structures in dev_priv and obj.
2717 */
2718static void
2719i915_gem_clear_fence_reg(struct drm_device *dev,
2720 struct drm_i915_fence_reg *reg)
2856{ 2721{
2857#if WATCH_GTT 2722 drm_i915_private_t *dev_priv = dev->dev_private;
2858 struct drm_i915_private *dev_priv = dev->dev_private; 2723 uint32_t fence_reg = reg - dev_priv->fence_regs;
2859 struct drm_i915_gem_object *obj;
2860 int err = 0;
2861
2862 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2863 if (obj->gtt_space == NULL) {
2864 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2865 err++;
2866 continue;
2867 }
2868 2724
2869 if (obj->cache_level != obj->gtt_space->color) { 2725 switch (INTEL_INFO(dev)->gen) {
2870 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", 2726 case 7:
2871 obj->gtt_space->start, 2727 case 6:
2872 obj->gtt_space->start + obj->gtt_space->size, 2728 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2873 obj->cache_level, 2729 break;
2874 obj->gtt_space->color); 2730 case 5:
2875 err++; 2731 case 4:
2876 continue; 2732 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2877 } 2733 break;
2734 case 3:
2735 if (fence_reg >= 8)
2736 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2737 else
2738 case 2:
2739 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2878 2740
2879 if (!i915_gem_valid_gtt_space(dev, 2741 I915_WRITE(fence_reg, 0);
2880 obj->gtt_space, 2742 break;
2881 obj->cache_level)) {
2882 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2883 obj->gtt_space->start,
2884 obj->gtt_space->start + obj->gtt_space->size,
2885 obj->cache_level);
2886 err++;
2887 continue;
2888 }
2889 } 2743 }
2890 2744
2891 WARN_ON(err); 2745 list_del_init(&reg->lru_list);
2892#endif 2746 reg->obj = NULL;
2747 reg->setup_seqno = 0;
2893} 2748}
2894 2749
2895/** 2750/**
@@ -2898,12 +2753,12 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
2898static int 2753static int
2899i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 2754i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2900 unsigned alignment, 2755 unsigned alignment,
2901 bool map_and_fenceable, 2756 bool map_and_fenceable)
2902 bool nonblocking)
2903{ 2757{
2904 struct drm_device *dev = obj->base.dev; 2758 struct drm_device *dev = obj->base.dev;
2905 drm_i915_private_t *dev_priv = dev->dev_private; 2759 drm_i915_private_t *dev_priv = dev->dev_private;
2906 struct drm_mm_node *node; 2760 struct drm_mm_node *free_space;
2761 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2907 u32 size, fence_size, fence_alignment, unfenced_alignment; 2762 u32 size, fence_size, fence_alignment, unfenced_alignment;
2908 bool mappable, fenceable; 2763 bool mappable, fenceable;
2909 int ret; 2764 int ret;
@@ -2943,69 +2798,98 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2943 return -E2BIG; 2798 return -E2BIG;
2944 } 2799 }
2945 2800
2946 ret = i915_gem_object_get_pages(obj);
2947 if (ret)
2948 return ret;
2949
2950 i915_gem_object_pin_pages(obj);
2951
2952 node = kzalloc(sizeof(*node), GFP_KERNEL);
2953 if (node == NULL) {
2954 i915_gem_object_unpin_pages(obj);
2955 return -ENOMEM;
2956 }
2957
2958 search_free: 2801 search_free:
2959 if (map_and_fenceable) 2802 if (map_and_fenceable)
2960 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 2803 free_space =
2961 size, alignment, obj->cache_level, 2804 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2962 0, dev_priv->mm.gtt_mappable_end); 2805 size, alignment, 0,
2806 dev_priv->mm.gtt_mappable_end,
2807 0);
2963 else 2808 else
2964 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, 2809 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2965 size, alignment, obj->cache_level); 2810 size, alignment, 0);
2966 if (ret) { 2811
2812 if (free_space != NULL) {
2813 if (map_and_fenceable)
2814 obj->gtt_space =
2815 drm_mm_get_block_range_generic(free_space,
2816 size, alignment, 0,
2817 dev_priv->mm.gtt_mappable_end,
2818 0);
2819 else
2820 obj->gtt_space =
2821 drm_mm_get_block(free_space, size, alignment);
2822 }
2823 if (obj->gtt_space == NULL) {
2824 /* If the gtt is empty and we're still having trouble
2825 * fitting our object in, we're out of memory.
2826 */
2967 ret = i915_gem_evict_something(dev, size, alignment, 2827 ret = i915_gem_evict_something(dev, size, alignment,
2968 obj->cache_level, 2828 map_and_fenceable);
2969 map_and_fenceable, 2829 if (ret)
2970 nonblocking); 2830 return ret;
2971 if (ret == 0) 2831
2832 goto search_free;
2833 }
2834
2835 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2836 if (ret) {
2837 drm_mm_put_block(obj->gtt_space);
2838 obj->gtt_space = NULL;
2839
2840 if (ret == -ENOMEM) {
2841 /* first try to reclaim some memory by clearing the GTT */
2842 ret = i915_gem_evict_everything(dev, false);
2843 if (ret) {
2844 /* now try to shrink everyone else */
2845 if (gfpmask) {
2846 gfpmask = 0;
2847 goto search_free;
2848 }
2849
2850 return -ENOMEM;
2851 }
2852
2972 goto search_free; 2853 goto search_free;
2854 }
2973 2855
2974 i915_gem_object_unpin_pages(obj);
2975 kfree(node);
2976 return ret; 2856 return ret;
2977 } 2857 }
2978 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
2979 i915_gem_object_unpin_pages(obj);
2980 drm_mm_put_block(node);
2981 return -EINVAL;
2982 }
2983 2858
2984 ret = i915_gem_gtt_prepare_object(obj); 2859 ret = i915_gem_gtt_bind_object(obj);
2985 if (ret) { 2860 if (ret) {
2986 i915_gem_object_unpin_pages(obj); 2861 i915_gem_object_put_pages_gtt(obj);
2987 drm_mm_put_block(node); 2862 drm_mm_put_block(obj->gtt_space);
2988 return ret; 2863 obj->gtt_space = NULL;
2864
2865 if (i915_gem_evict_everything(dev, false))
2866 return ret;
2867
2868 goto search_free;
2989 } 2869 }
2990 2870
2991 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 2871 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2992 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2872 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2993 2873
2994 obj->gtt_space = node; 2874 /* Assert that the object is not currently in any GPU domain. As it
2995 obj->gtt_offset = node->start; 2875 * wasn't in the GTT, there shouldn't be any way it could have been in
2876 * a GPU cache
2877 */
2878 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2879 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2880
2881 obj->gtt_offset = obj->gtt_space->start;
2996 2882
2997 fenceable = 2883 fenceable =
2998 node->size == fence_size && 2884 obj->gtt_space->size == fence_size &&
2999 (node->start & (fence_alignment - 1)) == 0; 2885 (obj->gtt_space->start & (fence_alignment -1)) == 0;
3000 2886
3001 mappable = 2887 mappable =
3002 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 2888 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
3003 2889
3004 obj->map_and_fenceable = mappable && fenceable; 2890 obj->map_and_fenceable = mappable && fenceable;
3005 2891
3006 i915_gem_object_unpin_pages(obj);
3007 trace_i915_gem_object_bind(obj, map_and_fenceable); 2892 trace_i915_gem_object_bind(obj, map_and_fenceable);
3008 i915_gem_verify_gtt(dev);
3009 return 0; 2893 return 0;
3010} 2894}
3011 2895
@@ -3032,7 +2916,18 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3032 2916
3033 trace_i915_gem_object_clflush(obj); 2917 trace_i915_gem_object_clflush(obj);
3034 2918
3035 drm_clflush_sg(obj->pages); 2919 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2920}
2921
2922/** Flushes any GPU write domain for the object if it's dirty. */
2923static int
2924i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2925{
2926 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2927 return 0;
2928
2929 /* Queue the GPU write cache flushing we need. */
2930 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3036} 2931}
3037 2932
3038/** Flushes the GTT write domain for the object if it's dirty. */ 2933/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3072,7 +2967,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3072 return; 2967 return;
3073 2968
3074 i915_gem_clflush_object(obj); 2969 i915_gem_clflush_object(obj);
3075 i915_gem_chipset_flush(obj->base.dev); 2970 intel_gtt_chipset_flush();
3076 old_write_domain = obj->base.write_domain; 2971 old_write_domain = obj->base.write_domain;
3077 obj->base.write_domain = 0; 2972 obj->base.write_domain = 0;
3078 2973
@@ -3090,7 +2985,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3090int 2985int
3091i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 2986i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3092{ 2987{
3093 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3094 uint32_t old_write_domain, old_read_domains; 2988 uint32_t old_write_domain, old_read_domains;
3095 int ret; 2989 int ret;
3096 2990
@@ -3101,10 +2995,16 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3101 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 2995 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3102 return 0; 2996 return 0;
3103 2997
3104 ret = i915_gem_object_wait_rendering(obj, !write); 2998 ret = i915_gem_object_flush_gpu_write_domain(obj);
3105 if (ret) 2999 if (ret)
3106 return ret; 3000 return ret;
3107 3001
3002 if (obj->pending_gpu_write || write) {
3003 ret = i915_gem_object_wait_rendering(obj);
3004 if (ret)
3005 return ret;
3006 }
3007
3108 i915_gem_object_flush_cpu_write_domain(obj); 3008 i915_gem_object_flush_cpu_write_domain(obj);
3109 3009
3110 old_write_domain = obj->base.write_domain; 3010 old_write_domain = obj->base.write_domain;
@@ -3125,18 +3025,12 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3125 old_read_domains, 3025 old_read_domains,
3126 old_write_domain); 3026 old_write_domain);
3127 3027
3128 /* And bump the LRU for this access */
3129 if (i915_gem_object_is_inactive(obj))
3130 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3131
3132 return 0; 3028 return 0;
3133} 3029}
3134 3030
3135int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3031int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3136 enum i915_cache_level cache_level) 3032 enum i915_cache_level cache_level)
3137{ 3033{
3138 struct drm_device *dev = obj->base.dev;
3139 drm_i915_private_t *dev_priv = dev->dev_private;
3140 int ret; 3034 int ret;
3141 3035
3142 if (obj->cache_level == cache_level) 3036 if (obj->cache_level == cache_level)
@@ -3147,12 +3041,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3147 return -EBUSY; 3041 return -EBUSY;
3148 } 3042 }
3149 3043
3150 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3151 ret = i915_gem_object_unbind(obj);
3152 if (ret)
3153 return ret;
3154 }
3155
3156 if (obj->gtt_space) { 3044 if (obj->gtt_space) {
3157 ret = i915_gem_object_finish_gpu(obj); 3045 ret = i915_gem_object_finish_gpu(obj);
3158 if (ret) 3046 if (ret)
@@ -3164,19 +3052,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3164 * registers with snooped memory, so relinquish any fences 3052 * registers with snooped memory, so relinquish any fences
3165 * currently pointing to our region in the aperture. 3053 * currently pointing to our region in the aperture.
3166 */ 3054 */
3167 if (INTEL_INFO(dev)->gen < 6) { 3055 if (INTEL_INFO(obj->base.dev)->gen < 6) {
3168 ret = i915_gem_object_put_fence(obj); 3056 ret = i915_gem_object_put_fence(obj);
3169 if (ret) 3057 if (ret)
3170 return ret; 3058 return ret;
3171 } 3059 }
3172 3060
3173 if (obj->has_global_gtt_mapping) 3061 i915_gem_gtt_rebind_object(obj, cache_level);
3174 i915_gem_gtt_bind_object(obj, cache_level);
3175 if (obj->has_aliasing_ppgtt_mapping)
3176 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3177 obj, cache_level);
3178
3179 obj->gtt_space->color = cache_level;
3180 } 3062 }
3181 3063
3182 if (cache_level == I915_CACHE_NONE) { 3064 if (cache_level == I915_CACHE_NONE) {
@@ -3203,76 +3085,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3203 } 3085 }
3204 3086
3205 obj->cache_level = cache_level; 3087 obj->cache_level = cache_level;
3206 i915_gem_verify_gtt(dev);
3207 return 0; 3088 return 0;
3208} 3089}
3209 3090
3210int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3211 struct drm_file *file)
3212{
3213 struct drm_i915_gem_caching *args = data;
3214 struct drm_i915_gem_object *obj;
3215 int ret;
3216
3217 ret = i915_mutex_lock_interruptible(dev);
3218 if (ret)
3219 return ret;
3220
3221 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3222 if (&obj->base == NULL) {
3223 ret = -ENOENT;
3224 goto unlock;
3225 }
3226
3227 args->caching = obj->cache_level != I915_CACHE_NONE;
3228
3229 drm_gem_object_unreference(&obj->base);
3230unlock:
3231 mutex_unlock(&dev->struct_mutex);
3232 return ret;
3233}
3234
3235int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3236 struct drm_file *file)
3237{
3238 struct drm_i915_gem_caching *args = data;
3239 struct drm_i915_gem_object *obj;
3240 enum i915_cache_level level;
3241 int ret;
3242
3243 switch (args->caching) {
3244 case I915_CACHING_NONE:
3245 level = I915_CACHE_NONE;
3246 break;
3247 case I915_CACHING_CACHED:
3248 level = I915_CACHE_LLC;
3249 break;
3250 default:
3251 return -EINVAL;
3252 }
3253
3254 ret = i915_mutex_lock_interruptible(dev);
3255 if (ret)
3256 return ret;
3257
3258 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3259 if (&obj->base == NULL) {
3260 ret = -ENOENT;
3261 goto unlock;
3262 }
3263
3264 ret = i915_gem_object_set_cache_level(obj, level);
3265
3266 drm_gem_object_unreference(&obj->base);
3267unlock:
3268 mutex_unlock(&dev->struct_mutex);
3269 return ret;
3270}
3271
3272/* 3091/*
3273 * Prepare buffer for display plane (scanout, cursors, etc). 3092 * Prepare buffer for display plane (scanout, cursors, etc).
3274 * Can be called from an uninterruptible phase (modesetting) and allows 3093 * Can be called from an uninterruptible phase (modesetting) and allows
3275 * any flushes to be pipelined (for pageflips). 3094 * any flushes to be pipelined (for pageflips).
3095 *
3096 * For the display plane, we want to be in the GTT but out of any write
3097 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3098 * ability to pipeline the waits, pinning and any additional subtleties
3099 * that may differentiate the display plane from ordinary buffers.
3276 */ 3100 */
3277int 3101int
3278i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3102i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3282,9 +3106,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3282 u32 old_read_domains, old_write_domain; 3106 u32 old_read_domains, old_write_domain;
3283 int ret; 3107 int ret;
3284 3108
3109 ret = i915_gem_object_flush_gpu_write_domain(obj);
3110 if (ret)
3111 return ret;
3112
3285 if (pipelined != obj->ring) { 3113 if (pipelined != obj->ring) {
3286 ret = i915_gem_object_sync(obj, pipelined); 3114 ret = i915_gem_object_wait_rendering(obj);
3287 if (ret) 3115 if (ret == -ERESTARTSYS)
3288 return ret; 3116 return ret;
3289 } 3117 }
3290 3118
@@ -3305,7 +3133,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3305 * (e.g. libkms for the bootup splash), we have to ensure that we 3133 * (e.g. libkms for the bootup splash), we have to ensure that we
3306 * always use map_and_fenceable for all scanout buffers. 3134 * always use map_and_fenceable for all scanout buffers.
3307 */ 3135 */
3308 ret = i915_gem_object_pin(obj, alignment, true, false); 3136 ret = i915_gem_object_pin(obj, alignment, true);
3309 if (ret) 3137 if (ret)
3310 return ret; 3138 return ret;
3311 3139
@@ -3317,7 +3145,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3317 /* It should now be out of any other write domains, and we can update 3145 /* It should now be out of any other write domains, and we can update
3318 * the domain values for our changes. 3146 * the domain values for our changes.
3319 */ 3147 */
3320 obj->base.write_domain = 0; 3148 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3321 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3149 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3322 3150
3323 trace_i915_gem_object_change_domain(obj, 3151 trace_i915_gem_object_change_domain(obj,
@@ -3335,13 +3163,16 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3335 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 3163 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3336 return 0; 3164 return 0;
3337 3165
3338 ret = i915_gem_object_wait_rendering(obj, false); 3166 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3339 if (ret) 3167 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3340 return ret; 3168 if (ret)
3169 return ret;
3170 }
3341 3171
3342 /* Ensure that we invalidate the GPU's caches and TLBs. */ 3172 /* Ensure that we invalidate the GPU's caches and TLBs. */
3343 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 3173 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3344 return 0; 3174
3175 return i915_gem_object_wait_rendering(obj);
3345} 3176}
3346 3177
3347/** 3178/**
@@ -3350,7 +3181,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3350 * This function returns when the move is complete, including waiting on 3181 * This function returns when the move is complete, including waiting on
3351 * flushes to occur. 3182 * flushes to occur.
3352 */ 3183 */
3353int 3184static int
3354i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3185i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3355{ 3186{
3356 uint32_t old_write_domain, old_read_domains; 3187 uint32_t old_write_domain, old_read_domains;
@@ -3359,12 +3190,21 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3359 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3190 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3360 return 0; 3191 return 0;
3361 3192
3362 ret = i915_gem_object_wait_rendering(obj, !write); 3193 ret = i915_gem_object_flush_gpu_write_domain(obj);
3194 if (ret)
3195 return ret;
3196
3197 ret = i915_gem_object_wait_rendering(obj);
3363 if (ret) 3198 if (ret)
3364 return ret; 3199 return ret;
3365 3200
3366 i915_gem_object_flush_gtt_write_domain(obj); 3201 i915_gem_object_flush_gtt_write_domain(obj);
3367 3202
3203 /* If we have a partially-valid cache of the object in the CPU,
3204 * finish invalidating it and free the per-page flags.
3205 */
3206 i915_gem_object_set_to_full_cpu_read_domain(obj);
3207
3368 old_write_domain = obj->base.write_domain; 3208 old_write_domain = obj->base.write_domain;
3369 old_read_domains = obj->base.read_domains; 3209 old_read_domains = obj->base.read_domains;
3370 3210
@@ -3395,6 +3235,113 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3395 return 0; 3235 return 0;
3396} 3236}
3397 3237
3238/**
3239 * Moves the object from a partially CPU read to a full one.
3240 *
3241 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3242 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3243 */
3244static void
3245i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3246{
3247 if (!obj->page_cpu_valid)
3248 return;
3249
3250 /* If we're partially in the CPU read domain, finish moving it in.
3251 */
3252 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3253 int i;
3254
3255 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3256 if (obj->page_cpu_valid[i])
3257 continue;
3258 drm_clflush_pages(obj->pages + i, 1);
3259 }
3260 }
3261
3262 /* Free the page_cpu_valid mappings which are now stale, whether
3263 * or not we've got I915_GEM_DOMAIN_CPU.
3264 */
3265 kfree(obj->page_cpu_valid);
3266 obj->page_cpu_valid = NULL;
3267}
3268
3269/**
3270 * Set the CPU read domain on a range of the object.
3271 *
3272 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3273 * not entirely valid. The page_cpu_valid member of the object flags which
3274 * pages have been flushed, and will be respected by
3275 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3276 * of the whole object.
3277 *
3278 * This function returns when the move is complete, including waiting on
3279 * flushes to occur.
3280 */
3281static int
3282i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3283 uint64_t offset, uint64_t size)
3284{
3285 uint32_t old_read_domains;
3286 int i, ret;
3287
3288 if (offset == 0 && size == obj->base.size)
3289 return i915_gem_object_set_to_cpu_domain(obj, 0);
3290
3291 ret = i915_gem_object_flush_gpu_write_domain(obj);
3292 if (ret)
3293 return ret;
3294
3295 ret = i915_gem_object_wait_rendering(obj);
3296 if (ret)
3297 return ret;
3298
3299 i915_gem_object_flush_gtt_write_domain(obj);
3300
3301 /* If we're already fully in the CPU read domain, we're done. */
3302 if (obj->page_cpu_valid == NULL &&
3303 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3304 return 0;
3305
3306 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3307 * newly adding I915_GEM_DOMAIN_CPU
3308 */
3309 if (obj->page_cpu_valid == NULL) {
3310 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3311 GFP_KERNEL);
3312 if (obj->page_cpu_valid == NULL)
3313 return -ENOMEM;
3314 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3315 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3316
3317 /* Flush the cache on any pages that are still invalid from the CPU's
3318 * perspective.
3319 */
3320 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3321 i++) {
3322 if (obj->page_cpu_valid[i])
3323 continue;
3324
3325 drm_clflush_pages(obj->pages + i, 1);
3326
3327 obj->page_cpu_valid[i] = 1;
3328 }
3329
3330 /* It should now be out of any other write domains, and we can update
3331 * the domain values for our changes.
3332 */
3333 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3334
3335 old_read_domains = obj->base.read_domains;
3336 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3337
3338 trace_i915_gem_object_change_domain(obj,
3339 old_read_domains,
3340 obj->base.write_domain);
3341
3342 return 0;
3343}
3344
3398/* Throttle our rendering by waiting until the ring has completed our requests 3345/* Throttle our rendering by waiting until the ring has completed our requests
3399 * emitted over 20 msec ago. 3346 * emitted over 20 msec ago.
3400 * 3347 *
@@ -3432,7 +3379,24 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3432 if (seqno == 0) 3379 if (seqno == 0)
3433 return 0; 3380 return 0;
3434 3381
3435 ret = __wait_seqno(ring, seqno, true, NULL); 3382 ret = 0;
3383 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3384 /* And wait for the seqno passing without holding any locks and
3385 * causing extra latency for others. This is safe as the irq
3386 * generation is designed to be run atomically and so is
3387 * lockless.
3388 */
3389 if (ring->irq_get(ring)) {
3390 ret = wait_event_interruptible(ring->irq_queue,
3391 i915_seqno_passed(ring->get_seqno(ring), seqno)
3392 || atomic_read(&dev_priv->mm.wedged));
3393 ring->irq_put(ring);
3394
3395 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3396 ret = -EIO;
3397 }
3398 }
3399
3436 if (ret == 0) 3400 if (ret == 0)
3437 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3401 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3438 3402
@@ -3442,13 +3406,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3442int 3406int
3443i915_gem_object_pin(struct drm_i915_gem_object *obj, 3407i915_gem_object_pin(struct drm_i915_gem_object *obj,
3444 uint32_t alignment, 3408 uint32_t alignment,
3445 bool map_and_fenceable, 3409 bool map_and_fenceable)
3446 bool nonblocking)
3447{ 3410{
3411 struct drm_device *dev = obj->base.dev;
3412 struct drm_i915_private *dev_priv = dev->dev_private;
3448 int ret; 3413 int ret;
3449 3414
3450 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3415 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3451 return -EBUSY; 3416 WARN_ON(i915_verify_lists(dev));
3452 3417
3453 if (obj->gtt_space != NULL) { 3418 if (obj->gtt_space != NULL) {
3454 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3419 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3467,35 +3432,40 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3467 } 3432 }
3468 3433
3469 if (obj->gtt_space == NULL) { 3434 if (obj->gtt_space == NULL) {
3470 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3471
3472 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3435 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3473 map_and_fenceable, 3436 map_and_fenceable);
3474 nonblocking);
3475 if (ret) 3437 if (ret)
3476 return ret; 3438 return ret;
3477
3478 if (!dev_priv->mm.aliasing_ppgtt)
3479 i915_gem_gtt_bind_object(obj, obj->cache_level);
3480 } 3439 }
3481 3440
3482 if (!obj->has_global_gtt_mapping && map_and_fenceable) 3441 if (obj->pin_count++ == 0) {
3483 i915_gem_gtt_bind_object(obj, obj->cache_level); 3442 if (!obj->active)
3484 3443 list_move_tail(&obj->mm_list,
3485 obj->pin_count++; 3444 &dev_priv->mm.pinned_list);
3445 }
3486 obj->pin_mappable |= map_and_fenceable; 3446 obj->pin_mappable |= map_and_fenceable;
3487 3447
3448 WARN_ON(i915_verify_lists(dev));
3488 return 0; 3449 return 0;
3489} 3450}
3490 3451
3491void 3452void
3492i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3453i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3493{ 3454{
3455 struct drm_device *dev = obj->base.dev;
3456 drm_i915_private_t *dev_priv = dev->dev_private;
3457
3458 WARN_ON(i915_verify_lists(dev));
3494 BUG_ON(obj->pin_count == 0); 3459 BUG_ON(obj->pin_count == 0);
3495 BUG_ON(obj->gtt_space == NULL); 3460 BUG_ON(obj->gtt_space == NULL);
3496 3461
3497 if (--obj->pin_count == 0) 3462 if (--obj->pin_count == 0) {
3463 if (!obj->active)
3464 list_move_tail(&obj->mm_list,
3465 &dev_priv->mm.inactive_list);
3498 obj->pin_mappable = false; 3466 obj->pin_mappable = false;
3467 }
3468 WARN_ON(i915_verify_lists(dev));
3499} 3469}
3500 3470
3501int 3471int
@@ -3529,15 +3499,14 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3529 goto out; 3499 goto out;
3530 } 3500 }
3531 3501
3532 if (obj->user_pin_count == 0) { 3502 obj->user_pin_count++;
3533 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3503 obj->pin_filp = file;
3504 if (obj->user_pin_count == 1) {
3505 ret = i915_gem_object_pin(obj, args->alignment, true);
3534 if (ret) 3506 if (ret)
3535 goto out; 3507 goto out;
3536 } 3508 }
3537 3509
3538 obj->user_pin_count++;
3539 obj->pin_filp = file;
3540
3541 /* XXX - flush the CPU caches for pinned objects 3510 /* XXX - flush the CPU caches for pinned objects
3542 * as the X server doesn't manage domains yet 3511 * as the X server doesn't manage domains yet
3543 */ 3512 */
@@ -3610,12 +3579,38 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3610 * become non-busy without any further actions, therefore emit any 3579 * become non-busy without any further actions, therefore emit any
3611 * necessary flushes here. 3580 * necessary flushes here.
3612 */ 3581 */
3613 ret = i915_gem_object_flush_active(obj);
3614
3615 args->busy = obj->active; 3582 args->busy = obj->active;
3616 if (obj->ring) { 3583 if (args->busy) {
3617 BUILD_BUG_ON(I915_NUM_RINGS > 16); 3584 /* Unconditionally flush objects, even when the gpu still uses this
3618 args->busy |= intel_ring_flag(obj->ring) << 16; 3585 * object. Userspace calling this function indicates that it wants to
3586 * use this buffer rather sooner than later, so issuing the required
3587 * flush earlier is beneficial.
3588 */
3589 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3590 ret = i915_gem_flush_ring(obj->ring,
3591 0, obj->base.write_domain);
3592 } else if (obj->ring->outstanding_lazy_request ==
3593 obj->last_rendering_seqno) {
3594 struct drm_i915_gem_request *request;
3595
3596 /* This ring is not being cleared by active usage,
3597 * so emit a request to do so.
3598 */
3599 request = kzalloc(sizeof(*request), GFP_KERNEL);
3600 if (request)
3601 ret = i915_add_request(obj->ring, NULL,request);
3602 else
3603 ret = -ENOMEM;
3604 }
3605
3606 /* Update the active list for the hardware's current position.
3607 * Otherwise this only updates on a delayed timer or when irqs
3608 * are actually unmasked, and our working set ends up being
3609 * larger than required.
3610 */
3611 i915_gem_retire_requests_ring(obj->ring);
3612
3613 args->busy = obj->active;
3619 } 3614 }
3620 3615
3621 drm_gem_object_unreference(&obj->base); 3616 drm_gem_object_unreference(&obj->base);
@@ -3628,7 +3623,7 @@ int
3628i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3623i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3629 struct drm_file *file_priv) 3624 struct drm_file *file_priv)
3630{ 3625{
3631 return i915_gem_ring_throttle(dev, file_priv); 3626 return i915_gem_ring_throttle(dev, file_priv);
3632} 3627}
3633 3628
3634int 3629int
@@ -3665,8 +3660,9 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3665 if (obj->madv != __I915_MADV_PURGED) 3660 if (obj->madv != __I915_MADV_PURGED)
3666 obj->madv = args->madv; 3661 obj->madv = args->madv;
3667 3662
3668 /* if the object is no longer attached, discard its backing storage */ 3663 /* if the object is no longer bound, discard its backing storage */
3669 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) 3664 if (i915_gem_object_is_purgeable(obj) &&
3665 obj->gtt_space == NULL)
3670 i915_gem_object_truncate(obj); 3666 i915_gem_object_truncate(obj);
3671 3667
3672 args->retained = obj->madv != __I915_MADV_PURGED; 3668 args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3678,35 +3674,12 @@ unlock:
3678 return ret; 3674 return ret;
3679} 3675}
3680 3676
3681void i915_gem_object_init(struct drm_i915_gem_object *obj,
3682 const struct drm_i915_gem_object_ops *ops)
3683{
3684 INIT_LIST_HEAD(&obj->mm_list);
3685 INIT_LIST_HEAD(&obj->gtt_list);
3686 INIT_LIST_HEAD(&obj->ring_list);
3687 INIT_LIST_HEAD(&obj->exec_list);
3688
3689 obj->ops = ops;
3690
3691 obj->fence_reg = I915_FENCE_REG_NONE;
3692 obj->madv = I915_MADV_WILLNEED;
3693 /* Avoid an unnecessary call to unbind on the first bind. */
3694 obj->map_and_fenceable = true;
3695
3696 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3697}
3698
3699static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3700 .get_pages = i915_gem_object_get_pages_gtt,
3701 .put_pages = i915_gem_object_put_pages_gtt,
3702};
3703
3704struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3677struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3705 size_t size) 3678 size_t size)
3706{ 3679{
3680 struct drm_i915_private *dev_priv = dev->dev_private;
3707 struct drm_i915_gem_object *obj; 3681 struct drm_i915_gem_object *obj;
3708 struct address_space *mapping; 3682 struct address_space *mapping;
3709 u32 mask;
3710 3683
3711 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 3684 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3712 if (obj == NULL) 3685 if (obj == NULL)
@@ -3717,23 +3690,16 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3717 return NULL; 3690 return NULL;
3718 } 3691 }
3719 3692
3720 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3721 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3722 /* 965gm cannot relocate objects above 4GiB. */
3723 mask &= ~__GFP_HIGHMEM;
3724 mask |= __GFP_DMA32;
3725 }
3726
3727 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 3693 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3728 mapping_set_gfp_mask(mapping, mask); 3694 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3729 3695
3730 i915_gem_object_init(obj, &i915_gem_object_ops); 3696 i915_gem_info_add_obj(dev_priv, size);
3731 3697
3732 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3698 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3733 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3699 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3734 3700
3735 if (HAS_LLC(dev)) { 3701 if (IS_GEN6(dev)) {
3736 /* On some devices, we can have the GPU use the LLC (the CPU 3702 /* On Gen6, we can have the GPU use the LLC (the CPU
3737 * cache) for about a 10% performance improvement 3703 * cache) for about a 10% performance improvement
3738 * compared to uncached. Graphics requests other than 3704 * compared to uncached. Graphics requests other than
3739 * display scanout are coherent with the CPU in 3705 * display scanout are coherent with the CPU in
@@ -3749,6 +3715,17 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3749 } else 3715 } else
3750 obj->cache_level = I915_CACHE_NONE; 3716 obj->cache_level = I915_CACHE_NONE;
3751 3717
3718 obj->base.driver_private = NULL;
3719 obj->fence_reg = I915_FENCE_REG_NONE;
3720 INIT_LIST_HEAD(&obj->mm_list);
3721 INIT_LIST_HEAD(&obj->gtt_list);
3722 INIT_LIST_HEAD(&obj->ring_list);
3723 INIT_LIST_HEAD(&obj->exec_list);
3724 INIT_LIST_HEAD(&obj->gpu_write_list);
3725 obj->madv = I915_MADV_WILLNEED;
3726 /* Avoid an unnecessary call to unbind on the first bind. */
3727 obj->map_and_fenceable = true;
3728
3752 return obj; 3729 return obj;
3753} 3730}
3754 3731
@@ -3759,45 +3736,46 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3759 return 0; 3736 return 0;
3760} 3737}
3761 3738
3762void i915_gem_free_object(struct drm_gem_object *gem_obj) 3739static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3763{ 3740{
3764 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3765 struct drm_device *dev = obj->base.dev; 3741 struct drm_device *dev = obj->base.dev;
3766 drm_i915_private_t *dev_priv = dev->dev_private; 3742 drm_i915_private_t *dev_priv = dev->dev_private;
3743 int ret;
3767 3744
3768 trace_i915_gem_object_destroy(obj); 3745 ret = i915_gem_object_unbind(obj);
3769 3746 if (ret == -ERESTARTSYS) {
3770 if (obj->phys_obj) 3747 list_move(&obj->mm_list,
3771 i915_gem_detach_phys_object(dev, obj); 3748 &dev_priv->mm.deferred_free_list);
3772 3749 return;
3773 obj->pin_count = 0;
3774 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3775 bool was_interruptible;
3776
3777 was_interruptible = dev_priv->mm.interruptible;
3778 dev_priv->mm.interruptible = false;
3779
3780 WARN_ON(i915_gem_object_unbind(obj));
3781
3782 dev_priv->mm.interruptible = was_interruptible;
3783 } 3750 }
3784 3751
3785 obj->pages_pin_count = 0; 3752 trace_i915_gem_object_destroy(obj);
3786 i915_gem_object_put_pages(obj);
3787 i915_gem_object_free_mmap_offset(obj);
3788
3789 BUG_ON(obj->pages);
3790 3753
3791 if (obj->base.import_attach) 3754 if (obj->base.map_list.map)
3792 drm_prime_gem_destroy(&obj->base, NULL); 3755 i915_gem_free_mmap_offset(obj);
3793 3756
3794 drm_gem_object_release(&obj->base); 3757 drm_gem_object_release(&obj->base);
3795 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3758 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3796 3759
3760 kfree(obj->page_cpu_valid);
3797 kfree(obj->bit_17); 3761 kfree(obj->bit_17);
3798 kfree(obj); 3762 kfree(obj);
3799} 3763}
3800 3764
3765void i915_gem_free_object(struct drm_gem_object *gem_obj)
3766{
3767 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3768 struct drm_device *dev = obj->base.dev;
3769
3770 while (obj->pin_count > 0)
3771 i915_gem_object_unpin(obj);
3772
3773 if (obj->phys_obj)
3774 i915_gem_detach_phys_object(dev, obj);
3775
3776 i915_gem_free_object_tail(obj);
3777}
3778
3801int 3779int
3802i915_gem_idle(struct drm_device *dev) 3780i915_gem_idle(struct drm_device *dev)
3803{ 3781{
@@ -3816,11 +3794,15 @@ i915_gem_idle(struct drm_device *dev)
3816 mutex_unlock(&dev->struct_mutex); 3794 mutex_unlock(&dev->struct_mutex);
3817 return ret; 3795 return ret;
3818 } 3796 }
3819 i915_gem_retire_requests(dev);
3820 3797
3821 /* Under UMS, be paranoid and evict. */ 3798 /* Under UMS, be paranoid and evict. */
3822 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3799 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3823 i915_gem_evict_everything(dev); 3800 ret = i915_gem_evict_inactive(dev, false);
3801 if (ret) {
3802 mutex_unlock(&dev->struct_mutex);
3803 return ret;
3804 }
3805 }
3824 3806
3825 i915_gem_reset_fences(dev); 3807 i915_gem_reset_fences(dev);
3826 3808
@@ -3842,91 +3824,12 @@ i915_gem_idle(struct drm_device *dev)
3842 return 0; 3824 return 0;
3843} 3825}
3844 3826
3845void i915_gem_l3_remap(struct drm_device *dev)
3846{
3847 drm_i915_private_t *dev_priv = dev->dev_private;
3848 u32 misccpctl;
3849 int i;
3850
3851 if (!IS_IVYBRIDGE(dev))
3852 return;
3853
3854 if (!dev_priv->l3_parity.remap_info)
3855 return;
3856
3857 misccpctl = I915_READ(GEN7_MISCCPCTL);
3858 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3859 POSTING_READ(GEN7_MISCCPCTL);
3860
3861 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3862 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3863 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3864 DRM_DEBUG("0x%x was already programmed to %x\n",
3865 GEN7_L3LOG_BASE + i, remap);
3866 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3867 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3868 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3869 }
3870
3871 /* Make sure all the writes land before disabling dop clock gating */
3872 POSTING_READ(GEN7_L3LOG_BASE);
3873
3874 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3875}
3876
3877void i915_gem_init_swizzling(struct drm_device *dev)
3878{
3879 drm_i915_private_t *dev_priv = dev->dev_private;
3880
3881 if (INTEL_INFO(dev)->gen < 5 ||
3882 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3883 return;
3884
3885 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3886 DISP_TILE_SURFACE_SWIZZLING);
3887
3888 if (IS_GEN5(dev))
3889 return;
3890
3891 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3892 if (IS_GEN6(dev))
3893 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3894 else
3895 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3896}
3897
3898static bool
3899intel_enable_blt(struct drm_device *dev)
3900{
3901 if (!HAS_BLT(dev))
3902 return false;
3903
3904 /* The blitter was dysfunctional on early prototypes */
3905 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3906 DRM_INFO("BLT not supported on this pre-production hardware;"
3907 " graphics performance will be degraded.\n");
3908 return false;
3909 }
3910
3911 return true;
3912}
3913
3914int 3827int
3915i915_gem_init_hw(struct drm_device *dev) 3828i915_gem_init_ringbuffer(struct drm_device *dev)
3916{ 3829{
3917 drm_i915_private_t *dev_priv = dev->dev_private; 3830 drm_i915_private_t *dev_priv = dev->dev_private;
3918 int ret; 3831 int ret;
3919 3832
3920 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3921 return -EIO;
3922
3923 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3924 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3925
3926 i915_gem_l3_remap(dev);
3927
3928 i915_gem_init_swizzling(dev);
3929
3930 ret = intel_init_render_ring_buffer(dev); 3833 ret = intel_init_render_ring_buffer(dev);
3931 if (ret) 3834 if (ret)
3932 return ret; 3835 return ret;
@@ -3937,7 +3840,7 @@ i915_gem_init_hw(struct drm_device *dev)
3937 goto cleanup_render_ring; 3840 goto cleanup_render_ring;
3938 } 3841 }
3939 3842
3940 if (intel_enable_blt(dev)) { 3843 if (HAS_BLT(dev)) {
3941 ret = intel_init_blt_ring_buffer(dev); 3844 ret = intel_init_blt_ring_buffer(dev);
3942 if (ret) 3845 if (ret)
3943 goto cleanup_bsd_ring; 3846 goto cleanup_bsd_ring;
@@ -3945,13 +3848,6 @@ i915_gem_init_hw(struct drm_device *dev)
3945 3848
3946 dev_priv->next_seqno = 1; 3849 dev_priv->next_seqno = 1;
3947 3850
3948 /*
3949 * XXX: There was some w/a described somewhere suggesting loading
3950 * contexts before PPGTT.
3951 */
3952 i915_gem_context_init(dev);
3953 i915_gem_init_ppgtt(dev);
3954
3955 return 0; 3851 return 0;
3956 3852
3957cleanup_bsd_ring: 3853cleanup_bsd_ring:
@@ -3961,80 +3857,14 @@ cleanup_render_ring:
3961 return ret; 3857 return ret;
3962} 3858}
3963 3859
3964static bool
3965intel_enable_ppgtt(struct drm_device *dev)
3966{
3967 if (i915_enable_ppgtt >= 0)
3968 return i915_enable_ppgtt;
3969
3970#ifdef CONFIG_INTEL_IOMMU
3971 /* Disable ppgtt on SNB if VT-d is on. */
3972 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3973 return false;
3974#endif
3975
3976 return true;
3977}
3978
3979int i915_gem_init(struct drm_device *dev)
3980{
3981 struct drm_i915_private *dev_priv = dev->dev_private;
3982 unsigned long gtt_size, mappable_size;
3983 int ret;
3984
3985 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3986 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3987
3988 mutex_lock(&dev->struct_mutex);
3989 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3990 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3991 * aperture accordingly when using aliasing ppgtt. */
3992 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3993
3994 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3995
3996 ret = i915_gem_init_aliasing_ppgtt(dev);
3997 if (ret) {
3998 mutex_unlock(&dev->struct_mutex);
3999 return ret;
4000 }
4001 } else {
4002 /* Let GEM Manage all of the aperture.
4003 *
4004 * However, leave one page at the end still bound to the scratch
4005 * page. There are a number of places where the hardware
4006 * apparently prefetches past the end of the object, and we've
4007 * seen multiple hangs with the GPU head pointer stuck in a
4008 * batchbuffer bound at the last page of the aperture. One page
4009 * should be enough to keep any prefetching inside of the
4010 * aperture.
4011 */
4012 i915_gem_init_global_gtt(dev, 0, mappable_size,
4013 gtt_size);
4014 }
4015
4016 ret = i915_gem_init_hw(dev);
4017 mutex_unlock(&dev->struct_mutex);
4018 if (ret) {
4019 i915_gem_cleanup_aliasing_ppgtt(dev);
4020 return ret;
4021 }
4022
4023 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4024 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4025 dev_priv->dri1.allow_batchbuffer = 1;
4026 return 0;
4027}
4028
4029void 3860void
4030i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3861i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4031{ 3862{
4032 drm_i915_private_t *dev_priv = dev->dev_private; 3863 drm_i915_private_t *dev_priv = dev->dev_private;
4033 struct intel_ring_buffer *ring;
4034 int i; 3864 int i;
4035 3865
4036 for_each_ring(ring, dev_priv, i) 3866 for (i = 0; i < I915_NUM_RINGS; i++)
4037 intel_cleanup_ring_buffer(ring); 3867 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
4038} 3868}
4039 3869
4040int 3870int
@@ -4042,7 +3872,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4042 struct drm_file *file_priv) 3872 struct drm_file *file_priv)
4043{ 3873{
4044 drm_i915_private_t *dev_priv = dev->dev_private; 3874 drm_i915_private_t *dev_priv = dev->dev_private;
4045 int ret; 3875 int ret, i;
4046 3876
4047 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3877 if (drm_core_check_feature(dev, DRIVER_MODESET))
4048 return 0; 3878 return 0;
@@ -4055,13 +3885,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4055 mutex_lock(&dev->struct_mutex); 3885 mutex_lock(&dev->struct_mutex);
4056 dev_priv->mm.suspended = 0; 3886 dev_priv->mm.suspended = 0;
4057 3887
4058 ret = i915_gem_init_hw(dev); 3888 ret = i915_gem_init_ringbuffer(dev);
4059 if (ret != 0) { 3889 if (ret != 0) {
4060 mutex_unlock(&dev->struct_mutex); 3890 mutex_unlock(&dev->struct_mutex);
4061 return ret; 3891 return ret;
4062 } 3892 }
4063 3893
4064 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3894 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3895 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3896 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3897 for (i = 0; i < I915_NUM_RINGS; i++) {
3898 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3899 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3900 }
4065 mutex_unlock(&dev->struct_mutex); 3901 mutex_unlock(&dev->struct_mutex);
4066 3902
4067 ret = drm_irq_install(dev); 3903 ret = drm_irq_install(dev);
@@ -4108,6 +3944,7 @@ init_ring_lists(struct intel_ring_buffer *ring)
4108{ 3944{
4109 INIT_LIST_HEAD(&ring->active_list); 3945 INIT_LIST_HEAD(&ring->active_list);
4110 INIT_LIST_HEAD(&ring->request_list); 3946 INIT_LIST_HEAD(&ring->request_list);
3947 INIT_LIST_HEAD(&ring->gpu_write_list);
4111} 3948}
4112 3949
4113void 3950void
@@ -4117,13 +3954,15 @@ i915_gem_load(struct drm_device *dev)
4117 drm_i915_private_t *dev_priv = dev->dev_private; 3954 drm_i915_private_t *dev_priv = dev->dev_private;
4118 3955
4119 INIT_LIST_HEAD(&dev_priv->mm.active_list); 3956 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3957 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4120 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 3958 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4121 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 3959 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4122 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4123 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3960 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3961 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3962 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
4124 for (i = 0; i < I915_NUM_RINGS; i++) 3963 for (i = 0; i < I915_NUM_RINGS; i++)
4125 init_ring_lists(&dev_priv->ring[i]); 3964 init_ring_lists(&dev_priv->ring[i]);
4126 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 3965 for (i = 0; i < 16; i++)
4127 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3966 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4128 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3967 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4129 i915_gem_retire_work_handler); 3968 i915_gem_retire_work_handler);
@@ -4131,8 +3970,12 @@ i915_gem_load(struct drm_device *dev)
4131 3970
4132 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3971 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4133 if (IS_GEN3(dev)) { 3972 if (IS_GEN3(dev)) {
4134 I915_WRITE(MI_ARB_STATE, 3973 u32 tmp = I915_READ(MI_ARB_STATE);
4135 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 3974 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3975 /* arb state is a masked write, so set bit + bit in mask */
3976 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3977 I915_WRITE(MI_ARB_STATE, tmp);
3978 }
4136 } 3979 }
4137 3980
4138 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 3981 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -4147,7 +3990,9 @@ i915_gem_load(struct drm_device *dev)
4147 dev_priv->num_fence_regs = 8; 3990 dev_priv->num_fence_regs = 8;
4148 3991
4149 /* Initialize fence registers to zero */ 3992 /* Initialize fence registers to zero */
4150 i915_gem_reset_fences(dev); 3993 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3994 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3995 }
4151 3996
4152 i915_gem_detect_bit_6_swizzle(dev); 3997 i915_gem_detect_bit_6_swizzle(dev);
4153 init_waitqueue_head(&dev_priv->pending_flip_queue); 3998 init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4252,7 +4097,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4252 page_cache_release(page); 4097 page_cache_release(page);
4253 } 4098 }
4254 } 4099 }
4255 i915_gem_chipset_flush(dev); 4100 intel_gtt_chipset_flush();
4256 4101
4257 obj->phys_obj->cur_obj = NULL; 4102 obj->phys_obj->cur_obj = NULL;
4258 obj->phys_obj = NULL; 4103 obj->phys_obj = NULL;
@@ -4339,7 +4184,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
4339 return -EFAULT; 4184 return -EFAULT;
4340 } 4185 }
4341 4186
4342 i915_gem_chipset_flush(dev); 4187 intel_gtt_chipset_flush();
4343 return 0; 4188 return 0;
4344} 4189}
4345 4190
@@ -4364,17 +4209,16 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4364 spin_unlock(&file_priv->mm.lock); 4209 spin_unlock(&file_priv->mm.lock);
4365} 4210}
4366 4211
4367static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4212static int
4213i915_gpu_is_active(struct drm_device *dev)
4368{ 4214{
4369 if (!mutex_is_locked(mutex)) 4215 drm_i915_private_t *dev_priv = dev->dev_private;
4370 return false; 4216 int lists_empty;
4371 4217
4372#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) 4218 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4373 return mutex->owner == task; 4219 list_empty(&dev_priv->mm.active_list);
4374#else 4220
4375 /* Since UP may be pre-empted, we cannot assume that we own the lock */ 4221 return !lists_empty;
4376 return false;
4377#endif
4378} 4222}
4379 4223
4380static int 4224static int
@@ -4385,39 +4229,60 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4385 struct drm_i915_private, 4229 struct drm_i915_private,
4386 mm.inactive_shrinker); 4230 mm.inactive_shrinker);
4387 struct drm_device *dev = dev_priv->dev; 4231 struct drm_device *dev = dev_priv->dev;
4388 struct drm_i915_gem_object *obj; 4232 struct drm_i915_gem_object *obj, *next;
4389 int nr_to_scan = sc->nr_to_scan; 4233 int nr_to_scan = sc->nr_to_scan;
4390 bool unlock = true;
4391 int cnt; 4234 int cnt;
4392 4235
4393 if (!mutex_trylock(&dev->struct_mutex)) { 4236 if (!mutex_trylock(&dev->struct_mutex))
4394 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4237 return 0;
4395 return 0;
4396
4397 if (dev_priv->mm.shrinker_no_lock_stealing)
4398 return 0;
4399 4238
4400 unlock = false; 4239 /* "fast-path" to count number of available objects */
4240 if (nr_to_scan == 0) {
4241 cnt = 0;
4242 list_for_each_entry(obj,
4243 &dev_priv->mm.inactive_list,
4244 mm_list)
4245 cnt++;
4246 mutex_unlock(&dev->struct_mutex);
4247 return cnt / 100 * sysctl_vfs_cache_pressure;
4401 } 4248 }
4402 4249
4403 if (nr_to_scan) { 4250rescan:
4404 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); 4251 /* first scan for clean buffers */
4405 if (nr_to_scan > 0) 4252 i915_gem_retire_requests(dev);
4406 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan, 4253
4407 false); 4254 list_for_each_entry_safe(obj, next,
4408 if (nr_to_scan > 0) 4255 &dev_priv->mm.inactive_list,
4409 i915_gem_shrink_all(dev_priv); 4256 mm_list) {
4257 if (i915_gem_object_is_purgeable(obj)) {
4258 if (i915_gem_object_unbind(obj) == 0 &&
4259 --nr_to_scan == 0)
4260 break;
4261 }
4410 } 4262 }
4411 4263
4264 /* second pass, evict/count anything still on the inactive list */
4412 cnt = 0; 4265 cnt = 0;
4413 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) 4266 list_for_each_entry_safe(obj, next,
4414 if (obj->pages_pin_count == 0) 4267 &dev_priv->mm.inactive_list,
4415 cnt += obj->base.size >> PAGE_SHIFT; 4268 mm_list) {
4416 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list) 4269 if (nr_to_scan &&
4417 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4270 i915_gem_object_unbind(obj) == 0)
4418 cnt += obj->base.size >> PAGE_SHIFT; 4271 nr_to_scan--;
4419 4272 else
4420 if (unlock) 4273 cnt++;
4421 mutex_unlock(&dev->struct_mutex); 4274 }
4422 return cnt; 4275
4276 if (nr_to_scan && i915_gpu_is_active(dev)) {
4277 /*
4278 * We are desperate for pages, so as a last resort, wait
4279 * for the GPU to finish and discard whatever we can.
4280 * This has a dramatic impact to reduce the number of
4281 * OOM-killer events whilst running the GPU aggressively.
4282 */
4283 if (i915_gpu_idle(dev) == 0)
4284 goto rescan;
4285 }
4286 mutex_unlock(&dev->struct_mutex);
4287 return cnt / 100 * sysctl_vfs_cache_pressure;
4423} 4288}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
deleted file mode 100644
index a3f06bcad55..00000000000
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ /dev/null
@@ -1,533 +0,0 @@
1/*
2 * Copyright © 2011-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27
28/*
29 * This file implements HW context support. On gen5+ a HW context consists of an
30 * opaque GPU object which is referenced at times of context saves and restores.
31 * With RC6 enabled, the context is also referenced as the GPU enters and exists
32 * from RC6 (GPU has it's own internal power context, except on gen5). Though
33 * something like a context does exist for the media ring, the code only
34 * supports contexts for the render ring.
35 *
36 * In software, there is a distinction between contexts created by the user,
37 * and the default HW context. The default HW context is used by GPU clients
38 * that do not request setup of their own hardware context. The default
39 * context's state is never restored to help prevent programming errors. This
40 * would happen if a client ran and piggy-backed off another clients GPU state.
41 * The default context only exists to give the GPU some offset to load as the
42 * current to invoke a save of the context we actually care about. In fact, the
43 * code could likely be constructed, albeit in a more complicated fashion, to
44 * never use the default context, though that limits the driver's ability to
45 * swap out, and/or destroy other contexts.
46 *
47 * All other contexts are created as a request by the GPU client. These contexts
48 * store GPU state, and thus allow GPU clients to not re-emit state (and
49 * potentially query certain state) at any time. The kernel driver makes
50 * certain that the appropriate commands are inserted.
51 *
52 * The context life cycle is semi-complicated in that context BOs may live
53 * longer than the context itself because of the way the hardware, and object
54 * tracking works. Below is a very crude representation of the state machine
55 * describing the context life.
56 * refcount pincount active
57 * S0: initial state 0 0 0
58 * S1: context created 1 0 0
59 * S2: context is currently running 2 1 X
60 * S3: GPU referenced, but not current 2 0 1
61 * S4: context is current, but destroyed 1 1 0
62 * S5: like S3, but destroyed 1 0 1
63 *
64 * The most common (but not all) transitions:
65 * S0->S1: client creates a context
66 * S1->S2: client submits execbuf with context
67 * S2->S3: other clients submits execbuf with context
68 * S3->S1: context object was retired
69 * S3->S2: clients submits another execbuf
70 * S2->S4: context destroy called with current context
71 * S3->S5->S0: destroy path
72 * S4->S5->S0: destroy path on current context
73 *
74 * There are two confusing terms used above:
75 * The "current context" means the context which is currently running on the
76 * GPU. The GPU has loaded it's state already and has stored away the gtt
77 * offset of the BO. The GPU is not actively referencing the data at this
78 * offset, but it will on the next context switch. The only way to avoid this
79 * is to do a GPU reset.
80 *
81 * An "active context' is one which was previously the "current context" and is
82 * on the active list waiting for the next context switch to occur. Until this
83 * happens, the object must remain at the same gtt offset. It is therefore
84 * possible to destroy a context, but it is still active.
85 *
86 */
87
88#include <drm/drmP.h>
89#include <drm/i915_drm.h>
90#include "i915_drv.h"
91
92/* This is a HW constraint. The value below is the largest known requirement
93 * I've seen in a spec to date, and that was a workaround for a non-shipping
94 * part. It should be safe to decrease this, but it's more future proof as is.
95 */
96#define CONTEXT_ALIGN (64<<10)
97
98static struct i915_hw_context *
99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
100static int do_switch(struct i915_hw_context *to);
101
102static int get_context_size(struct drm_device *dev)
103{
104 struct drm_i915_private *dev_priv = dev->dev_private;
105 int ret;
106 u32 reg;
107
108 switch (INTEL_INFO(dev)->gen) {
109 case 6:
110 reg = I915_READ(CXT_SIZE);
111 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
112 break;
113 case 7:
114 reg = I915_READ(GEN7_CXT_SIZE);
115 if (IS_HASWELL(dev))
116 ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
119 break;
120 default:
121 BUG();
122 }
123
124 return ret;
125}
126
127static void do_destroy(struct i915_hw_context *ctx)
128{
129 struct drm_device *dev = ctx->obj->base.dev;
130 struct drm_i915_private *dev_priv = dev->dev_private;
131
132 if (ctx->file_priv)
133 idr_remove(&ctx->file_priv->context_idr, ctx->id);
134 else
135 BUG_ON(ctx != dev_priv->ring[RCS].default_context);
136
137 drm_gem_object_unreference(&ctx->obj->base);
138 kfree(ctx);
139}
140
141static struct i915_hw_context *
142create_hw_context(struct drm_device *dev,
143 struct drm_i915_file_private *file_priv)
144{
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct i915_hw_context *ctx;
147 int ret, id;
148
149 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
150 if (ctx == NULL)
151 return ERR_PTR(-ENOMEM);
152
153 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
154 if (ctx->obj == NULL) {
155 kfree(ctx);
156 DRM_DEBUG_DRIVER("Context object allocated failed\n");
157 return ERR_PTR(-ENOMEM);
158 }
159
160 /* The ring associated with the context object is handled by the normal
161 * object tracking code. We give an initial ring value simple to pass an
162 * assertion in the context switch code.
163 */
164 ctx->ring = &dev_priv->ring[RCS];
165
166 /* Default context will never have a file_priv */
167 if (file_priv == NULL)
168 return ctx;
169
170 ctx->file_priv = file_priv;
171
172again:
173 if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
174 ret = -ENOMEM;
175 DRM_DEBUG_DRIVER("idr allocation failed\n");
176 goto err_out;
177 }
178
179 ret = idr_get_new_above(&file_priv->context_idr, ctx,
180 DEFAULT_CONTEXT_ID + 1, &id);
181 if (ret == 0)
182 ctx->id = id;
183
184 if (ret == -EAGAIN)
185 goto again;
186 else if (ret)
187 goto err_out;
188
189 return ctx;
190
191err_out:
192 do_destroy(ctx);
193 return ERR_PTR(ret);
194}
195
196static inline bool is_default_context(struct i915_hw_context *ctx)
197{
198 return (ctx == ctx->ring->default_context);
199}
200
201/**
202 * The default context needs to exist per ring that uses contexts. It stores the
203 * context state of the GPU for applications that don't utilize HW contexts, as
204 * well as an idle case.
205 */
206static int create_default_context(struct drm_i915_private *dev_priv)
207{
208 struct i915_hw_context *ctx;
209 int ret;
210
211 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
212
213 ctx = create_hw_context(dev_priv->dev, NULL);
214 if (IS_ERR(ctx))
215 return PTR_ERR(ctx);
216
217 /* We may need to do things with the shrinker which require us to
218 * immediately switch back to the default context. This can cause a
219 * problem as pinning the default context also requires GTT space which
220 * may not be available. To avoid this we always pin the
221 * default context.
222 */
223 dev_priv->ring[RCS].default_context = ctx;
224 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
225 if (ret)
226 goto err_destroy;
227
228 ret = do_switch(ctx);
229 if (ret)
230 goto err_unpin;
231
232 DRM_DEBUG_DRIVER("Default HW context loaded\n");
233 return 0;
234
235err_unpin:
236 i915_gem_object_unpin(ctx->obj);
237err_destroy:
238 do_destroy(ctx);
239 return ret;
240}
241
242void i915_gem_context_init(struct drm_device *dev)
243{
244 struct drm_i915_private *dev_priv = dev->dev_private;
245 uint32_t ctx_size;
246
247 if (!HAS_HW_CONTEXTS(dev)) {
248 dev_priv->hw_contexts_disabled = true;
249 return;
250 }
251
252 /* If called from reset, or thaw... we've been here already */
253 if (dev_priv->hw_contexts_disabled ||
254 dev_priv->ring[RCS].default_context)
255 return;
256
257 ctx_size = get_context_size(dev);
258 dev_priv->hw_context_size = get_context_size(dev);
259 dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
260
261 if (ctx_size <= 0 || ctx_size > (1<<20)) {
262 dev_priv->hw_contexts_disabled = true;
263 return;
264 }
265
266 if (create_default_context(dev_priv)) {
267 dev_priv->hw_contexts_disabled = true;
268 return;
269 }
270
271 DRM_DEBUG_DRIVER("HW context support initialized\n");
272}
273
274void i915_gem_context_fini(struct drm_device *dev)
275{
276 struct drm_i915_private *dev_priv = dev->dev_private;
277
278 if (dev_priv->hw_contexts_disabled)
279 return;
280
281 /* The only known way to stop the gpu from accessing the hw context is
282 * to reset it. Do this as the very last operation to avoid confusing
283 * other code, leading to spurious errors. */
284 intel_gpu_reset(dev);
285
286 i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
287
288 do_destroy(dev_priv->ring[RCS].default_context);
289}
290
291static int context_idr_cleanup(int id, void *p, void *data)
292{
293 struct i915_hw_context *ctx = p;
294
295 BUG_ON(id == DEFAULT_CONTEXT_ID);
296
297 do_destroy(ctx);
298
299 return 0;
300}
301
302void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
303{
304 struct drm_i915_file_private *file_priv = file->driver_priv;
305
306 mutex_lock(&dev->struct_mutex);
307 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
308 idr_destroy(&file_priv->context_idr);
309 mutex_unlock(&dev->struct_mutex);
310}
311
312static struct i915_hw_context *
313i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
314{
315 return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
316}
317
318static inline int
319mi_set_context(struct intel_ring_buffer *ring,
320 struct i915_hw_context *new_context,
321 u32 hw_flags)
322{
323 int ret;
324
325 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
326 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
327 * explicitly, so we rely on the value at ring init, stored in
328 * itlb_before_ctx_switch.
329 */
330 if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
331 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
332 if (ret)
333 return ret;
334 }
335
336 ret = intel_ring_begin(ring, 6);
337 if (ret)
338 return ret;
339
340 if (IS_GEN7(ring->dev))
341 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
342 else
343 intel_ring_emit(ring, MI_NOOP);
344
345 intel_ring_emit(ring, MI_NOOP);
346 intel_ring_emit(ring, MI_SET_CONTEXT);
347 intel_ring_emit(ring, new_context->obj->gtt_offset |
348 MI_MM_SPACE_GTT |
349 MI_SAVE_EXT_STATE_EN |
350 MI_RESTORE_EXT_STATE_EN |
351 hw_flags);
352 /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
353 intel_ring_emit(ring, MI_NOOP);
354
355 if (IS_GEN7(ring->dev))
356 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
357 else
358 intel_ring_emit(ring, MI_NOOP);
359
360 intel_ring_advance(ring);
361
362 return ret;
363}
364
365static int do_switch(struct i915_hw_context *to)
366{
367 struct intel_ring_buffer *ring = to->ring;
368 struct drm_i915_gem_object *from_obj = ring->last_context_obj;
369 u32 hw_flags = 0;
370 int ret;
371
372 BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
373
374 if (from_obj == to->obj)
375 return 0;
376
377 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
378 if (ret)
379 return ret;
380
381 /* Clear this page out of any CPU caches for coherent swap-in/out. Note
382 * that thanks to write = false in this call and us not setting any gpu
383 * write domains when putting a context object onto the active list
384 * (when switching away from it), this won't block.
385 * XXX: We need a real interface to do this instead of trickery. */
386 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
387 if (ret) {
388 i915_gem_object_unpin(to->obj);
389 return ret;
390 }
391
392 if (!to->obj->has_global_gtt_mapping)
393 i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
394
395 if (!to->is_initialized || is_default_context(to))
396 hw_flags |= MI_RESTORE_INHIBIT;
397 else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
398 hw_flags |= MI_FORCE_RESTORE;
399
400 ret = mi_set_context(ring, to, hw_flags);
401 if (ret) {
402 i915_gem_object_unpin(to->obj);
403 return ret;
404 }
405
406 /* The backing object for the context is done after switching to the
407 * *next* context. Therefore we cannot retire the previous context until
408 * the next context has already started running. In fact, the below code
409 * is a bit suboptimal because the retiring can occur simply after the
410 * MI_SET_CONTEXT instead of when the next seqno has completed.
411 */
412 if (from_obj != NULL) {
413 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
414 i915_gem_object_move_to_active(from_obj, ring);
415 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
416 * whole damn pipeline, we don't need to explicitly mark the
417 * object dirty. The only exception is that the context must be
418 * correct in case the object gets swapped out. Ideally we'd be
419 * able to defer doing this until we know the object would be
420 * swapped, but there is no way to do that yet.
421 */
422 from_obj->dirty = 1;
423 BUG_ON(from_obj->ring != ring);
424 i915_gem_object_unpin(from_obj);
425
426 drm_gem_object_unreference(&from_obj->base);
427 }
428
429 drm_gem_object_reference(&to->obj->base);
430 ring->last_context_obj = to->obj;
431 to->is_initialized = true;
432
433 return 0;
434}
435
436/**
437 * i915_switch_context() - perform a GPU context switch.
438 * @ring: ring for which we'll execute the context switch
439 * @file_priv: file_priv associated with the context, may be NULL
440 * @id: context id number
441 * @seqno: sequence number by which the new context will be switched to
442 * @flags:
443 *
444 * The context life cycle is simple. The context refcount is incremented and
445 * decremented by 1 and create and destroy. If the context is in use by the GPU,
446 * it will have a refoucnt > 1. This allows us to destroy the context abstract
447 * object while letting the normal object tracking destroy the backing BO.
448 */
449int i915_switch_context(struct intel_ring_buffer *ring,
450 struct drm_file *file,
451 int to_id)
452{
453 struct drm_i915_private *dev_priv = ring->dev->dev_private;
454 struct i915_hw_context *to;
455
456 if (dev_priv->hw_contexts_disabled)
457 return 0;
458
459 if (ring != &dev_priv->ring[RCS])
460 return 0;
461
462 if (to_id == DEFAULT_CONTEXT_ID) {
463 to = ring->default_context;
464 } else {
465 if (file == NULL)
466 return -EINVAL;
467
468 to = i915_gem_context_get(file->driver_priv, to_id);
469 if (to == NULL)
470 return -ENOENT;
471 }
472
473 return do_switch(to);
474}
475
476int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
477 struct drm_file *file)
478{
479 struct drm_i915_private *dev_priv = dev->dev_private;
480 struct drm_i915_gem_context_create *args = data;
481 struct drm_i915_file_private *file_priv = file->driver_priv;
482 struct i915_hw_context *ctx;
483 int ret;
484
485 if (!(dev->driver->driver_features & DRIVER_GEM))
486 return -ENODEV;
487
488 if (dev_priv->hw_contexts_disabled)
489 return -ENODEV;
490
491 ret = i915_mutex_lock_interruptible(dev);
492 if (ret)
493 return ret;
494
495 ctx = create_hw_context(dev, file_priv);
496 mutex_unlock(&dev->struct_mutex);
497 if (IS_ERR(ctx))
498 return PTR_ERR(ctx);
499
500 args->ctx_id = ctx->id;
501 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
502
503 return 0;
504}
505
506int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
507 struct drm_file *file)
508{
509 struct drm_i915_gem_context_destroy *args = data;
510 struct drm_i915_file_private *file_priv = file->driver_priv;
511 struct i915_hw_context *ctx;
512 int ret;
513
514 if (!(dev->driver->driver_features & DRIVER_GEM))
515 return -ENODEV;
516
517 ret = i915_mutex_lock_interruptible(dev);
518 if (ret)
519 return ret;
520
521 ctx = i915_gem_context_get(file_priv, args->ctx_id);
522 if (!ctx) {
523 mutex_unlock(&dev->struct_mutex);
524 return -ENOENT;
525 }
526
527 do_destroy(ctx);
528
529 mutex_unlock(&dev->struct_mutex);
530
531 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
532 return 0;
533}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3da..8da1899bd24 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -25,8 +25,9 @@
25 * 25 *
26 */ 26 */
27 27
28#include <drm/drmP.h> 28#include "drmP.h"
29#include <drm/i915_drm.h> 29#include "drm.h"
30#include "i915_drm.h"
30#include "i915_drv.h" 31#include "i915_drv.h"
31 32
32#if WATCH_LISTS 33#if WATCH_LISTS
@@ -71,7 +72,7 @@ i915_verify_lists(struct drm_device *dev)
71 break; 72 break;
72 } else if (!obj->active || 73 } else if (!obj->active ||
73 (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || 74 (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
74 list_empty(&obj->gpu_write_list)) { 75 list_empty(&obj->gpu_write_list)){
75 DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", 76 DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
76 obj, 77 obj,
77 obj->active, 78 obj->active,
@@ -113,6 +114,22 @@ i915_verify_lists(struct drm_device *dev)
113 } 114 }
114 } 115 }
115 116
117 list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
118 if (obj->base.dev != dev ||
119 !atomic_read(&obj->base.refcount.refcount)) {
120 DRM_ERROR("freed pinned %p\n", obj);
121 err++;
122 break;
123 } else if (!obj->pin_count || obj->active ||
124 (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
125 DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
126 obj,
127 obj->pin_count, obj->active,
128 obj->base.write_domain);
129 err++;
130 }
131 }
132
116 return warned = err; 133 return warned = err;
117} 134}
118#endif /* WATCH_INACTIVE */ 135#endif /* WATCH_INACTIVE */
@@ -131,8 +148,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
131 __func__, obj, obj->gtt_offset, handle, 148 __func__, obj, obj->gtt_offset, handle,
132 obj->size / 1024); 149 obj->size / 1024);
133 150
134 gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, 151 gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
135 obj->base.size);
136 if (gtt_mapping == NULL) { 152 if (gtt_mapping == NULL) {
137 DRM_ERROR("failed to map GTT space\n"); 153 DRM_ERROR("failed to map GTT space\n");
138 return; 154 return;
@@ -141,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
141 for (page = 0; page < obj->size / PAGE_SIZE; page++) { 157 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
142 int i; 158 int i;
143 159
144 backing_map = kmap_atomic(obj->pages[page]); 160 backing_map = kmap_atomic(obj->pages[page], KM_USER0);
145 161
146 if (backing_map == NULL) { 162 if (backing_map == NULL) {
147 DRM_ERROR("failed to map backing page\n"); 163 DRM_ERROR("failed to map backing page\n");
@@ -165,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
165 } 181 }
166 } 182 }
167 } 183 }
168 kunmap_atomic(backing_map); 184 kunmap_atomic(backing_map, KM_USER0);
169 backing_map = NULL; 185 backing_map = NULL;
170 } 186 }
171 187
172 out: 188 out:
173 if (backing_map != NULL) 189 if (backing_map != NULL)
174 kunmap_atomic(backing_map); 190 kunmap_atomic(backing_map, KM_USER0);
175 iounmap(gtt_mapping); 191 iounmap(gtt_mapping);
176 192
177 /* give syslog time to catch up */ 193 /* give syslog time to catch up */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
deleted file mode 100644
index abeaafef6d7..00000000000
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie <airlied@redhat.com>
25 */
26#include <drm/drmP.h>
27#include "i915_drv.h"
28#include <linux/dma-buf.h>
29
30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir)
32{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
34 struct sg_table *st;
35 struct scatterlist *src, *dst;
36 int ret, i;
37
38 ret = i915_mutex_lock_interruptible(obj->base.dev);
39 if (ret)
40 return ERR_PTR(ret);
41
42 ret = i915_gem_object_get_pages(obj);
43 if (ret) {
44 st = ERR_PTR(ret);
45 goto out;
46 }
47
48 /* Copy sg so that we make an independent mapping */
49 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
50 if (st == NULL) {
51 st = ERR_PTR(-ENOMEM);
52 goto out;
53 }
54
55 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
56 if (ret) {
57 kfree(st);
58 st = ERR_PTR(ret);
59 goto out;
60 }
61
62 src = obj->pages->sgl;
63 dst = st->sgl;
64 for (i = 0; i < obj->pages->nents; i++) {
65 sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
66 dst = sg_next(dst);
67 src = sg_next(src);
68 }
69
70 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
71 sg_free_table(st);
72 kfree(st);
73 st = ERR_PTR(-ENOMEM);
74 goto out;
75 }
76
77 i915_gem_object_pin_pages(obj);
78
79out:
80 mutex_unlock(&obj->base.dev->struct_mutex);
81 return st;
82}
83
84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg,
86 enum dma_data_direction dir)
87{
88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
89 sg_free_table(sg);
90 kfree(sg);
91}
92
93static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
94{
95 struct drm_i915_gem_object *obj = dma_buf->priv;
96
97 if (obj->base.export_dma_buf == dma_buf) {
98 /* drop the reference on the export fd holds */
99 obj->base.export_dma_buf = NULL;
100 drm_gem_object_unreference_unlocked(&obj->base);
101 }
102}
103
104static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
105{
106 struct drm_i915_gem_object *obj = dma_buf->priv;
107 struct drm_device *dev = obj->base.dev;
108 struct scatterlist *sg;
109 struct page **pages;
110 int ret, i;
111
112 ret = i915_mutex_lock_interruptible(dev);
113 if (ret)
114 return ERR_PTR(ret);
115
116 if (obj->dma_buf_vmapping) {
117 obj->vmapping_count++;
118 goto out_unlock;
119 }
120
121 ret = i915_gem_object_get_pages(obj);
122 if (ret)
123 goto error;
124
125 ret = -ENOMEM;
126
127 pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
128 if (pages == NULL)
129 goto error;
130
131 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
132 pages[i] = sg_page(sg);
133
134 obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
135 drm_free_large(pages);
136
137 if (!obj->dma_buf_vmapping)
138 goto error;
139
140 obj->vmapping_count = 1;
141 i915_gem_object_pin_pages(obj);
142out_unlock:
143 mutex_unlock(&dev->struct_mutex);
144 return obj->dma_buf_vmapping;
145
146error:
147 mutex_unlock(&dev->struct_mutex);
148 return ERR_PTR(ret);
149}
150
151static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
152{
153 struct drm_i915_gem_object *obj = dma_buf->priv;
154 struct drm_device *dev = obj->base.dev;
155 int ret;
156
157 ret = i915_mutex_lock_interruptible(dev);
158 if (ret)
159 return;
160
161 if (--obj->vmapping_count == 0) {
162 vunmap(obj->dma_buf_vmapping);
163 obj->dma_buf_vmapping = NULL;
164
165 i915_gem_object_unpin_pages(obj);
166 }
167 mutex_unlock(&dev->struct_mutex);
168}
169
170static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
171{
172 return NULL;
173}
174
175static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
176{
177
178}
179static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
180{
181 return NULL;
182}
183
184static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
185{
186
187}
188
189static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
190{
191 return -EINVAL;
192}
193
194static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
195{
196 struct drm_i915_gem_object *obj = dma_buf->priv;
197 struct drm_device *dev = obj->base.dev;
198 int ret;
199 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
200
201 ret = i915_mutex_lock_interruptible(dev);
202 if (ret)
203 return ret;
204
205 ret = i915_gem_object_set_to_cpu_domain(obj, write);
206 mutex_unlock(&dev->struct_mutex);
207 return ret;
208}
209
210static const struct dma_buf_ops i915_dmabuf_ops = {
211 .map_dma_buf = i915_gem_map_dma_buf,
212 .unmap_dma_buf = i915_gem_unmap_dma_buf,
213 .release = i915_gem_dmabuf_release,
214 .kmap = i915_gem_dmabuf_kmap,
215 .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
216 .kunmap = i915_gem_dmabuf_kunmap,
217 .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
218 .mmap = i915_gem_dmabuf_mmap,
219 .vmap = i915_gem_dmabuf_vmap,
220 .vunmap = i915_gem_dmabuf_vunmap,
221 .begin_cpu_access = i915_gem_begin_cpu_access,
222};
223
224struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
225 struct drm_gem_object *gem_obj, int flags)
226{
227 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
228
229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
230}
231
232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
233{
234 struct sg_table *sg;
235
236 sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
237 if (IS_ERR(sg))
238 return PTR_ERR(sg);
239
240 obj->pages = sg;
241 obj->has_dma_mapping = true;
242 return 0;
243}
244
245static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
246{
247 dma_buf_unmap_attachment(obj->base.import_attach,
248 obj->pages, DMA_BIDIRECTIONAL);
249 obj->has_dma_mapping = false;
250}
251
252static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
253 .get_pages = i915_gem_object_get_pages_dmabuf,
254 .put_pages = i915_gem_object_put_pages_dmabuf,
255};
256
257struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
258 struct dma_buf *dma_buf)
259{
260 struct dma_buf_attachment *attach;
261 struct drm_i915_gem_object *obj;
262 int ret;
263
264 /* is this one of own objects? */
265 if (dma_buf->ops == &i915_dmabuf_ops) {
266 obj = dma_buf->priv;
267 /* is it from our device? */
268 if (obj->base.dev == dev) {
269 /*
270 * Importing dmabuf exported from out own gem increases
271 * refcount on gem itself instead of f_count of dmabuf.
272 */
273 drm_gem_object_reference(&obj->base);
274 dma_buf_put(dma_buf);
275 return &obj->base;
276 }
277 }
278
279 /* need to attach */
280 attach = dma_buf_attach(dma_buf, dev->dev);
281 if (IS_ERR(attach))
282 return ERR_CAST(attach);
283
284
285 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
286 if (obj == NULL) {
287 ret = -ENOMEM;
288 goto fail_detach;
289 }
290
291 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
292 if (ret) {
293 kfree(obj);
294 goto fail_detach;
295 }
296
297 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
298 obj->base.import_attach = attach;
299
300 return &obj->base;
301
302fail_detach:
303 dma_buf_detach(dma_buf, attach);
304 return ERR_PTR(ret);
305}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 776a3225184..da05a2692a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,31 +26,44 @@
26 * 26 *
27 */ 27 */
28 28
29#include <drm/drmP.h> 29#include "drmP.h"
30#include "drm.h"
30#include "i915_drv.h" 31#include "i915_drv.h"
31#include <drm/i915_drm.h> 32#include "i915_drm.h"
32#include "i915_trace.h" 33#include "i915_trace.h"
33 34
34static bool 35static bool
35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
36{ 37{
37 if (obj->pin_count)
38 return false;
39
40 list_add(&obj->exec_list, unwind); 38 list_add(&obj->exec_list, unwind);
39 drm_gem_object_reference(&obj->base);
41 return drm_mm_scan_add_block(obj->gtt_space); 40 return drm_mm_scan_add_block(obj->gtt_space);
42} 41}
43 42
44int 43int
45i915_gem_evict_something(struct drm_device *dev, int min_size, 44i915_gem_evict_something(struct drm_device *dev, int min_size,
46 unsigned alignment, unsigned cache_level, 45 unsigned alignment, bool mappable)
47 bool mappable, bool nonblocking)
48{ 46{
49 drm_i915_private_t *dev_priv = dev->dev_private; 47 drm_i915_private_t *dev_priv = dev->dev_private;
50 struct list_head eviction_list, unwind_list; 48 struct list_head eviction_list, unwind_list;
51 struct drm_i915_gem_object *obj; 49 struct drm_i915_gem_object *obj;
52 int ret = 0; 50 int ret = 0;
53 51
52 i915_gem_retire_requests(dev);
53
54 /* Re-check for free space after retiring requests */
55 if (mappable) {
56 if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
57 min_size, alignment, 0,
58 dev_priv->mm.gtt_mappable_end,
59 0))
60 return 0;
61 } else {
62 if (drm_mm_search_free(&dev_priv->mm.gtt_space,
63 min_size, alignment, 0))
64 return 0;
65 }
66
54 trace_i915_gem_evict(dev, min_size, alignment, mappable); 67 trace_i915_gem_evict(dev, min_size, alignment, mappable);
55 68
56 /* 69 /*
@@ -78,12 +91,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
78 91
79 INIT_LIST_HEAD(&unwind_list); 92 INIT_LIST_HEAD(&unwind_list);
80 if (mappable) 93 if (mappable)
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 94 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
82 min_size, alignment, cache_level, 95 alignment, 0,
83 0, dev_priv->mm.gtt_mappable_end); 96 dev_priv->mm.gtt_mappable_end);
84 else 97 else
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, 98 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
86 min_size, alignment, cache_level);
87 99
88 /* First see if there is a large enough contiguous idle region... */ 100 /* First see if there is a large enough contiguous idle region... */
89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 101 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -91,16 +103,32 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
91 goto found; 103 goto found;
92 } 104 }
93 105
94 if (nonblocking)
95 goto none;
96
97 /* Now merge in the soon-to-be-expired objects... */ 106 /* Now merge in the soon-to-be-expired objects... */
98 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 107 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
108 /* Does the object require an outstanding flush? */
109 if (obj->base.write_domain || obj->pin_count)
110 continue;
111
112 if (mark_free(obj, &unwind_list))
113 goto found;
114 }
115
116 /* Finally add anything with a pending flush (in order of retirement) */
117 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
118 if (obj->pin_count)
119 continue;
120
121 if (mark_free(obj, &unwind_list))
122 goto found;
123 }
124 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
125 if (! obj->base.write_domain || obj->pin_count)
126 continue;
127
99 if (mark_free(obj, &unwind_list)) 128 if (mark_free(obj, &unwind_list))
100 goto found; 129 goto found;
101 } 130 }
102 131
103none:
104 /* Nothing found, clean up and bail out! */ 132 /* Nothing found, clean up and bail out! */
105 while (!list_empty(&unwind_list)) { 133 while (!list_empty(&unwind_list)) {
106 obj = list_first_entry(&unwind_list, 134 obj = list_first_entry(&unwind_list,
@@ -111,6 +139,7 @@ none:
111 BUG_ON(ret); 139 BUG_ON(ret);
112 140
113 list_del_init(&obj->exec_list); 141 list_del_init(&obj->exec_list);
142 drm_gem_object_unreference(&obj->base);
114 } 143 }
115 144
116 /* We expect the caller to unpin, evict all and try again, or give up. 145 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -129,10 +158,10 @@ found:
129 exec_list); 158 exec_list);
130 if (drm_mm_scan_remove_block(obj->gtt_space)) { 159 if (drm_mm_scan_remove_block(obj->gtt_space)) {
131 list_move(&obj->exec_list, &eviction_list); 160 list_move(&obj->exec_list, &eviction_list);
132 drm_gem_object_reference(&obj->base);
133 continue; 161 continue;
134 } 162 }
135 list_del_init(&obj->exec_list); 163 list_del_init(&obj->exec_list);
164 drm_gem_object_unreference(&obj->base);
136 } 165 }
137 166
138 /* Unbinding will emit any required flushes */ 167 /* Unbinding will emit any required flushes */
@@ -151,35 +180,45 @@ found:
151} 180}
152 181
153int 182int
154i915_gem_evict_everything(struct drm_device *dev) 183i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
155{ 184{
156 drm_i915_private_t *dev_priv = dev->dev_private; 185 drm_i915_private_t *dev_priv = dev->dev_private;
157 struct drm_i915_gem_object *obj, *next;
158 bool lists_empty;
159 int ret; 186 int ret;
187 bool lists_empty;
160 188
161 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 189 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
190 list_empty(&dev_priv->mm.flushing_list) &&
162 list_empty(&dev_priv->mm.active_list)); 191 list_empty(&dev_priv->mm.active_list));
163 if (lists_empty) 192 if (lists_empty)
164 return -ENOSPC; 193 return -ENOSPC;
165 194
166 trace_i915_gem_evict_everything(dev); 195 trace_i915_gem_evict_everything(dev, purgeable_only);
167 196
168 /* The gpu_idle will flush everything in the write domain to the 197 /* Flush everything (on to the inactive lists) and evict */
169 * active list. Then we must move everything off the active list
170 * with retire requests.
171 */
172 ret = i915_gpu_idle(dev); 198 ret = i915_gpu_idle(dev);
173 if (ret) 199 if (ret)
174 return ret; 200 return ret;
175 201
176 i915_gem_retire_requests(dev); 202 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
203
204 return i915_gem_evict_inactive(dev, purgeable_only);
205}
206
207/** Unbinds all inactive objects. */
208int
209i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
210{
211 drm_i915_private_t *dev_priv = dev->dev_private;
212 struct drm_i915_gem_object *obj, *next;
177 213
178 /* Having flushed everything, unbind() should never raise an error */
179 list_for_each_entry_safe(obj, next, 214 list_for_each_entry_safe(obj, next,
180 &dev_priv->mm.inactive_list, mm_list) 215 &dev_priv->mm.inactive_list, mm_list) {
181 if (obj->pin_count == 0) 216 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
182 WARN_ON(i915_gem_object_unbind(obj)); 217 int ret = i915_gem_object_unbind(obj);
218 if (ret)
219 return ret;
220 }
221 }
183 222
184 return 0; 223 return 0;
185} 224}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a0739..4934cf84c32 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,12 +26,186 @@
26 * 26 *
27 */ 27 */
28 28
29#include <drm/drmP.h> 29#include "drmP.h"
30#include <drm/i915_drm.h> 30#include "drm.h"
31#include "i915_drm.h"
31#include "i915_drv.h" 32#include "i915_drv.h"
32#include "i915_trace.h" 33#include "i915_trace.h"
33#include "intel_drv.h" 34#include "intel_drv.h"
34#include <linux/dma_remapping.h> 35
36struct change_domains {
37 uint32_t invalidate_domains;
38 uint32_t flush_domains;
39 uint32_t flush_rings;
40 uint32_t flips;
41};
42
43/*
44 * Set the next domain for the specified object. This
45 * may not actually perform the necessary flushing/invaliding though,
46 * as that may want to be batched with other set_domain operations
47 *
48 * This is (we hope) the only really tricky part of gem. The goal
49 * is fairly simple -- track which caches hold bits of the object
50 * and make sure they remain coherent. A few concrete examples may
51 * help to explain how it works. For shorthand, we use the notation
52 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
53 * a pair of read and write domain masks.
54 *
55 * Case 1: the batch buffer
56 *
57 * 1. Allocated
58 * 2. Written by CPU
59 * 3. Mapped to GTT
60 * 4. Read by GPU
61 * 5. Unmapped from GTT
62 * 6. Freed
63 *
64 * Let's take these a step at a time
65 *
66 * 1. Allocated
67 * Pages allocated from the kernel may still have
68 * cache contents, so we set them to (CPU, CPU) always.
69 * 2. Written by CPU (using pwrite)
70 * The pwrite function calls set_domain (CPU, CPU) and
71 * this function does nothing (as nothing changes)
72 * 3. Mapped by GTT
73 * This function asserts that the object is not
74 * currently in any GPU-based read or write domains
75 * 4. Read by GPU
76 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
77 * As write_domain is zero, this function adds in the
78 * current read domains (CPU+COMMAND, 0).
79 * flush_domains is set to CPU.
80 * invalidate_domains is set to COMMAND
81 * clflush is run to get data out of the CPU caches
82 * then i915_dev_set_domain calls i915_gem_flush to
83 * emit an MI_FLUSH and drm_agp_chipset_flush
84 * 5. Unmapped from GTT
85 * i915_gem_object_unbind calls set_domain (CPU, CPU)
86 * flush_domains and invalidate_domains end up both zero
87 * so no flushing/invalidating happens
88 * 6. Freed
89 * yay, done
90 *
91 * Case 2: The shared render buffer
92 *
93 * 1. Allocated
94 * 2. Mapped to GTT
95 * 3. Read/written by GPU
96 * 4. set_domain to (CPU,CPU)
97 * 5. Read/written by CPU
98 * 6. Read/written by GPU
99 *
100 * 1. Allocated
101 * Same as last example, (CPU, CPU)
102 * 2. Mapped to GTT
103 * Nothing changes (assertions find that it is not in the GPU)
104 * 3. Read/written by GPU
105 * execbuffer calls set_domain (RENDER, RENDER)
106 * flush_domains gets CPU
107 * invalidate_domains gets GPU
108 * clflush (obj)
109 * MI_FLUSH and drm_agp_chipset_flush
110 * 4. set_domain (CPU, CPU)
111 * flush_domains gets GPU
112 * invalidate_domains gets CPU
113 * wait_rendering (obj) to make sure all drawing is complete.
114 * This will include an MI_FLUSH to get the data from GPU
115 * to memory
116 * clflush (obj) to invalidate the CPU cache
117 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
118 * 5. Read/written by CPU
119 * cache lines are loaded and dirtied
120 * 6. Read written by GPU
121 * Same as last GPU access
122 *
123 * Case 3: The constant buffer
124 *
125 * 1. Allocated
126 * 2. Written by CPU
127 * 3. Read by GPU
128 * 4. Updated (written) by CPU again
129 * 5. Read by GPU
130 *
131 * 1. Allocated
132 * (CPU, CPU)
133 * 2. Written by CPU
134 * (CPU, CPU)
135 * 3. Read by GPU
136 * (CPU+RENDER, 0)
137 * flush_domains = CPU
138 * invalidate_domains = RENDER
139 * clflush (obj)
140 * MI_FLUSH
141 * drm_agp_chipset_flush
142 * 4. Updated (written) by CPU again
143 * (CPU, CPU)
144 * flush_domains = 0 (no previous write domain)
145 * invalidate_domains = 0 (no new read domains)
146 * 5. Read by GPU
147 * (CPU+RENDER, 0)
148 * flush_domains = CPU
149 * invalidate_domains = RENDER
150 * clflush (obj)
151 * MI_FLUSH
152 * drm_agp_chipset_flush
153 */
154static void
155i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
156 struct intel_ring_buffer *ring,
157 struct change_domains *cd)
158{
159 uint32_t invalidate_domains = 0, flush_domains = 0;
160
161 /*
162 * If the object isn't moving to a new write domain,
163 * let the object stay in multiple read domains
164 */
165 if (obj->base.pending_write_domain == 0)
166 obj->base.pending_read_domains |= obj->base.read_domains;
167
168 /*
169 * Flush the current write domain if
170 * the new read domains don't match. Invalidate
171 * any read domains which differ from the old
172 * write domain
173 */
174 if (obj->base.write_domain &&
175 (((obj->base.write_domain != obj->base.pending_read_domains ||
176 obj->ring != ring)) ||
177 (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
178 flush_domains |= obj->base.write_domain;
179 invalidate_domains |=
180 obj->base.pending_read_domains & ~obj->base.write_domain;
181 }
182 /*
183 * Invalidate any read caches which may have
184 * stale data. That is, any new read domains.
185 */
186 invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
187 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
188 i915_gem_clflush_object(obj);
189
190 if (obj->base.pending_write_domain)
191 cd->flips |= atomic_read(&obj->pending_flip);
192
193 /* The actual obj->write_domain will be updated with
194 * pending_write_domain after we emit the accumulated flush for all
195 * of our domain changes in execbuffers (which clears objects'
196 * write_domains). So if we have a current write domain that we
197 * aren't changing, set pending_write_domain to that.
198 */
199 if (flush_domains == 0 && obj->base.pending_write_domain == 0)
200 obj->base.pending_write_domain = obj->base.write_domain;
201
202 cd->invalidate_domains |= invalidate_domains;
203 cd->flush_domains |= flush_domains;
204 if (flush_domains & I915_GEM_GPU_DOMAINS)
205 cd->flush_rings |= obj->ring->id;
206 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
207 cd->flush_rings |= ring->id;
208}
35 209
36struct eb_objects { 210struct eb_objects {
37 int and; 211 int and;
@@ -43,7 +217,6 @@ eb_create(int size)
43{ 217{
44 struct eb_objects *eb; 218 struct eb_objects *eb;
45 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 219 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
46 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
47 while (count > size) 220 while (count > size)
48 count >>= 1; 221 count >>= 1;
49 eb = kzalloc(count*sizeof(struct hlist_head) + 222 eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -92,13 +265,6 @@ eb_destroy(struct eb_objects *eb)
92 kfree(eb); 265 kfree(eb);
93} 266}
94 267
95static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
96{
97 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
98 !obj->map_and_fenceable ||
99 obj->cache_level != I915_CACHE_NONE);
100}
101
102static int 268static int
103i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 269i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
104 struct eb_objects *eb, 270 struct eb_objects *eb,
@@ -106,7 +272,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
106{ 272{
107 struct drm_device *dev = obj->base.dev; 273 struct drm_device *dev = obj->base.dev;
108 struct drm_gem_object *target_obj; 274 struct drm_gem_object *target_obj;
109 struct drm_i915_gem_object *target_i915_obj;
110 uint32_t target_offset; 275 uint32_t target_offset;
111 int ret = -EINVAL; 276 int ret = -EINVAL;
112 277
@@ -115,22 +280,20 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
115 if (unlikely(target_obj == NULL)) 280 if (unlikely(target_obj == NULL))
116 return -ENOENT; 281 return -ENOENT;
117 282
118 target_i915_obj = to_intel_bo(target_obj); 283 target_offset = to_intel_bo(target_obj)->gtt_offset;
119 target_offset = target_i915_obj->gtt_offset; 284
120 285 /* The target buffer should have appeared before us in the
121 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 286 * exec_object list, so it should have a GTT space bound by now.
122 * pipe_control writes because the gpu doesn't properly redirect them 287 */
123 * through the ppgtt for non_secure batchbuffers. */ 288 if (unlikely(target_offset == 0)) {
124 if (unlikely(IS_GEN6(dev) && 289 DRM_ERROR("No GTT space found for object %d\n",
125 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 290 reloc->target_handle);
126 !target_i915_obj->has_global_gtt_mapping)) { 291 return ret;
127 i915_gem_gtt_bind_object(target_i915_obj,
128 target_i915_obj->cache_level);
129 } 292 }
130 293
131 /* Validate that the target is in a valid r/w GPU domain */ 294 /* Validate that the target is in a valid r/w GPU domain */
132 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 295 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
133 DRM_DEBUG("reloc with multiple write domains: " 296 DRM_ERROR("reloc with multiple write domains: "
134 "obj %p target %d offset %d " 297 "obj %p target %d offset %d "
135 "read %08x write %08x", 298 "read %08x write %08x",
136 obj, reloc->target_handle, 299 obj, reloc->target_handle,
@@ -139,9 +302,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
139 reloc->write_domain); 302 reloc->write_domain);
140 return ret; 303 return ret;
141 } 304 }
142 if (unlikely((reloc->write_domain | reloc->read_domains) 305 if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
143 & ~I915_GEM_GPU_DOMAINS)) { 306 DRM_ERROR("reloc with read/write CPU domains: "
144 DRM_DEBUG("reloc with read/write non-GPU domains: "
145 "obj %p target %d offset %d " 307 "obj %p target %d offset %d "
146 "read %08x write %08x", 308 "read %08x write %08x",
147 obj, reloc->target_handle, 309 obj, reloc->target_handle,
@@ -152,7 +314,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
152 } 314 }
153 if (unlikely(reloc->write_domain && target_obj->pending_write_domain && 315 if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
154 reloc->write_domain != target_obj->pending_write_domain)) { 316 reloc->write_domain != target_obj->pending_write_domain)) {
155 DRM_DEBUG("Write domain conflict: " 317 DRM_ERROR("Write domain conflict: "
156 "obj %p target %d offset %d " 318 "obj %p target %d offset %d "
157 "new %08x old %08x\n", 319 "new %08x old %08x\n",
158 obj, reloc->target_handle, 320 obj, reloc->target_handle,
@@ -173,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
173 335
174 /* Check that the relocation address is valid... */ 336 /* Check that the relocation address is valid... */
175 if (unlikely(reloc->offset > obj->base.size - 4)) { 337 if (unlikely(reloc->offset > obj->base.size - 4)) {
176 DRM_DEBUG("Relocation beyond object bounds: " 338 DRM_ERROR("Relocation beyond object bounds: "
177 "obj %p target %d offset %d size %d.\n", 339 "obj %p target %d offset %d size %d.\n",
178 obj, reloc->target_handle, 340 obj, reloc->target_handle,
179 (int) reloc->offset, 341 (int) reloc->offset,
@@ -181,28 +343,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
181 return ret; 343 return ret;
182 } 344 }
183 if (unlikely(reloc->offset & 3)) { 345 if (unlikely(reloc->offset & 3)) {
184 DRM_DEBUG("Relocation not 4-byte aligned: " 346 DRM_ERROR("Relocation not 4-byte aligned: "
185 "obj %p target %d offset %d.\n", 347 "obj %p target %d offset %d.\n",
186 obj, reloc->target_handle, 348 obj, reloc->target_handle,
187 (int) reloc->offset); 349 (int) reloc->offset);
188 return ret; 350 return ret;
189 } 351 }
190 352
191 /* We can't wait for rendering with pagefaults disabled */
192 if (obj->active && in_atomic())
193 return -EFAULT;
194
195 reloc->delta += target_offset; 353 reloc->delta += target_offset;
196 if (use_cpu_reloc(obj)) { 354 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
197 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 355 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
198 char *vaddr; 356 char *vaddr;
199 357
200 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 358 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
201 if (ret)
202 return ret;
203
204 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
205 reloc->offset >> PAGE_SHIFT));
206 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 359 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
207 kunmap_atomic(vaddr); 360 kunmap_atomic(vaddr);
208 } else { 361 } else {
@@ -210,11 +363,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
210 uint32_t __iomem *reloc_entry; 363 uint32_t __iomem *reloc_entry;
211 void __iomem *reloc_page; 364 void __iomem *reloc_page;
212 365
213 ret = i915_gem_object_set_to_gtt_domain(obj, true); 366 /* We can't wait for rendering with pagefaults disabled */
214 if (ret) 367 if (obj->active && in_atomic())
215 return ret; 368 return -EFAULT;
216 369
217 ret = i915_gem_object_put_fence(obj); 370 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
218 if (ret) 371 if (ret)
219 return ret; 372 return ret;
220 373
@@ -238,46 +391,30 @@ static int
238i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 391i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
239 struct eb_objects *eb) 392 struct eb_objects *eb)
240{ 393{
241#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
242 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
243 struct drm_i915_gem_relocation_entry __user *user_relocs; 394 struct drm_i915_gem_relocation_entry __user *user_relocs;
244 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 395 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
245 int remain, ret; 396 int i, ret;
246 397
247 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 398 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
399 for (i = 0; i < entry->relocation_count; i++) {
400 struct drm_i915_gem_relocation_entry reloc;
248 401
249 remain = entry->relocation_count; 402 if (__copy_from_user_inatomic(&reloc,
250 while (remain) { 403 user_relocs+i,
251 struct drm_i915_gem_relocation_entry *r = stack_reloc; 404 sizeof(reloc)))
252 int count = remain;
253 if (count > ARRAY_SIZE(stack_reloc))
254 count = ARRAY_SIZE(stack_reloc);
255 remain -= count;
256
257 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
258 return -EFAULT; 405 return -EFAULT;
259 406
260 do { 407 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
261 u64 offset = r->presumed_offset; 408 if (ret)
262 409 return ret;
263 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
264 if (ret)
265 return ret;
266
267 if (r->presumed_offset != offset &&
268 __copy_to_user_inatomic(&user_relocs->presumed_offset,
269 &r->presumed_offset,
270 sizeof(r->presumed_offset))) {
271 return -EFAULT;
272 }
273 410
274 user_relocs++; 411 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
275 r++; 412 &reloc.presumed_offset,
276 } while (--count); 413 sizeof(reloc.presumed_offset)))
414 return -EFAULT;
277 } 415 }
278 416
279 return 0; 417 return 0;
280#undef N_RELOC
281} 418}
282 419
283static int 420static int
@@ -323,91 +460,15 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
323 return ret; 460 return ret;
324} 461}
325 462
326#define __EXEC_OBJECT_HAS_PIN (1<<31)
327#define __EXEC_OBJECT_HAS_FENCE (1<<30)
328
329static int
330need_reloc_mappable(struct drm_i915_gem_object *obj)
331{
332 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
333 return entry->relocation_count && !use_cpu_reloc(obj);
334}
335
336static int
337i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
338 struct intel_ring_buffer *ring)
339{
340 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
341 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
342 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
343 bool need_fence, need_mappable;
344 int ret;
345
346 need_fence =
347 has_fenced_gpu_access &&
348 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
349 obj->tiling_mode != I915_TILING_NONE;
350 need_mappable = need_fence || need_reloc_mappable(obj);
351
352 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
353 if (ret)
354 return ret;
355
356 entry->flags |= __EXEC_OBJECT_HAS_PIN;
357
358 if (has_fenced_gpu_access) {
359 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
360 ret = i915_gem_object_get_fence(obj);
361 if (ret)
362 return ret;
363
364 if (i915_gem_object_pin_fence(obj))
365 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
366
367 obj->pending_fenced_gpu_access = true;
368 }
369 }
370
371 /* Ensure ppgtt mapping exists if needed */
372 if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
373 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
374 obj, obj->cache_level);
375
376 obj->has_aliasing_ppgtt_mapping = 1;
377 }
378
379 entry->offset = obj->gtt_offset;
380 return 0;
381}
382
383static void
384i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
385{
386 struct drm_i915_gem_exec_object2 *entry;
387
388 if (!obj->gtt_space)
389 return;
390
391 entry = obj->exec_entry;
392
393 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
394 i915_gem_object_unpin_fence(obj);
395
396 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
397 i915_gem_object_unpin(obj);
398
399 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
400}
401
402static int 463static int
403i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 464i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
404 struct drm_file *file, 465 struct drm_file *file,
405 struct list_head *objects) 466 struct list_head *objects)
406{ 467{
407 struct drm_i915_gem_object *obj; 468 struct drm_i915_gem_object *obj;
408 struct list_head ordered_objects; 469 int ret, retry;
409 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 470 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
410 int retry; 471 struct list_head ordered_objects;
411 472
412 INIT_LIST_HEAD(&ordered_objects); 473 INIT_LIST_HEAD(&ordered_objects);
413 while (!list_empty(objects)) { 474 while (!list_empty(objects)) {
@@ -423,7 +484,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
423 has_fenced_gpu_access && 484 has_fenced_gpu_access &&
424 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 485 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
425 obj->tiling_mode != I915_TILING_NONE; 486 obj->tiling_mode != I915_TILING_NONE;
426 need_mappable = need_fence || need_reloc_mappable(obj); 487 need_mappable =
488 entry->relocation_count ? true : need_fence;
427 489
428 if (need_mappable) 490 if (need_mappable)
429 list_move(&obj->exec_list, &ordered_objects); 491 list_move(&obj->exec_list, &ordered_objects);
@@ -432,7 +494,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
432 494
433 obj->base.pending_read_domains = 0; 495 obj->base.pending_read_domains = 0;
434 obj->base.pending_write_domain = 0; 496 obj->base.pending_write_domain = 0;
435 obj->pending_fenced_gpu_access = false;
436 } 497 }
437 list_splice(&ordered_objects, objects); 498 list_splice(&ordered_objects, objects);
438 499
@@ -445,18 +506,17 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
445 * 2. Bind new objects. 506 * 2. Bind new objects.
446 * 3. Decrement pin count. 507 * 3. Decrement pin count.
447 * 508 *
448 * This avoid unnecessary unbinding of later objects in order to make 509 * This avoid unnecessary unbinding of later objects in order to makr
449 * room for the earlier objects *unless* we need to defragment. 510 * room for the earlier objects *unless* we need to defragment.
450 */ 511 */
451 retry = 0; 512 retry = 0;
452 do { 513 do {
453 int ret = 0; 514 ret = 0;
454 515
455 /* Unbind any ill-fitting objects or pin. */ 516 /* Unbind any ill-fitting objects or pin. */
456 list_for_each_entry(obj, objects, exec_list) { 517 list_for_each_entry(obj, objects, exec_list) {
457 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 518 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
458 bool need_fence, need_mappable; 519 bool need_fence, need_mappable;
459
460 if (!obj->gtt_space) 520 if (!obj->gtt_space)
461 continue; 521 continue;
462 522
@@ -464,38 +524,94 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
464 has_fenced_gpu_access && 524 has_fenced_gpu_access &&
465 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 525 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
466 obj->tiling_mode != I915_TILING_NONE; 526 obj->tiling_mode != I915_TILING_NONE;
467 need_mappable = need_fence || need_reloc_mappable(obj); 527 need_mappable =
528 entry->relocation_count ? true : need_fence;
468 529
469 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 530 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
470 (need_mappable && !obj->map_and_fenceable)) 531 (need_mappable && !obj->map_and_fenceable))
471 ret = i915_gem_object_unbind(obj); 532 ret = i915_gem_object_unbind(obj);
472 else 533 else
473 ret = i915_gem_execbuffer_reserve_object(obj, ring); 534 ret = i915_gem_object_pin(obj,
535 entry->alignment,
536 need_mappable);
474 if (ret) 537 if (ret)
475 goto err; 538 goto err;
539
540 entry++;
476 } 541 }
477 542
478 /* Bind fresh objects */ 543 /* Bind fresh objects */
479 list_for_each_entry(obj, objects, exec_list) { 544 list_for_each_entry(obj, objects, exec_list) {
480 if (obj->gtt_space) 545 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
481 continue; 546 bool need_fence;
482 547
483 ret = i915_gem_execbuffer_reserve_object(obj, ring); 548 need_fence =
484 if (ret) 549 has_fenced_gpu_access &&
485 goto err; 550 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
551 obj->tiling_mode != I915_TILING_NONE;
552
553 if (!obj->gtt_space) {
554 bool need_mappable =
555 entry->relocation_count ? true : need_fence;
556
557 ret = i915_gem_object_pin(obj,
558 entry->alignment,
559 need_mappable);
560 if (ret)
561 break;
562 }
563
564 if (has_fenced_gpu_access) {
565 if (need_fence) {
566 ret = i915_gem_object_get_fence(obj, ring);
567 if (ret)
568 break;
569 } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
570 obj->tiling_mode == I915_TILING_NONE) {
571 /* XXX pipelined! */
572 ret = i915_gem_object_put_fence(obj);
573 if (ret)
574 break;
575 }
576 obj->pending_fenced_gpu_access = need_fence;
577 }
578
579 entry->offset = obj->gtt_offset;
486 } 580 }
487 581
488err: /* Decrement pin count for bound objects */ 582 /* Decrement pin count for bound objects */
489 list_for_each_entry(obj, objects, exec_list) 583 list_for_each_entry(obj, objects, exec_list) {
490 i915_gem_execbuffer_unreserve_object(obj); 584 if (obj->gtt_space)
585 i915_gem_object_unpin(obj);
586 }
491 587
492 if (ret != -ENOSPC || retry++) 588 if (ret != -ENOSPC || retry > 1)
493 return ret; 589 return ret;
494 590
495 ret = i915_gem_evict_everything(ring->dev); 591 /* First attempt, just clear anything that is purgeable.
592 * Second attempt, clear the entire GTT.
593 */
594 ret = i915_gem_evict_everything(ring->dev, retry == 0);
496 if (ret) 595 if (ret)
497 return ret; 596 return ret;
597
598 retry++;
498 } while (1); 599 } while (1);
600
601err:
602 obj = list_entry(obj->exec_list.prev,
603 struct drm_i915_gem_object,
604 exec_list);
605 while (objects != &obj->exec_list) {
606 if (obj->gtt_space)
607 i915_gem_object_unpin(obj);
608
609 obj = list_entry(obj->exec_list.prev,
610 struct drm_i915_gem_object,
611 exec_list);
612 }
613
614 return ret;
499} 615}
500 616
501static int 617static int
@@ -565,7 +681,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
565 obj = to_intel_bo(drm_gem_object_lookup(dev, file, 681 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
566 exec[i].handle)); 682 exec[i].handle));
567 if (&obj->base == NULL) { 683 if (&obj->base == NULL) {
568 DRM_DEBUG("Invalid object handle %d at index %d\n", 684 DRM_ERROR("Invalid object handle %d at index %d\n",
569 exec[i].handle, i); 685 exec[i].handle, i);
570 ret = -ENOENT; 686 ret = -ENOENT;
571 goto err; 687 goto err;
@@ -602,6 +718,76 @@ err:
602} 718}
603 719
604static int 720static int
721i915_gem_execbuffer_flush(struct drm_device *dev,
722 uint32_t invalidate_domains,
723 uint32_t flush_domains,
724 uint32_t flush_rings)
725{
726 drm_i915_private_t *dev_priv = dev->dev_private;
727 int i, ret;
728
729 if (flush_domains & I915_GEM_DOMAIN_CPU)
730 intel_gtt_chipset_flush();
731
732 if (flush_domains & I915_GEM_DOMAIN_GTT)
733 wmb();
734
735 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
736 for (i = 0; i < I915_NUM_RINGS; i++)
737 if (flush_rings & (1 << i)) {
738 ret = i915_gem_flush_ring(&dev_priv->ring[i],
739 invalidate_domains,
740 flush_domains);
741 if (ret)
742 return ret;
743 }
744 }
745
746 return 0;
747}
748
749static int
750i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
751 struct intel_ring_buffer *to)
752{
753 struct intel_ring_buffer *from = obj->ring;
754 u32 seqno;
755 int ret, idx;
756
757 if (from == NULL || to == from)
758 return 0;
759
760 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
761 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
762 return i915_gem_object_wait_rendering(obj);
763
764 idx = intel_ring_sync_index(from, to);
765
766 seqno = obj->last_rendering_seqno;
767 if (seqno <= from->sync_seqno[idx])
768 return 0;
769
770 if (seqno == from->outstanding_lazy_request) {
771 struct drm_i915_gem_request *request;
772
773 request = kzalloc(sizeof(*request), GFP_KERNEL);
774 if (request == NULL)
775 return -ENOMEM;
776
777 ret = i915_add_request(from, NULL, request);
778 if (ret) {
779 kfree(request);
780 return ret;
781 }
782
783 seqno = request->seqno;
784 }
785
786 from->sync_seqno[idx] = seqno;
787 return intel_ring_sync(to, from, seqno - 1);
788}
789
790static int
605i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) 791i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
606{ 792{
607 u32 plane, flip_mask; 793 u32 plane, flip_mask;
@@ -633,45 +819,41 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
633 return 0; 819 return 0;
634} 820}
635 821
822
636static int 823static int
637i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 824i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
638 struct list_head *objects) 825 struct list_head *objects)
639{ 826{
640 struct drm_i915_gem_object *obj; 827 struct drm_i915_gem_object *obj;
641 uint32_t flush_domains = 0; 828 struct change_domains cd;
642 uint32_t flips = 0;
643 int ret; 829 int ret;
644 830
645 list_for_each_entry(obj, objects, exec_list) { 831 memset(&cd, 0, sizeof(cd));
646 ret = i915_gem_object_sync(obj, ring); 832 list_for_each_entry(obj, objects, exec_list)
833 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
834
835 if (cd.invalidate_domains | cd.flush_domains) {
836 ret = i915_gem_execbuffer_flush(ring->dev,
837 cd.invalidate_domains,
838 cd.flush_domains,
839 cd.flush_rings);
647 if (ret) 840 if (ret)
648 return ret; 841 return ret;
649
650 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
651 i915_gem_clflush_object(obj);
652
653 if (obj->base.pending_write_domain)
654 flips |= atomic_read(&obj->pending_flip);
655
656 flush_domains |= obj->base.write_domain;
657 } 842 }
658 843
659 if (flips) { 844 if (cd.flips) {
660 ret = i915_gem_execbuffer_wait_for_flips(ring, flips); 845 ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
661 if (ret) 846 if (ret)
662 return ret; 847 return ret;
663 } 848 }
664 849
665 if (flush_domains & I915_GEM_DOMAIN_CPU) 850 list_for_each_entry(obj, objects, exec_list) {
666 i915_gem_chipset_flush(ring->dev); 851 ret = i915_gem_execbuffer_sync_rings(obj, ring);
667 852 if (ret)
668 if (flush_domains & I915_GEM_DOMAIN_GTT) 853 return ret;
669 wmb(); 854 }
670 855
671 /* Unconditionally invalidate gpu caches and ensure that we do flush 856 return 0;
672 * any residual writes from the previous batch.
673 */
674 return intel_ring_invalidate_all_caches(ring);
675} 857}
676 858
677static bool 859static bool
@@ -704,7 +886,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
704 if (!access_ok(VERIFY_WRITE, ptr, length)) 886 if (!access_ok(VERIFY_WRITE, ptr, length))
705 return -EFAULT; 887 return -EFAULT;
706 888
707 if (fault_in_multipages_readable(ptr, length)) 889 if (fault_in_pages_readable(ptr, length))
708 return -EFAULT; 890 return -EFAULT;
709 } 891 }
710 892
@@ -713,24 +895,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
713 895
714static void 896static void
715i915_gem_execbuffer_move_to_active(struct list_head *objects, 897i915_gem_execbuffer_move_to_active(struct list_head *objects,
716 struct intel_ring_buffer *ring) 898 struct intel_ring_buffer *ring,
899 u32 seqno)
717{ 900{
718 struct drm_i915_gem_object *obj; 901 struct drm_i915_gem_object *obj;
719 902
720 list_for_each_entry(obj, objects, exec_list) { 903 list_for_each_entry(obj, objects, exec_list) {
721 u32 old_read = obj->base.read_domains; 904 u32 old_read = obj->base.read_domains;
722 u32 old_write = obj->base.write_domain; 905 u32 old_write = obj->base.write_domain;
906
723 907
724 obj->base.read_domains = obj->base.pending_read_domains; 908 obj->base.read_domains = obj->base.pending_read_domains;
725 obj->base.write_domain = obj->base.pending_write_domain; 909 obj->base.write_domain = obj->base.pending_write_domain;
726 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 910 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
727 911
728 i915_gem_object_move_to_active(obj, ring); 912 i915_gem_object_move_to_active(obj, ring, seqno);
729 if (obj->base.write_domain) { 913 if (obj->base.write_domain) {
730 obj->dirty = 1; 914 obj->dirty = 1;
731 obj->last_write_seqno = intel_ring_get_seqno(ring); 915 obj->pending_gpu_write = true;
732 if (obj->pin_count) /* check for potential scanout */ 916 list_move_tail(&obj->gpu_write_list,
733 intel_mark_fb_busy(obj); 917 &ring->gpu_write_list);
918 intel_mark_busy(ring->dev, obj);
734 } 919 }
735 920
736 trace_i915_gem_object_change_domain(obj, old_read, old_write); 921 trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -742,36 +927,29 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
742 struct drm_file *file, 927 struct drm_file *file,
743 struct intel_ring_buffer *ring) 928 struct intel_ring_buffer *ring)
744{ 929{
745 /* Unconditionally force add_request to emit a full flush. */ 930 struct drm_i915_gem_request *request;
746 ring->gpu_caches_dirty = true; 931 u32 invalidate;
747
748 /* Add a breadcrumb for the completion of the batch buffer */
749 (void)i915_add_request(ring, file, NULL);
750}
751
752static int
753i915_reset_gen7_sol_offsets(struct drm_device *dev,
754 struct intel_ring_buffer *ring)
755{
756 drm_i915_private_t *dev_priv = dev->dev_private;
757 int ret, i;
758 932
759 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) 933 /*
760 return 0; 934 * Ensure that the commands in the batch buffer are
761 935 * finished before the interrupt fires.
762 ret = intel_ring_begin(ring, 4 * 3); 936 *
763 if (ret) 937 * The sampler always gets flushed on i965 (sigh).
764 return ret; 938 */
765 939 invalidate = I915_GEM_DOMAIN_COMMAND;
766 for (i = 0; i < 4; i++) { 940 if (INTEL_INFO(dev)->gen >= 4)
767 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 941 invalidate |= I915_GEM_DOMAIN_SAMPLER;
768 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); 942 if (ring->flush(ring, invalidate, 0)) {
769 intel_ring_emit(ring, 0); 943 i915_gem_next_request_seqno(ring);
944 return;
770 } 945 }
771 946
772 intel_ring_advance(ring); 947 /* Add a breadcrumb for the completion of the batch buffer */
773 948 request = kzalloc(sizeof(*request), GFP_KERNEL);
774 return 0; 949 if (request == NULL || i915_add_request(ring, file, request)) {
950 i915_gem_next_request_seqno(ring);
951 kfree(request);
952 }
775} 953}
776 954
777static int 955static int
@@ -786,14 +964,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
786 struct drm_i915_gem_object *batch_obj; 964 struct drm_i915_gem_object *batch_obj;
787 struct drm_clip_rect *cliprects = NULL; 965 struct drm_clip_rect *cliprects = NULL;
788 struct intel_ring_buffer *ring; 966 struct intel_ring_buffer *ring;
789 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
790 u32 exec_start, exec_len; 967 u32 exec_start, exec_len;
791 u32 mask; 968 u32 seqno;
792 u32 flags;
793 int ret, mode, i; 969 int ret, mode, i;
794 970
795 if (!i915_gem_check_execbuffer(args)) { 971 if (!i915_gem_check_execbuffer(args)) {
796 DRM_DEBUG("execbuf with invalid offset/length\n"); 972 DRM_ERROR("execbuf with invalid offset/length\n");
797 return -EINVAL; 973 return -EINVAL;
798 } 974 }
799 975
@@ -801,50 +977,32 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
801 if (ret) 977 if (ret)
802 return ret; 978 return ret;
803 979
804 flags = 0;
805 if (args->flags & I915_EXEC_SECURE) {
806 if (!file->is_master || !capable(CAP_SYS_ADMIN))
807 return -EPERM;
808
809 flags |= I915_DISPATCH_SECURE;
810 }
811 if (args->flags & I915_EXEC_IS_PINNED)
812 flags |= I915_DISPATCH_PINNED;
813
814 switch (args->flags & I915_EXEC_RING_MASK) { 980 switch (args->flags & I915_EXEC_RING_MASK) {
815 case I915_EXEC_DEFAULT: 981 case I915_EXEC_DEFAULT:
816 case I915_EXEC_RENDER: 982 case I915_EXEC_RENDER:
817 ring = &dev_priv->ring[RCS]; 983 ring = &dev_priv->ring[RCS];
818 break; 984 break;
819 case I915_EXEC_BSD: 985 case I915_EXEC_BSD:
820 ring = &dev_priv->ring[VCS]; 986 if (!HAS_BSD(dev)) {
821 if (ctx_id != 0) { 987 DRM_ERROR("execbuf with invalid ring (BSD)\n");
822 DRM_DEBUG("Ring %s doesn't support contexts\n", 988 return -EINVAL;
823 ring->name);
824 return -EPERM;
825 } 989 }
990 ring = &dev_priv->ring[VCS];
826 break; 991 break;
827 case I915_EXEC_BLT: 992 case I915_EXEC_BLT:
828 ring = &dev_priv->ring[BCS]; 993 if (!HAS_BLT(dev)) {
829 if (ctx_id != 0) { 994 DRM_ERROR("execbuf with invalid ring (BLT)\n");
830 DRM_DEBUG("Ring %s doesn't support contexts\n", 995 return -EINVAL;
831 ring->name);
832 return -EPERM;
833 } 996 }
997 ring = &dev_priv->ring[BCS];
834 break; 998 break;
835 default: 999 default:
836 DRM_DEBUG("execbuf with unknown ring: %d\n", 1000 DRM_ERROR("execbuf with unknown ring: %d\n",
837 (int)(args->flags & I915_EXEC_RING_MASK));
838 return -EINVAL;
839 }
840 if (!intel_ring_initialized(ring)) {
841 DRM_DEBUG("execbuf with invalid ring: %d\n",
842 (int)(args->flags & I915_EXEC_RING_MASK)); 1001 (int)(args->flags & I915_EXEC_RING_MASK));
843 return -EINVAL; 1002 return -EINVAL;
844 } 1003 }
845 1004
846 mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1005 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
847 mask = I915_EXEC_CONSTANTS_MASK;
848 switch (mode) { 1006 switch (mode) {
849 case I915_EXEC_CONSTANTS_REL_GENERAL: 1007 case I915_EXEC_CONSTANTS_REL_GENERAL:
850 case I915_EXEC_CONSTANTS_ABSOLUTE: 1008 case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -858,35 +1016,33 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
858 mode == I915_EXEC_CONSTANTS_REL_SURFACE) 1016 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
859 return -EINVAL; 1017 return -EINVAL;
860 1018
861 /* The HW changed the meaning on this bit on gen6 */ 1019 ret = intel_ring_begin(ring, 4);
862 if (INTEL_INFO(dev)->gen >= 6) 1020 if (ret)
863 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; 1021 return ret;
1022
1023 intel_ring_emit(ring, MI_NOOP);
1024 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1025 intel_ring_emit(ring, INSTPM);
1026 intel_ring_emit(ring,
1027 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1028 intel_ring_advance(ring);
1029
1030 dev_priv->relative_constants_mode = mode;
864 } 1031 }
865 break; 1032 break;
866 default: 1033 default:
867 DRM_DEBUG("execbuf with unknown constants: %d\n", mode); 1034 DRM_ERROR("execbuf with unknown constants: %d\n", mode);
868 return -EINVAL; 1035 return -EINVAL;
869 } 1036 }
870 1037
871 if (args->buffer_count < 1) { 1038 if (args->buffer_count < 1) {
872 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); 1039 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
873 return -EINVAL; 1040 return -EINVAL;
874 } 1041 }
875 1042
876 if (args->num_cliprects != 0) { 1043 if (args->num_cliprects != 0) {
877 if (ring != &dev_priv->ring[RCS]) { 1044 if (ring != &dev_priv->ring[RCS]) {
878 DRM_DEBUG("clip rectangles are only valid with the render ring\n"); 1045 DRM_ERROR("clip rectangles are only valid with the render ring\n");
879 return -EINVAL;
880 }
881
882 if (INTEL_INFO(dev)->gen >= 5) {
883 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
884 return -EINVAL;
885 }
886
887 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
888 DRM_DEBUG("execbuf with %u cliprects\n",
889 args->num_cliprects);
890 return -EINVAL; 1046 return -EINVAL;
891 } 1047 }
892 1048
@@ -931,7 +1087,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
931 obj = to_intel_bo(drm_gem_object_lookup(dev, file, 1087 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
932 exec[i].handle)); 1088 exec[i].handle));
933 if (&obj->base == NULL) { 1089 if (&obj->base == NULL) {
934 DRM_DEBUG("Invalid object handle %d at index %d\n", 1090 DRM_ERROR("Invalid object handle %d at index %d\n",
935 exec[i].handle, i); 1091 exec[i].handle, i);
936 /* prevent error path from reading uninitialized data */ 1092 /* prevent error path from reading uninitialized data */
937 ret = -ENOENT; 1093 ret = -ENOENT;
@@ -939,7 +1095,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
939 } 1095 }
940 1096
941 if (!list_empty(&obj->exec_list)) { 1097 if (!list_empty(&obj->exec_list)) {
942 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 1098 DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
943 obj, exec[i].handle, i); 1099 obj, exec[i].handle, i);
944 ret = -EINVAL; 1100 ret = -EINVAL;
945 goto err; 1101 goto err;
@@ -977,47 +1133,32 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
977 1133
978 /* Set the pending read domains for the batch buffer to COMMAND */ 1134 /* Set the pending read domains for the batch buffer to COMMAND */
979 if (batch_obj->base.pending_write_domain) { 1135 if (batch_obj->base.pending_write_domain) {
980 DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); 1136 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
981 ret = -EINVAL; 1137 ret = -EINVAL;
982 goto err; 1138 goto err;
983 } 1139 }
984 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 1140 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
985 1141
986 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
987 * batch" bit. Hence we need to pin secure batches into the global gtt.
988 * hsw should have this fixed, but let's be paranoid and do it
989 * unconditionally for now. */
990 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
991 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
992
993 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); 1142 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
994 if (ret) 1143 if (ret)
995 goto err; 1144 goto err;
996 1145
997 ret = i915_switch_context(ring, file, ctx_id); 1146 seqno = i915_gem_next_request_seqno(ring);
998 if (ret) 1147 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
999 goto err; 1148 if (seqno < ring->sync_seqno[i]) {
1000 1149 /* The GPU can not handle its semaphore value wrapping,
1001 if (ring == &dev_priv->ring[RCS] && 1150 * so every billion or so execbuffers, we need to stall
1002 mode != dev_priv->relative_constants_mode) { 1151 * the GPU in order to reset the counters.
1003 ret = intel_ring_begin(ring, 4); 1152 */
1004 if (ret) 1153 ret = i915_gpu_idle(dev);
1154 if (ret)
1005 goto err; 1155 goto err;
1006 1156
1007 intel_ring_emit(ring, MI_NOOP); 1157 BUG_ON(ring->sync_seqno[i]);
1008 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 1158 }
1009 intel_ring_emit(ring, INSTPM);
1010 intel_ring_emit(ring, mask << 16 | mode);
1011 intel_ring_advance(ring);
1012
1013 dev_priv->relative_constants_mode = mode;
1014 } 1159 }
1015 1160
1016 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1161 trace_i915_gem_ring_dispatch(ring, seqno);
1017 ret = i915_reset_gen7_sol_offsets(dev, ring);
1018 if (ret)
1019 goto err;
1020 }
1021 1162
1022 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1163 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1023 exec_len = args->batch_len; 1164 exec_len = args->batch_len;
@@ -1029,22 +1170,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1029 goto err; 1170 goto err;
1030 1171
1031 ret = ring->dispatch_execbuffer(ring, 1172 ret = ring->dispatch_execbuffer(ring,
1032 exec_start, exec_len, 1173 exec_start, exec_len);
1033 flags);
1034 if (ret) 1174 if (ret)
1035 goto err; 1175 goto err;
1036 } 1176 }
1037 } else { 1177 } else {
1038 ret = ring->dispatch_execbuffer(ring, 1178 ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
1039 exec_start, exec_len,
1040 flags);
1041 if (ret) 1179 if (ret)
1042 goto err; 1180 goto err;
1043 } 1181 }
1044 1182
1045 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1183 i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
1046
1047 i915_gem_execbuffer_move_to_active(&objects, ring);
1048 i915_gem_execbuffer_retire_commands(dev, file, ring); 1184 i915_gem_execbuffer_retire_commands(dev, file, ring);
1049 1185
1050err: 1186err:
@@ -1081,7 +1217,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1081 int ret, i; 1217 int ret, i;
1082 1218
1083 if (args->buffer_count < 1) { 1219 if (args->buffer_count < 1) {
1084 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); 1220 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1085 return -EINVAL; 1221 return -EINVAL;
1086 } 1222 }
1087 1223
@@ -1089,17 +1225,18 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1089 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); 1225 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1090 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); 1226 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1091 if (exec_list == NULL || exec2_list == NULL) { 1227 if (exec_list == NULL || exec2_list == NULL) {
1092 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 1228 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1093 args->buffer_count); 1229 args->buffer_count);
1094 drm_free_large(exec_list); 1230 drm_free_large(exec_list);
1095 drm_free_large(exec2_list); 1231 drm_free_large(exec2_list);
1096 return -ENOMEM; 1232 return -ENOMEM;
1097 } 1233 }
1098 ret = copy_from_user(exec_list, 1234 ret = copy_from_user(exec_list,
1099 (void __user *)(uintptr_t)args->buffers_ptr, 1235 (struct drm_i915_relocation_entry __user *)
1236 (uintptr_t) args->buffers_ptr,
1100 sizeof(*exec_list) * args->buffer_count); 1237 sizeof(*exec_list) * args->buffer_count);
1101 if (ret != 0) { 1238 if (ret != 0) {
1102 DRM_DEBUG("copy %d exec entries failed %d\n", 1239 DRM_ERROR("copy %d exec entries failed %d\n",
1103 args->buffer_count, ret); 1240 args->buffer_count, ret);
1104 drm_free_large(exec_list); 1241 drm_free_large(exec_list);
1105 drm_free_large(exec2_list); 1242 drm_free_large(exec2_list);
@@ -1127,7 +1264,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1127 exec2.num_cliprects = args->num_cliprects; 1264 exec2.num_cliprects = args->num_cliprects;
1128 exec2.cliprects_ptr = args->cliprects_ptr; 1265 exec2.cliprects_ptr = args->cliprects_ptr;
1129 exec2.flags = I915_EXEC_RENDER; 1266 exec2.flags = I915_EXEC_RENDER;
1130 i915_execbuffer2_set_context_id(exec2, 0);
1131 1267
1132 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1268 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1133 if (!ret) { 1269 if (!ret) {
@@ -1135,12 +1271,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1135 for (i = 0; i < args->buffer_count; i++) 1271 for (i = 0; i < args->buffer_count; i++)
1136 exec_list[i].offset = exec2_list[i].offset; 1272 exec_list[i].offset = exec2_list[i].offset;
1137 /* ... and back out to userspace */ 1273 /* ... and back out to userspace */
1138 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1274 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1275 (uintptr_t) args->buffers_ptr,
1139 exec_list, 1276 exec_list,
1140 sizeof(*exec_list) * args->buffer_count); 1277 sizeof(*exec_list) * args->buffer_count);
1141 if (ret) { 1278 if (ret) {
1142 ret = -EFAULT; 1279 ret = -EFAULT;
1143 DRM_DEBUG("failed to copy %d exec entries " 1280 DRM_ERROR("failed to copy %d exec entries "
1144 "back to user (%d)\n", 1281 "back to user (%d)\n",
1145 args->buffer_count, ret); 1282 args->buffer_count, ret);
1146 } 1283 }
@@ -1159,9 +1296,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1159 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1296 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1160 int ret; 1297 int ret;
1161 1298
1162 if (args->buffer_count < 1 || 1299 if (args->buffer_count < 1) {
1163 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { 1300 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
1164 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1165 return -EINVAL; 1301 return -EINVAL;
1166 } 1302 }
1167 1303
@@ -1171,7 +1307,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1171 exec2_list = drm_malloc_ab(sizeof(*exec2_list), 1307 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1172 args->buffer_count); 1308 args->buffer_count);
1173 if (exec2_list == NULL) { 1309 if (exec2_list == NULL) {
1174 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 1310 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1175 args->buffer_count); 1311 args->buffer_count);
1176 return -ENOMEM; 1312 return -ENOMEM;
1177 } 1313 }
@@ -1180,7 +1316,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1180 (uintptr_t) args->buffers_ptr, 1316 (uintptr_t) args->buffers_ptr,
1181 sizeof(*exec2_list) * args->buffer_count); 1317 sizeof(*exec2_list) * args->buffer_count);
1182 if (ret != 0) { 1318 if (ret != 0) {
1183 DRM_DEBUG("copy %d exec entries failed %d\n", 1319 DRM_ERROR("copy %d exec entries failed %d\n",
1184 args->buffer_count, ret); 1320 args->buffer_count, ret);
1185 drm_free_large(exec2_list); 1321 drm_free_large(exec2_list);
1186 return -EFAULT; 1322 return -EFAULT;
@@ -1189,12 +1325,13 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1189 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1325 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1190 if (!ret) { 1326 if (!ret) {
1191 /* Copy the new buffer offsets back to the user's exec list. */ 1327 /* Copy the new buffer offsets back to the user's exec list. */
1192 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1328 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1329 (uintptr_t) args->buffers_ptr,
1193 exec2_list, 1330 exec2_list,
1194 sizeof(*exec2_list) * args->buffer_count); 1331 sizeof(*exec2_list) * args->buffer_count);
1195 if (ret) { 1332 if (ret) {
1196 ret = -EFAULT; 1333 ret = -EFAULT;
1197 DRM_DEBUG("failed to copy %d exec entries " 1334 DRM_ERROR("failed to copy %d exec entries "
1198 "back to user (%d)\n", 1335 "back to user (%d)\n",
1199 args->buffer_count, ret); 1336 args->buffer_count, ret);
1200 } 1337 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a..7a709cd8d54 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -22,369 +22,31 @@
22 * 22 *
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include "drmP.h"
26#include <drm/i915_drm.h> 26#include "drm.h"
27#include "i915_drm.h"
27#include "i915_drv.h" 28#include "i915_drv.h"
28#include "i915_trace.h" 29#include "i915_trace.h"
29#include "intel_drv.h" 30#include "intel_drv.h"
30 31
31typedef uint32_t gtt_pte_t; 32/* XXX kill agp_type! */
32 33static unsigned int cache_level_to_agp_type(struct drm_device *dev,
33/* PPGTT stuff */ 34 enum i915_cache_level cache_level)
34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
35
36#define GEN6_PDE_VALID (1 << 0)
37/* gen6+ has bit 11-4 for physical addr bit 39-32 */
38#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
39
40#define GEN6_PTE_VALID (1 << 0)
41#define GEN6_PTE_UNCACHED (1 << 1)
42#define HSW_PTE_UNCACHED (0)
43#define GEN6_PTE_CACHE_LLC (2 << 1)
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46
47static inline gtt_pte_t pte_encode(struct drm_device *dev,
48 dma_addr_t addr,
49 enum i915_cache_level level)
50{ 35{
51 gtt_pte_t pte = GEN6_PTE_VALID; 36 switch (cache_level) {
52 pte |= GEN6_PTE_ADDR_ENCODE(addr);
53
54 switch (level) {
55 case I915_CACHE_LLC_MLC: 37 case I915_CACHE_LLC_MLC:
56 /* Haswell doesn't set L3 this way */ 38 if (INTEL_INFO(dev)->gen >= 6)
57 if (IS_HASWELL(dev)) 39 return AGP_USER_CACHED_MEMORY_LLC_MLC;
58 pte |= GEN6_PTE_CACHE_LLC; 40 /* Older chipsets do not have this extra level of CPU
59 else 41 * cacheing, so fallthrough and request the PTE simply
60 pte |= GEN6_PTE_CACHE_LLC_MLC; 42 * as cached.
61 break; 43 */
62 case I915_CACHE_LLC: 44 case I915_CACHE_LLC:
63 pte |= GEN6_PTE_CACHE_LLC; 45 return AGP_USER_CACHED_MEMORY;
64 break;
65 case I915_CACHE_NONE:
66 if (IS_HASWELL(dev))
67 pte |= HSW_PTE_UNCACHED;
68 else
69 pte |= GEN6_PTE_UNCACHED;
70 break;
71 default: 46 default:
72 BUG(); 47 case I915_CACHE_NONE:
73 } 48 return AGP_USER_MEMORY;
74
75
76 return pte;
77}
78
79/* PPGTT support for Sandybdrige/Gen6 and later */
80static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
81 unsigned first_entry,
82 unsigned num_entries)
83{
84 gtt_pte_t *pt_vaddr;
85 gtt_pte_t scratch_pte;
86 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
88 unsigned last_pte, i;
89
90 scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
91 I915_CACHE_LLC);
92
93 while (num_entries) {
94 last_pte = first_pte + num_entries;
95 if (last_pte > I915_PPGTT_PT_ENTRIES)
96 last_pte = I915_PPGTT_PT_ENTRIES;
97
98 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
99
100 for (i = first_pte; i < last_pte; i++)
101 pt_vaddr[i] = scratch_pte;
102
103 kunmap_atomic(pt_vaddr);
104
105 num_entries -= last_pte - first_pte;
106 first_pte = 0;
107 act_pd++;
108 }
109}
110
111int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
112{
113 struct drm_i915_private *dev_priv = dev->dev_private;
114 struct i915_hw_ppgtt *ppgtt;
115 unsigned first_pd_entry_in_global_pt;
116 int i;
117 int ret = -ENOMEM;
118
119 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
120 * entries. For aliasing ppgtt support we just steal them at the end for
121 * now. */
122 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
123
124 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
125 if (!ppgtt)
126 return ret;
127
128 ppgtt->dev = dev;
129 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
130 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
131 GFP_KERNEL);
132 if (!ppgtt->pt_pages)
133 goto err_ppgtt;
134
135 for (i = 0; i < ppgtt->num_pd_entries; i++) {
136 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
137 if (!ppgtt->pt_pages[i])
138 goto err_pt_alloc;
139 }
140
141 if (dev_priv->mm.gtt->needs_dmar) {
142 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
143 *ppgtt->num_pd_entries,
144 GFP_KERNEL);
145 if (!ppgtt->pt_dma_addr)
146 goto err_pt_alloc;
147
148 for (i = 0; i < ppgtt->num_pd_entries; i++) {
149 dma_addr_t pt_addr;
150
151 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
152 0, 4096,
153 PCI_DMA_BIDIRECTIONAL);
154
155 if (pci_dma_mapping_error(dev->pdev,
156 pt_addr)) {
157 ret = -EIO;
158 goto err_pd_pin;
159
160 }
161 ppgtt->pt_dma_addr[i] = pt_addr;
162 }
163 }
164
165 ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
166
167 i915_ppgtt_clear_range(ppgtt, 0,
168 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
169
170 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
171
172 dev_priv->mm.aliasing_ppgtt = ppgtt;
173
174 return 0;
175
176err_pd_pin:
177 if (ppgtt->pt_dma_addr) {
178 for (i--; i >= 0; i--)
179 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
180 4096, PCI_DMA_BIDIRECTIONAL);
181 }
182err_pt_alloc:
183 kfree(ppgtt->pt_dma_addr);
184 for (i = 0; i < ppgtt->num_pd_entries; i++) {
185 if (ppgtt->pt_pages[i])
186 __free_page(ppgtt->pt_pages[i]);
187 }
188 kfree(ppgtt->pt_pages);
189err_ppgtt:
190 kfree(ppgtt);
191
192 return ret;
193}
194
195void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
196{
197 struct drm_i915_private *dev_priv = dev->dev_private;
198 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
199 int i;
200
201 if (!ppgtt)
202 return;
203
204 if (ppgtt->pt_dma_addr) {
205 for (i = 0; i < ppgtt->num_pd_entries; i++)
206 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
207 4096, PCI_DMA_BIDIRECTIONAL);
208 }
209
210 kfree(ppgtt->pt_dma_addr);
211 for (i = 0; i < ppgtt->num_pd_entries; i++)
212 __free_page(ppgtt->pt_pages[i]);
213 kfree(ppgtt->pt_pages);
214 kfree(ppgtt);
215}
216
217static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
218 const struct sg_table *pages,
219 unsigned first_entry,
220 enum i915_cache_level cache_level)
221{
222 gtt_pte_t *pt_vaddr;
223 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
224 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
225 unsigned i, j, m, segment_len;
226 dma_addr_t page_addr;
227 struct scatterlist *sg;
228
229 /* init sg walking */
230 sg = pages->sgl;
231 i = 0;
232 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
233 m = 0;
234
235 while (i < pages->nents) {
236 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
237
238 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
239 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
240 pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
241 cache_level);
242
243 /* grab the next page */
244 if (++m == segment_len) {
245 if (++i == pages->nents)
246 break;
247
248 sg = sg_next(sg);
249 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
250 m = 0;
251 }
252 }
253
254 kunmap_atomic(pt_vaddr);
255
256 first_pte = 0;
257 act_pd++;
258 }
259}
260
261void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
262 struct drm_i915_gem_object *obj,
263 enum i915_cache_level cache_level)
264{
265 i915_ppgtt_insert_sg_entries(ppgtt,
266 obj->pages,
267 obj->gtt_space->start >> PAGE_SHIFT,
268 cache_level);
269}
270
271void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
272 struct drm_i915_gem_object *obj)
273{
274 i915_ppgtt_clear_range(ppgtt,
275 obj->gtt_space->start >> PAGE_SHIFT,
276 obj->base.size >> PAGE_SHIFT);
277}
278
279void i915_gem_init_ppgtt(struct drm_device *dev)
280{
281 drm_i915_private_t *dev_priv = dev->dev_private;
282 uint32_t pd_offset;
283 struct intel_ring_buffer *ring;
284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
285 uint32_t __iomem *pd_addr;
286 uint32_t pd_entry;
287 int i;
288
289 if (!dev_priv->mm.aliasing_ppgtt)
290 return;
291
292
293 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
294 for (i = 0; i < ppgtt->num_pd_entries; i++) {
295 dma_addr_t pt_addr;
296
297 if (dev_priv->mm.gtt->needs_dmar)
298 pt_addr = ppgtt->pt_dma_addr[i];
299 else
300 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
301
302 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
303 pd_entry |= GEN6_PDE_VALID;
304
305 writel(pd_entry, pd_addr + i);
306 }
307 readl(pd_addr);
308
309 pd_offset = ppgtt->pd_offset;
310 pd_offset /= 64; /* in cachelines, */
311 pd_offset <<= 16;
312
313 if (INTEL_INFO(dev)->gen == 6) {
314 uint32_t ecochk, gab_ctl, ecobits;
315
316 ecobits = I915_READ(GAC_ECO_BITS);
317 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
318
319 gab_ctl = I915_READ(GAB_CTL);
320 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
321
322 ecochk = I915_READ(GAM_ECOCHK);
323 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
324 ECOCHK_PPGTT_CACHE64B);
325 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
326 } else if (INTEL_INFO(dev)->gen >= 7) {
327 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
328 /* GFX_MODE is per-ring on gen7+ */
329 }
330
331 for_each_ring(ring, dev_priv, i) {
332 if (INTEL_INFO(dev)->gen >= 7)
333 I915_WRITE(RING_MODE_GEN7(ring),
334 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
335
336 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
337 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
338 }
339}
340
341static bool do_idling(struct drm_i915_private *dev_priv)
342{
343 bool ret = dev_priv->mm.interruptible;
344
345 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
346 dev_priv->mm.interruptible = false;
347 if (i915_gpu_idle(dev_priv->dev)) {
348 DRM_ERROR("Couldn't idle GPU\n");
349 /* Wait a bit, in hopes it avoids the hang */
350 udelay(10);
351 }
352 }
353
354 return ret;
355}
356
357static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
358{
359 if (unlikely(dev_priv->mm.gtt->do_idle_maps))
360 dev_priv->mm.interruptible = interruptible;
361}
362
363
364static void i915_ggtt_clear_range(struct drm_device *dev,
365 unsigned first_entry,
366 unsigned num_entries)
367{
368 struct drm_i915_private *dev_priv = dev->dev_private;
369 gtt_pte_t scratch_pte;
370 gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
371 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
372 int i;
373
374 if (INTEL_INFO(dev)->gen < 6) {
375 intel_gtt_clear_range(first_entry, num_entries);
376 return;
377 } 49 }
378
379 if (WARN(num_entries > max_entries,
380 "First entry = %d; Num entries = %d (max=%d)\n",
381 first_entry, num_entries, max_entries))
382 num_entries = max_entries;
383
384 scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
385 for (i = 0; i < num_entries; i++)
386 iowrite32(scratch_pte, &gtt_base[i]);
387 readl(gtt_base);
388} 50}
389 51
390void i915_gem_restore_gtt_mappings(struct drm_device *dev) 52void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -393,316 +55,73 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
393 struct drm_i915_gem_object *obj; 55 struct drm_i915_gem_object *obj;
394 56
395 /* First fill our portion of the GTT with scratch pages */ 57 /* First fill our portion of the GTT with scratch pages */
396 i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, 58 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
397 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 59 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
398 60
399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 61 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
400 i915_gem_clflush_object(obj); 62 i915_gem_clflush_object(obj);
401 i915_gem_gtt_bind_object(obj, obj->cache_level); 63 i915_gem_gtt_rebind_object(obj, obj->cache_level);
402 } 64 }
403 65
404 i915_gem_chipset_flush(dev); 66 intel_gtt_chipset_flush();
405} 67}
406 68
407int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 69int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
408{ 70{
409 if (obj->has_dma_mapping) 71 struct drm_device *dev = obj->base.dev;
410 return 0; 72 struct drm_i915_private *dev_priv = dev->dev_private;
73 unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
74 int ret;
411 75
412 if (!dma_map_sg(&obj->base.dev->pdev->dev, 76 if (dev_priv->mm.gtt->needs_dmar) {
413 obj->pages->sgl, obj->pages->nents, 77 ret = intel_gtt_map_memory(obj->pages,
414 PCI_DMA_BIDIRECTIONAL)) 78 obj->base.size >> PAGE_SHIFT,
415 return -ENOSPC; 79 &obj->sg_list,
80 &obj->num_sg);
81 if (ret != 0)
82 return ret;
83
84 intel_gtt_insert_sg_entries(obj->sg_list,
85 obj->num_sg,
86 obj->gtt_space->start >> PAGE_SHIFT,
87 agp_type);
88 } else
89 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
90 obj->base.size >> PAGE_SHIFT,
91 obj->pages,
92 agp_type);
416 93
417 return 0; 94 return 0;
418} 95}
419 96
420/* 97void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
421 * Binds an object into the global gtt with the specified cache level. The object 98 enum i915_cache_level cache_level)
422 * will be accessible to the GPU via commands whose operands reference offsets
423 * within the global GTT as well as accessible by the GPU through the GMADR
424 * mapped BAR (dev_priv->mm.gtt->gtt).
425 */
426static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
427 enum i915_cache_level level)
428{ 99{
429 struct drm_device *dev = obj->base.dev; 100 struct drm_device *dev = obj->base.dev;
430 struct drm_i915_private *dev_priv = dev->dev_private; 101 struct drm_i915_private *dev_priv = dev->dev_private;
431 struct sg_table *st = obj->pages; 102 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
432 struct scatterlist *sg = st->sgl;
433 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
434 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
435 gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
436 int unused, i = 0;
437 unsigned int len, m = 0;
438 dma_addr_t addr;
439
440 for_each_sg(st->sgl, sg, st->nents, unused) {
441 len = sg_dma_len(sg) >> PAGE_SHIFT;
442 for (m = 0; m < len; m++) {
443 addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
444 iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
445 i++;
446 }
447 }
448 103
449 BUG_ON(i > max_entries); 104 if (dev_priv->mm.gtt->needs_dmar) {
450 BUG_ON(i != obj->base.size / PAGE_SIZE); 105 BUG_ON(!obj->sg_list);
451
452 /* XXX: This serves as a posting read to make sure that the PTE has
453 * actually been updated. There is some concern that even though
454 * registers and PTEs are within the same BAR that they are potentially
455 * of NUMA access patterns. Therefore, even with the way we assume
456 * hardware should work, we must keep this posting read for paranoia.
457 */
458 if (i != 0)
459 WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
460
461 /* This next bit makes the above posting read even more important. We
462 * want to flush the TLBs only after we're certain all the PTE updates
463 * have finished.
464 */
465 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
466 POSTING_READ(GFX_FLSH_CNTL_GEN6);
467}
468 106
469void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 107 intel_gtt_insert_sg_entries(obj->sg_list,
470 enum i915_cache_level cache_level) 108 obj->num_sg,
471{
472 struct drm_device *dev = obj->base.dev;
473 if (INTEL_INFO(dev)->gen < 6) {
474 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
475 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
476 intel_gtt_insert_sg_entries(obj->pages,
477 obj->gtt_space->start >> PAGE_SHIFT, 109 obj->gtt_space->start >> PAGE_SHIFT,
478 flags); 110 agp_type);
479 } else { 111 } else
480 gen6_ggtt_bind_object(obj, cache_level); 112 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
481 } 113 obj->base.size >> PAGE_SHIFT,
482 114 obj->pages,
483 obj->has_global_gtt_mapping = 1; 115 agp_type);
484} 116}
485 117
486void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 118void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
487{ 119{
488 i915_ggtt_clear_range(obj->base.dev, 120 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
489 obj->gtt_space->start >> PAGE_SHIFT,
490 obj->base.size >> PAGE_SHIFT); 121 obj->base.size >> PAGE_SHIFT);
491 122
492 obj->has_global_gtt_mapping = 0; 123 if (obj->sg_list) {
493} 124 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
494 125 obj->sg_list = NULL;
495void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
496{
497 struct drm_device *dev = obj->base.dev;
498 struct drm_i915_private *dev_priv = dev->dev_private;
499 bool interruptible;
500
501 interruptible = do_idling(dev_priv);
502
503 if (!obj->has_dma_mapping)
504 dma_unmap_sg(&dev->pdev->dev,
505 obj->pages->sgl, obj->pages->nents,
506 PCI_DMA_BIDIRECTIONAL);
507
508 undo_idling(dev_priv, interruptible);
509}
510
511static void i915_gtt_color_adjust(struct drm_mm_node *node,
512 unsigned long color,
513 unsigned long *start,
514 unsigned long *end)
515{
516 if (node->color != color)
517 *start += 4096;
518
519 if (!list_empty(&node->node_list)) {
520 node = list_entry(node->node_list.next,
521 struct drm_mm_node,
522 node_list);
523 if (node->allocated && node->color != color)
524 *end -= 4096;
525 } 126 }
526} 127}
527
528void i915_gem_init_global_gtt(struct drm_device *dev,
529 unsigned long start,
530 unsigned long mappable_end,
531 unsigned long end)
532{
533 drm_i915_private_t *dev_priv = dev->dev_private;
534
535 /* Substract the guard page ... */
536 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
537 if (!HAS_LLC(dev))
538 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
539
540 dev_priv->mm.gtt_start = start;
541 dev_priv->mm.gtt_mappable_end = mappable_end;
542 dev_priv->mm.gtt_end = end;
543 dev_priv->mm.gtt_total = end - start;
544 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
545
546 /* ... but ensure that we clear the entire range. */
547 i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
548}
549
550static int setup_scratch_page(struct drm_device *dev)
551{
552 struct drm_i915_private *dev_priv = dev->dev_private;
553 struct page *page;
554 dma_addr_t dma_addr;
555
556 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
557 if (page == NULL)
558 return -ENOMEM;
559 get_page(page);
560 set_pages_uc(page, 1);
561
562#ifdef CONFIG_INTEL_IOMMU
563 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
564 PCI_DMA_BIDIRECTIONAL);
565 if (pci_dma_mapping_error(dev->pdev, dma_addr))
566 return -EINVAL;
567#else
568 dma_addr = page_to_phys(page);
569#endif
570 dev_priv->mm.gtt->scratch_page = page;
571 dev_priv->mm.gtt->scratch_page_dma = dma_addr;
572
573 return 0;
574}
575
576static void teardown_scratch_page(struct drm_device *dev)
577{
578 struct drm_i915_private *dev_priv = dev->dev_private;
579 set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
580 pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
581 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
582 put_page(dev_priv->mm.gtt->scratch_page);
583 __free_page(dev_priv->mm.gtt->scratch_page);
584}
585
586static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
587{
588 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
589 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
590 return snb_gmch_ctl << 20;
591}
592
593static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
594{
595 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
596 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
597 return snb_gmch_ctl << 25; /* 32 MB units */
598}
599
600static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
601{
602 static const int stolen_decoder[] = {
603 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
604 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
605 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
606 return stolen_decoder[snb_gmch_ctl] << 20;
607}
608
609int i915_gem_gtt_init(struct drm_device *dev)
610{
611 struct drm_i915_private *dev_priv = dev->dev_private;
612 phys_addr_t gtt_bus_addr;
613 u16 snb_gmch_ctl;
614 int ret;
615
616 /* On modern platforms we need not worry ourself with the legacy
617 * hostbridge query stuff. Skip it entirely
618 */
619 if (INTEL_INFO(dev)->gen < 6) {
620 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
621 if (!ret) {
622 DRM_ERROR("failed to set up gmch\n");
623 return -EIO;
624 }
625
626 dev_priv->mm.gtt = intel_gtt_get();
627 if (!dev_priv->mm.gtt) {
628 DRM_ERROR("Failed to initialize GTT\n");
629 intel_gmch_remove();
630 return -ENODEV;
631 }
632 return 0;
633 }
634
635 dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
636 if (!dev_priv->mm.gtt)
637 return -ENOMEM;
638
639 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
640 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
641
642#ifdef CONFIG_INTEL_IOMMU
643 dev_priv->mm.gtt->needs_dmar = 1;
644#endif
645
646 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
647 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
648 dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
649
650 /* i9xx_setup */
651 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
652 dev_priv->mm.gtt->gtt_total_entries =
653 gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
654 if (INTEL_INFO(dev)->gen < 7)
655 dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
656 else
657 dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
658
659 dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
660 /* 64/512MB is the current min/max we actually know of, but this is just a
661 * coarse sanity check.
662 */
663 if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
664 dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
665 DRM_ERROR("Unknown GMADR entries (%d)\n",
666 dev_priv->mm.gtt->gtt_mappable_entries);
667 ret = -ENXIO;
668 goto err_out;
669 }
670
671 ret = setup_scratch_page(dev);
672 if (ret) {
673 DRM_ERROR("Scratch setup failed\n");
674 goto err_out;
675 }
676
677 dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
678 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
679 if (!dev_priv->mm.gtt->gtt) {
680 DRM_ERROR("Failed to map the gtt page table\n");
681 teardown_scratch_page(dev);
682 ret = -ENOMEM;
683 goto err_out;
684 }
685
686 /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
687 DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
688 DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
689 DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
690
691 return 0;
692
693err_out:
694 kfree(dev_priv->mm.gtt);
695 if (INTEL_INFO(dev)->gen < 6)
696 intel_gmch_remove();
697 return ret;
698}
699
700void i915_gem_gtt_fini(struct drm_device *dev)
701{
702 struct drm_i915_private *dev_priv = dev->dev_private;
703 iounmap(dev_priv->mm.gtt->gtt);
704 teardown_scratch_page(dev);
705 if (INTEL_INFO(dev)->gen < 6)
706 intel_gmch_remove();
707 kfree(dev_priv->mm.gtt);
708}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
deleted file mode 100644
index 8e91083b126..00000000000
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h>
31#include "i915_drv.h"
32
33/*
34 * The BIOS typically reserves some of the system's memory for the exclusive
35 * use of the integrated graphics. This memory is no longer available for
36 * use by the OS and so the user finds that his system has less memory
37 * available than he put in. We refer to this memory as stolen.
38 *
39 * The BIOS will allocate its framebuffer from the stolen memory. Our
40 * goal is try to reuse that object for our own fbcon which must always
41 * be available for panics. Anything else we can reuse the stolen memory
42 * for is a boon.
43 */
44
45#define PTE_ADDRESS_MASK 0xfffff000
46#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
47#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
48#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
49#define PTE_MAPPING_TYPE_CACHED (3 << 1)
50#define PTE_MAPPING_TYPE_MASK (3 << 1)
51#define PTE_VALID (1 << 0)
52
53/**
54 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
55 * a physical one
56 * @dev: drm device
57 * @offset: address to translate
58 *
59 * Some chip functions require allocations from stolen space and need the
60 * physical address of the memory in question.
61 */
62static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
63{
64 struct drm_i915_private *dev_priv = dev->dev_private;
65 struct pci_dev *pdev = dev_priv->bridge_dev;
66 u32 base;
67
68#if 0
69 /* On the machines I have tested the Graphics Base of Stolen Memory
70 * is unreliable, so compute the base by subtracting the stolen memory
71 * from the Top of Low Usable DRAM which is where the BIOS places
72 * the graphics stolen memory.
73 */
74 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
75 /* top 32bits are reserved = 0 */
76 pci_read_config_dword(pdev, 0xA4, &base);
77 } else {
78 /* XXX presume 8xx is the same as i915 */
79 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
80 }
81#else
82 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
83 u16 val;
84 pci_read_config_word(pdev, 0xb0, &val);
85 base = val >> 4 << 20;
86 } else {
87 u8 val;
88 pci_read_config_byte(pdev, 0x9c, &val);
89 base = val >> 3 << 27;
90 }
91 base -= dev_priv->mm.gtt->stolen_size;
92#endif
93
94 return base + offset;
95}
96
97static void i915_warn_stolen(struct drm_device *dev)
98{
99 DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
100 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
101}
102
103static void i915_setup_compression(struct drm_device *dev, int size)
104{
105 struct drm_i915_private *dev_priv = dev->dev_private;
106 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
107 unsigned long cfb_base;
108 unsigned long ll_base = 0;
109
110 /* Just in case the BIOS is doing something questionable. */
111 intel_disable_fbc(dev);
112
113 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
114 if (compressed_fb)
115 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
116 if (!compressed_fb)
117 goto err;
118
119 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
120 if (!cfb_base)
121 goto err_fb;
122
123 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
124 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
125 4096, 4096, 0);
126 if (compressed_llb)
127 compressed_llb = drm_mm_get_block(compressed_llb,
128 4096, 4096);
129 if (!compressed_llb)
130 goto err_fb;
131
132 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
133 if (!ll_base)
134 goto err_llb;
135 }
136
137 dev_priv->cfb_size = size;
138
139 dev_priv->compressed_fb = compressed_fb;
140 if (HAS_PCH_SPLIT(dev))
141 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
142 else if (IS_GM45(dev)) {
143 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
144 } else {
145 I915_WRITE(FBC_CFB_BASE, cfb_base);
146 I915_WRITE(FBC_LL_BASE, ll_base);
147 dev_priv->compressed_llb = compressed_llb;
148 }
149
150 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
151 cfb_base, ll_base, size >> 20);
152 return;
153
154err_llb:
155 drm_mm_put_block(compressed_llb);
156err_fb:
157 drm_mm_put_block(compressed_fb);
158err:
159 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
160 i915_warn_stolen(dev);
161}
162
163static void i915_cleanup_compression(struct drm_device *dev)
164{
165 struct drm_i915_private *dev_priv = dev->dev_private;
166
167 drm_mm_put_block(dev_priv->compressed_fb);
168 if (dev_priv->compressed_llb)
169 drm_mm_put_block(dev_priv->compressed_llb);
170}
171
172void i915_gem_cleanup_stolen(struct drm_device *dev)
173{
174 if (I915_HAS_FBC(dev) && i915_powersave)
175 i915_cleanup_compression(dev);
176}
177
178int i915_gem_init_stolen(struct drm_device *dev)
179{
180 struct drm_i915_private *dev_priv = dev->dev_private;
181 unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
182
183 /* Basic memrange allocator for stolen space */
184 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
185
186 /* Try to set up FBC with a reasonable compressed buffer size */
187 if (I915_HAS_FBC(dev) && i915_powersave) {
188 int cfb_size;
189
190 /* Leave 1M for line length buffer & misc. */
191
192 /* Try to get a 32M buffer... */
193 if (prealloc_size > (36*1024*1024))
194 cfb_size = 32*1024*1024;
195 else /* fall back to 7/8 of the stolen space */
196 cfb_size = prealloc_size * 7 / 8;
197 i915_setup_compression(dev, cfb_size);
198 }
199
200 return 0;
201}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3df..99c4faa59d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,10 +25,11 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/string.h> 28#include "linux/string.h"
29#include <linux/bitops.h> 29#include "linux/bitops.h"
30#include <drm/drmP.h> 30#include "drmP.h"
31#include <drm/i915_drm.h> 31#include "drm.h"
32#include "i915_drm.h"
32#include "i915_drv.h" 33#include "i915_drv.h"
33 34
34/** @file i915_gem_tiling.c 35/** @file i915_gem_tiling.c
@@ -91,28 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
93 94
94 if (IS_VALLEYVIEW(dev)) { 95 if (INTEL_INFO(dev)->gen >= 5) {
95 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
96 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
97 } else if (INTEL_INFO(dev)->gen >= 6) {
98 uint32_t dimm_c0, dimm_c1;
99 dimm_c0 = I915_READ(MAD_DIMM_C0);
100 dimm_c1 = I915_READ(MAD_DIMM_C1);
101 dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
102 dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
103 /* Enable swizzling when the channels are populated with
104 * identically sized dimms. We don't need to check the 3rd
105 * channel because no cpu with gpu attached ships in that
106 * configuration. Also, swizzling only makes sense for 2
107 * channels anyway. */
108 if (dimm_c0 == dimm_c1) {
109 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
110 swizzle_y = I915_BIT_6_SWIZZLE_9;
111 } else {
112 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
113 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
114 }
115 } else if (IS_GEN5(dev)) {
116 /* On Ironlake whatever DRAM config, GPU always do 96 /* On Ironlake whatever DRAM config, GPU always do
117 * same swizzling setup. 97 * same swizzling setup.
118 */ 98 */
@@ -124,10 +104,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
124 */ 104 */
125 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 105 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
126 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 106 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
127 } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { 107 } else if (IS_MOBILE(dev)) {
128 uint32_t dcc; 108 uint32_t dcc;
129 109
130 /* On 9xx chipsets, channel interleave by the CPU is 110 /* On mobile 9xx chipsets, channel interleave by the CPU is
131 * determined by DCC. For single-channel, neither the CPU 111 * determined by DCC. For single-channel, neither the CPU
132 * nor the GPU do swizzling. For dual channel interleaved, 112 * nor the GPU do swizzling. For dual channel interleaved,
133 * the GPU's interleave is bit 9 and 10 for X tiled, and bit 113 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -356,15 +336,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
356 /* We need to rebind the object if its current allocation 336 /* We need to rebind the object if its current allocation
357 * no longer meets the alignment restrictions for its new 337 * no longer meets the alignment restrictions for its new
358 * tiling mode. Otherwise we can just leave it alone, but 338 * tiling mode. Otherwise we can just leave it alone, but
359 * need to ensure that any fence register is updated before 339 * need to ensure that any fence register is cleared.
360 * the next fenced (either through the GTT or by the BLT unit
361 * on older GPUs) access.
362 *
363 * After updating the tiling parameters, we then flag whether
364 * we need to update an associated fence register. Note this
365 * has to also include the unfenced register the GPU uses
366 * whilst executing a fenced command for an untiled object.
367 */ 340 */
341 i915_gem_release_mmap(obj);
368 342
369 obj->map_and_fenceable = 343 obj->map_and_fenceable =
370 obj->gtt_space == NULL || 344 obj->gtt_space == NULL ||
@@ -382,15 +356,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
382 } 356 }
383 357
384 if (ret == 0) { 358 if (ret == 0) {
385 obj->fence_dirty = 359 obj->tiling_changed = true;
386 obj->fenced_gpu_access ||
387 obj->fence_reg != I915_FENCE_REG_NONE;
388
389 obj->tiling_mode = args->tiling_mode; 360 obj->tiling_mode = args->tiling_mode;
390 obj->stride = args->stride; 361 obj->stride = args->stride;
391
392 /* Force the fence to be reacquired for GTT access */
393 i915_gem_release_mmap(obj);
394 } 362 }
395 } 363 }
396 /* we have to maintain this existing ABI... */ 364 /* we have to maintain this existing ABI... */
@@ -472,20 +440,23 @@ i915_gem_swizzle_page(struct page *page)
472void 440void
473i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 441i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
474{ 442{
475 struct scatterlist *sg; 443 struct drm_device *dev = obj->base.dev;
444 drm_i915_private_t *dev_priv = dev->dev_private;
476 int page_count = obj->base.size >> PAGE_SHIFT; 445 int page_count = obj->base.size >> PAGE_SHIFT;
477 int i; 446 int i;
478 447
448 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
449 return;
450
479 if (obj->bit_17 == NULL) 451 if (obj->bit_17 == NULL)
480 return; 452 return;
481 453
482 for_each_sg(obj->pages->sgl, sg, page_count, i) { 454 for (i = 0; i < page_count; i++) {
483 struct page *page = sg_page(sg); 455 char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
484 char new_bit_17 = page_to_phys(page) >> 17;
485 if ((new_bit_17 & 0x1) != 456 if ((new_bit_17 & 0x1) !=
486 (test_bit(i, obj->bit_17) != 0)) { 457 (test_bit(i, obj->bit_17) != 0)) {
487 i915_gem_swizzle_page(page); 458 i915_gem_swizzle_page(obj->pages[i]);
488 set_page_dirty(page); 459 set_page_dirty(obj->pages[i]);
489 } 460 }
490 } 461 }
491} 462}
@@ -493,10 +464,14 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
493void 464void
494i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 465i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
495{ 466{
496 struct scatterlist *sg; 467 struct drm_device *dev = obj->base.dev;
468 drm_i915_private_t *dev_priv = dev->dev_private;
497 int page_count = obj->base.size >> PAGE_SHIFT; 469 int page_count = obj->base.size >> PAGE_SHIFT;
498 int i; 470 int i;
499 471
472 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
473 return;
474
500 if (obj->bit_17 == NULL) { 475 if (obj->bit_17 == NULL) {
501 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 476 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
502 sizeof(long), GFP_KERNEL); 477 sizeof(long), GFP_KERNEL);
@@ -507,9 +482,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
507 } 482 }
508 } 483 }
509 484
510 for_each_sg(obj->pages->sgl, sg, page_count, i) { 485 for (i = 0; i < page_count; i++) {
511 struct page *page = sg_page(sg); 486 if (page_to_phys(obj->pages[i]) & (1 << 17))
512 if (page_to_phys(page) & (1 << 17))
513 __set_bit(i, obj->bit_17); 487 __set_bit(i, obj->bit_17);
514 else 488 else
515 __clear_bit(i, obj->bit_17); 489 __clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 3c59584161c..13b028994b2 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -31,9 +31,9 @@
31 */ 31 */
32#include <linux/compat.h> 32#include <linux/compat.h>
33 33
34#include <drm/drmP.h> 34#include "drmP.h"
35#include <drm/i915_drm.h> 35#include "drm.h"
36#include "i915_drv.h" 36#include "i915_drm.h"
37 37
38typedef struct _drm_i915_batchbuffer32 { 38typedef struct _drm_i915_batchbuffer32 {
39 int start; /* agp offset */ 39 int start; /* agp offset */
@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
181 (unsigned long)request); 181 (unsigned long)request);
182} 182}
183 183
184static drm_ioctl_compat_t *i915_compat_ioctls[] = { 184drm_ioctl_compat_t *i915_compat_ioctls[] = {
185 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, 185 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
186 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, 186 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
187 [DRM_I915_GETPARAM] = compat_i915_getparam, 187 [DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
189 [DRM_I915_ALLOC] = compat_i915_alloc 189 [DRM_I915_ALLOC] = compat_i915_alloc
190}; 190};
191 191
192#ifdef CONFIG_COMPAT
193/** 192/**
194 * Called whenever a 32-bit process running under a 64-bit kernel 193 * Called whenever a 32-bit process running under a 64-bit kernel
195 * performs an ioctl on /dev/dri/card<n>. 194 * performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
218 217
219 return ret; 218 return ret;
220} 219}
221#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2220dec3e5d..73248d0ea17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,16 +26,44 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/sysrq.h> 29#include <linux/sysrq.h>
32#include <linux/slab.h> 30#include <linux/slab.h>
33#include <drm/drmP.h> 31#include "drmP.h"
34#include <drm/i915_drm.h> 32#include "drm.h"
33#include "i915_drm.h"
35#include "i915_drv.h" 34#include "i915_drv.h"
36#include "i915_trace.h" 35#include "i915_trace.h"
37#include "intel_drv.h" 36#include "intel_drv.h"
38 37
38#define MAX_NOPID ((u32)~0)
39
40/**
41 * Interrupts that are always left unmasked.
42 *
43 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
44 * we leave them always unmasked in IMR and then control enabling them through
45 * PIPESTAT alone.
46 */
47#define I915_INTERRUPT_ENABLE_FIX \
48 (I915_ASLE_INTERRUPT | \
49 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
50 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
51 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
52 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
54
55/** Interrupts that we mask and unmask at runtime. */
56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
57
58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS)
60
61#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
62 PIPE_VBLANK_INTERRUPT_ENABLE)
63
64#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
65 DRM_I915_VBLANK_PIPE_B)
66
39/* For display hotplug interrupt */ 67/* For display hotplug interrupt */
40static void 68static void
41ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 69ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -90,10 +118,6 @@ void intel_enable_asle(struct drm_device *dev)
90 drm_i915_private_t *dev_priv = dev->dev_private; 118 drm_i915_private_t *dev_priv = dev->dev_private;
91 unsigned long irqflags; 119 unsigned long irqflags;
92 120
93 /* FIXME: opregion/asle for VLV */
94 if (IS_VALLEYVIEW(dev))
95 return;
96
97 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 121 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
98 122
99 if (HAS_PCH_SPLIT(dev)) 123 if (HAS_PCH_SPLIT(dev))
@@ -122,10 +146,7 @@ static int
122i915_pipe_enabled(struct drm_device *dev, int pipe) 146i915_pipe_enabled(struct drm_device *dev, int pipe)
123{ 147{
124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 148 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 149 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
126 pipe);
127
128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
129} 150}
130 151
131/* Called from drm generic code, passed a 'crtc', which 152/* Called from drm generic code, passed a 'crtc', which
@@ -185,8 +206,6 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
185 int vbl_start, vbl_end, htotal, vtotal; 206 int vbl_start, vbl_end, htotal, vtotal;
186 bool in_vbl = true; 207 bool in_vbl = true;
187 int ret = 0; 208 int ret = 0;
188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
189 pipe);
190 209
191 if (!i915_pipe_enabled(dev, pipe)) { 210 if (!i915_pipe_enabled(dev, pipe)) {
192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 211 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -195,7 +214,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
195 } 214 }
196 215
197 /* Get vtotal. */ 216 /* Get vtotal. */
198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 217 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
199 218
200 if (INTEL_INFO(dev)->gen >= 4) { 219 if (INTEL_INFO(dev)->gen >= 4) {
201 /* No obvious pixelcount register. Only query vertical 220 /* No obvious pixelcount register. Only query vertical
@@ -215,13 +234,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
215 */ 234 */
216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 235 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
217 236
218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 237 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
219 *vpos = position / htotal; 238 *vpos = position / htotal;
220 *hpos = position - (*vpos * htotal); 239 *hpos = position - (*vpos * htotal);
221 } 240 }
222 241
223 /* Query vblank area. */ 242 /* Query vblank area. */
224 vbl = I915_READ(VBLANK(cpu_transcoder)); 243 vbl = I915_READ(VBLANK(pipe));
225 244
226 /* Test position against vblank region. */ 245 /* Test position against vblank region. */
227 vbl_start = vbl & 0x1fff; 246 vbl_start = vbl & 0x1fff;
@@ -300,21 +319,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
300 drm_helper_hpd_irq_event(dev); 319 drm_helper_hpd_irq_event(dev);
301} 320}
302 321
303/* defined intel_pm.c */ 322static void i915_handle_rps_change(struct drm_device *dev)
304extern spinlock_t mchdev_lock;
305
306static void ironlake_handle_rps_change(struct drm_device *dev)
307{ 323{
308 drm_i915_private_t *dev_priv = dev->dev_private; 324 drm_i915_private_t *dev_priv = dev->dev_private;
309 u32 busy_up, busy_down, max_avg, min_avg; 325 u32 busy_up, busy_down, max_avg, min_avg;
310 u8 new_delay; 326 u8 new_delay = dev_priv->cur_delay;
311 unsigned long flags;
312
313 spin_lock_irqsave(&mchdev_lock, flags);
314
315 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
316
317 new_delay = dev_priv->ips.cur_delay;
318 327
319 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 328 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
320 busy_up = I915_READ(RCPREVBSYTUPAVG); 329 busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -324,21 +333,19 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
324 333
325 /* Handle RCS change request from hw */ 334 /* Handle RCS change request from hw */
326 if (busy_up > max_avg) { 335 if (busy_up > max_avg) {
327 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 336 if (dev_priv->cur_delay != dev_priv->max_delay)
328 new_delay = dev_priv->ips.cur_delay - 1; 337 new_delay = dev_priv->cur_delay - 1;
329 if (new_delay < dev_priv->ips.max_delay) 338 if (new_delay < dev_priv->max_delay)
330 new_delay = dev_priv->ips.max_delay; 339 new_delay = dev_priv->max_delay;
331 } else if (busy_down < min_avg) { 340 } else if (busy_down < min_avg) {
332 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 341 if (dev_priv->cur_delay != dev_priv->min_delay)
333 new_delay = dev_priv->ips.cur_delay + 1; 342 new_delay = dev_priv->cur_delay + 1;
334 if (new_delay > dev_priv->ips.min_delay) 343 if (new_delay > dev_priv->min_delay)
335 new_delay = dev_priv->ips.min_delay; 344 new_delay = dev_priv->min_delay;
336 } 345 }
337 346
338 if (ironlake_set_drps(dev, new_delay)) 347 if (ironlake_set_drps(dev, new_delay))
339 dev_priv->ips.cur_delay = new_delay; 348 dev_priv->cur_delay = new_delay;
340
341 spin_unlock_irqrestore(&mchdev_lock, flags);
342 349
343 return; 350 return;
344} 351}
@@ -347,271 +354,83 @@ static void notify_ring(struct drm_device *dev,
347 struct intel_ring_buffer *ring) 354 struct intel_ring_buffer *ring)
348{ 355{
349 struct drm_i915_private *dev_priv = dev->dev_private; 356 struct drm_i915_private *dev_priv = dev->dev_private;
357 u32 seqno;
350 358
351 if (ring->obj == NULL) 359 if (ring->obj == NULL)
352 return; 360 return;
353 361
354 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 362 seqno = ring->get_seqno(ring);
363 trace_i915_gem_request_complete(ring, seqno);
355 364
365 ring->irq_seqno = seqno;
356 wake_up_all(&ring->irq_queue); 366 wake_up_all(&ring->irq_queue);
357 if (i915_enable_hangcheck) { 367 if (i915_enable_hangcheck) {
358 dev_priv->hangcheck_count = 0; 368 dev_priv->hangcheck_count = 0;
359 mod_timer(&dev_priv->hangcheck_timer, 369 mod_timer(&dev_priv->hangcheck_timer,
360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 370 jiffies +
371 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
361 } 372 }
362} 373}
363 374
364static void gen6_pm_rps_work(struct work_struct *work) 375static void gen6_pm_rps_work(struct work_struct *work)
365{ 376{
366 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 377 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
367 rps.work); 378 rps_work);
379 u8 new_delay = dev_priv->cur_delay;
368 u32 pm_iir, pm_imr; 380 u32 pm_iir, pm_imr;
369 u8 new_delay;
370 381
371 spin_lock_irq(&dev_priv->rps.lock); 382 spin_lock_irq(&dev_priv->rps_lock);
372 pm_iir = dev_priv->rps.pm_iir; 383 pm_iir = dev_priv->pm_iir;
373 dev_priv->rps.pm_iir = 0; 384 dev_priv->pm_iir = 0;
374 pm_imr = I915_READ(GEN6_PMIMR); 385 pm_imr = I915_READ(GEN6_PMIMR);
375 I915_WRITE(GEN6_PMIMR, 0); 386 spin_unlock_irq(&dev_priv->rps_lock);
376 spin_unlock_irq(&dev_priv->rps.lock);
377 387
378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 388 if (!pm_iir)
379 return; 389 return;
380 390
381 mutex_lock(&dev_priv->rps.hw_lock);
382
383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
384 new_delay = dev_priv->rps.cur_delay + 1;
385 else
386 new_delay = dev_priv->rps.cur_delay - 1;
387
388 /* sysfs frequency interfaces may have snuck in while servicing the
389 * interrupt
390 */
391 if (!(new_delay > dev_priv->rps.max_delay ||
392 new_delay < dev_priv->rps.min_delay)) {
393 gen6_set_rps(dev_priv->dev, new_delay);
394 }
395
396 mutex_unlock(&dev_priv->rps.hw_lock);
397}
398
399
400/**
401 * ivybridge_parity_work - Workqueue called when a parity error interrupt
402 * occurred.
403 * @work: workqueue struct
404 *
405 * Doesn't actually do anything except notify userspace. As a consequence of
406 * this event, userspace should try to remap the bad rows since statistically
407 * it is likely the same row is more likely to go bad again.
408 */
409static void ivybridge_parity_work(struct work_struct *work)
410{
411 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
412 l3_parity.error_work);
413 u32 error_status, row, bank, subbank;
414 char *parity_event[5];
415 uint32_t misccpctl;
416 unsigned long flags;
417
418 /* We must turn off DOP level clock gating to access the L3 registers.
419 * In order to prevent a get/put style interface, acquire struct mutex
420 * any time we access those registers.
421 */
422 mutex_lock(&dev_priv->dev->struct_mutex); 391 mutex_lock(&dev_priv->dev->struct_mutex);
423 392 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
424 misccpctl = I915_READ(GEN7_MISCCPCTL); 393 if (dev_priv->cur_delay != dev_priv->max_delay)
425 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 394 new_delay = dev_priv->cur_delay + 1;
426 POSTING_READ(GEN7_MISCCPCTL); 395 if (new_delay > dev_priv->max_delay)
427 396 new_delay = dev_priv->max_delay;
428 error_status = I915_READ(GEN7_L3CDERRST1); 397 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
429 row = GEN7_PARITY_ERROR_ROW(error_status); 398 gen6_gt_force_wake_get(dev_priv);
430 bank = GEN7_PARITY_ERROR_BANK(error_status); 399 if (dev_priv->cur_delay != dev_priv->min_delay)
431 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 400 new_delay = dev_priv->cur_delay - 1;
432 401 if (new_delay < dev_priv->min_delay) {
433 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 402 new_delay = dev_priv->min_delay;
434 GEN7_L3CDERRST1_ENABLE); 403 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
435 POSTING_READ(GEN7_L3CDERRST1); 404 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
436 405 ((new_delay << 16) & 0x3f0000));
437 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 406 } else {
438 407 /* Make sure we continue to get down interrupts
439 spin_lock_irqsave(&dev_priv->irq_lock, flags); 408 * until we hit the minimum frequency */
440 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 409 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
441 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 410 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
442 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 411 }
443 412 gen6_gt_force_wake_put(dev_priv);
444 mutex_unlock(&dev_priv->dev->struct_mutex);
445
446 parity_event[0] = "L3_PARITY_ERROR=1";
447 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
448 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
449 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
450 parity_event[4] = NULL;
451
452 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
453 KOBJ_CHANGE, parity_event);
454
455 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
456 row, bank, subbank);
457
458 kfree(parity_event[3]);
459 kfree(parity_event[2]);
460 kfree(parity_event[1]);
461}
462
463static void ivybridge_handle_parity_error(struct drm_device *dev)
464{
465 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
466 unsigned long flags;
467
468 if (!HAS_L3_GPU_CACHE(dev))
469 return;
470
471 spin_lock_irqsave(&dev_priv->irq_lock, flags);
472 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
473 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
474 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
475
476 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
477}
478
479static void snb_gt_irq_handler(struct drm_device *dev,
480 struct drm_i915_private *dev_priv,
481 u32 gt_iir)
482{
483
484 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
485 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
486 notify_ring(dev, &dev_priv->ring[RCS]);
487 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
488 notify_ring(dev, &dev_priv->ring[VCS]);
489 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
490 notify_ring(dev, &dev_priv->ring[BCS]);
491
492 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
493 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
494 GT_RENDER_CS_ERROR_INTERRUPT)) {
495 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
496 i915_handle_error(dev, false);
497 } 413 }
498 414
499 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 415 gen6_set_rps(dev_priv->dev, new_delay);
500 ivybridge_handle_parity_error(dev); 416 dev_priv->cur_delay = new_delay;
501}
502
503static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
504 u32 pm_iir)
505{
506 unsigned long flags;
507 417
508 /* 418 /*
509 * IIR bits should never already be set because IMR should 419 * rps_lock not held here because clearing is non-destructive. There is
510 * prevent an interrupt from being shown in IIR. The warning 420 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
511 * displays a case where we've unsafely cleared 421 * by holding struct_mutex for the duration of the write.
512 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
513 * type is not a problem, it displays a problem in the logic.
514 *
515 * The mask bit in IMR is cleared by dev_priv->rps.work.
516 */ 422 */
517 423 I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
518 spin_lock_irqsave(&dev_priv->rps.lock, flags); 424 mutex_unlock(&dev_priv->dev->struct_mutex);
519 dev_priv->rps.pm_iir |= pm_iir;
520 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
521 POSTING_READ(GEN6_PMIMR);
522 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
523
524 queue_work(dev_priv->wq, &dev_priv->rps.work);
525}
526
527static irqreturn_t valleyview_irq_handler(int irq, void *arg)
528{
529 struct drm_device *dev = (struct drm_device *) arg;
530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
531 u32 iir, gt_iir, pm_iir;
532 irqreturn_t ret = IRQ_NONE;
533 unsigned long irqflags;
534 int pipe;
535 u32 pipe_stats[I915_MAX_PIPES];
536 bool blc_event;
537
538 atomic_inc(&dev_priv->irq_received);
539
540 while (true) {
541 iir = I915_READ(VLV_IIR);
542 gt_iir = I915_READ(GTIIR);
543 pm_iir = I915_READ(GEN6_PMIIR);
544
545 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
546 goto out;
547
548 ret = IRQ_HANDLED;
549
550 snb_gt_irq_handler(dev, dev_priv, gt_iir);
551
552 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
553 for_each_pipe(pipe) {
554 int reg = PIPESTAT(pipe);
555 pipe_stats[pipe] = I915_READ(reg);
556
557 /*
558 * Clear the PIPE*STAT regs before the IIR
559 */
560 if (pipe_stats[pipe] & 0x8000ffff) {
561 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
562 DRM_DEBUG_DRIVER("pipe %c underrun\n",
563 pipe_name(pipe));
564 I915_WRITE(reg, pipe_stats[pipe]);
565 }
566 }
567 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
568
569 for_each_pipe(pipe) {
570 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
571 drm_handle_vblank(dev, pipe);
572
573 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
574 intel_prepare_page_flip(dev, pipe);
575 intel_finish_page_flip(dev, pipe);
576 }
577 }
578
579 /* Consume port. Then clear IIR or we'll miss events */
580 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
581 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
582
583 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
584 hotplug_status);
585 if (hotplug_status & dev_priv->hotplug_supported_mask)
586 queue_work(dev_priv->wq,
587 &dev_priv->hotplug_work);
588
589 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
590 I915_READ(PORT_HOTPLUG_STAT);
591 }
592
593 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
594 blc_event = true;
595
596 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
597 gen6_queue_rps_work(dev_priv, pm_iir);
598
599 I915_WRITE(GTIIR, gt_iir);
600 I915_WRITE(GEN6_PMIIR, pm_iir);
601 I915_WRITE(VLV_IIR, iir);
602 }
603
604out:
605 return ret;
606} 425}
607 426
608static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 427static void pch_irq_handler(struct drm_device *dev)
609{ 428{
610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 429 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
430 u32 pch_iir;
611 int pipe; 431 int pipe;
612 432
613 if (pch_iir & SDE_HOTPLUG_MASK) 433 pch_iir = I915_READ(SDEIIR);
614 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
615 434
616 if (pch_iir & SDE_AUDIO_POWER_MASK) 435 if (pch_iir & SDE_AUDIO_POWER_MASK)
617 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 436 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -648,120 +467,109 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
648 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 467 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
649} 468}
650 469
651static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 470static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
652{
653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
654 int pipe;
655
656 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
657 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
658
659 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
660 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
661 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
662 SDE_AUDIO_POWER_SHIFT_CPT);
663
664 if (pch_iir & SDE_AUX_MASK_CPT)
665 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
666
667 if (pch_iir & SDE_GMBUS_CPT)
668 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
669
670 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
671 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
672
673 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
674 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
675
676 if (pch_iir & SDE_FDI_MASK_CPT)
677 for_each_pipe(pipe)
678 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
679 pipe_name(pipe),
680 I915_READ(FDI_RX_IIR(pipe)));
681}
682
683static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
684{ 471{
685 struct drm_device *dev = (struct drm_device *) arg; 472 struct drm_device *dev = (struct drm_device *) arg;
686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 473 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
687 u32 de_iir, gt_iir, de_ier, pm_iir; 474 int ret = IRQ_NONE;
688 irqreturn_t ret = IRQ_NONE; 475 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
689 int i; 476 struct drm_i915_master_private *master_priv;
690 477
691 atomic_inc(&dev_priv->irq_received); 478 atomic_inc(&dev_priv->irq_received);
692 479
693 /* disable master interrupt before clearing iir */ 480 /* disable master interrupt before clearing iir */
694 de_ier = I915_READ(DEIER); 481 de_ier = I915_READ(DEIER);
695 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 482 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
483 POSTING_READ(DEIER);
696 484
485 de_iir = I915_READ(DEIIR);
697 gt_iir = I915_READ(GTIIR); 486 gt_iir = I915_READ(GTIIR);
698 if (gt_iir) { 487 pch_iir = I915_READ(SDEIIR);
699 snb_gt_irq_handler(dev, dev_priv, gt_iir); 488 pm_iir = I915_READ(GEN6_PMIIR);
700 I915_WRITE(GTIIR, gt_iir); 489
701 ret = IRQ_HANDLED; 490 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
491 goto done;
492
493 ret = IRQ_HANDLED;
494
495 if (dev->primary->master) {
496 master_priv = dev->primary->master->driver_priv;
497 if (master_priv->sarea_priv)
498 master_priv->sarea_priv->last_dispatch =
499 READ_BREADCRUMB(dev_priv);
702 } 500 }
703 501
704 de_iir = I915_READ(DEIIR); 502 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
705 if (de_iir) { 503 notify_ring(dev, &dev_priv->ring[RCS]);
706 if (de_iir & DE_GSE_IVB) 504 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
707 intel_opregion_gse_intr(dev); 505 notify_ring(dev, &dev_priv->ring[VCS]);
708 506 if (gt_iir & GT_BLT_USER_INTERRUPT)
709 for (i = 0; i < 3; i++) { 507 notify_ring(dev, &dev_priv->ring[BCS]);
710 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
711 drm_handle_vblank(dev, i);
712 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
713 intel_prepare_page_flip(dev, i);
714 intel_finish_page_flip_plane(dev, i);
715 }
716 }
717 508
718 /* check event from PCH */ 509 if (de_iir & DE_GSE_IVB)
719 if (de_iir & DE_PCH_EVENT_IVB) { 510 intel_opregion_gse_intr(dev);
720 u32 pch_iir = I915_READ(SDEIIR);
721 511
722 cpt_irq_handler(dev, pch_iir); 512 if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
513 intel_prepare_page_flip(dev, 0);
514 intel_finish_page_flip_plane(dev, 0);
515 }
723 516
724 /* clear PCH hotplug event before clear CPU irq */ 517 if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
725 I915_WRITE(SDEIIR, pch_iir); 518 intel_prepare_page_flip(dev, 1);
726 } 519 intel_finish_page_flip_plane(dev, 1);
520 }
727 521
728 I915_WRITE(DEIIR, de_iir); 522 if (de_iir & DE_PIPEA_VBLANK_IVB)
729 ret = IRQ_HANDLED; 523 drm_handle_vblank(dev, 0);
524
525 if (de_iir & DE_PIPEB_VBLANK_IVB)
526 drm_handle_vblank(dev, 1);
527
528 /* check event from PCH */
529 if (de_iir & DE_PCH_EVENT_IVB) {
530 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
531 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
532 pch_irq_handler(dev);
730 } 533 }
731 534
732 pm_iir = I915_READ(GEN6_PMIIR); 535 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
733 if (pm_iir) { 536 unsigned long flags;
734 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 537 spin_lock_irqsave(&dev_priv->rps_lock, flags);
735 gen6_queue_rps_work(dev_priv, pm_iir); 538 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
736 I915_WRITE(GEN6_PMIIR, pm_iir); 539 I915_WRITE(GEN6_PMIMR, pm_iir);
737 ret = IRQ_HANDLED; 540 dev_priv->pm_iir |= pm_iir;
541 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
542 queue_work(dev_priv->wq, &dev_priv->rps_work);
738 } 543 }
739 544
545 /* should clear PCH hotplug event before clear CPU irq */
546 I915_WRITE(SDEIIR, pch_iir);
547 I915_WRITE(GTIIR, gt_iir);
548 I915_WRITE(DEIIR, de_iir);
549 I915_WRITE(GEN6_PMIIR, pm_iir);
550
551done:
740 I915_WRITE(DEIER, de_ier); 552 I915_WRITE(DEIER, de_ier);
741 POSTING_READ(DEIER); 553 POSTING_READ(DEIER);
742 554
743 return ret; 555 return ret;
744} 556}
745 557
746static void ilk_gt_irq_handler(struct drm_device *dev, 558static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
747 struct drm_i915_private *dev_priv,
748 u32 gt_iir)
749{
750 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
751 notify_ring(dev, &dev_priv->ring[RCS]);
752 if (gt_iir & GT_BSD_USER_INTERRUPT)
753 notify_ring(dev, &dev_priv->ring[VCS]);
754}
755
756static irqreturn_t ironlake_irq_handler(int irq, void *arg)
757{ 559{
758 struct drm_device *dev = (struct drm_device *) arg; 560 struct drm_device *dev = (struct drm_device *) arg;
759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 561 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
760 int ret = IRQ_NONE; 562 int ret = IRQ_NONE;
761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 563 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
564 u32 hotplug_mask;
565 struct drm_i915_master_private *master_priv;
566 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
762 567
763 atomic_inc(&dev_priv->irq_received); 568 atomic_inc(&dev_priv->irq_received);
764 569
570 if (IS_GEN6(dev))
571 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
572
765 /* disable master interrupt before clearing iir */ 573 /* disable master interrupt before clearing iir */
766 de_ier = I915_READ(DEIER); 574 de_ier = I915_READ(DEIER);
767 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 575 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -776,22 +584,30 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
776 (!IS_GEN6(dev) || pm_iir == 0)) 584 (!IS_GEN6(dev) || pm_iir == 0))
777 goto done; 585 goto done;
778 586
587 if (HAS_PCH_CPT(dev))
588 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
589 else
590 hotplug_mask = SDE_HOTPLUG_MASK;
591
779 ret = IRQ_HANDLED; 592 ret = IRQ_HANDLED;
780 593
781 if (IS_GEN5(dev)) 594 if (dev->primary->master) {
782 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 595 master_priv = dev->primary->master->driver_priv;
783 else 596 if (master_priv->sarea_priv)
784 snb_gt_irq_handler(dev, dev_priv, gt_iir); 597 master_priv->sarea_priv->last_dispatch =
598 READ_BREADCRUMB(dev_priv);
599 }
600
601 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
602 notify_ring(dev, &dev_priv->ring[RCS]);
603 if (gt_iir & bsd_usr_interrupt)
604 notify_ring(dev, &dev_priv->ring[VCS]);
605 if (gt_iir & GT_BLT_USER_INTERRUPT)
606 notify_ring(dev, &dev_priv->ring[BCS]);
785 607
786 if (de_iir & DE_GSE) 608 if (de_iir & DE_GSE)
787 intel_opregion_gse_intr(dev); 609 intel_opregion_gse_intr(dev);
788 610
789 if (de_iir & DE_PIPEA_VBLANK)
790 drm_handle_vblank(dev, 0);
791
792 if (de_iir & DE_PIPEB_VBLANK)
793 drm_handle_vblank(dev, 1);
794
795 if (de_iir & DE_PLANEA_FLIP_DONE) { 611 if (de_iir & DE_PLANEA_FLIP_DONE) {
796 intel_prepare_page_flip(dev, 0); 612 intel_prepare_page_flip(dev, 0);
797 intel_finish_page_flip_plane(dev, 0); 613 intel_finish_page_flip_plane(dev, 0);
@@ -802,19 +618,42 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
802 intel_finish_page_flip_plane(dev, 1); 618 intel_finish_page_flip_plane(dev, 1);
803 } 619 }
804 620
621 if (de_iir & DE_PIPEA_VBLANK)
622 drm_handle_vblank(dev, 0);
623
624 if (de_iir & DE_PIPEB_VBLANK)
625 drm_handle_vblank(dev, 1);
626
805 /* check event from PCH */ 627 /* check event from PCH */
806 if (de_iir & DE_PCH_EVENT) { 628 if (de_iir & DE_PCH_EVENT) {
807 if (HAS_PCH_CPT(dev)) 629 if (pch_iir & hotplug_mask)
808 cpt_irq_handler(dev, pch_iir); 630 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
809 else 631 pch_irq_handler(dev);
810 ibx_irq_handler(dev, pch_iir);
811 } 632 }
812 633
813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 634 if (de_iir & DE_PCU_EVENT) {
814 ironlake_handle_rps_change(dev); 635 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
636 i915_handle_rps_change(dev);
637 }
815 638
816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 639 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
817 gen6_queue_rps_work(dev_priv, pm_iir); 640 /*
641 * IIR bits should never already be set because IMR should
642 * prevent an interrupt from being shown in IIR. The warning
643 * displays a case where we've unsafely cleared
644 * dev_priv->pm_iir. Although missing an interrupt of the same
645 * type is not a problem, it displays a problem in the logic.
646 *
647 * The mask bit in IMR is cleared by rps_work.
648 */
649 unsigned long flags;
650 spin_lock_irqsave(&dev_priv->rps_lock, flags);
651 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
652 I915_WRITE(GEN6_PMIMR, pm_iir);
653 dev_priv->pm_iir |= pm_iir;
654 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
655 queue_work(dev_priv->wq, &dev_priv->rps_work);
656 }
818 657
819 /* should clear PCH hotplug event before clear CPU irq */ 658 /* should clear PCH hotplug event before clear CPU irq */
820 I915_WRITE(SDEIIR, pch_iir); 659 I915_WRITE(SDEIIR, pch_iir);
@@ -850,7 +689,7 @@ static void i915_error_work_func(struct work_struct *work)
850 if (atomic_read(&dev_priv->mm.wedged)) { 689 if (atomic_read(&dev_priv->mm.wedged)) {
851 DRM_DEBUG_DRIVER("resetting chip\n"); 690 DRM_DEBUG_DRIVER("resetting chip\n");
852 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 691 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
853 if (!i915_reset(dev)) { 692 if (!i915_reset(dev, GRDOM_RENDER)) {
854 atomic_set(&dev_priv->mm.wedged, 0); 693 atomic_set(&dev_priv->mm.wedged, 0);
855 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 694 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
856 } 695 }
@@ -858,56 +697,28 @@ static void i915_error_work_func(struct work_struct *work)
858 } 697 }
859} 698}
860 699
861/* NB: please notice the memset */
862static void i915_get_extra_instdone(struct drm_device *dev,
863 uint32_t *instdone)
864{
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
867
868 switch(INTEL_INFO(dev)->gen) {
869 case 2:
870 case 3:
871 instdone[0] = I915_READ(INSTDONE);
872 break;
873 case 4:
874 case 5:
875 case 6:
876 instdone[0] = I915_READ(INSTDONE_I965);
877 instdone[1] = I915_READ(INSTDONE1);
878 break;
879 default:
880 WARN_ONCE(1, "Unsupported platform\n");
881 case 7:
882 instdone[0] = I915_READ(GEN7_INSTDONE_1);
883 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
884 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
885 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
886 break;
887 }
888}
889
890#ifdef CONFIG_DEBUG_FS 700#ifdef CONFIG_DEBUG_FS
891static struct drm_i915_error_object * 701static struct drm_i915_error_object *
892i915_error_object_create(struct drm_i915_private *dev_priv, 702i915_error_object_create(struct drm_i915_private *dev_priv,
893 struct drm_i915_gem_object *src) 703 struct drm_i915_gem_object *src)
894{ 704{
895 struct drm_i915_error_object *dst; 705 struct drm_i915_error_object *dst;
896 int i, count; 706 int page, page_count;
897 u32 reloc_offset; 707 u32 reloc_offset;
898 708
899 if (src == NULL || src->pages == NULL) 709 if (src == NULL || src->pages == NULL)
900 return NULL; 710 return NULL;
901 711
902 count = src->base.size / PAGE_SIZE; 712 page_count = src->base.size / PAGE_SIZE;
903 713
904 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 714 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
905 if (dst == NULL) 715 if (dst == NULL)
906 return NULL; 716 return NULL;
907 717
908 reloc_offset = src->gtt_offset; 718 reloc_offset = src->gtt_offset;
909 for (i = 0; i < count; i++) { 719 for (page = 0; page < page_count; page++) {
910 unsigned long flags; 720 unsigned long flags;
721 void __iomem *s;
911 void *d; 722 void *d;
912 723
913 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 724 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -915,47 +726,24 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
915 goto unwind; 726 goto unwind;
916 727
917 local_irq_save(flags); 728 local_irq_save(flags);
918 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 729 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
919 src->has_global_gtt_mapping) { 730 reloc_offset);
920 void __iomem *s; 731 memcpy_fromio(d, s, PAGE_SIZE);
921 732 io_mapping_unmap_atomic(s);
922 /* Simply ignore tiling or any overlapping fence.
923 * It's part of the error state, and this hopefully
924 * captures what the GPU read.
925 */
926
927 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
928 reloc_offset);
929 memcpy_fromio(d, s, PAGE_SIZE);
930 io_mapping_unmap_atomic(s);
931 } else {
932 struct page *page;
933 void *s;
934
935 page = i915_gem_object_get_page(src, i);
936
937 drm_clflush_pages(&page, 1);
938
939 s = kmap_atomic(page);
940 memcpy(d, s, PAGE_SIZE);
941 kunmap_atomic(s);
942
943 drm_clflush_pages(&page, 1);
944 }
945 local_irq_restore(flags); 733 local_irq_restore(flags);
946 734
947 dst->pages[i] = d; 735 dst->pages[page] = d;
948 736
949 reloc_offset += PAGE_SIZE; 737 reloc_offset += PAGE_SIZE;
950 } 738 }
951 dst->page_count = count; 739 dst->page_count = page_count;
952 dst->gtt_offset = src->gtt_offset; 740 dst->gtt_offset = src->gtt_offset;
953 741
954 return dst; 742 return dst;
955 743
956unwind: 744unwind:
957 while (i--) 745 while (page--)
958 kfree(dst->pages[i]); 746 kfree(dst->pages[page]);
959 kfree(dst); 747 kfree(dst);
960 return NULL; 748 return NULL;
961} 749}
@@ -974,74 +762,53 @@ i915_error_object_free(struct drm_i915_error_object *obj)
974 kfree(obj); 762 kfree(obj);
975} 763}
976 764
977void 765static void
978i915_error_state_free(struct kref *error_ref) 766i915_error_state_free(struct drm_device *dev,
767 struct drm_i915_error_state *error)
979{ 768{
980 struct drm_i915_error_state *error = container_of(error_ref,
981 typeof(*error), ref);
982 int i; 769 int i;
983 770
984 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 771 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
985 i915_error_object_free(error->ring[i].batchbuffer); 772 i915_error_object_free(error->batchbuffer[i]);
986 i915_error_object_free(error->ring[i].ringbuffer); 773
987 kfree(error->ring[i].requests); 774 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
988 } 775 i915_error_object_free(error->ringbuffer[i]);
989 776
990 kfree(error->active_bo); 777 kfree(error->active_bo);
991 kfree(error->overlay); 778 kfree(error->overlay);
992 kfree(error); 779 kfree(error);
993} 780}
994static void capture_bo(struct drm_i915_error_buffer *err,
995 struct drm_i915_gem_object *obj)
996{
997 err->size = obj->base.size;
998 err->name = obj->base.name;
999 err->rseqno = obj->last_read_seqno;
1000 err->wseqno = obj->last_write_seqno;
1001 err->gtt_offset = obj->gtt_offset;
1002 err->read_domains = obj->base.read_domains;
1003 err->write_domain = obj->base.write_domain;
1004 err->fence_reg = obj->fence_reg;
1005 err->pinned = 0;
1006 if (obj->pin_count > 0)
1007 err->pinned = 1;
1008 if (obj->user_pin_count > 0)
1009 err->pinned = -1;
1010 err->tiling = obj->tiling_mode;
1011 err->dirty = obj->dirty;
1012 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1013 err->ring = obj->ring ? obj->ring->id : -1;
1014 err->cache_level = obj->cache_level;
1015}
1016 781
1017static u32 capture_active_bo(struct drm_i915_error_buffer *err, 782static u32 capture_bo_list(struct drm_i915_error_buffer *err,
1018 int count, struct list_head *head) 783 int count,
784 struct list_head *head)
1019{ 785{
1020 struct drm_i915_gem_object *obj; 786 struct drm_i915_gem_object *obj;
1021 int i = 0; 787 int i = 0;
1022 788
1023 list_for_each_entry(obj, head, mm_list) { 789 list_for_each_entry(obj, head, mm_list) {
1024 capture_bo(err++, obj); 790 err->size = obj->base.size;
1025 if (++i == count) 791 err->name = obj->base.name;
1026 break; 792 err->seqno = obj->last_rendering_seqno;
1027 } 793 err->gtt_offset = obj->gtt_offset;
1028 794 err->read_domains = obj->base.read_domains;
1029 return i; 795 err->write_domain = obj->base.write_domain;
1030} 796 err->fence_reg = obj->fence_reg;
1031 797 err->pinned = 0;
1032static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 798 if (obj->pin_count > 0)
1033 int count, struct list_head *head) 799 err->pinned = 1;
1034{ 800 if (obj->user_pin_count > 0)
1035 struct drm_i915_gem_object *obj; 801 err->pinned = -1;
1036 int i = 0; 802 err->tiling = obj->tiling_mode;
803 err->dirty = obj->dirty;
804 err->purgeable = obj->madv != I915_MADV_WILLNEED;
805 err->ring = obj->ring ? obj->ring->id : 0;
806 err->cache_level = obj->cache_level;
1037 807
1038 list_for_each_entry(obj, head, gtt_list) {
1039 if (obj->pin_count == 0)
1040 continue;
1041
1042 capture_bo(err++, obj);
1043 if (++i == count) 808 if (++i == count)
1044 break; 809 break;
810
811 err++;
1045 } 812 }
1046 813
1047 return i; 814 return i;
@@ -1087,24 +854,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1087 if (!ring->get_seqno) 854 if (!ring->get_seqno)
1088 return NULL; 855 return NULL;
1089 856
1090 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 857 seqno = ring->get_seqno(ring);
1091 u32 acthd = I915_READ(ACTHD);
1092
1093 if (WARN_ON(ring->id != RCS))
1094 return NULL;
1095
1096 obj = ring->private;
1097 if (acthd >= obj->gtt_offset &&
1098 acthd < obj->gtt_offset + obj->base.size)
1099 return i915_error_object_create(dev_priv, obj);
1100 }
1101
1102 seqno = ring->get_seqno(ring, false);
1103 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 858 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1104 if (obj->ring != ring) 859 if (obj->ring != ring)
1105 continue; 860 continue;
1106 861
1107 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 862 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
1108 continue; 863 continue;
1109 864
1110 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 865 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1119,91 +874,6 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1119 return NULL; 874 return NULL;
1120} 875}
1121 876
1122static void i915_record_ring_state(struct drm_device *dev,
1123 struct drm_i915_error_state *error,
1124 struct intel_ring_buffer *ring)
1125{
1126 struct drm_i915_private *dev_priv = dev->dev_private;
1127
1128 if (INTEL_INFO(dev)->gen >= 6) {
1129 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1130 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1131 error->semaphore_mboxes[ring->id][0]
1132 = I915_READ(RING_SYNC_0(ring->mmio_base));
1133 error->semaphore_mboxes[ring->id][1]
1134 = I915_READ(RING_SYNC_1(ring->mmio_base));
1135 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1136 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1137 }
1138
1139 if (INTEL_INFO(dev)->gen >= 4) {
1140 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1141 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1142 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1143 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1144 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1145 if (ring->id == RCS)
1146 error->bbaddr = I915_READ64(BB_ADDR);
1147 } else {
1148 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1149 error->ipeir[ring->id] = I915_READ(IPEIR);
1150 error->ipehr[ring->id] = I915_READ(IPEHR);
1151 error->instdone[ring->id] = I915_READ(INSTDONE);
1152 }
1153
1154 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1155 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1156 error->seqno[ring->id] = ring->get_seqno(ring, false);
1157 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1158 error->head[ring->id] = I915_READ_HEAD(ring);
1159 error->tail[ring->id] = I915_READ_TAIL(ring);
1160
1161 error->cpu_ring_head[ring->id] = ring->head;
1162 error->cpu_ring_tail[ring->id] = ring->tail;
1163}
1164
1165static void i915_gem_record_rings(struct drm_device *dev,
1166 struct drm_i915_error_state *error)
1167{
1168 struct drm_i915_private *dev_priv = dev->dev_private;
1169 struct intel_ring_buffer *ring;
1170 struct drm_i915_gem_request *request;
1171 int i, count;
1172
1173 for_each_ring(ring, dev_priv, i) {
1174 i915_record_ring_state(dev, error, ring);
1175
1176 error->ring[i].batchbuffer =
1177 i915_error_first_batchbuffer(dev_priv, ring);
1178
1179 error->ring[i].ringbuffer =
1180 i915_error_object_create(dev_priv, ring->obj);
1181
1182 count = 0;
1183 list_for_each_entry(request, &ring->request_list, list)
1184 count++;
1185
1186 error->ring[i].num_requests = count;
1187 error->ring[i].requests =
1188 kmalloc(count*sizeof(struct drm_i915_error_request),
1189 GFP_ATOMIC);
1190 if (error->ring[i].requests == NULL) {
1191 error->ring[i].num_requests = 0;
1192 continue;
1193 }
1194
1195 count = 0;
1196 list_for_each_entry(request, &ring->request_list, list) {
1197 struct drm_i915_error_request *erq;
1198
1199 erq = &error->ring[i].requests[count++];
1200 erq->seqno = request->seqno;
1201 erq->jiffies = request->emitted_jiffies;
1202 erq->tail = request->tail;
1203 }
1204 }
1205}
1206
1207/** 877/**
1208 * i915_capture_error_state - capture an error record for later analysis 878 * i915_capture_error_state - capture an error record for later analysis
1209 * @dev: drm device 879 * @dev: drm device
@@ -1228,7 +898,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1228 return; 898 return;
1229 899
1230 /* Account for pipe specific data like PIPE*STAT */ 900 /* Account for pipe specific data like PIPE*STAT */
1231 error = kzalloc(sizeof(*error), GFP_ATOMIC); 901 error = kmalloc(sizeof(*error), GFP_ATOMIC);
1232 if (!error) { 902 if (!error) {
1233 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 903 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1234 return; 904 return;
@@ -1237,35 +907,59 @@ static void i915_capture_error_state(struct drm_device *dev)
1237 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 907 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1238 dev->primary->index); 908 dev->primary->index);
1239 909
1240 kref_init(&error->ref); 910 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
1241 error->eir = I915_READ(EIR); 911 error->eir = I915_READ(EIR);
1242 error->pgtbl_er = I915_READ(PGTBL_ER); 912 error->pgtbl_er = I915_READ(PGTBL_ER);
1243 error->ccid = I915_READ(CCID);
1244
1245 if (HAS_PCH_SPLIT(dev))
1246 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1247 else if (IS_VALLEYVIEW(dev))
1248 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1249 else if (IS_GEN2(dev))
1250 error->ier = I915_READ16(IER);
1251 else
1252 error->ier = I915_READ(IER);
1253
1254 for_each_pipe(pipe) 913 for_each_pipe(pipe)
1255 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 914 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1256 915 error->instpm = I915_READ(INSTPM);
916 error->error = 0;
1257 if (INTEL_INFO(dev)->gen >= 6) { 917 if (INTEL_INFO(dev)->gen >= 6) {
1258 error->error = I915_READ(ERROR_GEN6); 918 error->error = I915_READ(ERROR_GEN6);
1259 error->done_reg = I915_READ(DONE_REG);
1260 }
1261 919
1262 if (INTEL_INFO(dev)->gen == 7) 920 error->bcs_acthd = I915_READ(BCS_ACTHD);
1263 error->err_int = I915_READ(GEN7_ERR_INT); 921 error->bcs_ipehr = I915_READ(BCS_IPEHR);
922 error->bcs_ipeir = I915_READ(BCS_IPEIR);
923 error->bcs_instdone = I915_READ(BCS_INSTDONE);
924 error->bcs_seqno = 0;
925 if (dev_priv->ring[BCS].get_seqno)
926 error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
927
928 error->vcs_acthd = I915_READ(VCS_ACTHD);
929 error->vcs_ipehr = I915_READ(VCS_IPEHR);
930 error->vcs_ipeir = I915_READ(VCS_IPEIR);
931 error->vcs_instdone = I915_READ(VCS_INSTDONE);
932 error->vcs_seqno = 0;
933 if (dev_priv->ring[VCS].get_seqno)
934 error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
935 }
936 if (INTEL_INFO(dev)->gen >= 4) {
937 error->ipeir = I915_READ(IPEIR_I965);
938 error->ipehr = I915_READ(IPEHR_I965);
939 error->instdone = I915_READ(INSTDONE_I965);
940 error->instps = I915_READ(INSTPS);
941 error->instdone1 = I915_READ(INSTDONE1);
942 error->acthd = I915_READ(ACTHD_I965);
943 error->bbaddr = I915_READ64(BB_ADDR);
944 } else {
945 error->ipeir = I915_READ(IPEIR);
946 error->ipehr = I915_READ(IPEHR);
947 error->instdone = I915_READ(INSTDONE);
948 error->acthd = I915_READ(ACTHD);
949 error->bbaddr = 0;
950 }
951 i915_gem_record_fences(dev, error);
1264 952
1265 i915_get_extra_instdone(dev, error->extra_instdone); 953 /* Record the active batch and ring buffers */
954 for (i = 0; i < I915_NUM_RINGS; i++) {
955 error->batchbuffer[i] =
956 i915_error_first_batchbuffer(dev_priv,
957 &dev_priv->ring[i]);
1266 958
1267 i915_gem_record_fences(dev, error); 959 error->ringbuffer[i] =
1268 i915_gem_record_rings(dev, error); 960 i915_error_object_create(dev_priv,
961 dev_priv->ring[i].obj);
962 }
1269 963
1270 /* Record buffers on the active and pinned lists. */ 964 /* Record buffers on the active and pinned lists. */
1271 error->active_bo = NULL; 965 error->active_bo = NULL;
@@ -1275,9 +969,8 @@ static void i915_capture_error_state(struct drm_device *dev)
1275 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 969 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1276 i++; 970 i++;
1277 error->active_bo_count = i; 971 error->active_bo_count = i;
1278 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 972 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
1279 if (obj->pin_count) 973 i++;
1280 i++;
1281 error->pinned_bo_count = i - error->active_bo_count; 974 error->pinned_bo_count = i - error->active_bo_count;
1282 975
1283 error->active_bo = NULL; 976 error->active_bo = NULL;
@@ -1292,15 +985,15 @@ static void i915_capture_error_state(struct drm_device *dev)
1292 985
1293 if (error->active_bo) 986 if (error->active_bo)
1294 error->active_bo_count = 987 error->active_bo_count =
1295 capture_active_bo(error->active_bo, 988 capture_bo_list(error->active_bo,
1296 error->active_bo_count, 989 error->active_bo_count,
1297 &dev_priv->mm.active_list); 990 &dev_priv->mm.active_list);
1298 991
1299 if (error->pinned_bo) 992 if (error->pinned_bo)
1300 error->pinned_bo_count = 993 error->pinned_bo_count =
1301 capture_pinned_bo(error->pinned_bo, 994 capture_bo_list(error->pinned_bo,
1302 error->pinned_bo_count, 995 error->pinned_bo_count,
1303 &dev_priv->mm.bound_list); 996 &dev_priv->mm.pinned_list);
1304 997
1305 do_gettimeofday(&error->time); 998 do_gettimeofday(&error->time);
1306 999
@@ -1315,22 +1008,21 @@ static void i915_capture_error_state(struct drm_device *dev)
1315 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1008 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1316 1009
1317 if (error) 1010 if (error)
1318 i915_error_state_free(&error->ref); 1011 i915_error_state_free(dev, error);
1319} 1012}
1320 1013
1321void i915_destroy_error_state(struct drm_device *dev) 1014void i915_destroy_error_state(struct drm_device *dev)
1322{ 1015{
1323 struct drm_i915_private *dev_priv = dev->dev_private; 1016 struct drm_i915_private *dev_priv = dev->dev_private;
1324 struct drm_i915_error_state *error; 1017 struct drm_i915_error_state *error;
1325 unsigned long flags;
1326 1018
1327 spin_lock_irqsave(&dev_priv->error_lock, flags); 1019 spin_lock(&dev_priv->error_lock);
1328 error = dev_priv->first_error; 1020 error = dev_priv->first_error;
1329 dev_priv->first_error = NULL; 1021 dev_priv->first_error = NULL;
1330 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1022 spin_unlock(&dev_priv->error_lock);
1331 1023
1332 if (error) 1024 if (error)
1333 kref_put(&error->ref, i915_error_state_free); 1025 i915_error_state_free(dev, error);
1334} 1026}
1335#else 1027#else
1336#define i915_capture_error_state(x) 1028#define i915_capture_error_state(x)
@@ -1339,34 +1031,39 @@ void i915_destroy_error_state(struct drm_device *dev)
1339static void i915_report_and_clear_eir(struct drm_device *dev) 1031static void i915_report_and_clear_eir(struct drm_device *dev)
1340{ 1032{
1341 struct drm_i915_private *dev_priv = dev->dev_private; 1033 struct drm_i915_private *dev_priv = dev->dev_private;
1342 uint32_t instdone[I915_NUM_INSTDONE_REG];
1343 u32 eir = I915_READ(EIR); 1034 u32 eir = I915_READ(EIR);
1344 int pipe, i; 1035 int pipe;
1345 1036
1346 if (!eir) 1037 if (!eir)
1347 return; 1038 return;
1348 1039
1349 pr_err("render error detected, EIR: 0x%08x\n", eir); 1040 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
1350 1041 eir);
1351 i915_get_extra_instdone(dev, instdone);
1352 1042
1353 if (IS_G4X(dev)) { 1043 if (IS_G4X(dev)) {
1354 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1044 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1355 u32 ipeir = I915_READ(IPEIR_I965); 1045 u32 ipeir = I915_READ(IPEIR_I965);
1356 1046
1357 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1047 printk(KERN_ERR " IPEIR: 0x%08x\n",
1358 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1048 I915_READ(IPEIR_I965));
1359 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1049 printk(KERN_ERR " IPEHR: 0x%08x\n",
1360 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1050 I915_READ(IPEHR_I965));
1361 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1051 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1362 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1052 I915_READ(INSTDONE_I965));
1053 printk(KERN_ERR " INSTPS: 0x%08x\n",
1054 I915_READ(INSTPS));
1055 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
1056 I915_READ(INSTDONE1));
1057 printk(KERN_ERR " ACTHD: 0x%08x\n",
1058 I915_READ(ACTHD_I965));
1363 I915_WRITE(IPEIR_I965, ipeir); 1059 I915_WRITE(IPEIR_I965, ipeir);
1364 POSTING_READ(IPEIR_I965); 1060 POSTING_READ(IPEIR_I965);
1365 } 1061 }
1366 if (eir & GM45_ERROR_PAGE_TABLE) { 1062 if (eir & GM45_ERROR_PAGE_TABLE) {
1367 u32 pgtbl_err = I915_READ(PGTBL_ER); 1063 u32 pgtbl_err = I915_READ(PGTBL_ER);
1368 pr_err("page table error\n"); 1064 printk(KERN_ERR "page table error\n");
1369 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1065 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
1066 pgtbl_err);
1370 I915_WRITE(PGTBL_ER, pgtbl_err); 1067 I915_WRITE(PGTBL_ER, pgtbl_err);
1371 POSTING_READ(PGTBL_ER); 1068 POSTING_READ(PGTBL_ER);
1372 } 1069 }
@@ -1375,40 +1072,53 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1375 if (!IS_GEN2(dev)) { 1072 if (!IS_GEN2(dev)) {
1376 if (eir & I915_ERROR_PAGE_TABLE) { 1073 if (eir & I915_ERROR_PAGE_TABLE) {
1377 u32 pgtbl_err = I915_READ(PGTBL_ER); 1074 u32 pgtbl_err = I915_READ(PGTBL_ER);
1378 pr_err("page table error\n"); 1075 printk(KERN_ERR "page table error\n");
1379 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1076 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
1077 pgtbl_err);
1380 I915_WRITE(PGTBL_ER, pgtbl_err); 1078 I915_WRITE(PGTBL_ER, pgtbl_err);
1381 POSTING_READ(PGTBL_ER); 1079 POSTING_READ(PGTBL_ER);
1382 } 1080 }
1383 } 1081 }
1384 1082
1385 if (eir & I915_ERROR_MEMORY_REFRESH) { 1083 if (eir & I915_ERROR_MEMORY_REFRESH) {
1386 pr_err("memory refresh error:\n"); 1084 printk(KERN_ERR "memory refresh error:\n");
1387 for_each_pipe(pipe) 1085 for_each_pipe(pipe)
1388 pr_err("pipe %c stat: 0x%08x\n", 1086 printk(KERN_ERR "pipe %c stat: 0x%08x\n",
1389 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1087 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1390 /* pipestat has already been acked */ 1088 /* pipestat has already been acked */
1391 } 1089 }
1392 if (eir & I915_ERROR_INSTRUCTION) { 1090 if (eir & I915_ERROR_INSTRUCTION) {
1393 pr_err("instruction error\n"); 1091 printk(KERN_ERR "instruction error\n");
1394 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1092 printk(KERN_ERR " INSTPM: 0x%08x\n",
1395 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1093 I915_READ(INSTPM));
1396 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1397 if (INTEL_INFO(dev)->gen < 4) { 1094 if (INTEL_INFO(dev)->gen < 4) {
1398 u32 ipeir = I915_READ(IPEIR); 1095 u32 ipeir = I915_READ(IPEIR);
1399 1096
1400 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1097 printk(KERN_ERR " IPEIR: 0x%08x\n",
1401 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1098 I915_READ(IPEIR));
1402 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1099 printk(KERN_ERR " IPEHR: 0x%08x\n",
1100 I915_READ(IPEHR));
1101 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1102 I915_READ(INSTDONE));
1103 printk(KERN_ERR " ACTHD: 0x%08x\n",
1104 I915_READ(ACTHD));
1403 I915_WRITE(IPEIR, ipeir); 1105 I915_WRITE(IPEIR, ipeir);
1404 POSTING_READ(IPEIR); 1106 POSTING_READ(IPEIR);
1405 } else { 1107 } else {
1406 u32 ipeir = I915_READ(IPEIR_I965); 1108 u32 ipeir = I915_READ(IPEIR_I965);
1407 1109
1408 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1110 printk(KERN_ERR " IPEIR: 0x%08x\n",
1409 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1111 I915_READ(IPEIR_I965));
1410 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1112 printk(KERN_ERR " IPEHR: 0x%08x\n",
1411 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1113 I915_READ(IPEHR_I965));
1114 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1115 I915_READ(INSTDONE_I965));
1116 printk(KERN_ERR " INSTPS: 0x%08x\n",
1117 I915_READ(INSTPS));
1118 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
1119 I915_READ(INSTDONE1));
1120 printk(KERN_ERR " ACTHD: 0x%08x\n",
1121 I915_READ(ACTHD_I965));
1412 I915_WRITE(IPEIR_I965, ipeir); 1122 I915_WRITE(IPEIR_I965, ipeir);
1413 POSTING_READ(IPEIR_I965); 1123 POSTING_READ(IPEIR_I965);
1414 } 1124 }
@@ -1441,8 +1151,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1441void i915_handle_error(struct drm_device *dev, bool wedged) 1151void i915_handle_error(struct drm_device *dev, bool wedged)
1442{ 1152{
1443 struct drm_i915_private *dev_priv = dev->dev_private; 1153 struct drm_i915_private *dev_priv = dev->dev_private;
1444 struct intel_ring_buffer *ring;
1445 int i;
1446 1154
1447 i915_capture_error_state(dev); 1155 i915_capture_error_state(dev);
1448 i915_report_and_clear_eir(dev); 1156 i915_report_and_clear_eir(dev);
@@ -1454,8 +1162,11 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1454 /* 1162 /*
1455 * Wakeup waiting processes so they don't hang 1163 * Wakeup waiting processes so they don't hang
1456 */ 1164 */
1457 for_each_ring(ring, dev_priv, i) 1165 wake_up_all(&dev_priv->ring[RCS].irq_queue);
1458 wake_up_all(&ring->irq_queue); 1166 if (HAS_BSD(dev))
1167 wake_up_all(&dev_priv->ring[VCS].irq_queue);
1168 if (HAS_BLT(dev))
1169 wake_up_all(&dev_priv->ring[BCS].irq_queue);
1459 } 1170 }
1460 1171
1461 queue_work(dev_priv->wq, &dev_priv->error_work); 1172 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1478,9 +1189,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1478 spin_lock_irqsave(&dev->event_lock, flags); 1189 spin_lock_irqsave(&dev->event_lock, flags);
1479 work = intel_crtc->unpin_work; 1190 work = intel_crtc->unpin_work;
1480 1191
1481 if (work == NULL || 1192 if (work == NULL || work->pending || !work->enable_stall_check) {
1482 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1483 !work->enable_stall_check) {
1484 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1193 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1485 spin_unlock_irqrestore(&dev->event_lock, flags); 1194 spin_unlock_irqrestore(&dev->event_lock, flags);
1486 return; 1195 return;
@@ -1490,12 +1199,11 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1490 obj = work->pending_flip_obj; 1199 obj = work->pending_flip_obj;
1491 if (INTEL_INFO(dev)->gen >= 4) { 1200 if (INTEL_INFO(dev)->gen >= 4) {
1492 int dspsurf = DSPSURF(intel_crtc->plane); 1201 int dspsurf = DSPSURF(intel_crtc->plane);
1493 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1202 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
1494 obj->gtt_offset;
1495 } else { 1203 } else {
1496 int dspaddr = DSPADDR(intel_crtc->plane); 1204 int dspaddr = DSPADDR(intel_crtc->plane);
1497 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1205 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1498 crtc->y * crtc->fb->pitches[0] + 1206 crtc->y * crtc->fb->pitch +
1499 crtc->x * crtc->fb->bits_per_pixel/8); 1207 crtc->x * crtc->fb->bits_per_pixel/8);
1500 } 1208 }
1501 1209
@@ -1507,6 +1215,248 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1507 } 1215 }
1508} 1216}
1509 1217
1218static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1219{
1220 struct drm_device *dev = (struct drm_device *) arg;
1221 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1222 struct drm_i915_master_private *master_priv;
1223 u32 iir, new_iir;
1224 u32 pipe_stats[I915_MAX_PIPES];
1225 u32 vblank_status;
1226 int vblank = 0;
1227 unsigned long irqflags;
1228 int irq_received;
1229 int ret = IRQ_NONE, pipe;
1230 bool blc_event = false;
1231
1232 atomic_inc(&dev_priv->irq_received);
1233
1234 iir = I915_READ(IIR);
1235
1236 if (INTEL_INFO(dev)->gen >= 4)
1237 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
1238 else
1239 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
1240
1241 for (;;) {
1242 irq_received = iir != 0;
1243
1244 /* Can't rely on pipestat interrupt bit in iir as it might
1245 * have been cleared after the pipestat interrupt was received.
1246 * It doesn't set the bit in iir again, but it still produces
1247 * interrupts (for non-MSI).
1248 */
1249 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1250 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1251 i915_handle_error(dev, false);
1252
1253 for_each_pipe(pipe) {
1254 int reg = PIPESTAT(pipe);
1255 pipe_stats[pipe] = I915_READ(reg);
1256
1257 /*
1258 * Clear the PIPE*STAT regs before the IIR
1259 */
1260 if (pipe_stats[pipe] & 0x8000ffff) {
1261 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1262 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1263 pipe_name(pipe));
1264 I915_WRITE(reg, pipe_stats[pipe]);
1265 irq_received = 1;
1266 }
1267 }
1268 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1269
1270 if (!irq_received)
1271 break;
1272
1273 ret = IRQ_HANDLED;
1274
1275 /* Consume port. Then clear IIR or we'll miss events */
1276 if ((I915_HAS_HOTPLUG(dev)) &&
1277 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1278 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1279
1280 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1281 hotplug_status);
1282 if (hotplug_status & dev_priv->hotplug_supported_mask)
1283 queue_work(dev_priv->wq,
1284 &dev_priv->hotplug_work);
1285
1286 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1287 I915_READ(PORT_HOTPLUG_STAT);
1288 }
1289
1290 I915_WRITE(IIR, iir);
1291 new_iir = I915_READ(IIR); /* Flush posted writes */
1292
1293 if (dev->primary->master) {
1294 master_priv = dev->primary->master->driver_priv;
1295 if (master_priv->sarea_priv)
1296 master_priv->sarea_priv->last_dispatch =
1297 READ_BREADCRUMB(dev_priv);
1298 }
1299
1300 if (iir & I915_USER_INTERRUPT)
1301 notify_ring(dev, &dev_priv->ring[RCS]);
1302 if (iir & I915_BSD_USER_INTERRUPT)
1303 notify_ring(dev, &dev_priv->ring[VCS]);
1304
1305 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1306 intel_prepare_page_flip(dev, 0);
1307 if (dev_priv->flip_pending_is_done)
1308 intel_finish_page_flip_plane(dev, 0);
1309 }
1310
1311 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1312 intel_prepare_page_flip(dev, 1);
1313 if (dev_priv->flip_pending_is_done)
1314 intel_finish_page_flip_plane(dev, 1);
1315 }
1316
1317 for_each_pipe(pipe) {
1318 if (pipe_stats[pipe] & vblank_status &&
1319 drm_handle_vblank(dev, pipe)) {
1320 vblank++;
1321 if (!dev_priv->flip_pending_is_done) {
1322 i915_pageflip_stall_check(dev, pipe);
1323 intel_finish_page_flip(dev, pipe);
1324 }
1325 }
1326
1327 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1328 blc_event = true;
1329 }
1330
1331
1332 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1333 intel_opregion_asle_intr(dev);
1334
1335 /* With MSI, interrupts are only generated when iir
1336 * transitions from zero to nonzero. If another bit got
1337 * set while we were handling the existing iir bits, then
1338 * we would never get another interrupt.
1339 *
1340 * This is fine on non-MSI as well, as if we hit this path
1341 * we avoid exiting the interrupt handler only to generate
1342 * another one.
1343 *
1344 * Note that for MSI this could cause a stray interrupt report
1345 * if an interrupt landed in the time between writing IIR and
1346 * the posting read. This should be rare enough to never
1347 * trigger the 99% of 100,000 interrupts test for disabling
1348 * stray interrupts.
1349 */
1350 iir = new_iir;
1351 }
1352
1353 return ret;
1354}
1355
1356static int i915_emit_irq(struct drm_device * dev)
1357{
1358 drm_i915_private_t *dev_priv = dev->dev_private;
1359 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1360
1361 i915_kernel_lost_context(dev);
1362
1363 DRM_DEBUG_DRIVER("\n");
1364
1365 dev_priv->counter++;
1366 if (dev_priv->counter > 0x7FFFFFFFUL)
1367 dev_priv->counter = 1;
1368 if (master_priv->sarea_priv)
1369 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1370
1371 if (BEGIN_LP_RING(4) == 0) {
1372 OUT_RING(MI_STORE_DWORD_INDEX);
1373 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1374 OUT_RING(dev_priv->counter);
1375 OUT_RING(MI_USER_INTERRUPT);
1376 ADVANCE_LP_RING();
1377 }
1378
1379 return dev_priv->counter;
1380}
1381
1382static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1383{
1384 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1385 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1386 int ret = 0;
1387 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1388
1389 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1390 READ_BREADCRUMB(dev_priv));
1391
1392 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1393 if (master_priv->sarea_priv)
1394 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1395 return 0;
1396 }
1397
1398 if (master_priv->sarea_priv)
1399 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1400
1401 if (ring->irq_get(ring)) {
1402 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1403 READ_BREADCRUMB(dev_priv) >= irq_nr);
1404 ring->irq_put(ring);
1405 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
1406 ret = -EBUSY;
1407
1408 if (ret == -EBUSY) {
1409 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1410 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1411 }
1412
1413 return ret;
1414}
1415
1416/* Needs the lock as it touches the ring.
1417 */
1418int i915_irq_emit(struct drm_device *dev, void *data,
1419 struct drm_file *file_priv)
1420{
1421 drm_i915_private_t *dev_priv = dev->dev_private;
1422 drm_i915_irq_emit_t *emit = data;
1423 int result;
1424
1425 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1426 DRM_ERROR("called with no initialization\n");
1427 return -EINVAL;
1428 }
1429
1430 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1431
1432 mutex_lock(&dev->struct_mutex);
1433 result = i915_emit_irq(dev);
1434 mutex_unlock(&dev->struct_mutex);
1435
1436 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1437 DRM_ERROR("copy_to_user\n");
1438 return -EFAULT;
1439 }
1440
1441 return 0;
1442}
1443
1444/* Doesn't need the hardware lock.
1445 */
1446int i915_irq_wait(struct drm_device *dev, void *data,
1447 struct drm_file *file_priv)
1448{
1449 drm_i915_private_t *dev_priv = dev->dev_private;
1450 drm_i915_irq_wait_t *irqwait = data;
1451
1452 if (!dev_priv) {
1453 DRM_ERROR("called with no initialization\n");
1454 return -EINVAL;
1455 }
1456
1457 return i915_wait_irq(dev, irqwait->irq_seq);
1458}
1459
1510/* Called from drm generic code, passed 'crtc' which 1460/* Called from drm generic code, passed 'crtc' which
1511 * we use as a pipe index 1461 * we use as a pipe index
1512 */ 1462 */
@@ -1528,7 +1478,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
1528 1478
1529 /* maintain vblank delivery even in deep C-states */ 1479 /* maintain vblank delivery even in deep C-states */
1530 if (dev_priv->info->gen == 3) 1480 if (dev_priv->info->gen == 3)
1531 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1481 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1532 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1482 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1533 1483
1534 return 0; 1484 return 0;
@@ -1544,7 +1494,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1544 1494
1545 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1546 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1496 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1547 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1497 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1548 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1498 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1549 1499
1550 return 0; 1500 return 0;
@@ -1559,31 +1509,8 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1559 return -EINVAL; 1509 return -EINVAL;
1560 1510
1561 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1511 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1562 ironlake_enable_display_irq(dev_priv, 1512 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1563 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1513 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1564 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1565
1566 return 0;
1567}
1568
1569static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1570{
1571 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1572 unsigned long irqflags;
1573 u32 imr;
1574
1575 if (!i915_pipe_enabled(dev, pipe))
1576 return -EINVAL;
1577
1578 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1579 imr = I915_READ(VLV_IMR);
1580 if (pipe == 0)
1581 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1582 else
1583 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1584 I915_WRITE(VLV_IMR, imr);
1585 i915_enable_pipestat(dev_priv, pipe,
1586 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1587 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1514 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1588 1515
1589 return 0; 1516 return 0;
@@ -1599,7 +1526,8 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
1599 1526
1600 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1527 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1601 if (dev_priv->info->gen == 3) 1528 if (dev_priv->info->gen == 3)
1602 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1529 I915_WRITE(INSTPM,
1530 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1603 1531
1604 i915_disable_pipestat(dev_priv, pipe, 1532 i915_disable_pipestat(dev_priv, pipe,
1605 PIPE_VBLANK_INTERRUPT_ENABLE | 1533 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1614,7 +1542,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1614 1542
1615 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1543 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1616 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1544 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1617 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1545 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1618 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1546 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1619} 1547}
1620 1548
@@ -1624,27 +1552,63 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1624 unsigned long irqflags; 1552 unsigned long irqflags;
1625 1553
1626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1554 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1627 ironlake_disable_display_irq(dev_priv, 1555 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1628 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1556 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1629 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1557 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1630} 1558}
1631 1559
1632static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1560/* Set the vblank monitor pipe
1561 */
1562int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1563 struct drm_file *file_priv)
1633{ 1564{
1634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1565 drm_i915_private_t *dev_priv = dev->dev_private;
1635 unsigned long irqflags;
1636 u32 imr;
1637 1566
1638 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1567 if (!dev_priv) {
1639 i915_disable_pipestat(dev_priv, pipe, 1568 DRM_ERROR("called with no initialization\n");
1640 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1569 return -EINVAL;
1641 imr = I915_READ(VLV_IMR); 1570 }
1642 if (pipe == 0) 1571
1643 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1572 return 0;
1644 else 1573}
1645 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1574
1646 I915_WRITE(VLV_IMR, imr); 1575int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1647 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1576 struct drm_file *file_priv)
1577{
1578 drm_i915_private_t *dev_priv = dev->dev_private;
1579 drm_i915_vblank_pipe_t *pipe = data;
1580
1581 if (!dev_priv) {
1582 DRM_ERROR("called with no initialization\n");
1583 return -EINVAL;
1584 }
1585
1586 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1587
1588 return 0;
1589}
1590
1591/**
1592 * Schedule buffer swap at given vertical blank.
1593 */
1594int i915_vblank_swap(struct drm_device *dev, void *data,
1595 struct drm_file *file_priv)
1596{
1597 /* The delayed swap mechanism was fundamentally racy, and has been
1598 * removed. The model was that the client requested a delayed flip/swap
1599 * from the kernel, then waited for vblank before continuing to perform
1600 * rendering. The problem was that the kernel might wake the client
1601 * up before it dispatched the vblank swap (since the lock has to be
1602 * held while touching the ringbuffer), in which case the client would
1603 * clear and start the next frame before the swap occurred, and
1604 * flicker would occur in addition to likely missing the vblank.
1605 *
1606 * In the absence of this ioctl, userland falls back to a correct path
1607 * of waiting for a vblank, then dispatching the swap on its own.
1608 * Context switching to userland and back is plenty fast enough for
1609 * meeting the requirements of vblank swapping.
1610 */
1611 return -EINVAL;
1648} 1612}
1649 1613
1650static u32 1614static u32
@@ -1657,12 +1621,13 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1657static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1621static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1658{ 1622{
1659 if (list_empty(&ring->request_list) || 1623 if (list_empty(&ring->request_list) ||
1660 i915_seqno_passed(ring->get_seqno(ring, false), 1624 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1661 ring_last_seqno(ring))) {
1662 /* Issue a wake-up to catch stuck h/w. */ 1625 /* Issue a wake-up to catch stuck h/w. */
1663 if (waitqueue_active(&ring->irq_queue)) { 1626 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1664 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1627 DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1665 ring->name); 1628 ring->name,
1629 ring->waiting_seqno,
1630 ring->get_seqno(ring));
1666 wake_up_all(&ring->irq_queue); 1631 wake_up_all(&ring->irq_queue);
1667 *err = true; 1632 *err = true;
1668 } 1633 }
@@ -1682,35 +1647,13 @@ static bool kick_ring(struct intel_ring_buffer *ring)
1682 I915_WRITE_CTL(ring, tmp); 1647 I915_WRITE_CTL(ring, tmp);
1683 return true; 1648 return true;
1684 } 1649 }
1685 return false; 1650 if (IS_GEN6(dev) &&
1686} 1651 (tmp & RING_WAIT_SEMAPHORE)) {
1687 1652 DRM_ERROR("Kicking stuck semaphore on %s\n",
1688static bool i915_hangcheck_hung(struct drm_device *dev) 1653 ring->name);
1689{ 1654 I915_WRITE_CTL(ring, tmp);
1690 drm_i915_private_t *dev_priv = dev->dev_private; 1655 return true;
1691
1692 if (dev_priv->hangcheck_count++ > 1) {
1693 bool hung = true;
1694
1695 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1696 i915_handle_error(dev, true);
1697
1698 if (!IS_GEN2(dev)) {
1699 struct intel_ring_buffer *ring;
1700 int i;
1701
1702 /* Is the chip hanging on a WAIT_FOR_EVENT?
1703 * If so we can simply poke the RB_WAIT bit
1704 * and break the hang. This should work on
1705 * all but the second generation chipsets.
1706 */
1707 for_each_ring(ring, dev_priv, i)
1708 hung &= !kick_ring(ring);
1709 }
1710
1711 return hung;
1712 } 1656 }
1713
1714 return false; 1657 return false;
1715} 1658}
1716 1659
@@ -1724,50 +1667,72 @@ void i915_hangcheck_elapsed(unsigned long data)
1724{ 1667{
1725 struct drm_device *dev = (struct drm_device *)data; 1668 struct drm_device *dev = (struct drm_device *)data;
1726 drm_i915_private_t *dev_priv = dev->dev_private; 1669 drm_i915_private_t *dev_priv = dev->dev_private;
1727 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1670 uint32_t acthd, instdone, instdone1;
1728 struct intel_ring_buffer *ring; 1671 bool err = false;
1729 bool err = false, idle;
1730 int i;
1731 1672
1732 if (!i915_enable_hangcheck) 1673 if (!i915_enable_hangcheck)
1733 return; 1674 return;
1734 1675
1735 memset(acthd, 0, sizeof(acthd));
1736 idle = true;
1737 for_each_ring(ring, dev_priv, i) {
1738 idle &= i915_hangcheck_ring_idle(ring, &err);
1739 acthd[i] = intel_ring_get_active_head(ring);
1740 }
1741
1742 /* If all work is done then ACTHD clearly hasn't advanced. */ 1676 /* If all work is done then ACTHD clearly hasn't advanced. */
1743 if (idle) { 1677 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1744 if (err) { 1678 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1745 if (i915_hangcheck_hung(dev)) 1679 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1746 return;
1747
1748 goto repeat;
1749 }
1750
1751 dev_priv->hangcheck_count = 0; 1680 dev_priv->hangcheck_count = 0;
1681 if (err)
1682 goto repeat;
1752 return; 1683 return;
1753 } 1684 }
1754 1685
1755 i915_get_extra_instdone(dev, instdone); 1686 if (INTEL_INFO(dev)->gen < 4) {
1756 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1687 acthd = I915_READ(ACTHD);
1757 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1688 instdone = I915_READ(INSTDONE);
1758 if (i915_hangcheck_hung(dev)) 1689 instdone1 = 0;
1690 } else {
1691 acthd = I915_READ(ACTHD_I965);
1692 instdone = I915_READ(INSTDONE_I965);
1693 instdone1 = I915_READ(INSTDONE1);
1694 }
1695
1696 if (dev_priv->last_acthd == acthd &&
1697 dev_priv->last_instdone == instdone &&
1698 dev_priv->last_instdone1 == instdone1) {
1699 if (dev_priv->hangcheck_count++ > 1) {
1700 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1701
1702 if (!IS_GEN2(dev)) {
1703 /* Is the chip hanging on a WAIT_FOR_EVENT?
1704 * If so we can simply poke the RB_WAIT bit
1705 * and break the hang. This should work on
1706 * all but the second generation chipsets.
1707 */
1708
1709 if (kick_ring(&dev_priv->ring[RCS]))
1710 goto repeat;
1711
1712 if (HAS_BSD(dev) &&
1713 kick_ring(&dev_priv->ring[VCS]))
1714 goto repeat;
1715
1716 if (HAS_BLT(dev) &&
1717 kick_ring(&dev_priv->ring[BCS]))
1718 goto repeat;
1719 }
1720
1721 i915_handle_error(dev, true);
1759 return; 1722 return;
1723 }
1760 } else { 1724 } else {
1761 dev_priv->hangcheck_count = 0; 1725 dev_priv->hangcheck_count = 0;
1762 1726
1763 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1727 dev_priv->last_acthd = acthd;
1764 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1728 dev_priv->last_instdone = instdone;
1729 dev_priv->last_instdone1 = instdone1;
1765 } 1730 }
1766 1731
1767repeat: 1732repeat:
1768 /* Reset timer case chip hangs without another request being added */ 1733 /* Reset timer case chip hangs without another request being added */
1769 mod_timer(&dev_priv->hangcheck_timer, 1734 mod_timer(&dev_priv->hangcheck_timer,
1770 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1735 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1771} 1736}
1772 1737
1773/* drm_dma.h hooks 1738/* drm_dma.h hooks
@@ -1778,7 +1743,23 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1778 1743
1779 atomic_set(&dev_priv->irq_received, 0); 1744 atomic_set(&dev_priv->irq_received, 0);
1780 1745
1746 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1747 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1748 if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1749 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1750
1781 I915_WRITE(HWSTAM, 0xeffe); 1751 I915_WRITE(HWSTAM, 0xeffe);
1752 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1753 /* Workaround stalls observed on Sandy Bridge GPUs by
1754 * making the blitter command streamer generate a
1755 * write to the Hardware Status Page for
1756 * MI_USER_INTERRUPT. This appears to serialize the
1757 * previous seqno write out before the interrupt
1758 * happens.
1759 */
1760 I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
1761 I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
1762 }
1782 1763
1783 /* XXX hotplug from PCH */ 1764 /* XXX hotplug from PCH */
1784 1765
@@ -1797,58 +1778,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1797 POSTING_READ(SDEIER); 1778 POSTING_READ(SDEIER);
1798} 1779}
1799 1780
1800static void valleyview_irq_preinstall(struct drm_device *dev)
1801{
1802 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1803 int pipe;
1804
1805 atomic_set(&dev_priv->irq_received, 0);
1806
1807 /* VLV magic */
1808 I915_WRITE(VLV_IMR, 0);
1809 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1810 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1811 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1812
1813 /* and GT */
1814 I915_WRITE(GTIIR, I915_READ(GTIIR));
1815 I915_WRITE(GTIIR, I915_READ(GTIIR));
1816 I915_WRITE(GTIMR, 0xffffffff);
1817 I915_WRITE(GTIER, 0x0);
1818 POSTING_READ(GTIER);
1819
1820 I915_WRITE(DPINVGTT, 0xff);
1821
1822 I915_WRITE(PORT_HOTPLUG_EN, 0);
1823 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1824 for_each_pipe(pipe)
1825 I915_WRITE(PIPESTAT(pipe), 0xffff);
1826 I915_WRITE(VLV_IIR, 0xffffffff);
1827 I915_WRITE(VLV_IMR, 0xffffffff);
1828 I915_WRITE(VLV_IER, 0x0);
1829 POSTING_READ(VLV_IER);
1830}
1831
1832/*
1833 * Enable digital hotplug on the PCH, and configure the DP short pulse
1834 * duration to 2ms (which is the minimum in the Display Port spec)
1835 *
1836 * This register is the same on all known PCH chips.
1837 */
1838
1839static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1840{
1841 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1842 u32 hotplug;
1843
1844 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1845 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1846 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1847 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1848 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1849 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1850}
1851
1852static int ironlake_irq_postinstall(struct drm_device *dev) 1781static int ironlake_irq_postinstall(struct drm_device *dev)
1853{ 1782{
1854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1783 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1858,6 +1787,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1858 u32 render_irqs; 1787 u32 render_irqs;
1859 u32 hotplug_mask; 1788 u32 hotplug_mask;
1860 1789
1790 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1791 if (HAS_BSD(dev))
1792 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1793 if (HAS_BLT(dev))
1794 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1795
1796 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1861 dev_priv->irq_mask = ~display_mask; 1797 dev_priv->irq_mask = ~display_mask;
1862 1798
1863 /* should always can generate irq */ 1799 /* should always can generate irq */
@@ -1874,8 +1810,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1874 if (IS_GEN6(dev)) 1810 if (IS_GEN6(dev))
1875 render_irqs = 1811 render_irqs =
1876 GT_USER_INTERRUPT | 1812 GT_USER_INTERRUPT |
1877 GEN6_BSD_USER_INTERRUPT | 1813 GT_GEN6_BSD_USER_INTERRUPT |
1878 GEN6_BLITTER_USER_INTERRUPT; 1814 GT_BLT_USER_INTERRUPT;
1879 else 1815 else
1880 render_irqs = 1816 render_irqs =
1881 GT_USER_INTERRUPT | 1817 GT_USER_INTERRUPT |
@@ -1904,8 +1840,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1904 I915_WRITE(SDEIER, hotplug_mask); 1840 I915_WRITE(SDEIER, hotplug_mask);
1905 POSTING_READ(SDEIER); 1841 POSTING_READ(SDEIER);
1906 1842
1907 ironlake_enable_pch_hotplug(dev);
1908
1909 if (IS_IRONLAKE_M(dev)) { 1843 if (IS_IRONLAKE_M(dev)) {
1910 /* Clear & enable PCU event interrupts */ 1844 /* Clear & enable PCU event interrupts */
1911 I915_WRITE(DEIIR, DE_PCU_EVENT); 1845 I915_WRITE(DEIIR, DE_PCU_EVENT);
@@ -1920,33 +1854,35 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1920{ 1854{
1921 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1855 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1922 /* enable kind of interrupts always enabled */ 1856 /* enable kind of interrupts always enabled */
1923 u32 display_mask = 1857 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1924 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 1858 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
1925 DE_PLANEC_FLIP_DONE_IVB | 1859 DE_PLANEB_FLIP_DONE_IVB;
1926 DE_PLANEB_FLIP_DONE_IVB |
1927 DE_PLANEA_FLIP_DONE_IVB;
1928 u32 render_irqs; 1860 u32 render_irqs;
1929 u32 hotplug_mask; 1861 u32 hotplug_mask;
1930 1862
1863 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1864 if (HAS_BSD(dev))
1865 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1866 if (HAS_BLT(dev))
1867 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1868
1869 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1931 dev_priv->irq_mask = ~display_mask; 1870 dev_priv->irq_mask = ~display_mask;
1932 1871
1933 /* should always can generate irq */ 1872 /* should always can generate irq */
1934 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1873 I915_WRITE(DEIIR, I915_READ(DEIIR));
1935 I915_WRITE(DEIMR, dev_priv->irq_mask); 1874 I915_WRITE(DEIMR, dev_priv->irq_mask);
1936 I915_WRITE(DEIER, 1875 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1937 display_mask | 1876 DE_PIPEB_VBLANK_IVB);
1938 DE_PIPEC_VBLANK_IVB |
1939 DE_PIPEB_VBLANK_IVB |
1940 DE_PIPEA_VBLANK_IVB);
1941 POSTING_READ(DEIER); 1877 POSTING_READ(DEIER);
1942 1878
1943 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1879 dev_priv->gt_irq_mask = ~0;
1944 1880
1945 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1881 I915_WRITE(GTIIR, I915_READ(GTIIR));
1946 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1882 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1947 1883
1948 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1884 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1949 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1885 GT_BLT_USER_INTERRUPT;
1950 I915_WRITE(GTIER, render_irqs); 1886 I915_WRITE(GTIER, render_irqs);
1951 POSTING_READ(GTIER); 1887 POSTING_READ(GTIER);
1952 1888
@@ -1961,286 +1897,25 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1961 I915_WRITE(SDEIER, hotplug_mask); 1897 I915_WRITE(SDEIER, hotplug_mask);
1962 POSTING_READ(SDEIER); 1898 POSTING_READ(SDEIER);
1963 1899
1964 ironlake_enable_pch_hotplug(dev);
1965
1966 return 0; 1900 return 0;
1967} 1901}
1968 1902
1969static int valleyview_irq_postinstall(struct drm_device *dev) 1903static void i915_driver_irq_preinstall(struct drm_device * dev)
1970{
1971 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1972 u32 enable_mask;
1973 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1974 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1975 u32 render_irqs;
1976 u16 msid;
1977
1978 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1979 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1980 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1981 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1982 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1983
1984 /*
1985 *Leave vblank interrupts masked initially. enable/disable will
1986 * toggle them based on usage.
1987 */
1988 dev_priv->irq_mask = (~enable_mask) |
1989 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1990 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1991
1992 dev_priv->pipestat[0] = 0;
1993 dev_priv->pipestat[1] = 0;
1994
1995 /* Hack for broken MSIs on VLV */
1996 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1997 pci_read_config_word(dev->pdev, 0x98, &msid);
1998 msid &= 0xff; /* mask out delivery bits */
1999 msid |= (1<<14);
2000 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2001
2002 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2003 I915_WRITE(VLV_IER, enable_mask);
2004 I915_WRITE(VLV_IIR, 0xffffffff);
2005 I915_WRITE(PIPESTAT(0), 0xffff);
2006 I915_WRITE(PIPESTAT(1), 0xffff);
2007 POSTING_READ(VLV_IER);
2008
2009 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2010 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2011
2012 I915_WRITE(VLV_IIR, 0xffffffff);
2013 I915_WRITE(VLV_IIR, 0xffffffff);
2014
2015 I915_WRITE(GTIIR, I915_READ(GTIIR));
2016 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2017
2018 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2019 GEN6_BLITTER_USER_INTERRUPT;
2020 I915_WRITE(GTIER, render_irqs);
2021 POSTING_READ(GTIER);
2022
2023 /* ack & enable invalid PTE error interrupts */
2024#if 0 /* FIXME: add support to irq handler for checking these bits */
2025 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2026 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2027#endif
2028
2029 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2030 /* Note HDMI and DP share bits */
2031 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2032 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2033 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2034 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2035 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2036 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2037 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2038 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2039 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2040 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2041 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2042 hotplug_en |= CRT_HOTPLUG_INT_EN;
2043 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2044 }
2045
2046 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2047
2048 return 0;
2049}
2050
2051static void valleyview_irq_uninstall(struct drm_device *dev)
2052{
2053 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2054 int pipe;
2055
2056 if (!dev_priv)
2057 return;
2058
2059 for_each_pipe(pipe)
2060 I915_WRITE(PIPESTAT(pipe), 0xffff);
2061
2062 I915_WRITE(HWSTAM, 0xffffffff);
2063 I915_WRITE(PORT_HOTPLUG_EN, 0);
2064 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2065 for_each_pipe(pipe)
2066 I915_WRITE(PIPESTAT(pipe), 0xffff);
2067 I915_WRITE(VLV_IIR, 0xffffffff);
2068 I915_WRITE(VLV_IMR, 0xffffffff);
2069 I915_WRITE(VLV_IER, 0x0);
2070 POSTING_READ(VLV_IER);
2071}
2072
2073static void ironlake_irq_uninstall(struct drm_device *dev)
2074{
2075 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2076
2077 if (!dev_priv)
2078 return;
2079
2080 I915_WRITE(HWSTAM, 0xffffffff);
2081
2082 I915_WRITE(DEIMR, 0xffffffff);
2083 I915_WRITE(DEIER, 0x0);
2084 I915_WRITE(DEIIR, I915_READ(DEIIR));
2085
2086 I915_WRITE(GTIMR, 0xffffffff);
2087 I915_WRITE(GTIER, 0x0);
2088 I915_WRITE(GTIIR, I915_READ(GTIIR));
2089
2090 I915_WRITE(SDEIMR, 0xffffffff);
2091 I915_WRITE(SDEIER, 0x0);
2092 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2093}
2094
2095static void i8xx_irq_preinstall(struct drm_device * dev)
2096{ 1904{
2097 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1905 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2098 int pipe; 1906 int pipe;
2099 1907
2100 atomic_set(&dev_priv->irq_received, 0); 1908 atomic_set(&dev_priv->irq_received, 0);
2101 1909
2102 for_each_pipe(pipe) 1910 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2103 I915_WRITE(PIPESTAT(pipe), 0); 1911 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2104 I915_WRITE16(IMR, 0xffff);
2105 I915_WRITE16(IER, 0x0);
2106 POSTING_READ16(IER);
2107}
2108
2109static int i8xx_irq_postinstall(struct drm_device *dev)
2110{
2111 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2112
2113 dev_priv->pipestat[0] = 0;
2114 dev_priv->pipestat[1] = 0;
2115
2116 I915_WRITE16(EMR,
2117 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2118
2119 /* Unmask the interrupts that we always want on. */
2120 dev_priv->irq_mask =
2121 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2122 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2123 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2124 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2125 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2126 I915_WRITE16(IMR, dev_priv->irq_mask);
2127
2128 I915_WRITE16(IER,
2129 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2130 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2131 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2132 I915_USER_INTERRUPT);
2133 POSTING_READ16(IER);
2134
2135 return 0;
2136}
2137
2138static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2139{
2140 struct drm_device *dev = (struct drm_device *) arg;
2141 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2142 u16 iir, new_iir;
2143 u32 pipe_stats[2];
2144 unsigned long irqflags;
2145 int irq_received;
2146 int pipe;
2147 u16 flip_mask =
2148 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2149 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2150
2151 atomic_inc(&dev_priv->irq_received);
2152
2153 iir = I915_READ16(IIR);
2154 if (iir == 0)
2155 return IRQ_NONE;
2156
2157 while (iir & ~flip_mask) {
2158 /* Can't rely on pipestat interrupt bit in iir as it might
2159 * have been cleared after the pipestat interrupt was received.
2160 * It doesn't set the bit in iir again, but it still produces
2161 * interrupts (for non-MSI).
2162 */
2163 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2164 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2165 i915_handle_error(dev, false);
2166
2167 for_each_pipe(pipe) {
2168 int reg = PIPESTAT(pipe);
2169 pipe_stats[pipe] = I915_READ(reg);
2170
2171 /*
2172 * Clear the PIPE*STAT regs before the IIR
2173 */
2174 if (pipe_stats[pipe] & 0x8000ffff) {
2175 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2176 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2177 pipe_name(pipe));
2178 I915_WRITE(reg, pipe_stats[pipe]);
2179 irq_received = 1;
2180 }
2181 }
2182 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2183
2184 I915_WRITE16(IIR, iir & ~flip_mask);
2185 new_iir = I915_READ16(IIR); /* Flush posted writes */
2186
2187 i915_update_dri1_breadcrumb(dev);
2188
2189 if (iir & I915_USER_INTERRUPT)
2190 notify_ring(dev, &dev_priv->ring[RCS]);
2191
2192 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2193 drm_handle_vblank(dev, 0)) {
2194 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2195 intel_prepare_page_flip(dev, 0);
2196 intel_finish_page_flip(dev, 0);
2197 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2198 }
2199 }
2200
2201 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2202 drm_handle_vblank(dev, 1)) {
2203 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2204 intel_prepare_page_flip(dev, 1);
2205 intel_finish_page_flip(dev, 1);
2206 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2207 }
2208 }
2209
2210 iir = new_iir;
2211 }
2212
2213 return IRQ_HANDLED;
2214}
2215
2216static void i8xx_irq_uninstall(struct drm_device * dev)
2217{
2218 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2219 int pipe;
2220
2221 for_each_pipe(pipe) {
2222 /* Clear enable bits; then clear status bits */
2223 I915_WRITE(PIPESTAT(pipe), 0);
2224 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2225 }
2226 I915_WRITE16(IMR, 0xffff);
2227 I915_WRITE16(IER, 0x0);
2228 I915_WRITE16(IIR, I915_READ16(IIR));
2229}
2230
2231static void i915_irq_preinstall(struct drm_device * dev)
2232{
2233 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2234 int pipe;
2235
2236 atomic_set(&dev_priv->irq_received, 0);
2237 1912
2238 if (I915_HAS_HOTPLUG(dev)) { 1913 if (I915_HAS_HOTPLUG(dev)) {
2239 I915_WRITE(PORT_HOTPLUG_EN, 0); 1914 I915_WRITE(PORT_HOTPLUG_EN, 0);
2240 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1915 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2241 } 1916 }
2242 1917
2243 I915_WRITE16(HWSTAM, 0xeffe); 1918 I915_WRITE(HWSTAM, 0xeffe);
2244 for_each_pipe(pipe) 1919 for_each_pipe(pipe)
2245 I915_WRITE(PIPESTAT(pipe), 0); 1920 I915_WRITE(PIPESTAT(pipe), 0);
2246 I915_WRITE(IMR, 0xffffffff); 1921 I915_WRITE(IMR, 0xffffffff);
@@ -2248,31 +1923,23 @@ static void i915_irq_preinstall(struct drm_device * dev)
2248 POSTING_READ(IER); 1923 POSTING_READ(IER);
2249} 1924}
2250 1925
2251static int i915_irq_postinstall(struct drm_device *dev) 1926/*
1927 * Must be called after intel_modeset_init or hotplug interrupts won't be
1928 * enabled correctly.
1929 */
1930static int i915_driver_irq_postinstall(struct drm_device *dev)
2252{ 1931{
2253 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1932 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2254 u32 enable_mask; 1933 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
2255 1934 u32 error_mask;
2256 dev_priv->pipestat[0] = 0;
2257 dev_priv->pipestat[1] = 0;
2258 1935
2259 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 1936 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2260 1937
2261 /* Unmask the interrupts that we always want on. */ 1938 /* Unmask the interrupts that we always want on. */
2262 dev_priv->irq_mask = 1939 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
2263 ~(I915_ASLE_INTERRUPT | 1940
2264 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1941 dev_priv->pipestat[0] = 0;
2265 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1942 dev_priv->pipestat[1] = 0;
2266 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2267 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2268 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2269
2270 enable_mask =
2271 I915_ASLE_INTERRUPT |
2272 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2273 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2274 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2275 I915_USER_INTERRUPT;
2276 1943
2277 if (I915_HAS_HOTPLUG(dev)) { 1944 if (I915_HAS_HOTPLUG(dev)) {
2278 /* Enable in IER... */ 1945 /* Enable in IER... */
@@ -2281,6 +1948,21 @@ static int i915_irq_postinstall(struct drm_device *dev)
2281 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 1948 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2282 } 1949 }
2283 1950
1951 /*
1952 * Enable some error detection, note the instruction error mask
1953 * bit is reserved, so we leave it masked.
1954 */
1955 if (IS_G4X(dev)) {
1956 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1957 GM45_ERROR_MEM_PRIV |
1958 GM45_ERROR_CP_PRIV |
1959 I915_ERROR_MEMORY_REFRESH);
1960 } else {
1961 error_mask = ~(I915_ERROR_PAGE_TABLE |
1962 I915_ERROR_MEMORY_REFRESH);
1963 }
1964 I915_WRITE(EMR, error_mask);
1965
2284 I915_WRITE(IMR, dev_priv->irq_mask); 1966 I915_WRITE(IMR, dev_priv->irq_mask);
2285 I915_WRITE(IER, enable_mask); 1967 I915_WRITE(IER, enable_mask);
2286 POSTING_READ(IER); 1968 POSTING_READ(IER);
@@ -2288,18 +1970,26 @@ static int i915_irq_postinstall(struct drm_device *dev)
2288 if (I915_HAS_HOTPLUG(dev)) { 1970 if (I915_HAS_HOTPLUG(dev)) {
2289 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1971 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2290 1972
1973 /* Note HDMI and DP share bits */
2291 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 1974 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2292 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 1975 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2293 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 1976 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2294 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 1977 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2295 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 1978 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2296 hotplug_en |= HDMID_HOTPLUG_INT_EN; 1979 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2297 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 1980 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2298 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 1981 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2299 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 1982 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2300 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 1983 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2301 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 1984 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2302 hotplug_en |= CRT_HOTPLUG_INT_EN; 1985 hotplug_en |= CRT_HOTPLUG_INT_EN;
1986
1987 /* Programming the CRT detection parameters tends
1988 to generate a spurious hotplug event about three
1989 seconds later. So just do it once.
1990 */
1991 if (IS_G4X(dev))
1992 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2303 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 1993 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2304 } 1994 }
2305 1995
@@ -2313,371 +2003,41 @@ static int i915_irq_postinstall(struct drm_device *dev)
2313 return 0; 2003 return 0;
2314} 2004}
2315 2005
2316static irqreturn_t i915_irq_handler(int irq, void *arg) 2006static void ironlake_irq_uninstall(struct drm_device *dev)
2317{ 2007{
2318 struct drm_device *dev = (struct drm_device *) arg;
2319 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2008 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2320 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2321 unsigned long irqflags;
2322 u32 flip_mask =
2323 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2324 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2325 u32 flip[2] = {
2326 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2327 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2328 };
2329 int pipe, ret = IRQ_NONE;
2330 2009
2331 atomic_inc(&dev_priv->irq_received); 2010 if (!dev_priv)
2332 2011 return;
2333 iir = I915_READ(IIR);
2334 do {
2335 bool irq_received = (iir & ~flip_mask) != 0;
2336 bool blc_event = false;
2337
2338 /* Can't rely on pipestat interrupt bit in iir as it might
2339 * have been cleared after the pipestat interrupt was received.
2340 * It doesn't set the bit in iir again, but it still produces
2341 * interrupts (for non-MSI).
2342 */
2343 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2344 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2345 i915_handle_error(dev, false);
2346
2347 for_each_pipe(pipe) {
2348 int reg = PIPESTAT(pipe);
2349 pipe_stats[pipe] = I915_READ(reg);
2350
2351 /* Clear the PIPE*STAT regs before the IIR */
2352 if (pipe_stats[pipe] & 0x8000ffff) {
2353 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2354 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2355 pipe_name(pipe));
2356 I915_WRITE(reg, pipe_stats[pipe]);
2357 irq_received = true;
2358 }
2359 }
2360 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2361
2362 if (!irq_received)
2363 break;
2364
2365 /* Consume port. Then clear IIR or we'll miss events */
2366 if ((I915_HAS_HOTPLUG(dev)) &&
2367 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2368 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2369
2370 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2371 hotplug_status);
2372 if (hotplug_status & dev_priv->hotplug_supported_mask)
2373 queue_work(dev_priv->wq,
2374 &dev_priv->hotplug_work);
2375
2376 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2377 POSTING_READ(PORT_HOTPLUG_STAT);
2378 }
2379
2380 I915_WRITE(IIR, iir & ~flip_mask);
2381 new_iir = I915_READ(IIR); /* Flush posted writes */
2382
2383 if (iir & I915_USER_INTERRUPT)
2384 notify_ring(dev, &dev_priv->ring[RCS]);
2385
2386 for_each_pipe(pipe) {
2387 int plane = pipe;
2388 if (IS_MOBILE(dev))
2389 plane = !plane;
2390 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2391 drm_handle_vblank(dev, pipe)) {
2392 if (iir & flip[plane]) {
2393 intel_prepare_page_flip(dev, plane);
2394 intel_finish_page_flip(dev, pipe);
2395 flip_mask &= ~flip[plane];
2396 }
2397 }
2398
2399 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2400 blc_event = true;
2401 }
2402 2012
2403 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2013 dev_priv->vblank_pipe = 0;
2404 intel_opregion_asle_intr(dev);
2405 2014
2406 /* With MSI, interrupts are only generated when iir 2015 I915_WRITE(HWSTAM, 0xffffffff);
2407 * transitions from zero to nonzero. If another bit got
2408 * set while we were handling the existing iir bits, then
2409 * we would never get another interrupt.
2410 *
2411 * This is fine on non-MSI as well, as if we hit this path
2412 * we avoid exiting the interrupt handler only to generate
2413 * another one.
2414 *
2415 * Note that for MSI this could cause a stray interrupt report
2416 * if an interrupt landed in the time between writing IIR and
2417 * the posting read. This should be rare enough to never
2418 * trigger the 99% of 100,000 interrupts test for disabling
2419 * stray interrupts.
2420 */
2421 ret = IRQ_HANDLED;
2422 iir = new_iir;
2423 } while (iir & ~flip_mask);
2424 2016
2425 i915_update_dri1_breadcrumb(dev); 2017 I915_WRITE(DEIMR, 0xffffffff);
2018 I915_WRITE(DEIER, 0x0);
2019 I915_WRITE(DEIIR, I915_READ(DEIIR));
2426 2020
2427 return ret; 2021 I915_WRITE(GTIMR, 0xffffffff);
2022 I915_WRITE(GTIER, 0x0);
2023 I915_WRITE(GTIIR, I915_READ(GTIIR));
2428} 2024}
2429 2025
2430static void i915_irq_uninstall(struct drm_device * dev) 2026static void i915_driver_irq_uninstall(struct drm_device * dev)
2431{ 2027{
2432 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2028 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2433 int pipe; 2029 int pipe;
2434 2030
2031 if (!dev_priv)
2032 return;
2033
2034 dev_priv->vblank_pipe = 0;
2035
2435 if (I915_HAS_HOTPLUG(dev)) { 2036 if (I915_HAS_HOTPLUG(dev)) {
2436 I915_WRITE(PORT_HOTPLUG_EN, 0); 2037 I915_WRITE(PORT_HOTPLUG_EN, 0);
2437 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2038 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2438 } 2039 }
2439 2040
2440 I915_WRITE16(HWSTAM, 0xffff);
2441 for_each_pipe(pipe) {
2442 /* Clear enable bits; then clear status bits */
2443 I915_WRITE(PIPESTAT(pipe), 0);
2444 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2445 }
2446 I915_WRITE(IMR, 0xffffffff);
2447 I915_WRITE(IER, 0x0);
2448
2449 I915_WRITE(IIR, I915_READ(IIR));
2450}
2451
2452static void i965_irq_preinstall(struct drm_device * dev)
2453{
2454 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2455 int pipe;
2456
2457 atomic_set(&dev_priv->irq_received, 0);
2458
2459 I915_WRITE(PORT_HOTPLUG_EN, 0);
2460 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2461
2462 I915_WRITE(HWSTAM, 0xeffe);
2463 for_each_pipe(pipe)
2464 I915_WRITE(PIPESTAT(pipe), 0);
2465 I915_WRITE(IMR, 0xffffffff);
2466 I915_WRITE(IER, 0x0);
2467 POSTING_READ(IER);
2468}
2469
2470static int i965_irq_postinstall(struct drm_device *dev)
2471{
2472 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2473 u32 hotplug_en;
2474 u32 enable_mask;
2475 u32 error_mask;
2476
2477 /* Unmask the interrupts that we always want on. */
2478 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2479 I915_DISPLAY_PORT_INTERRUPT |
2480 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2481 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2482 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2483 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2484 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2485
2486 enable_mask = ~dev_priv->irq_mask;
2487 enable_mask |= I915_USER_INTERRUPT;
2488
2489 if (IS_G4X(dev))
2490 enable_mask |= I915_BSD_USER_INTERRUPT;
2491
2492 dev_priv->pipestat[0] = 0;
2493 dev_priv->pipestat[1] = 0;
2494
2495 /*
2496 * Enable some error detection, note the instruction error mask
2497 * bit is reserved, so we leave it masked.
2498 */
2499 if (IS_G4X(dev)) {
2500 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2501 GM45_ERROR_MEM_PRIV |
2502 GM45_ERROR_CP_PRIV |
2503 I915_ERROR_MEMORY_REFRESH);
2504 } else {
2505 error_mask = ~(I915_ERROR_PAGE_TABLE |
2506 I915_ERROR_MEMORY_REFRESH);
2507 }
2508 I915_WRITE(EMR, error_mask);
2509
2510 I915_WRITE(IMR, dev_priv->irq_mask);
2511 I915_WRITE(IER, enable_mask);
2512 POSTING_READ(IER);
2513
2514 /* Note HDMI and DP share hotplug bits */
2515 hotplug_en = 0;
2516 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2517 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2518 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2519 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2520 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2521 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2522 if (IS_G4X(dev)) {
2523 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2524 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2525 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2526 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2527 } else {
2528 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2529 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2530 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2531 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2532 }
2533 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2534 hotplug_en |= CRT_HOTPLUG_INT_EN;
2535
2536 /* Programming the CRT detection parameters tends
2537 to generate a spurious hotplug event about three
2538 seconds later. So just do it once.
2539 */
2540 if (IS_G4X(dev))
2541 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2542 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2543 }
2544
2545 /* Ignore TV since it's buggy */
2546
2547 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2548
2549 intel_opregion_enable_asle(dev);
2550
2551 return 0;
2552}
2553
2554static irqreturn_t i965_irq_handler(int irq, void *arg)
2555{
2556 struct drm_device *dev = (struct drm_device *) arg;
2557 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2558 u32 iir, new_iir;
2559 u32 pipe_stats[I915_MAX_PIPES];
2560 unsigned long irqflags;
2561 int irq_received;
2562 int ret = IRQ_NONE, pipe;
2563
2564 atomic_inc(&dev_priv->irq_received);
2565
2566 iir = I915_READ(IIR);
2567
2568 for (;;) {
2569 bool blc_event = false;
2570
2571 irq_received = iir != 0;
2572
2573 /* Can't rely on pipestat interrupt bit in iir as it might
2574 * have been cleared after the pipestat interrupt was received.
2575 * It doesn't set the bit in iir again, but it still produces
2576 * interrupts (for non-MSI).
2577 */
2578 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2579 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2580 i915_handle_error(dev, false);
2581
2582 for_each_pipe(pipe) {
2583 int reg = PIPESTAT(pipe);
2584 pipe_stats[pipe] = I915_READ(reg);
2585
2586 /*
2587 * Clear the PIPE*STAT regs before the IIR
2588 */
2589 if (pipe_stats[pipe] & 0x8000ffff) {
2590 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2591 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2592 pipe_name(pipe));
2593 I915_WRITE(reg, pipe_stats[pipe]);
2594 irq_received = 1;
2595 }
2596 }
2597 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2598
2599 if (!irq_received)
2600 break;
2601
2602 ret = IRQ_HANDLED;
2603
2604 /* Consume port. Then clear IIR or we'll miss events */
2605 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2606 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2607
2608 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2609 hotplug_status);
2610 if (hotplug_status & dev_priv->hotplug_supported_mask)
2611 queue_work(dev_priv->wq,
2612 &dev_priv->hotplug_work);
2613
2614 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2615 I915_READ(PORT_HOTPLUG_STAT);
2616 }
2617
2618 I915_WRITE(IIR, iir);
2619 new_iir = I915_READ(IIR); /* Flush posted writes */
2620
2621 if (iir & I915_USER_INTERRUPT)
2622 notify_ring(dev, &dev_priv->ring[RCS]);
2623 if (iir & I915_BSD_USER_INTERRUPT)
2624 notify_ring(dev, &dev_priv->ring[VCS]);
2625
2626 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2627 intel_prepare_page_flip(dev, 0);
2628
2629 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2630 intel_prepare_page_flip(dev, 1);
2631
2632 for_each_pipe(pipe) {
2633 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2634 drm_handle_vblank(dev, pipe)) {
2635 i915_pageflip_stall_check(dev, pipe);
2636 intel_finish_page_flip(dev, pipe);
2637 }
2638
2639 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2640 blc_event = true;
2641 }
2642
2643
2644 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2645 intel_opregion_asle_intr(dev);
2646
2647 /* With MSI, interrupts are only generated when iir
2648 * transitions from zero to nonzero. If another bit got
2649 * set while we were handling the existing iir bits, then
2650 * we would never get another interrupt.
2651 *
2652 * This is fine on non-MSI as well, as if we hit this path
2653 * we avoid exiting the interrupt handler only to generate
2654 * another one.
2655 *
2656 * Note that for MSI this could cause a stray interrupt report
2657 * if an interrupt landed in the time between writing IIR and
2658 * the posting read. This should be rare enough to never
2659 * trigger the 99% of 100,000 interrupts test for disabling
2660 * stray interrupts.
2661 */
2662 iir = new_iir;
2663 }
2664
2665 i915_update_dri1_breadcrumb(dev);
2666
2667 return ret;
2668}
2669
2670static void i965_irq_uninstall(struct drm_device * dev)
2671{
2672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2673 int pipe;
2674
2675 if (!dev_priv)
2676 return;
2677
2678 I915_WRITE(PORT_HOTPLUG_EN, 0);
2679 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2680
2681 I915_WRITE(HWSTAM, 0xffffffff); 2041 I915_WRITE(HWSTAM, 0xffffffff);
2682 for_each_pipe(pipe) 2042 for_each_pipe(pipe)
2683 I915_WRITE(PIPESTAT(pipe), 0); 2043 I915_WRITE(PIPESTAT(pipe), 0);
@@ -2692,16 +2052,9 @@ static void i965_irq_uninstall(struct drm_device * dev)
2692 2052
2693void intel_irq_init(struct drm_device *dev) 2053void intel_irq_init(struct drm_device *dev)
2694{ 2054{
2695 struct drm_i915_private *dev_priv = dev->dev_private;
2696
2697 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2698 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2699 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2700 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2701
2702 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2055 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2703 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2056 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2704 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2057 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
2705 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2058 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2706 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2059 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2707 } 2060 }
@@ -2712,14 +2065,7 @@ void intel_irq_init(struct drm_device *dev)
2712 dev->driver->get_vblank_timestamp = NULL; 2065 dev->driver->get_vblank_timestamp = NULL;
2713 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2066 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2714 2067
2715 if (IS_VALLEYVIEW(dev)) { 2068 if (IS_IVYBRIDGE(dev)) {
2716 dev->driver->irq_handler = valleyview_irq_handler;
2717 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2718 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2719 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2720 dev->driver->enable_vblank = valleyview_enable_vblank;
2721 dev->driver->disable_vblank = valleyview_disable_vblank;
2722 } else if (IS_IVYBRIDGE(dev)) {
2723 /* Share pre & uninstall handlers with ILK/SNB */ 2069 /* Share pre & uninstall handlers with ILK/SNB */
2724 dev->driver->irq_handler = ivybridge_irq_handler; 2070 dev->driver->irq_handler = ivybridge_irq_handler;
2725 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2071 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2727,14 +2073,6 @@ void intel_irq_init(struct drm_device *dev)
2727 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2073 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2728 dev->driver->enable_vblank = ivybridge_enable_vblank; 2074 dev->driver->enable_vblank = ivybridge_enable_vblank;
2729 dev->driver->disable_vblank = ivybridge_disable_vblank; 2075 dev->driver->disable_vblank = ivybridge_disable_vblank;
2730 } else if (IS_HASWELL(dev)) {
2731 /* Share interrupts handling with IVB */
2732 dev->driver->irq_handler = ivybridge_irq_handler;
2733 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2734 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2735 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2736 dev->driver->enable_vblank = ivybridge_enable_vblank;
2737 dev->driver->disable_vblank = ivybridge_disable_vblank;
2738 } else if (HAS_PCH_SPLIT(dev)) { 2076 } else if (HAS_PCH_SPLIT(dev)) {
2739 dev->driver->irq_handler = ironlake_irq_handler; 2077 dev->driver->irq_handler = ironlake_irq_handler;
2740 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2078 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2743,22 +2081,10 @@ void intel_irq_init(struct drm_device *dev)
2743 dev->driver->enable_vblank = ironlake_enable_vblank; 2081 dev->driver->enable_vblank = ironlake_enable_vblank;
2744 dev->driver->disable_vblank = ironlake_disable_vblank; 2082 dev->driver->disable_vblank = ironlake_disable_vblank;
2745 } else { 2083 } else {
2746 if (INTEL_INFO(dev)->gen == 2) { 2084 dev->driver->irq_preinstall = i915_driver_irq_preinstall;
2747 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2085 dev->driver->irq_postinstall = i915_driver_irq_postinstall;
2748 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2086 dev->driver->irq_uninstall = i915_driver_irq_uninstall;
2749 dev->driver->irq_handler = i8xx_irq_handler; 2087 dev->driver->irq_handler = i915_driver_irq_handler;
2750 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2751 } else if (INTEL_INFO(dev)->gen == 3) {
2752 dev->driver->irq_preinstall = i915_irq_preinstall;
2753 dev->driver->irq_postinstall = i915_irq_postinstall;
2754 dev->driver->irq_uninstall = i915_irq_uninstall;
2755 dev->driver->irq_handler = i915_irq_handler;
2756 } else {
2757 dev->driver->irq_preinstall = i965_irq_preinstall;
2758 dev->driver->irq_postinstall = i965_irq_postinstall;
2759 dev->driver->irq_uninstall = i965_irq_uninstall;
2760 dev->driver->irq_handler = i965_irq_handler;
2761 }
2762 dev->driver->enable_vblank = i915_enable_vblank; 2088 dev->driver->enable_vblank = i915_enable_vblank;
2763 dev->driver->disable_vblank = i915_disable_vblank; 2089 dev->driver->disable_vblank = i915_disable_vblank;
2764 } 2090 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 186ee5c85b5..2ae29de4172 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,12 +26,6 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32
33#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
34#define _MASKED_BIT_DISABLE(a) ((a) << 16)
35 29
36/* 30/*
37 * The Bridge device's PCI config space has information about the 31 * The Bridge device's PCI config space has information about the
@@ -41,14 +35,6 @@
41 */ 35 */
42#define INTEL_GMCH_CTRL 0x52 36#define INTEL_GMCH_CTRL 0x52
43#define INTEL_GMCH_VGA_DISABLE (1 << 1) 37#define INTEL_GMCH_VGA_DISABLE (1 << 1)
44#define SNB_GMCH_CTRL 0x50
45#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51
52 38
53/* PCI config space */ 39/* PCI config space */
54 40
@@ -91,7 +77,6 @@
91#define GRDOM_FULL (0<<2) 77#define GRDOM_FULL (0<<2)
92#define GRDOM_RENDER (1<<2) 78#define GRDOM_RENDER (1<<2)
93#define GRDOM_MEDIA (3<<2) 79#define GRDOM_MEDIA (3<<2)
94#define GRDOM_RESET_ENABLE (1<<0)
95 80
96#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 81#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
97#define GEN6_MBC_SNPCR_SHIFT 21 82#define GEN6_MBC_SNPCR_SHIFT 21
@@ -101,36 +86,12 @@
101#define GEN6_MBC_SNPCR_LOW (2<<21) 86#define GEN6_MBC_SNPCR_LOW (2<<21)
102#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ 87#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
103 88
104#define GEN6_MBCTL 0x0907c
105#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
106#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
107#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
108#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
109#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
110
111#define GEN6_GDRST 0x941c 89#define GEN6_GDRST 0x941c
112#define GEN6_GRDOM_FULL (1 << 0) 90#define GEN6_GRDOM_FULL (1 << 0)
113#define GEN6_GRDOM_RENDER (1 << 1) 91#define GEN6_GRDOM_RENDER (1 << 1)
114#define GEN6_GRDOM_MEDIA (1 << 2) 92#define GEN6_GRDOM_MEDIA (1 << 2)
115#define GEN6_GRDOM_BLT (1 << 3) 93#define GEN6_GRDOM_BLT (1 << 3)
116 94
117#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
118#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
119#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
120#define PP_DIR_DCLV_2G 0xffffffff
121
122#define GAM_ECOCHK 0x4090
123#define ECOCHK_SNB_BIT (1<<10)
124#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
125#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
126
127#define GAC_ECO_BITS 0x14090
128#define ECOBITS_PPGTT_CACHE64B (3<<8)
129#define ECOBITS_PPGTT_CACHE4B (0<<8)
130
131#define GAB_CTL 0x24000
132#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
133
134/* VGA stuff */ 95/* VGA stuff */
135 96
136#define VGA_ST01_MDA 0x3ba 97#define VGA_ST01_MDA 0x3ba
@@ -195,7 +156,7 @@
195#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) 156#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
196#define MI_SUSPEND_FLUSH_EN (1<<0) 157#define MI_SUSPEND_FLUSH_EN (1<<0)
197#define MI_REPORT_HEAD MI_INSTR(0x07, 0) 158#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
198#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) 159#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
199#define MI_OVERLAY_CONTINUE (0x0<<21) 160#define MI_OVERLAY_CONTINUE (0x0<<21)
200#define MI_OVERLAY_ON (0x1<<21) 161#define MI_OVERLAY_ON (0x1<<21)
201#define MI_OVERLAY_OFF (0x2<<21) 162#define MI_OVERLAY_OFF (0x2<<21)
@@ -203,17 +164,6 @@
203#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 164#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
204#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 165#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
205#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 166#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
206/* IVB has funny definitions for which plane to flip. */
207#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
208#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
209#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
210#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
211#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
212#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
213#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
214#define MI_ARB_ENABLE (1<<0)
215#define MI_ARB_DISABLE (0<<0)
216
217#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 167#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
218#define MI_MM_SPACE_GTT (1<<8) 168#define MI_MM_SPACE_GTT (1<<8)
219#define MI_MM_SPACE_PHYSICAL (0<<8) 169#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -233,32 +183,17 @@
233 */ 183 */
234#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 184#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
235#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 185#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
236#define MI_FLUSH_DW_STORE_INDEX (1<<21) 186#define MI_INVALIDATE_TLB (1<<18)
237#define MI_INVALIDATE_TLB (1<<18) 187#define MI_INVALIDATE_BSD (1<<7)
238#define MI_FLUSH_DW_OP_STOREDW (1<<14)
239#define MI_INVALIDATE_BSD (1<<7)
240#define MI_FLUSH_DW_USE_GTT (1<<2)
241#define MI_FLUSH_DW_USE_PPGTT (0<<2)
242#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 188#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
243#define MI_BATCH_NON_SECURE (1) 189#define MI_BATCH_NON_SECURE (1)
244/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ 190#define MI_BATCH_NON_SECURE_I965 (1<<8)
245#define MI_BATCH_NON_SECURE_I965 (1<<8)
246#define MI_BATCH_PPGTT_HSW (1<<8)
247#define MI_BATCH_NON_SECURE_HSW (1<<13)
248#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 191#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
249#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
250#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 192#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
251#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 193#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
252#define MI_SEMAPHORE_UPDATE (1<<21) 194#define MI_SEMAPHORE_UPDATE (1<<21)
253#define MI_SEMAPHORE_COMPARE (1<<20) 195#define MI_SEMAPHORE_COMPARE (1<<20)
254#define MI_SEMAPHORE_REGISTER (1<<18) 196#define MI_SEMAPHORE_REGISTER (1<<18)
255#define MI_SEMAPHORE_SYNC_RV (2<<16)
256#define MI_SEMAPHORE_SYNC_RB (0<<16)
257#define MI_SEMAPHORE_SYNC_VR (0<<16)
258#define MI_SEMAPHORE_SYNC_VB (2<<16)
259#define MI_SEMAPHORE_SYNC_BR (2<<16)
260#define MI_SEMAPHORE_SYNC_BV (0<<16)
261#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
262/* 197/*
263 * 3D instructions used by the kernel 198 * 3D instructions used by the kernel
264 */ 199 */
@@ -300,23 +235,16 @@
300#define ASYNC_FLIP (1<<22) 235#define ASYNC_FLIP (1<<22)
301#define DISPLAY_PLANE_A (0<<20) 236#define DISPLAY_PLANE_A (0<<20)
302#define DISPLAY_PLANE_B (1<<20) 237#define DISPLAY_PLANE_B (1<<20)
303#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) 238#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
304#define PIPE_CONTROL_CS_STALL (1<<20) 239#define PIPE_CONTROL_QW_WRITE (1<<14)
305#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 240#define PIPE_CONTROL_DEPTH_STALL (1<<13)
306#define PIPE_CONTROL_QW_WRITE (1<<14) 241#define PIPE_CONTROL_WC_FLUSH (1<<12)
307#define PIPE_CONTROL_DEPTH_STALL (1<<13) 242#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
308#define PIPE_CONTROL_WRITE_FLUSH (1<<12) 243#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
309#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ 244#define PIPE_CONTROL_ISP_DIS (1<<9)
310#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ 245#define PIPE_CONTROL_NOTIFY (1<<8)
311#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
312#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
313#define PIPE_CONTROL_NOTIFY (1<<8)
314#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
315#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
316#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
317#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
318#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
319#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ 246#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
247#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
320 248
321 249
322/* 250/*
@@ -327,65 +255,6 @@
327#define DEBUG_RESET_RENDER (1<<8) 255#define DEBUG_RESET_RENDER (1<<8)
328#define DEBUG_RESET_DISPLAY (1<<9) 256#define DEBUG_RESET_DISPLAY (1<<9)
329 257
330/*
331 * DPIO - a special bus for various display related registers to hide behind:
332 * 0x800c: m1, m2, n, p1, p2, k dividers
333 * 0x8014: REF and SFR select
334 * 0x8014: N divider, VCO select
335 * 0x801c/3c: core clock bits
336 * 0x8048/68: low pass filter coefficients
337 * 0x8100: fast clock controls
338 */
339#define DPIO_PKT 0x2100
340#define DPIO_RID (0<<24)
341#define DPIO_OP_WRITE (1<<16)
342#define DPIO_OP_READ (0<<16)
343#define DPIO_PORTID (0x12<<8)
344#define DPIO_BYTE (0xf<<4)
345#define DPIO_BUSY (1<<0) /* status only */
346#define DPIO_DATA 0x2104
347#define DPIO_REG 0x2108
348#define DPIO_CTL 0x2110
349#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
350#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
351#define DPIO_SFR_BYPASS (1<<1)
352#define DPIO_RESET (1<<0)
353
354#define _DPIO_DIV_A 0x800c
355#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
356#define DPIO_K_SHIFT (24) /* 4 bits */
357#define DPIO_P1_SHIFT (21) /* 3 bits */
358#define DPIO_P2_SHIFT (16) /* 5 bits */
359#define DPIO_N_SHIFT (12) /* 4 bits */
360#define DPIO_ENABLE_CALIBRATION (1<<11)
361#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
362#define DPIO_M2DIV_MASK 0xff
363#define _DPIO_DIV_B 0x802c
364#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
365
366#define _DPIO_REFSFR_A 0x8014
367#define DPIO_REFSEL_OVERRIDE 27
368#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
369#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
370#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
371#define DPIO_PLL_REFCLK_SEL_MASK 3
372#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
373#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
374#define _DPIO_REFSFR_B 0x8034
375#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
376
377#define _DPIO_CORE_CLK_A 0x801c
378#define _DPIO_CORE_CLK_B 0x803c
379#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
380
381#define _DPIO_LFP_COEFF_A 0x8048
382#define _DPIO_LFP_COEFF_B 0x8068
383#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
384
385#define DPIO_FASTCLK_DISABLE 0x8100
386
387#define DPIO_DATA_CHANNEL1 0x8220
388#define DPIO_DATA_CHANNEL2 0x8420
389 258
390/* 259/*
391 * Fence registers 260 * Fence registers
@@ -413,12 +282,6 @@
413#define FENCE_REG_SANDYBRIDGE_0 0x100000 282#define FENCE_REG_SANDYBRIDGE_0 0x100000
414#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 283#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
415 284
416/* control register for cpu gtt access */
417#define TILECTL 0x101000
418#define TILECTL_SWZCTL (1 << 0)
419#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
420#define TILECTL_BACKSNOOP_DIS (1 << 3)
421
422/* 285/*
423 * Instruction and interrupt control regs 286 * Instruction and interrupt control regs
424 */ 287 */
@@ -433,27 +296,15 @@
433#define RING_CTL(base) ((base)+0x3c) 296#define RING_CTL(base) ((base)+0x3c)
434#define RING_SYNC_0(base) ((base)+0x40) 297#define RING_SYNC_0(base) ((base)+0x40)
435#define RING_SYNC_1(base) ((base)+0x44) 298#define RING_SYNC_1(base) ((base)+0x44)
436#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
437#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
438#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
439#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
440#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
441#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
442#define RING_MAX_IDLE(base) ((base)+0x54) 299#define RING_MAX_IDLE(base) ((base)+0x54)
443#define RING_HWS_PGA(base) ((base)+0x80) 300#define RING_HWS_PGA(base) ((base)+0x80)
444#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 301#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
445#define ARB_MODE 0x04030
446#define ARB_MODE_SWIZZLE_SNB (1<<4)
447#define ARB_MODE_SWIZZLE_IVB (1<<5)
448#define RENDER_HWS_PGA_GEN7 (0x04080) 302#define RENDER_HWS_PGA_GEN7 (0x04080)
449#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
450#define DONE_REG 0x40b0
451#define BSD_HWS_PGA_GEN7 (0x04180) 303#define BSD_HWS_PGA_GEN7 (0x04180)
452#define BLT_HWS_PGA_GEN7 (0x04280) 304#define BLT_HWS_PGA_GEN7 (0x04280)
453#define RING_ACTHD(base) ((base)+0x74) 305#define RING_ACTHD(base) ((base)+0x74)
454#define RING_NOPID(base) ((base)+0x94) 306#define RING_NOPID(base) ((base)+0x94)
455#define RING_IMR(base) ((base)+0xa8) 307#define RING_IMR(base) ((base)+0xa8)
456#define RING_TIMESTAMP(base) ((base)+0x358)
457#define TAIL_ADDR 0x001FFFF8 308#define TAIL_ADDR 0x001FFFF8
458#define HEAD_WRAP_COUNT 0xFFE00000 309#define HEAD_WRAP_COUNT 0xFFE00000
459#define HEAD_WRAP_ONE 0x00200000 310#define HEAD_WRAP_ONE 0x00200000
@@ -482,17 +333,6 @@
482#define IPEIR_I965 0x02064 333#define IPEIR_I965 0x02064
483#define IPEHR_I965 0x02068 334#define IPEHR_I965 0x02068
484#define INSTDONE_I965 0x0206c 335#define INSTDONE_I965 0x0206c
485#define GEN7_INSTDONE_1 0x0206c
486#define GEN7_SC_INSTDONE 0x07100
487#define GEN7_SAMPLER_INSTDONE 0x0e160
488#define GEN7_ROW_INSTDONE 0x0e164
489#define I915_NUM_INSTDONE_REG 4
490#define RING_IPEIR(base) ((base)+0x64)
491#define RING_IPEHR(base) ((base)+0x68)
492#define RING_INSTDONE(base) ((base)+0x6c)
493#define RING_INSTPS(base) ((base)+0x70)
494#define RING_DMA_FADD(base) ((base)+0x78)
495#define RING_INSTPM(base) ((base)+0xc0)
496#define INSTPS 0x02070 /* 965+ only */ 336#define INSTPS 0x02070 /* 965+ only */
497#define INSTDONE1 0x0207c /* 965+ only */ 337#define INSTDONE1 0x0207c /* 965+ only */
498#define ACTHD_I965 0x02074 338#define ACTHD_I965 0x02074
@@ -506,18 +346,22 @@
506#define INSTDONE 0x02090 346#define INSTDONE 0x02090
507#define NOPID 0x02094 347#define NOPID 0x02094
508#define HWSTAM 0x02098 348#define HWSTAM 0x02098
509#define DMA_FADD_I8XX 0x020d0 349#define VCS_INSTDONE 0x1206C
350#define VCS_IPEIR 0x12064
351#define VCS_IPEHR 0x12068
352#define VCS_ACTHD 0x12074
353#define BCS_INSTDONE 0x2206C
354#define BCS_IPEIR 0x22064
355#define BCS_IPEHR 0x22068
356#define BCS_ACTHD 0x22074
510 357
511#define ERROR_GEN6 0x040a0 358#define ERROR_GEN6 0x040a0
512#define GEN7_ERR_INT 0x44040
513#define ERR_INT_MMIO_UNCLAIMED (1<<13)
514 359
515/* GM45+ chicken bits -- debug workaround bits that may be required 360/* GM45+ chicken bits -- debug workaround bits that may be required
516 * for various sorts of correct behavior. The top 16 bits of each are 361 * for various sorts of correct behavior. The top 16 bits of each are
517 * the enables for writing to the corresponding low bit. 362 * the enables for writing to the corresponding low bit.
518 */ 363 */
519#define _3D_CHICKEN 0x02084 364#define _3D_CHICKEN 0x02084
520#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
521#define _3D_CHICKEN2 0x0208c 365#define _3D_CHICKEN2 0x0208c
522/* Disables pipelining of read flushes past the SF-WIZ interface. 366/* Disables pipelining of read flushes past the SF-WIZ interface.
523 * Required on all Ironlake steppings according to the B-Spec, but the 367 * Required on all Ironlake steppings according to the B-Spec, but the
@@ -525,20 +369,13 @@
525 */ 369 */
526# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 370# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
527#define _3D_CHICKEN3 0x02090 371#define _3D_CHICKEN3 0x02090
528#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
529#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
530 372
531#define MI_MODE 0x0209c 373#define MI_MODE 0x0209c
532# define VS_TIMER_DISPATCH (1 << 6) 374# define VS_TIMER_DISPATCH (1 << 6)
533# define MI_FLUSH_ENABLE (1 << 12) 375# define MI_FLUSH_ENABLE (1 << 11)
534
535#define GEN6_GT_MODE 0x20d0
536#define GEN6_GT_MODE_HI (1 << 9)
537#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
538 376
539#define GFX_MODE 0x02520 377#define GFX_MODE 0x02520
540#define GFX_MODE_GEN7 0x0229c 378#define GFX_MODE_GEN7 0x0229c
541#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
542#define GFX_RUN_LIST_ENABLE (1<<15) 379#define GFX_RUN_LIST_ENABLE (1<<15)
543#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) 380#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
544#define GFX_SURFACE_FAULT_ENABLE (1<<12) 381#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -546,20 +383,14 @@
546#define GFX_PSMI_GRANULARITY (1<<10) 383#define GFX_PSMI_GRANULARITY (1<<10)
547#define GFX_PPGTT_ENABLE (1<<9) 384#define GFX_PPGTT_ENABLE (1<<9)
548 385
549#define VLV_DISPLAY_BASE 0x180000 386#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
387#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
550 388
551#define SCPD0 0x0209c /* 915+ only */ 389#define SCPD0 0x0209c /* 915+ only */
552#define IER 0x020a0 390#define IER 0x020a0
553#define IIR 0x020a4 391#define IIR 0x020a4
554#define IMR 0x020a8 392#define IMR 0x020a8
555#define ISR 0x020ac 393#define ISR 0x020ac
556#define VLV_GUNIT_CLOCK_GATE 0x182060
557#define GCFG_DIS (1<<8)
558#define VLV_IIR_RW 0x182084
559#define VLV_IER 0x1820a0
560#define VLV_IIR 0x1820a4
561#define VLV_IMR 0x1820a8
562#define VLV_ISR 0x1820ac
563#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 394#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
564#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 395#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
565#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 396#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -592,7 +423,6 @@
592#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts 423#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
593 will not assert AGPBUSY# and will only 424 will not assert AGPBUSY# and will only
594 be delivered when out of C3. */ 425 be delivered when out of C3. */
595#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
596#define ACTHD 0x020c8 426#define ACTHD 0x020c8
597#define FW_BLC 0x020d8 427#define FW_BLC 0x020d8
598#define FW_BLC2 0x020dc 428#define FW_BLC2 0x020dc
@@ -605,6 +435,7 @@
605#define LM_BURST_LENGTH 0x00000700 435#define LM_BURST_LENGTH 0x00000700
606#define LM_FIFO_WATERMARK 0x0000001F 436#define LM_FIFO_WATERMARK 0x0000001F
607#define MI_ARB_STATE 0x020e4 /* 915+ only */ 437#define MI_ARB_STATE 0x020e4 /* 915+ only */
438#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
608 439
609/* Make render/texture TLB fetches lower priorty than associated data 440/* Make render/texture TLB fetches lower priorty than associated data
610 * fetches. This is not turned on by default 441 * fetches. This is not turned on by default
@@ -639,7 +470,7 @@
639 470
640/* Enables non-sequential data reads through arbiter 471/* Enables non-sequential data reads through arbiter
641 */ 472 */
642#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) 473#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
643 474
644/* Disable FSB snooping of cacheable write cycles from binner/render 475/* Disable FSB snooping of cacheable write cycles from binner/render
645 * command stream 476 * command stream
@@ -669,28 +500,20 @@
669#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 500#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
670 501
671#define CACHE_MODE_0 0x02120 /* 915+ only */ 502#define CACHE_MODE_0 0x02120 /* 915+ only */
672#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) 503#define CM0_MASK_SHIFT 16
673#define CM0_IZ_OPT_DISABLE (1<<6) 504#define CM0_IZ_OPT_DISABLE (1<<6)
674#define CM0_ZR_OPT_DISABLE (1<<5) 505#define CM0_ZR_OPT_DISABLE (1<<5)
675#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
676#define CM0_DEPTH_EVICT_DISABLE (1<<4) 506#define CM0_DEPTH_EVICT_DISABLE (1<<4)
677#define CM0_COLOR_EVICT_DISABLE (1<<3) 507#define CM0_COLOR_EVICT_DISABLE (1<<3)
678#define CM0_DEPTH_WRITE_DISABLE (1<<1) 508#define CM0_DEPTH_WRITE_DISABLE (1<<1)
679#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 509#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
680#define BB_ADDR 0x02140 /* 8 bytes */ 510#define BB_ADDR 0x02140 /* 8 bytes */
681#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 511#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
682#define GFX_FLSH_CNTL_GEN6 0x101008
683#define GFX_FLSH_CNTL_EN (1<<0)
684#define ECOSKPD 0x021d0 512#define ECOSKPD 0x021d0
685#define ECO_GATING_CX_ONLY (1<<3) 513#define ECO_GATING_CX_ONLY (1<<3)
686#define ECO_FLIP_DONE (1<<0) 514#define ECO_FLIP_DONE (1<<0)
687 515
688#define CACHE_MODE_1 0x7004 /* IVB+ */ 516/* GEN6 interrupt control */
689#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
690
691/* GEN6 interrupt control
692 * Note that the per-ring interrupt bits do alias with the global interrupt bits
693 * in GTIMR. */
694#define GEN6_RENDER_HWSTAM 0x2098 517#define GEN6_RENDER_HWSTAM 0x2098
695#define GEN6_RENDER_IMR 0x20a8 518#define GEN6_RENDER_IMR 0x20a8
696#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) 519#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -715,10 +538,10 @@
715#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 538#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
716 539
717#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 540#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
718#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 541#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
719#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) 542#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
720#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) 543#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
721#define GEN6_BSD_GO_INDICATOR (1 << 4) 544#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
722 545
723#define GEN6_BSD_HWSTAM 0x12098 546#define GEN6_BSD_HWSTAM 0x12098
724#define GEN6_BSD_IMR 0x120a8 547#define GEN6_BSD_IMR 0x120a8
@@ -726,21 +549,6 @@
726 549
727#define GEN6_BSD_RNCID 0x12198 550#define GEN6_BSD_RNCID 0x12198
728 551
729#define GEN7_FF_THREAD_MODE 0x20a0
730#define GEN7_FF_SCHED_MASK 0x0077070
731#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
732#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
733#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
734#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
735#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
736#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
737#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
738#define GEN7_FF_VS_SCHED_HW (0x0<<12)
739#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
740#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
741#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
742#define GEN7_FF_DS_SCHED_HW (0x0<<4)
743
744/* 552/*
745 * Framebuffer compression (915+ only) 553 * Framebuffer compression (915+ only)
746 */ 554 */
@@ -818,7 +626,7 @@
818 626
819#define ILK_DISPLAY_CHICKEN1 0x42000 627#define ILK_DISPLAY_CHICKEN1 0x42000
820#define ILK_FBCQ_DIS (1<<22) 628#define ILK_FBCQ_DIS (1<<22)
821#define ILK_PABSTRETCH_DIS (1<<21) 629#define ILK_PABSTRETCH_DIS (1<<21)
822 630
823 631
824/* 632/*
@@ -869,9 +677,9 @@
869#define GMBUS_PORT_PANEL 3 677#define GMBUS_PORT_PANEL 3
870#define GMBUS_PORT_DPC 4 /* HDMIC */ 678#define GMBUS_PORT_DPC 4 /* HDMIC */
871#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ 679#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
872#define GMBUS_PORT_DPD 6 /* HDMID */ 680 /* 6 reserved */
873#define GMBUS_PORT_RESERVED 7 /* 7 reserved */ 681#define GMBUS_PORT_DPD 7 /* HDMID */
874#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) 682#define GMBUS_NUM_PORTS 8
875#define GMBUS1 0x5104 /* command/status */ 683#define GMBUS1 0x5104 /* command/status */
876#define GMBUS_SW_CLR_INT (1<<31) 684#define GMBUS_SW_CLR_INT (1<<31)
877#define GMBUS_SW_RDY (1<<30) 685#define GMBUS_SW_RDY (1<<30)
@@ -923,9 +731,7 @@
923#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 731#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
924#define DPLL_VCO_ENABLE (1 << 31) 732#define DPLL_VCO_ENABLE (1 << 31)
925#define DPLL_DVO_HIGH_SPEED (1 << 30) 733#define DPLL_DVO_HIGH_SPEED (1 << 30)
926#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
927#define DPLL_SYNCLOCK_ENABLE (1 << 29) 734#define DPLL_SYNCLOCK_ENABLE (1 << 29)
928#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
929#define DPLL_VGA_MODE_DIS (1 << 28) 735#define DPLL_VGA_MODE_DIS (1 << 28)
930#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ 736#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
931#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ 737#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -937,8 +743,6 @@
937#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 743#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
938#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 744#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
939#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ 745#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
940#define DPLL_LOCK_VLV (1<<15)
941#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
942 746
943#define SRX_INDEX 0x3c4 747#define SRX_INDEX 0x3c4
944#define SRX_DATA 0x3c5 748#define SRX_DATA 0x3c5
@@ -1034,7 +838,6 @@
1034#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 838#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
1035#define _DPLL_B_MD 0x06020 /* 965+ only */ 839#define _DPLL_B_MD 0x06020 /* 965+ only */
1036#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) 840#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
1037
1038#define _FPA0 0x06040 841#define _FPA0 0x06040
1039#define _FPA1 0x06044 842#define _FPA1 0x06044
1040#define _FPB0 0x06048 843#define _FPB0 0x06048
@@ -1175,9 +978,6 @@
1175#define RAMCLK_GATE_D 0x6210 /* CRL only */ 978#define RAMCLK_GATE_D 0x6210 /* CRL only */
1176#define DEUC 0x6214 /* CRL only */ 979#define DEUC 0x6214 /* CRL only */
1177 980
1178#define FW_BLC_SELF_VLV 0x6500
1179#define FW_CSPWRDWNEN (1<<15)
1180
1181/* 981/*
1182 * Palette regs 982 * Palette regs
1183 */ 983 */
@@ -1217,29 +1017,6 @@
1217#define C0DRB3 0x10206 1017#define C0DRB3 0x10206
1218#define C1DRB3 0x10606 1018#define C1DRB3 0x10606
1219 1019
1220/** snb MCH registers for reading the DRAM channel configuration */
1221#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
1222#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
1223#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
1224#define MAD_DIMM_ECC_MASK (0x3 << 24)
1225#define MAD_DIMM_ECC_OFF (0x0 << 24)
1226#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
1227#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
1228#define MAD_DIMM_ECC_ON (0x3 << 24)
1229#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
1230#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
1231#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
1232#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
1233#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
1234#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
1235#define MAD_DIMM_A_SELECT (0x1 << 16)
1236/* DIMM sizes are in multiples of 256mb. */
1237#define MAD_DIMM_B_SIZE_SHIFT 8
1238#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
1239#define MAD_DIMM_A_SIZE_SHIFT 0
1240#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
1241
1242
1243/* Clocking configuration register */ 1020/* Clocking configuration register */
1244#define CLKCFG 0x10c00 1021#define CLKCFG 0x10c00
1245#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ 1022#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1483,10 +1260,6 @@
1483#define DDRMPLL1 0X12c20 1260#define DDRMPLL1 0X12c20
1484#define PEG_BAND_GAP_DATA 0x14d68 1261#define PEG_BAND_GAP_DATA 0x14d68
1485 1262
1486#define GEN6_GT_THREAD_STATUS_REG 0x13805c
1487#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
1488#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
1489
1490#define GEN6_GT_PERF_STATUS 0x145948 1263#define GEN6_GT_PERF_STATUS 0x145948
1491#define GEN6_RP_STATE_LIMITS 0x145994 1264#define GEN6_RP_STATE_LIMITS 0x145994
1492#define GEN6_RP_STATE_CAP 0x145998 1265#define GEN6_RP_STATE_CAP 0x145998
@@ -1496,39 +1269,6 @@
1496 */ 1269 */
1497#define CCID 0x2180 1270#define CCID 0x2180
1498#define CCID_EN (1<<0) 1271#define CCID_EN (1<<0)
1499#define CXT_SIZE 0x21a0
1500#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
1501#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
1502#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
1503#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
1504#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
1505#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
1506 GEN6_CXT_RING_SIZE(cxt_reg) + \
1507 GEN6_CXT_RENDER_SIZE(cxt_reg) + \
1508 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
1509 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
1510#define GEN7_CXT_SIZE 0x21a8
1511#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
1512#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
1513#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
1514#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
1515#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
1516#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
1517#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
1518 GEN7_CXT_RING_SIZE(ctx_reg) + \
1519 GEN7_CXT_RENDER_SIZE(ctx_reg) + \
1520 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1521 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1522 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1523#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
1524#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
1525#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
1526#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
1527 HSW_CXT_RING_SIZE(ctx_reg) + \
1528 HSW_CXT_RENDER_SIZE(ctx_reg) + \
1529 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1530
1531
1532/* 1272/*
1533 * Overlay regs 1273 * Overlay regs
1534 */ 1274 */
@@ -1556,7 +1296,6 @@
1556#define _VSYNC_A 0x60014 1296#define _VSYNC_A 0x60014
1557#define _PIPEASRC 0x6001c 1297#define _PIPEASRC 0x6001c
1558#define _BCLRPAT_A 0x60020 1298#define _BCLRPAT_A 0x60020
1559#define _VSYNCSHIFT_A 0x60028
1560 1299
1561/* Pipe B timing regs */ 1300/* Pipe B timing regs */
1562#define _HTOTAL_B 0x61000 1301#define _HTOTAL_B 0x61000
@@ -1567,49 +1306,23 @@
1567#define _VSYNC_B 0x61014 1306#define _VSYNC_B 0x61014
1568#define _PIPEBSRC 0x6101c 1307#define _PIPEBSRC 0x6101c
1569#define _BCLRPAT_B 0x61020 1308#define _BCLRPAT_B 0x61020
1570#define _VSYNCSHIFT_B 0x61028
1571 1309
1572 1310#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
1573#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 1311#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
1574#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 1312#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
1575#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 1313#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
1576#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) 1314#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
1577#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) 1315#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
1578#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
1579#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1316#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1580#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1581 1317
1582/* VGA port control */ 1318/* VGA port control */
1583#define ADPA 0x61100 1319#define ADPA 0x61100
1584#define PCH_ADPA 0xe1100
1585#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
1586
1587#define ADPA_DAC_ENABLE (1<<31) 1320#define ADPA_DAC_ENABLE (1<<31)
1588#define ADPA_DAC_DISABLE 0 1321#define ADPA_DAC_DISABLE 0
1589#define ADPA_PIPE_SELECT_MASK (1<<30) 1322#define ADPA_PIPE_SELECT_MASK (1<<30)
1590#define ADPA_PIPE_A_SELECT 0 1323#define ADPA_PIPE_A_SELECT 0
1591#define ADPA_PIPE_B_SELECT (1<<30) 1324#define ADPA_PIPE_B_SELECT (1<<30)
1592#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) 1325#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1593/* CPT uses bits 29:30 for pch transcoder select */
1594#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
1595#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
1596#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
1597#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
1598#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
1599#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
1600#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
1601#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
1602#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
1603#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
1604#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
1605#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
1606#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
1607#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
1608#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
1609#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
1610#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
1611#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
1612#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
1613#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1326#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1614#define ADPA_SETS_HVPOLARITY 0 1327#define ADPA_SETS_HVPOLARITY 0
1615#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1328#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1656,34 +1369,20 @@
1656#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1369#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
1657 1370
1658#define PORT_HOTPLUG_STAT 0x61114 1371#define PORT_HOTPLUG_STAT 0x61114
1659/* HDMI/DP bits are gen4+ */ 1372#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
1660#define DPB_HOTPLUG_LIVE_STATUS (1 << 29) 1373#define DPB_HOTPLUG_INT_STATUS (1 << 29)
1661#define DPC_HOTPLUG_LIVE_STATUS (1 << 28) 1374#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
1662#define DPD_HOTPLUG_LIVE_STATUS (1 << 27) 1375#define DPC_HOTPLUG_INT_STATUS (1 << 28)
1663#define DPD_HOTPLUG_INT_STATUS (3 << 21) 1376#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
1664#define DPC_HOTPLUG_INT_STATUS (3 << 19) 1377#define DPD_HOTPLUG_INT_STATUS (1 << 27)
1665#define DPB_HOTPLUG_INT_STATUS (3 << 17)
1666/* HDMI bits are shared with the DP bits */
1667#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
1668#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
1669#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
1670#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
1671#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
1672#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
1673/* CRT/TV common between gen3+ */
1674#define CRT_HOTPLUG_INT_STATUS (1 << 11) 1378#define CRT_HOTPLUG_INT_STATUS (1 << 11)
1675#define TV_HOTPLUG_INT_STATUS (1 << 10) 1379#define TV_HOTPLUG_INT_STATUS (1 << 10)
1676#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 1380#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
1677#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) 1381#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
1678#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) 1382#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
1679#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) 1383#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
1680/* SDVO is different across gen3/4 */ 1384#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
1681#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) 1385#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
1682#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
1683#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
1684#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
1685#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
1686#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
1687 1386
1688/* SDVO port control */ 1387/* SDVO port control */
1689#define SDVOB 0x61140 1388#define SDVOB 0x61140
@@ -1808,21 +1507,12 @@
1808 1507
1809/* Video Data Island Packet control */ 1508/* Video Data Island Packet control */
1810#define VIDEO_DIP_DATA 0x61178 1509#define VIDEO_DIP_DATA 0x61178
1811/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
1812 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
1813 * of the infoframe structure specified by CEA-861. */
1814#define VIDEO_DIP_DATA_SIZE 32
1815#define VIDEO_DIP_CTL 0x61170 1510#define VIDEO_DIP_CTL 0x61170
1816/* Pre HSW: */
1817#define VIDEO_DIP_ENABLE (1 << 31) 1511#define VIDEO_DIP_ENABLE (1 << 31)
1818#define VIDEO_DIP_PORT_B (1 << 29) 1512#define VIDEO_DIP_PORT_B (1 << 29)
1819#define VIDEO_DIP_PORT_C (2 << 29) 1513#define VIDEO_DIP_PORT_C (2 << 29)
1820#define VIDEO_DIP_PORT_D (3 << 29)
1821#define VIDEO_DIP_PORT_MASK (3 << 29)
1822#define VIDEO_DIP_ENABLE_GCP (1 << 25)
1823#define VIDEO_DIP_ENABLE_AVI (1 << 21) 1514#define VIDEO_DIP_ENABLE_AVI (1 << 21)
1824#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) 1515#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
1825#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
1826#define VIDEO_DIP_ENABLE_SPD (8 << 21) 1516#define VIDEO_DIP_ENABLE_SPD (8 << 21)
1827#define VIDEO_DIP_SELECT_AVI (0 << 19) 1517#define VIDEO_DIP_SELECT_AVI (0 << 19)
1828#define VIDEO_DIP_SELECT_VENDOR (1 << 19) 1518#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
@@ -1831,14 +1521,6 @@
1831#define VIDEO_DIP_FREQ_ONCE (0 << 16) 1521#define VIDEO_DIP_FREQ_ONCE (0 << 16)
1832#define VIDEO_DIP_FREQ_VSYNC (1 << 16) 1522#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
1833#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) 1523#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
1834#define VIDEO_DIP_FREQ_MASK (3 << 16)
1835/* HSW and later: */
1836#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
1837#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
1838#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
1839#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
1840#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
1841#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
1842 1524
1843/* Panel power sequencing */ 1525/* Panel power sequencing */
1844#define PP_STATUS 0x61200 1526#define PP_STATUS 0x61200
@@ -1852,21 +1534,12 @@
1852 */ 1534 */
1853#define PP_READY (1 << 30) 1535#define PP_READY (1 << 30)
1854#define PP_SEQUENCE_NONE (0 << 28) 1536#define PP_SEQUENCE_NONE (0 << 28)
1855#define PP_SEQUENCE_POWER_UP (1 << 28) 1537#define PP_SEQUENCE_ON (1 << 28)
1856#define PP_SEQUENCE_POWER_DOWN (2 << 28) 1538#define PP_SEQUENCE_OFF (2 << 28)
1857#define PP_SEQUENCE_MASK (3 << 28) 1539#define PP_SEQUENCE_MASK 0x30000000
1858#define PP_SEQUENCE_SHIFT 28
1859#define PP_CYCLE_DELAY_ACTIVE (1 << 27) 1540#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1541#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1860#define PP_SEQUENCE_STATE_MASK 0x0000000f 1542#define PP_SEQUENCE_STATE_MASK 0x0000000f
1861#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
1862#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
1863#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
1864#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
1865#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
1866#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
1867#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
1868#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
1869#define PP_SEQUENCE_STATE_RESET (0xf << 0)
1870#define PP_CONTROL 0x61204 1543#define PP_CONTROL 0x61204
1871#define POWER_TARGET_ON (1 << 0) 1544#define POWER_TARGET_ON (1 << 0)
1872#define PP_ON_DELAYS 0x61208 1545#define PP_ON_DELAYS 0x61208
@@ -1909,35 +1582,18 @@
1909#define PFIT_AUTO_RATIOS 0x61238 1582#define PFIT_AUTO_RATIOS 0x61238
1910 1583
1911/* Backlight control */ 1584/* Backlight control */
1912#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1913#define BLM_PWM_ENABLE (1 << 31)
1914#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
1915#define BLM_PIPE_SELECT (1 << 29)
1916#define BLM_PIPE_SELECT_IVB (3 << 29)
1917#define BLM_PIPE_A (0 << 29)
1918#define BLM_PIPE_B (1 << 29)
1919#define BLM_PIPE_C (2 << 29) /* ivb + */
1920#define BLM_PIPE(pipe) ((pipe) << 29)
1921#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
1922#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
1923#define BLM_PHASE_IN_ENABLE (1 << 25)
1924#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
1925#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
1926#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
1927#define BLM_PHASE_IN_COUNT_SHIFT (8)
1928#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
1929#define BLM_PHASE_IN_INCR_SHIFT (0)
1930#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
1931#define BLC_PWM_CTL 0x61254 1585#define BLC_PWM_CTL 0x61254
1586#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
1587#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1588#define BLM_COMBINATION_MODE (1 << 30)
1932/* 1589/*
1933 * This is the most significant 15 bits of the number of backlight cycles in a 1590 * This is the most significant 15 bits of the number of backlight cycles in a
1934 * complete cycle of the modulated backlight control. 1591 * complete cycle of the modulated backlight control.
1935 * 1592 *
1936 * The actual value is this field multiplied by two. 1593 * The actual value is this field multiplied by two.
1937 */ 1594 */
1938#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 1595#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
1939#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 1596#define BLM_LEGACY_MODE (1 << 16)
1940#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
1941/* 1597/*
1942 * This is the number of cycles out of the backlight modulation cycle for which 1598 * This is the number of cycles out of the backlight modulation cycle for which
1943 * the backlight is on. 1599 * the backlight is on.
@@ -1947,24 +1603,9 @@
1947 */ 1603 */
1948#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 1604#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
1949#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 1605#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
1950#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
1951#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
1952 1606
1953#define BLC_HIST_CTL 0x61260 1607#define BLC_HIST_CTL 0x61260
1954 1608
1955/* New registers for PCH-split platforms. Safe where new bits show up, the
1956 * register layout machtes with gen4 BLC_PWM_CTL[12]. */
1957#define BLC_PWM_CPU_CTL2 0x48250
1958#define BLC_PWM_CPU_CTL 0x48254
1959
1960/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
1961 * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
1962#define BLC_PWM_PCH_CTL1 0xc8250
1963#define BLM_PCH_PWM_ENABLE (1 << 31)
1964#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
1965#define BLM_PCH_POLARITY (1 << 29)
1966#define BLC_PWM_PCH_CTL2 0xc8254
1967
1968/* TV port control */ 1609/* TV port control */
1969#define TV_CTL 0x68000 1610#define TV_CTL 0x68000
1970/** Enables the TV encoder */ 1611/** Enables the TV encoder */
@@ -2637,36 +2278,21 @@
2637 2278
2638/* Pipe A */ 2279/* Pipe A */
2639#define _PIPEADSL 0x70000 2280#define _PIPEADSL 0x70000
2640#define DSL_LINEMASK_GEN2 0x00000fff 2281#define DSL_LINEMASK 0x00000fff
2641#define DSL_LINEMASK_GEN3 0x00001fff
2642#define _PIPEACONF 0x70008 2282#define _PIPEACONF 0x70008
2643#define PIPECONF_ENABLE (1<<31) 2283#define PIPECONF_ENABLE (1<<31)
2644#define PIPECONF_DISABLE 0 2284#define PIPECONF_DISABLE 0
2645#define PIPECONF_DOUBLE_WIDE (1<<30) 2285#define PIPECONF_DOUBLE_WIDE (1<<30)
2646#define I965_PIPECONF_ACTIVE (1<<30) 2286#define I965_PIPECONF_ACTIVE (1<<30)
2647#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
2648#define PIPECONF_SINGLE_WIDE 0 2287#define PIPECONF_SINGLE_WIDE 0
2649#define PIPECONF_PIPE_UNLOCKED 0 2288#define PIPECONF_PIPE_UNLOCKED 0
2650#define PIPECONF_PIPE_LOCKED (1<<25) 2289#define PIPECONF_PIPE_LOCKED (1<<25)
2651#define PIPECONF_PALETTE 0 2290#define PIPECONF_PALETTE 0
2652#define PIPECONF_GAMMA (1<<24) 2291#define PIPECONF_GAMMA (1<<24)
2653#define PIPECONF_FORCE_BORDER (1<<25) 2292#define PIPECONF_FORCE_BORDER (1<<25)
2654#define PIPECONF_INTERLACE_MASK (7 << 21) 2293#define PIPECONF_PROGRESSIVE (0 << 21)
2655#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
2656/* Note that pre-gen3 does not support interlaced display directly. Panel
2657 * fitting must be disabled on pre-ilk for interlaced. */
2658#define PIPECONF_PROGRESSIVE (0 << 21)
2659#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
2660#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
2661#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 2294#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
2662#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ 2295#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
2663/* Ironlake and later have a complete new set of values for interlaced. PFIT
2664 * means panel fitter required, PF means progressive fetch, DBL means power
2665 * saving pixel doubling. */
2666#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
2667#define PIPECONF_INTERLACED_ILK (3 << 21)
2668#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
2669#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
2670#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2296#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
2671#define PIPECONF_BPP_MASK (0x000000e0) 2297#define PIPECONF_BPP_MASK (0x000000e0)
2672#define PIPECONF_BPP_8 (0<<5) 2298#define PIPECONF_BPP_8 (0<<5)
@@ -2681,30 +2307,23 @@
2681#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 2307#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2682#define _PIPEASTAT 0x70024 2308#define _PIPEASTAT 0x70024
2683#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2309#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2684#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
2685#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2310#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2686#define PIPE_CRC_DONE_ENABLE (1UL<<28) 2311#define PIPE_CRC_DONE_ENABLE (1UL<<28)
2687#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 2312#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
2688#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
2689#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) 2313#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
2690#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) 2314#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
2691#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 2315#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
2692#define PIPE_DPST_EVENT_ENABLE (1UL<<23) 2316#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
2693#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
2694#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 2317#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
2695#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 2318#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
2696#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 2319#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2697#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 2320#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
2698#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 2321#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
2699#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 2322#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
2700#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
2701#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 2323#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
2702#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
2703#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
2704#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 2324#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
2705#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 2325#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
2706#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 2326#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
2707#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
2708#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 2327#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
2709#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 2328#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
2710#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 2329#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2723,46 +2342,12 @@
2723#define PIPE_12BPC (3 << 5) 2342#define PIPE_12BPC (3 << 5)
2724 2343
2725#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 2344#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
2726#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) 2345#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
2727#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 2346#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
2728#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 2347#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
2729#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2348#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
2730#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 2349#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
2731 2350
2732#define VLV_DPFLIPSTAT 0x70028
2733#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
2734#define PIPEB_HLINE_INT_EN (1<<28)
2735#define PIPEB_VBLANK_INT_EN (1<<27)
2736#define SPRITED_FLIPDONE_INT_EN (1<<26)
2737#define SPRITEC_FLIPDONE_INT_EN (1<<25)
2738#define PLANEB_FLIPDONE_INT_EN (1<<24)
2739#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
2740#define PIPEA_HLINE_INT_EN (1<<20)
2741#define PIPEA_VBLANK_INT_EN (1<<19)
2742#define SPRITEB_FLIPDONE_INT_EN (1<<18)
2743#define SPRITEA_FLIPDONE_INT_EN (1<<17)
2744#define PLANEA_FLIPDONE_INT_EN (1<<16)
2745
2746#define DPINVGTT 0x7002c /* VLV only */
2747#define CURSORB_INVALID_GTT_INT_EN (1<<23)
2748#define CURSORA_INVALID_GTT_INT_EN (1<<22)
2749#define SPRITED_INVALID_GTT_INT_EN (1<<21)
2750#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
2751#define PLANEB_INVALID_GTT_INT_EN (1<<19)
2752#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
2753#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
2754#define PLANEA_INVALID_GTT_INT_EN (1<<16)
2755#define DPINVGTT_EN_MASK 0xff0000
2756#define CURSORB_INVALID_GTT_STATUS (1<<7)
2757#define CURSORA_INVALID_GTT_STATUS (1<<6)
2758#define SPRITED_INVALID_GTT_STATUS (1<<5)
2759#define SPRITEC_INVALID_GTT_STATUS (1<<4)
2760#define PLANEB_INVALID_GTT_STATUS (1<<3)
2761#define SPRITEB_INVALID_GTT_STATUS (1<<2)
2762#define SPRITEA_INVALID_GTT_STATUS (1<<1)
2763#define PLANEA_INVALID_GTT_STATUS (1<<0)
2764#define DPINVGTT_STATUS_MASK 0xff
2765
2766#define DSPARB 0x70030 2351#define DSPARB 0x70030
2767#define DSPARB_CSTART_MASK (0x7f << 7) 2352#define DSPARB_CSTART_MASK (0x7f << 7)
2768#define DSPARB_CSTART_SHIFT 7 2353#define DSPARB_CSTART_SHIFT 7
@@ -2773,7 +2358,7 @@
2773 2358
2774#define DSPFW1 0x70034 2359#define DSPFW1 0x70034
2775#define DSPFW_SR_SHIFT 23 2360#define DSPFW_SR_SHIFT 23
2776#define DSPFW_SR_MASK (0x1ff<<23) 2361#define DSPFW_SR_MASK (0x1ff<<23)
2777#define DSPFW_CURSORB_SHIFT 16 2362#define DSPFW_CURSORB_SHIFT 16
2778#define DSPFW_CURSORB_MASK (0x3f<<16) 2363#define DSPFW_CURSORB_MASK (0x3f<<16)
2779#define DSPFW_PLANEB_SHIFT 8 2364#define DSPFW_PLANEB_SHIFT 8
@@ -2792,28 +2377,11 @@
2792#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 2377#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
2793#define DSPFW_HPLL_SR_MASK (0x1ff) 2378#define DSPFW_HPLL_SR_MASK (0x1ff)
2794 2379
2795/* drain latency register values*/
2796#define DRAIN_LATENCY_PRECISION_32 32
2797#define DRAIN_LATENCY_PRECISION_16 16
2798#define VLV_DDL1 0x70050
2799#define DDL_CURSORA_PRECISION_32 (1<<31)
2800#define DDL_CURSORA_PRECISION_16 (0<<31)
2801#define DDL_CURSORA_SHIFT 24
2802#define DDL_PLANEA_PRECISION_32 (1<<7)
2803#define DDL_PLANEA_PRECISION_16 (0<<7)
2804#define VLV_DDL2 0x70054
2805#define DDL_CURSORB_PRECISION_32 (1<<31)
2806#define DDL_CURSORB_PRECISION_16 (0<<31)
2807#define DDL_CURSORB_SHIFT 24
2808#define DDL_PLANEB_PRECISION_32 (1<<7)
2809#define DDL_PLANEB_PRECISION_16 (0<<7)
2810
2811/* FIFO watermark sizes etc */ 2380/* FIFO watermark sizes etc */
2812#define G4X_FIFO_LINE_SIZE 64 2381#define G4X_FIFO_LINE_SIZE 64
2813#define I915_FIFO_LINE_SIZE 64 2382#define I915_FIFO_LINE_SIZE 64
2814#define I830_FIFO_LINE_SIZE 32 2383#define I830_FIFO_LINE_SIZE 32
2815 2384
2816#define VALLEYVIEW_FIFO_SIZE 255
2817#define G4X_FIFO_SIZE 127 2385#define G4X_FIFO_SIZE 127
2818#define I965_FIFO_SIZE 512 2386#define I965_FIFO_SIZE 512
2819#define I945_FIFO_SIZE 127 2387#define I945_FIFO_SIZE 127
@@ -2821,7 +2389,6 @@
2821#define I855GM_FIFO_SIZE 127 /* In cachelines */ 2389#define I855GM_FIFO_SIZE 127 /* In cachelines */
2822#define I830_FIFO_SIZE 95 2390#define I830_FIFO_SIZE 95
2823 2391
2824#define VALLEYVIEW_MAX_WM 0xff
2825#define G4X_MAX_WM 0x3f 2392#define G4X_MAX_WM 0x3f
2826#define I915_MAX_WM 0x3f 2393#define I915_MAX_WM 0x3f
2827 2394
@@ -2836,7 +2403,6 @@
2836#define PINEVIEW_CURSOR_DFT_WM 0 2403#define PINEVIEW_CURSOR_DFT_WM 0
2837#define PINEVIEW_CURSOR_GUARD_WM 5 2404#define PINEVIEW_CURSOR_GUARD_WM 5
2838 2405
2839#define VALLEYVIEW_CURSOR_MAX_WM 64
2840#define I965_CURSOR_FIFO 64 2406#define I965_CURSOR_FIFO 64
2841#define I965_CURSOR_MAX_WM 32 2407#define I965_CURSOR_MAX_WM 32
2842#define I965_CURSOR_DFT_WM 8 2408#define I965_CURSOR_DFT_WM 8
@@ -2850,7 +2416,6 @@
2850#define WM0_PIPE_CURSOR_MASK (0x1f) 2416#define WM0_PIPE_CURSOR_MASK (0x1f)
2851 2417
2852#define WM0_PIPEB_ILK 0x45104 2418#define WM0_PIPEB_ILK 0x45104
2853#define WM0_PIPEC_IVB 0x45200
2854#define WM1_LP_ILK 0x45108 2419#define WM1_LP_ILK 0x45108
2855#define WM1_LP_SR_EN (1<<31) 2420#define WM1_LP_SR_EN (1<<31)
2856#define WM1_LP_LATENCY_SHIFT 24 2421#define WM1_LP_LATENCY_SHIFT 24
@@ -2865,8 +2430,6 @@
2865#define WM3_LP_ILK 0x45110 2430#define WM3_LP_ILK 0x45110
2866#define WM3_LP_EN (1<<31) 2431#define WM3_LP_EN (1<<31)
2867#define WM1S_LP_ILK 0x45120 2432#define WM1S_LP_ILK 0x45120
2868#define WM2S_LP_IVB 0x45124
2869#define WM3S_LP_IVB 0x45128
2870#define WM1S_LP_EN (1<<31) 2433#define WM1S_LP_EN (1<<31)
2871 2434
2872/* Memory latency timer register */ 2435/* Memory latency timer register */
@@ -3010,19 +2573,12 @@
3010#define DISPPLANE_GAMMA_ENABLE (1<<30) 2573#define DISPPLANE_GAMMA_ENABLE (1<<30)
3011#define DISPPLANE_GAMMA_DISABLE 0 2574#define DISPPLANE_GAMMA_DISABLE 0
3012#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) 2575#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
3013#define DISPPLANE_YUV422 (0x0<<26)
3014#define DISPPLANE_8BPP (0x2<<26) 2576#define DISPPLANE_8BPP (0x2<<26)
3015#define DISPPLANE_BGRA555 (0x3<<26) 2577#define DISPPLANE_15_16BPP (0x4<<26)
3016#define DISPPLANE_BGRX555 (0x4<<26) 2578#define DISPPLANE_16BPP (0x5<<26)
3017#define DISPPLANE_BGRX565 (0x5<<26) 2579#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
3018#define DISPPLANE_BGRX888 (0x6<<26) 2580#define DISPPLANE_32BPP (0x7<<26)
3019#define DISPPLANE_BGRA888 (0x7<<26) 2581#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
3020#define DISPPLANE_RGBX101010 (0x8<<26)
3021#define DISPPLANE_RGBA101010 (0x9<<26)
3022#define DISPPLANE_BGRX101010 (0xa<<26)
3023#define DISPPLANE_RGBX161616 (0xc<<26)
3024#define DISPPLANE_RGBX888 (0xe<<26)
3025#define DISPPLANE_RGBA888 (0xf<<26)
3026#define DISPPLANE_STEREO_ENABLE (1<<25) 2582#define DISPPLANE_STEREO_ENABLE (1<<25)
3027#define DISPPLANE_STEREO_DISABLE 0 2583#define DISPPLANE_STEREO_DISABLE 0
3028#define DISPPLANE_SEL_PIPE_SHIFT 24 2584#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3043,8 +2599,6 @@
3043#define _DSPASIZE 0x70190 2599#define _DSPASIZE 0x70190
3044#define _DSPASURF 0x7019C /* 965+ only */ 2600#define _DSPASURF 0x7019C /* 965+ only */
3045#define _DSPATILEOFF 0x701A4 /* 965+ only */ 2601#define _DSPATILEOFF 0x701A4 /* 965+ only */
3046#define _DSPAOFFSET 0x701A4 /* HSW */
3047#define _DSPASURFLIVE 0x701AC
3048 2602
3049#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 2603#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
3050#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 2604#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3053,16 +2607,6 @@
3053#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) 2607#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
3054#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 2608#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
3055#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 2609#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
3056#define DSPLINOFF(plane) DSPADDR(plane)
3057#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
3058#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
3059
3060/* Display/Sprite base address macros */
3061#define DISP_BASEADDR_MASK (0xfffff000)
3062#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
3063#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
3064#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
3065 (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
3066 2610
3067/* VBIOS flags */ 2611/* VBIOS flags */
3068#define SWF00 0x71410 2612#define SWF00 0x71410
@@ -3101,149 +2645,6 @@
3101#define _DSPBSIZE 0x71190 2645#define _DSPBSIZE 0x71190
3102#define _DSPBSURF 0x7119C 2646#define _DSPBSURF 0x7119C
3103#define _DSPBTILEOFF 0x711A4 2647#define _DSPBTILEOFF 0x711A4
3104#define _DSPBOFFSET 0x711A4
3105#define _DSPBSURFLIVE 0x711AC
3106
3107/* Sprite A control */
3108#define _DVSACNTR 0x72180
3109#define DVS_ENABLE (1<<31)
3110#define DVS_GAMMA_ENABLE (1<<30)
3111#define DVS_PIXFORMAT_MASK (3<<25)
3112#define DVS_FORMAT_YUV422 (0<<25)
3113#define DVS_FORMAT_RGBX101010 (1<<25)
3114#define DVS_FORMAT_RGBX888 (2<<25)
3115#define DVS_FORMAT_RGBX161616 (3<<25)
3116#define DVS_SOURCE_KEY (1<<22)
3117#define DVS_RGB_ORDER_XBGR (1<<20)
3118#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
3119#define DVS_YUV_ORDER_YUYV (0<<16)
3120#define DVS_YUV_ORDER_UYVY (1<<16)
3121#define DVS_YUV_ORDER_YVYU (2<<16)
3122#define DVS_YUV_ORDER_VYUY (3<<16)
3123#define DVS_DEST_KEY (1<<2)
3124#define DVS_TRICKLE_FEED_DISABLE (1<<14)
3125#define DVS_TILED (1<<10)
3126#define _DVSALINOFF 0x72184
3127#define _DVSASTRIDE 0x72188
3128#define _DVSAPOS 0x7218c
3129#define _DVSASIZE 0x72190
3130#define _DVSAKEYVAL 0x72194
3131#define _DVSAKEYMSK 0x72198
3132#define _DVSASURF 0x7219c
3133#define _DVSAKEYMAXVAL 0x721a0
3134#define _DVSATILEOFF 0x721a4
3135#define _DVSASURFLIVE 0x721ac
3136#define _DVSASCALE 0x72204
3137#define DVS_SCALE_ENABLE (1<<31)
3138#define DVS_FILTER_MASK (3<<29)
3139#define DVS_FILTER_MEDIUM (0<<29)
3140#define DVS_FILTER_ENHANCING (1<<29)
3141#define DVS_FILTER_SOFTENING (2<<29)
3142#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
3143#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
3144#define _DVSAGAMC 0x72300
3145
3146#define _DVSBCNTR 0x73180
3147#define _DVSBLINOFF 0x73184
3148#define _DVSBSTRIDE 0x73188
3149#define _DVSBPOS 0x7318c
3150#define _DVSBSIZE 0x73190
3151#define _DVSBKEYVAL 0x73194
3152#define _DVSBKEYMSK 0x73198
3153#define _DVSBSURF 0x7319c
3154#define _DVSBKEYMAXVAL 0x731a0
3155#define _DVSBTILEOFF 0x731a4
3156#define _DVSBSURFLIVE 0x731ac
3157#define _DVSBSCALE 0x73204
3158#define _DVSBGAMC 0x73300
3159
3160#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
3161#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
3162#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
3163#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
3164#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
3165#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
3166#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
3167#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
3168#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
3169#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
3170#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
3171#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
3172
3173#define _SPRA_CTL 0x70280
3174#define SPRITE_ENABLE (1<<31)
3175#define SPRITE_GAMMA_ENABLE (1<<30)
3176#define SPRITE_PIXFORMAT_MASK (7<<25)
3177#define SPRITE_FORMAT_YUV422 (0<<25)
3178#define SPRITE_FORMAT_RGBX101010 (1<<25)
3179#define SPRITE_FORMAT_RGBX888 (2<<25)
3180#define SPRITE_FORMAT_RGBX161616 (3<<25)
3181#define SPRITE_FORMAT_YUV444 (4<<25)
3182#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
3183#define SPRITE_CSC_ENABLE (1<<24)
3184#define SPRITE_SOURCE_KEY (1<<22)
3185#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
3186#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
3187#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
3188#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
3189#define SPRITE_YUV_ORDER_YUYV (0<<16)
3190#define SPRITE_YUV_ORDER_UYVY (1<<16)
3191#define SPRITE_YUV_ORDER_YVYU (2<<16)
3192#define SPRITE_YUV_ORDER_VYUY (3<<16)
3193#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
3194#define SPRITE_INT_GAMMA_ENABLE (1<<13)
3195#define SPRITE_TILED (1<<10)
3196#define SPRITE_DEST_KEY (1<<2)
3197#define _SPRA_LINOFF 0x70284
3198#define _SPRA_STRIDE 0x70288
3199#define _SPRA_POS 0x7028c
3200#define _SPRA_SIZE 0x70290
3201#define _SPRA_KEYVAL 0x70294
3202#define _SPRA_KEYMSK 0x70298
3203#define _SPRA_SURF 0x7029c
3204#define _SPRA_KEYMAX 0x702a0
3205#define _SPRA_TILEOFF 0x702a4
3206#define _SPRA_OFFSET 0x702a4
3207#define _SPRA_SURFLIVE 0x702ac
3208#define _SPRA_SCALE 0x70304
3209#define SPRITE_SCALE_ENABLE (1<<31)
3210#define SPRITE_FILTER_MASK (3<<29)
3211#define SPRITE_FILTER_MEDIUM (0<<29)
3212#define SPRITE_FILTER_ENHANCING (1<<29)
3213#define SPRITE_FILTER_SOFTENING (2<<29)
3214#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
3215#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
3216#define _SPRA_GAMC 0x70400
3217
3218#define _SPRB_CTL 0x71280
3219#define _SPRB_LINOFF 0x71284
3220#define _SPRB_STRIDE 0x71288
3221#define _SPRB_POS 0x7128c
3222#define _SPRB_SIZE 0x71290
3223#define _SPRB_KEYVAL 0x71294
3224#define _SPRB_KEYMSK 0x71298
3225#define _SPRB_SURF 0x7129c
3226#define _SPRB_KEYMAX 0x712a0
3227#define _SPRB_TILEOFF 0x712a4
3228#define _SPRB_OFFSET 0x712a4
3229#define _SPRB_SURFLIVE 0x712ac
3230#define _SPRB_SCALE 0x71304
3231#define _SPRB_GAMC 0x71400
3232
3233#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
3234#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
3235#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
3236#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
3237#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
3238#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
3239#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
3240#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
3241#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
3242#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
3243#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
3244#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
3245#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
3246#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
3247 2648
3248/* VBIOS regs */ 2649/* VBIOS regs */
3249#define VGACNTRL 0x71400 2650#define VGACNTRL 0x71400
@@ -3278,6 +2679,12 @@
3278#define DISPLAY_PORT_PLL_BIOS_1 0x46010 2679#define DISPLAY_PORT_PLL_BIOS_1 0x46010
3279#define DISPLAY_PORT_PLL_BIOS_2 0x46014 2680#define DISPLAY_PORT_PLL_BIOS_2 0x46014
3280 2681
2682#define PCH_DSPCLK_GATE_D 0x42020
2683# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
2684# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
2685# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
2686# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
2687
3281#define PCH_3DCGDIS0 0x46020 2688#define PCH_3DCGDIS0 0x46020
3282# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 2689# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
3283# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 2690# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3327,22 +2734,20 @@
3327#define _PIPEB_LINK_M2 0x61048 2734#define _PIPEB_LINK_M2 0x61048
3328#define _PIPEB_LINK_N2 0x6104c 2735#define _PIPEB_LINK_N2 0x6104c
3329 2736
3330#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 2737#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
3331#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 2738#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
3332#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 2739#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
3333#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 2740#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
3334#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 2741#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
3335#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) 2742#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
3336#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) 2743#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
3337#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) 2744#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
3338 2745
3339/* CPU panel fitter */ 2746/* CPU panel fitter */
3340/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 2747/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
3341#define _PFA_CTL_1 0x68080 2748#define _PFA_CTL_1 0x68080
3342#define _PFB_CTL_1 0x68880 2749#define _PFB_CTL_1 0x68880
3343#define PF_ENABLE (1<<31) 2750#define PF_ENABLE (1<<31)
3344#define PF_PIPE_SEL_MASK_IVB (3<<29)
3345#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
3346#define PF_FILTER_MASK (3<<23) 2751#define PF_FILTER_MASK (3<<23)
3347#define PF_FILTER_PROGRAMMED (0<<23) 2752#define PF_FILTER_PROGRAMMED (0<<23)
3348#define PF_FILTER_MED_3x3 (1<<23) 2753#define PF_FILTER_MED_3x3 (1<<23)
@@ -3401,38 +2806,25 @@
3401#define DE_PCH_EVENT_IVB (1<<28) 2806#define DE_PCH_EVENT_IVB (1<<28)
3402#define DE_DP_A_HOTPLUG_IVB (1<<27) 2807#define DE_DP_A_HOTPLUG_IVB (1<<27)
3403#define DE_AUX_CHANNEL_A_IVB (1<<26) 2808#define DE_AUX_CHANNEL_A_IVB (1<<26)
3404#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
3405#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
3406#define DE_PIPEC_VBLANK_IVB (1<<10)
3407#define DE_SPRITEB_FLIP_DONE_IVB (1<<9) 2809#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
3408#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
3409#define DE_PIPEB_VBLANK_IVB (1<<5)
3410#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 2810#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
2811#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
3411#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 2812#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
2813#define DE_PIPEB_VBLANK_IVB (1<<5)
3412#define DE_PIPEA_VBLANK_IVB (1<<0) 2814#define DE_PIPEA_VBLANK_IVB (1<<0)
3413 2815
3414#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
3415#define MASTER_INTERRUPT_ENABLE (1<<31)
3416
3417#define DEISR 0x44000 2816#define DEISR 0x44000
3418#define DEIMR 0x44004 2817#define DEIMR 0x44004
3419#define DEIIR 0x44008 2818#define DEIIR 0x44008
3420#define DEIER 0x4400c 2819#define DEIER 0x4400c
3421 2820
3422/* GT interrupt. 2821/* GT interrupt */
3423 * Note that for gen6+ the ring-specific interrupt bits do alias with the 2822#define GT_PIPE_NOTIFY (1 << 4)
3424 * corresponding bits in the per-ring interrupt control registers. */ 2823#define GT_SYNC_STATUS (1 << 2)
3425#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) 2824#define GT_USER_INTERRUPT (1 << 0)
3426#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25) 2825#define GT_BSD_USER_INTERRUPT (1 << 5)
3427#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22) 2826#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3428#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15) 2827#define GT_BLT_USER_INTERRUPT (1 << 22)
3429#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3430#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
3431#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
3432#define GT_PIPE_NOTIFY (1 << 4)
3433#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
3434#define GT_SYNC_STATUS (1 << 2)
3435#define GT_USER_INTERRUPT (1 << 0)
3436 2828
3437#define GTISR 0x44010 2829#define GTISR 0x44010
3438#define GTIMR 0x44014 2830#define GTIMR 0x44014
@@ -3451,46 +2843,23 @@
3451#define ILK_HDCP_DISABLE (1<<25) 2843#define ILK_HDCP_DISABLE (1<<25)
3452#define ILK_eDP_A_DISABLE (1<<24) 2844#define ILK_eDP_A_DISABLE (1<<24)
3453#define ILK_DESKTOP (1<<23) 2845#define ILK_DESKTOP (1<<23)
2846#define ILK_DSPCLK_GATE 0x42020
2847#define IVB_VRHUNIT_CLK_GATE (1<<28)
2848#define ILK_DPARB_CLK_GATE (1<<5)
2849#define ILK_DPFD_CLK_GATE (1<<7)
3454 2850
3455#define ILK_DSPCLK_GATE_D 0x42020 2851/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
3456#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) 2852#define ILK_CLK_FBC (1<<7)
3457#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) 2853#define ILK_DPFC_DIS1 (1<<8)
3458#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) 2854#define ILK_DPFC_DIS2 (1<<9)
3459#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
3460#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
3461
3462#define IVB_CHICKEN3 0x4200c
3463# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
3464# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
3465 2855
3466#define DISP_ARB_CTL 0x45000 2856#define DISP_ARB_CTL 0x45000
3467#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 2857#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
3468#define DISP_FBC_WM_DIS (1<<15) 2858#define DISP_FBC_WM_DIS (1<<15)
3469 2859
3470/* GEN7 chicken */
3471#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
3472# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
3473
3474#define GEN7_L3CNTLREG1 0xB01C
3475#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
3476#define GEN7_L3AGDIS (1<<19)
3477
3478#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
3479#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
3480
3481#define GEN7_L3SQCREG4 0xb034
3482#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
3483
3484/* WaCatErrorRejectionIssue */
3485#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
3486#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
3487
3488#define HSW_FUSE_STRAP 0x42014
3489#define HSW_CDCLK_LIMIT (1 << 24)
3490
3491/* PCH */ 2860/* PCH */
3492 2861
3493/* south display engine interrupt: IBX */ 2862/* south display engine interrupt */
3494#define SDE_AUDIO_POWER_D (1 << 27) 2863#define SDE_AUDIO_POWER_D (1 << 27)
3495#define SDE_AUDIO_POWER_C (1 << 26) 2864#define SDE_AUDIO_POWER_C (1 << 26)
3496#define SDE_AUDIO_POWER_B (1 << 25) 2865#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3526,44 +2895,15 @@
3526#define SDE_TRANSA_CRC_ERR (1 << 1) 2895#define SDE_TRANSA_CRC_ERR (1 << 1)
3527#define SDE_TRANSA_FIFO_UNDER (1 << 0) 2896#define SDE_TRANSA_FIFO_UNDER (1 << 0)
3528#define SDE_TRANS_MASK (0x3f) 2897#define SDE_TRANS_MASK (0x3f)
3529 2898/* CPT */
3530/* south display engine interrupt: CPT/PPT */ 2899#define SDE_CRT_HOTPLUG_CPT (1 << 19)
3531#define SDE_AUDIO_POWER_D_CPT (1 << 31)
3532#define SDE_AUDIO_POWER_C_CPT (1 << 30)
3533#define SDE_AUDIO_POWER_B_CPT (1 << 29)
3534#define SDE_AUDIO_POWER_SHIFT_CPT 29
3535#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
3536#define SDE_AUXD_CPT (1 << 27)
3537#define SDE_AUXC_CPT (1 << 26)
3538#define SDE_AUXB_CPT (1 << 25)
3539#define SDE_AUX_MASK_CPT (7 << 25)
3540#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 2900#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
3541#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 2901#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
3542#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 2902#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
3543#define SDE_CRT_HOTPLUG_CPT (1 << 19)
3544#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ 2903#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
3545 SDE_PORTD_HOTPLUG_CPT | \ 2904 SDE_PORTD_HOTPLUG_CPT | \
3546 SDE_PORTC_HOTPLUG_CPT | \ 2905 SDE_PORTC_HOTPLUG_CPT | \
3547 SDE_PORTB_HOTPLUG_CPT) 2906 SDE_PORTB_HOTPLUG_CPT)
3548#define SDE_GMBUS_CPT (1 << 17)
3549#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
3550#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
3551#define SDE_FDI_RXC_CPT (1 << 8)
3552#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
3553#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
3554#define SDE_FDI_RXB_CPT (1 << 4)
3555#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
3556#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
3557#define SDE_FDI_RXA_CPT (1 << 0)
3558#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
3559 SDE_AUDIO_CP_REQ_B_CPT | \
3560 SDE_AUDIO_CP_REQ_A_CPT)
3561#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
3562 SDE_AUDIO_CP_CHG_B_CPT | \
3563 SDE_AUDIO_CP_CHG_A_CPT)
3564#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
3565 SDE_FDI_RXB_CPT | \
3566 SDE_FDI_RXA_CPT)
3567 2907
3568#define SDEISR 0xc4000 2908#define SDEISR 0xc4000
3569#define SDEIMR 0xc4004 2909#define SDEIMR 0xc4004
@@ -3571,13 +2911,12 @@
3571#define SDEIER 0xc400c 2911#define SDEIER 0xc400c
3572 2912
3573/* digital port hotplug */ 2913/* digital port hotplug */
3574#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 2914#define PCH_PORT_HOTPLUG 0xc4030
3575#define PORTD_HOTPLUG_ENABLE (1 << 20) 2915#define PORTD_HOTPLUG_ENABLE (1 << 20)
3576#define PORTD_PULSE_DURATION_2ms (0) 2916#define PORTD_PULSE_DURATION_2ms (0)
3577#define PORTD_PULSE_DURATION_4_5ms (1 << 18) 2917#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
3578#define PORTD_PULSE_DURATION_6ms (2 << 18) 2918#define PORTD_PULSE_DURATION_6ms (2 << 18)
3579#define PORTD_PULSE_DURATION_100ms (3 << 18) 2919#define PORTD_PULSE_DURATION_100ms (3 << 18)
3580#define PORTD_PULSE_DURATION_MASK (3 << 18)
3581#define PORTD_HOTPLUG_NO_DETECT (0) 2920#define PORTD_HOTPLUG_NO_DETECT (0)
3582#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) 2921#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
3583#define PORTD_HOTPLUG_LONG_DETECT (1 << 17) 2922#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
@@ -3586,7 +2925,6 @@
3586#define PORTC_PULSE_DURATION_4_5ms (1 << 10) 2925#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
3587#define PORTC_PULSE_DURATION_6ms (2 << 10) 2926#define PORTC_PULSE_DURATION_6ms (2 << 10)
3588#define PORTC_PULSE_DURATION_100ms (3 << 10) 2927#define PORTC_PULSE_DURATION_100ms (3 << 10)
3589#define PORTC_PULSE_DURATION_MASK (3 << 10)
3590#define PORTC_HOTPLUG_NO_DETECT (0) 2928#define PORTC_HOTPLUG_NO_DETECT (0)
3591#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) 2929#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
3592#define PORTC_HOTPLUG_LONG_DETECT (1 << 9) 2930#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
@@ -3595,7 +2933,6 @@
3595#define PORTB_PULSE_DURATION_4_5ms (1 << 2) 2933#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
3596#define PORTB_PULSE_DURATION_6ms (2 << 2) 2934#define PORTB_PULSE_DURATION_6ms (2 << 2)
3597#define PORTB_PULSE_DURATION_100ms (3 << 2) 2935#define PORTB_PULSE_DURATION_100ms (3 << 2)
3598#define PORTB_PULSE_DURATION_MASK (3 << 2)
3599#define PORTB_HOTPLUG_NO_DETECT (0) 2936#define PORTB_HOTPLUG_NO_DETECT (0)
3600#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 2937#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
3601#define PORTB_HOTPLUG_LONG_DETECT (1 << 1) 2938#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
@@ -3616,15 +2953,15 @@
3616 2953
3617#define _PCH_DPLL_A 0xc6014 2954#define _PCH_DPLL_A 0xc6014
3618#define _PCH_DPLL_B 0xc6018 2955#define _PCH_DPLL_B 0xc6018
3619#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) 2956#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
3620 2957
3621#define _PCH_FPA0 0xc6040 2958#define _PCH_FPA0 0xc6040
3622#define FP_CB_TUNE (0x3<<22) 2959#define FP_CB_TUNE (0x3<<22)
3623#define _PCH_FPA1 0xc6044 2960#define _PCH_FPA1 0xc6044
3624#define _PCH_FPB0 0xc6048 2961#define _PCH_FPB0 0xc6048
3625#define _PCH_FPB1 0xc604c 2962#define _PCH_FPB1 0xc604c
3626#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) 2963#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
3627#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) 2964#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
3628 2965
3629#define PCH_DPLL_TEST 0xc606c 2966#define PCH_DPLL_TEST 0xc606c
3630 2967
@@ -3694,7 +3031,6 @@
3694#define _TRANS_VSYNC_A 0xe0014 3031#define _TRANS_VSYNC_A 0xe0014
3695#define TRANS_VSYNC_END_SHIFT 16 3032#define TRANS_VSYNC_END_SHIFT 16
3696#define TRANS_VSYNC_START_SHIFT 0 3033#define TRANS_VSYNC_START_SHIFT 0
3697#define _TRANS_VSYNCSHIFT_A 0xe0028
3698 3034
3699#define _TRANSA_DATA_M1 0xe0030 3035#define _TRANSA_DATA_M1 0xe0030
3700#define _TRANSA_DATA_N1 0xe0034 3036#define _TRANSA_DATA_N1 0xe0034
@@ -3719,64 +3055,12 @@
3719#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3055#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3720#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3056#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3721 3057
3722#define VLV_VIDEO_DIP_CTL_A 0x60200
3723#define VLV_VIDEO_DIP_DATA_A 0x60208
3724#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
3725
3726#define VLV_VIDEO_DIP_CTL_B 0x61170
3727#define VLV_VIDEO_DIP_DATA_B 0x61174
3728#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
3729
3730#define VLV_TVIDEO_DIP_CTL(pipe) \
3731 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
3732#define VLV_TVIDEO_DIP_DATA(pipe) \
3733 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
3734#define VLV_TVIDEO_DIP_GCP(pipe) \
3735 _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
3736
3737/* Haswell DIP controls */
3738#define HSW_VIDEO_DIP_CTL_A 0x60200
3739#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
3740#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
3741#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
3742#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
3743#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
3744#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
3745#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
3746#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
3747#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
3748#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
3749#define HSW_VIDEO_DIP_GCP_A 0x60210
3750
3751#define HSW_VIDEO_DIP_CTL_B 0x61200
3752#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
3753#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
3754#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
3755#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
3756#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
3757#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
3758#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
3759#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
3760#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
3761#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
3762#define HSW_VIDEO_DIP_GCP_B 0x61210
3763
3764#define HSW_TVIDEO_DIP_CTL(pipe) \
3765 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
3766#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
3767 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
3768#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
3769 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
3770#define HSW_TVIDEO_DIP_GCP(pipe) \
3771 _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
3772
3773#define _TRANS_HTOTAL_B 0xe1000 3058#define _TRANS_HTOTAL_B 0xe1000
3774#define _TRANS_HBLANK_B 0xe1004 3059#define _TRANS_HBLANK_B 0xe1004
3775#define _TRANS_HSYNC_B 0xe1008 3060#define _TRANS_HSYNC_B 0xe1008
3776#define _TRANS_VTOTAL_B 0xe100c 3061#define _TRANS_VTOTAL_B 0xe100c
3777#define _TRANS_VBLANK_B 0xe1010 3062#define _TRANS_VBLANK_B 0xe1010
3778#define _TRANS_VSYNC_B 0xe1014 3063#define _TRANS_VSYNC_B 0xe1014
3779#define _TRANS_VSYNCSHIFT_B 0xe1028
3780 3064
3781#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) 3065#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
3782#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) 3066#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
@@ -3784,8 +3068,6 @@
3784#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) 3068#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
3785#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) 3069#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
3786#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) 3070#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
3787#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
3788 _TRANS_VSYNCSHIFT_B)
3789 3071
3790#define _TRANSB_DATA_M1 0xe1030 3072#define _TRANSB_DATA_M1 0xe1030
3791#define _TRANSB_DATA_N1 0xe1034 3073#define _TRANSB_DATA_N1 0xe1034
@@ -3819,35 +3101,24 @@
3819#define TRANS_FSYNC_DELAY_HB4 (3<<27) 3101#define TRANS_FSYNC_DELAY_HB4 (3<<27)
3820#define TRANS_DP_AUDIO_ONLY (1<<26) 3102#define TRANS_DP_AUDIO_ONLY (1<<26)
3821#define TRANS_DP_VIDEO_AUDIO (0<<26) 3103#define TRANS_DP_VIDEO_AUDIO (0<<26)
3822#define TRANS_INTERLACE_MASK (7<<21)
3823#define TRANS_PROGRESSIVE (0<<21) 3104#define TRANS_PROGRESSIVE (0<<21)
3824#define TRANS_INTERLACED (3<<21)
3825#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
3826#define TRANS_8BPC (0<<5) 3105#define TRANS_8BPC (0<<5)
3827#define TRANS_10BPC (1<<5) 3106#define TRANS_10BPC (1<<5)
3828#define TRANS_6BPC (2<<5) 3107#define TRANS_6BPC (2<<5)
3829#define TRANS_12BPC (3<<5) 3108#define TRANS_12BPC (3<<5)
3830 3109
3831#define _TRANSA_CHICKEN1 0xf0060
3832#define _TRANSB_CHICKEN1 0xf1060
3833#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
3834#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
3835#define _TRANSA_CHICKEN2 0xf0064 3110#define _TRANSA_CHICKEN2 0xf0064
3836#define _TRANSB_CHICKEN2 0xf1064 3111#define _TRANSB_CHICKEN2 0xf1064
3837#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) 3112#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
3838#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) 3113#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
3839
3840 3114
3841#define SOUTH_CHICKEN1 0xc2000 3115#define SOUTH_CHICKEN1 0xc2000
3842#define FDIA_PHASE_SYNC_SHIFT_OVR 19 3116#define FDIA_PHASE_SYNC_SHIFT_OVR 19
3843#define FDIA_PHASE_SYNC_SHIFT_EN 18 3117#define FDIA_PHASE_SYNC_SHIFT_EN 18
3844#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 3118#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
3845#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 3119#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
3846#define FDI_BC_BIFURCATION_SELECT (1 << 12)
3847#define SOUTH_CHICKEN2 0xc2004 3120#define SOUTH_CHICKEN2 0xc2004
3848#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) 3121#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3849#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
3850#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3851 3122
3852#define _FDI_RXA_CHICKEN 0xc200c 3123#define _FDI_RXA_CHICKEN 0xc200c
3853#define _FDI_RXB_CHICKEN 0xc2010 3124#define _FDI_RXB_CHICKEN 0xc2010
@@ -3857,7 +3128,6 @@
3857 3128
3858#define SOUTH_DSPCLK_GATE_D 0xc2020 3129#define SOUTH_DSPCLK_GATE_D 0xc2020
3859#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 3130#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
3860#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
3861 3131
3862/* CPU: FDI_TX */ 3132/* CPU: FDI_TX */
3863#define _FDI_TXA_CTL 0x60100 3133#define _FDI_TXA_CTL 0x60100
@@ -3919,7 +3189,6 @@
3919#define FDI_FS_ERRC_ENABLE (1<<27) 3189#define FDI_FS_ERRC_ENABLE (1<<27)
3920#define FDI_FE_ERRC_ENABLE (1<<26) 3190#define FDI_FE_ERRC_ENABLE (1<<26)
3921#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3191#define FDI_DP_PORT_WIDTH_X8 (7<<19)
3922#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
3923#define FDI_8BPC (0<<16) 3192#define FDI_8BPC (0<<16)
3924#define FDI_10BPC (1<<16) 3193#define FDI_10BPC (1<<16)
3925#define FDI_6BPC (2<<16) 3194#define FDI_6BPC (2<<16)
@@ -3940,25 +3209,14 @@
3940#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) 3209#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
3941#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) 3210#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
3942#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) 3211#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
3943/* LPT */
3944#define FDI_PORT_WIDTH_2X_LPT (1<<19)
3945#define FDI_PORT_WIDTH_1X_LPT (0<<19)
3946
3947#define _FDI_RXA_MISC 0xf0010
3948#define _FDI_RXB_MISC 0xf1010
3949#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
3950#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
3951#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
3952#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
3953#define FDI_RX_TP1_TO_TP2_48 (2<<20)
3954#define FDI_RX_TP1_TO_TP2_64 (3<<20)
3955#define FDI_RX_FDI_DELAY_90 (0x90<<0)
3956#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3957 3212
3213#define _FDI_RXA_MISC 0xf0010
3214#define _FDI_RXB_MISC 0xf1010
3958#define _FDI_RXA_TUSIZE1 0xf0030 3215#define _FDI_RXA_TUSIZE1 0xf0030
3959#define _FDI_RXA_TUSIZE2 0xf0038 3216#define _FDI_RXA_TUSIZE2 0xf0038
3960#define _FDI_RXB_TUSIZE1 0xf1030 3217#define _FDI_RXB_TUSIZE1 0xf1030
3961#define _FDI_RXB_TUSIZE2 0xf1038 3218#define _FDI_RXB_TUSIZE2 0xf1038
3219#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3962#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 3220#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
3963#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 3221#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
3964 3222
@@ -3985,6 +3243,31 @@
3985#define FDI_PLL_CTL_1 0xfe000 3243#define FDI_PLL_CTL_1 0xfe000
3986#define FDI_PLL_CTL_2 0xfe004 3244#define FDI_PLL_CTL_2 0xfe004
3987 3245
3246/* CRT */
3247#define PCH_ADPA 0xe1100
3248#define ADPA_TRANS_SELECT_MASK (1<<30)
3249#define ADPA_TRANS_A_SELECT 0
3250#define ADPA_TRANS_B_SELECT (1<<30)
3251#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
3252#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
3253#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
3254#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
3255#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
3256#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
3257#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
3258#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
3259#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
3260#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
3261#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
3262#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
3263#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
3264#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
3265#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
3266#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
3267#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
3268#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3269#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3270
3988/* or SDVOB */ 3271/* or SDVOB */
3989#define HDMIB 0xe1140 3272#define HDMIB 0xe1140
3990#define PORT_ENABLE (1 << 31) 3273#define PORT_ENABLE (1 << 31)
@@ -4016,56 +3299,33 @@
4016#define PCH_LVDS 0xe1180 3299#define PCH_LVDS 0xe1180
4017#define LVDS_DETECTED (1 << 1) 3300#define LVDS_DETECTED (1 << 1)
4018 3301
4019/* vlv has 2 sets of panel control regs. */ 3302#define BLC_PWM_CPU_CTL2 0x48250
4020#define PIPEA_PP_STATUS 0x61200 3303#define PWM_ENABLE (1 << 31)
4021#define PIPEA_PP_CONTROL 0x61204 3304#define PWM_PIPE_A (0 << 29)
4022#define PIPEA_PP_ON_DELAYS 0x61208 3305#define PWM_PIPE_B (1 << 29)
4023#define PIPEA_PP_OFF_DELAYS 0x6120c 3306#define BLC_PWM_CPU_CTL 0x48254
4024#define PIPEA_PP_DIVISOR 0x61210
4025 3307
4026#define PIPEB_PP_STATUS 0x61300 3308#define BLC_PWM_PCH_CTL1 0xc8250
4027#define PIPEB_PP_CONTROL 0x61304 3309#define PWM_PCH_ENABLE (1 << 31)
4028#define PIPEB_PP_ON_DELAYS 0x61308 3310#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
4029#define PIPEB_PP_OFF_DELAYS 0x6130c 3311#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
4030#define PIPEB_PP_DIVISOR 0x61310 3312#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
3313#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
3314
3315#define BLC_PWM_PCH_CTL2 0xc8254
4031 3316
4032#define PCH_PP_STATUS 0xc7200 3317#define PCH_PP_STATUS 0xc7200
4033#define PCH_PP_CONTROL 0xc7204 3318#define PCH_PP_CONTROL 0xc7204
4034#define PANEL_UNLOCK_REGS (0xabcd << 16) 3319#define PANEL_UNLOCK_REGS (0xabcd << 16)
4035#define PANEL_UNLOCK_MASK (0xffff << 16)
4036#define EDP_FORCE_VDD (1 << 3) 3320#define EDP_FORCE_VDD (1 << 3)
4037#define EDP_BLC_ENABLE (1 << 2) 3321#define EDP_BLC_ENABLE (1 << 2)
4038#define PANEL_POWER_RESET (1 << 1) 3322#define PANEL_POWER_RESET (1 << 1)
4039#define PANEL_POWER_OFF (0 << 0) 3323#define PANEL_POWER_OFF (0 << 0)
4040#define PANEL_POWER_ON (1 << 0) 3324#define PANEL_POWER_ON (1 << 0)
4041#define PCH_PP_ON_DELAYS 0xc7208 3325#define PCH_PP_ON_DELAYS 0xc7208
4042#define PANEL_PORT_SELECT_MASK (3 << 30)
4043#define PANEL_PORT_SELECT_LVDS (0 << 30)
4044#define PANEL_PORT_SELECT_DPA (1 << 30)
4045#define EDP_PANEL (1 << 30) 3326#define EDP_PANEL (1 << 30)
4046#define PANEL_PORT_SELECT_DPC (2 << 30)
4047#define PANEL_PORT_SELECT_DPD (3 << 30)
4048#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
4049#define PANEL_POWER_UP_DELAY_SHIFT 16
4050#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
4051#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4052
4053#define PCH_PP_OFF_DELAYS 0xc720c 3327#define PCH_PP_OFF_DELAYS 0xc720c
4054#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4055#define PANEL_POWER_PORT_LVDS (0 << 30)
4056#define PANEL_POWER_PORT_DP_A (1 << 30)
4057#define PANEL_POWER_PORT_DP_C (2 << 30)
4058#define PANEL_POWER_PORT_DP_D (3 << 30)
4059#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4060#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4061#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
4062#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
4063
4064#define PCH_PP_DIVISOR 0xc7210 3328#define PCH_PP_DIVISOR 0xc7210
4065#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
4066#define PP_REFERENCE_DIVIDER_SHIFT 8
4067#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
4068#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
4069 3329
4070#define PCH_DP_B 0xe4100 3330#define PCH_DP_B 0xe4100
4071#define PCH_DPB_AUX_CH_CTL 0xe4110 3331#define PCH_DPB_AUX_CH_CTL 0xe4110
@@ -4097,13 +3357,11 @@
4097#define PORT_TRANS_C_SEL_CPT (2<<29) 3357#define PORT_TRANS_C_SEL_CPT (2<<29)
4098#define PORT_TRANS_SEL_MASK (3<<29) 3358#define PORT_TRANS_SEL_MASK (3<<29)
4099#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) 3359#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
4100#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
4101#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
4102 3360
4103#define TRANS_DP_CTL_A 0xe0300 3361#define TRANS_DP_CTL_A 0xe0300
4104#define TRANS_DP_CTL_B 0xe1300 3362#define TRANS_DP_CTL_B 0xe1300
4105#define TRANS_DP_CTL_C 0xe2300 3363#define TRANS_DP_CTL_C 0xe2300
4106#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) 3364#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
4107#define TRANS_DP_OUTPUT_ENABLE (1<<31) 3365#define TRANS_DP_OUTPUT_ENABLE (1<<31)
4108#define TRANS_DP_PORT_SEL_B (0<<29) 3366#define TRANS_DP_PORT_SEL_B (0<<29)
4109#define TRANS_DP_PORT_SEL_C (1<<29) 3367#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4137,59 +3395,16 @@
4137#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) 3395#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
4138#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3396#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
4139 3397
4140/* IVB */
4141#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
4142#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
4143#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
4144#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
4145#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
4146#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
4147#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
4148
4149/* legacy values */
4150#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
4151#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
4152#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
4153#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
4154#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
4155
4156#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
4157
4158#define FORCEWAKE 0xA18C 3398#define FORCEWAKE 0xA18C
4159#define FORCEWAKE_VLV 0x1300b0
4160#define FORCEWAKE_ACK_VLV 0x1300b4
4161#define FORCEWAKE_ACK_HSW 0x130044
4162#define FORCEWAKE_ACK 0x130090 3399#define FORCEWAKE_ACK 0x130090
4163#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4164#define FORCEWAKE_KERNEL 0x1
4165#define FORCEWAKE_USER 0x2
4166#define FORCEWAKE_MT_ACK 0x130040
4167#define ECOBUS 0xa180
4168#define FORCEWAKE_MT_ENABLE (1<<5)
4169
4170#define GTFIFODBG 0x120000
4171#define GT_FIFO_CPU_ERROR_MASK 7
4172#define GT_FIFO_OVFERR (1<<2)
4173#define GT_FIFO_IAWRERR (1<<1)
4174#define GT_FIFO_IARDERR (1<<0)
4175 3400
4176#define GT_FIFO_FREE_ENTRIES 0x120008 3401#define GT_FIFO_FREE_ENTRIES 0x120008
4177#define GT_FIFO_NUM_RESERVED_ENTRIES 20 3402#define GT_FIFO_NUM_RESERVED_ENTRIES 20
4178 3403
4179#define GEN6_UCGCTL1 0x9400
4180# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
4181# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
4182
4183#define GEN6_UCGCTL2 0x9404 3404#define GEN6_UCGCTL2 0x9404
4184# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
4185# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
4186# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
4187# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) 3405# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
4188# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) 3406# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
4189 3407
4190#define GEN7_UCGCTL4 0x940c
4191#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
4192
4193#define GEN6_RPNSWREQ 0xA008 3408#define GEN6_RPNSWREQ 0xA008
4194#define GEN6_TURBO_DISABLE (1<<31) 3409#define GEN6_TURBO_DISABLE (1<<31)
4195#define GEN6_FREQUENCY(x) ((x)<<25) 3410#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4211,17 +3426,12 @@
4211#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) 3426#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
4212#define GEN6_RP_CONTROL 0xA024 3427#define GEN6_RP_CONTROL 0xA024
4213#define GEN6_RP_MEDIA_TURBO (1<<11) 3428#define GEN6_RP_MEDIA_TURBO (1<<11)
4214#define GEN6_RP_MEDIA_MODE_MASK (3<<9) 3429#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
4215#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
4216#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
4217#define GEN6_RP_MEDIA_HW_MODE (1<<9)
4218#define GEN6_RP_MEDIA_SW_MODE (0<<9)
4219#define GEN6_RP_MEDIA_IS_GFX (1<<8) 3430#define GEN6_RP_MEDIA_IS_GFX (1<<8)
4220#define GEN6_RP_ENABLE (1<<7) 3431#define GEN6_RP_ENABLE (1<<7)
4221#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 3432#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
4222#define GEN6_RP_UP_BUSY_AVG (0x2<<3) 3433#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
4223#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 3434#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
4224#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
4225#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 3435#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
4226#define GEN6_RP_UP_THRESHOLD 0xA02C 3436#define GEN6_RP_UP_THRESHOLD 0xA02C
4227#define GEN6_RP_DOWN_THRESHOLD 0xA030 3437#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4265,393 +3475,12 @@
4265 GEN6_PM_RP_DOWN_THRESHOLD | \ 3475 GEN6_PM_RP_DOWN_THRESHOLD | \
4266 GEN6_PM_RP_DOWN_TIMEOUT) 3476 GEN6_PM_RP_DOWN_TIMEOUT)
4267 3477
4268#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4269#define GEN6_GT_GFX_RC6 0x138108
4270#define GEN6_GT_GFX_RC6p 0x13810C
4271#define GEN6_GT_GFX_RC6pp 0x138110
4272
4273#define GEN6_PCODE_MAILBOX 0x138124 3478#define GEN6_PCODE_MAILBOX 0x138124
4274#define GEN6_PCODE_READY (1<<31) 3479#define GEN6_PCODE_READY (1<<31)
4275#define GEN6_READ_OC_PARAMS 0xc 3480#define GEN6_READ_OC_PARAMS 0xc
4276#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 3481#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
4277#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 3482#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4278#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4279#define GEN6_PCODE_READ_RC6VIDS 0x5
4280#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
4281#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
4282#define GEN6_PCODE_DATA 0x138128 3483#define GEN6_PCODE_DATA 0x138128
4283#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 3484#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4284 3485
4285#define GEN6_GT_CORE_STATUS 0x138060
4286#define GEN6_CORE_CPD_STATE_MASK (7<<4)
4287#define GEN6_RCn_MASK 7
4288#define GEN6_RC0 0
4289#define GEN6_RC3 2
4290#define GEN6_RC6 3
4291#define GEN6_RC7 4
4292
4293#define GEN7_MISCCPCTL (0x9424)
4294#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
4295
4296/* IVYBRIDGE DPF */
4297#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
4298#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
4299#define GEN7_PARITY_ERROR_VALID (1<<13)
4300#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
4301#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
4302#define GEN7_PARITY_ERROR_ROW(reg) \
4303 ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
4304#define GEN7_PARITY_ERROR_BANK(reg) \
4305 ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
4306#define GEN7_PARITY_ERROR_SUBBANK(reg) \
4307 ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
4308#define GEN7_L3CDERRST1_ENABLE (1<<7)
4309
4310#define GEN7_L3LOG_BASE 0xB070
4311#define GEN7_L3LOG_SIZE 0x80
4312
4313#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
4314#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
4315#define GEN7_MAX_PS_THREAD_DEP (8<<12)
4316#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
4317
4318#define GEN7_ROW_CHICKEN2 0xe4f4
4319#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4320#define DOP_CLOCK_GATING_DISABLE (1<<0)
4321
4322#define G4X_AUD_VID_DID 0x62020
4323#define INTEL_AUDIO_DEVCL 0x808629FB
4324#define INTEL_AUDIO_DEVBLC 0x80862801
4325#define INTEL_AUDIO_DEVCTG 0x80862802
4326
4327#define G4X_AUD_CNTL_ST 0x620B4
4328#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
4329#define G4X_ELDV_DEVCTG (1 << 14)
4330#define G4X_ELD_ADDR (0xf << 5)
4331#define G4X_ELD_ACK (1 << 4)
4332#define G4X_HDMIW_HDMIEDID 0x6210C
4333
4334#define IBX_HDMIW_HDMIEDID_A 0xE2050
4335#define IBX_HDMIW_HDMIEDID_B 0xE2150
4336#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4337 IBX_HDMIW_HDMIEDID_A, \
4338 IBX_HDMIW_HDMIEDID_B)
4339#define IBX_AUD_CNTL_ST_A 0xE20B4
4340#define IBX_AUD_CNTL_ST_B 0xE21B4
4341#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4342 IBX_AUD_CNTL_ST_A, \
4343 IBX_AUD_CNTL_ST_B)
4344#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
4345#define IBX_ELD_ADDRESS (0x1f << 5)
4346#define IBX_ELD_ACK (1 << 4)
4347#define IBX_AUD_CNTL_ST2 0xE20C0
4348#define IBX_ELD_VALIDB (1 << 0)
4349#define IBX_CP_READYB (1 << 1)
4350
4351#define CPT_HDMIW_HDMIEDID_A 0xE5050
4352#define CPT_HDMIW_HDMIEDID_B 0xE5150
4353#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4354 CPT_HDMIW_HDMIEDID_A, \
4355 CPT_HDMIW_HDMIEDID_B)
4356#define CPT_AUD_CNTL_ST_A 0xE50B4
4357#define CPT_AUD_CNTL_ST_B 0xE51B4
4358#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4359 CPT_AUD_CNTL_ST_A, \
4360 CPT_AUD_CNTL_ST_B)
4361#define CPT_AUD_CNTRL_ST2 0xE50C0
4362
4363/* These are the 4 32-bit write offset registers for each stream
4364 * output buffer. It determines the offset from the
4365 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
4366 */
4367#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
4368
4369#define IBX_AUD_CONFIG_A 0xe2000
4370#define IBX_AUD_CONFIG_B 0xe2100
4371#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
4372 IBX_AUD_CONFIG_A, \
4373 IBX_AUD_CONFIG_B)
4374#define CPT_AUD_CONFIG_A 0xe5000
4375#define CPT_AUD_CONFIG_B 0xe5100
4376#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4377 CPT_AUD_CONFIG_A, \
4378 CPT_AUD_CONFIG_B)
4379#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4380#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4381#define AUD_CONFIG_UPPER_N_SHIFT 20
4382#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
4383#define AUD_CONFIG_LOWER_N_SHIFT 4
4384#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
4385#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
4386#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
4387#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
4388
4389/* HSW Audio */
4390#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
4391#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
4392#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
4393 HSW_AUD_CONFIG_A, \
4394 HSW_AUD_CONFIG_B)
4395
4396#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
4397#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
4398#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
4399 HSW_AUD_MISC_CTRL_A, \
4400 HSW_AUD_MISC_CTRL_B)
4401
4402#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
4403#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
4404#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
4405 HSW_AUD_DIP_ELD_CTRL_ST_A, \
4406 HSW_AUD_DIP_ELD_CTRL_ST_B)
4407
4408/* Audio Digital Converter */
4409#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
4410#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */
4411#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
4412 HSW_AUD_DIG_CNVT_1, \
4413 HSW_AUD_DIG_CNVT_2)
4414#define DIP_PORT_SEL_MASK 0x3
4415
4416#define HSW_AUD_EDID_DATA_A 0x65050
4417#define HSW_AUD_EDID_DATA_B 0x65150
4418#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
4419 HSW_AUD_EDID_DATA_A, \
4420 HSW_AUD_EDID_DATA_B)
4421
4422#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
4423#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
4424#define AUDIO_INACTIVE_C (1<<11)
4425#define AUDIO_INACTIVE_B (1<<7)
4426#define AUDIO_INACTIVE_A (1<<3)
4427#define AUDIO_OUTPUT_ENABLE_A (1<<2)
4428#define AUDIO_OUTPUT_ENABLE_B (1<<6)
4429#define AUDIO_OUTPUT_ENABLE_C (1<<10)
4430#define AUDIO_ELD_VALID_A (1<<0)
4431#define AUDIO_ELD_VALID_B (1<<4)
4432#define AUDIO_ELD_VALID_C (1<<8)
4433#define AUDIO_CP_READY_A (1<<1)
4434#define AUDIO_CP_READY_B (1<<5)
4435#define AUDIO_CP_READY_C (1<<9)
4436
4437/* HSW Power Wells */
4438#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
4439#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
4440#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
4441#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
4442#define HSW_PWR_WELL_ENABLE (1<<31)
4443#define HSW_PWR_WELL_STATE (1<<30)
4444#define HSW_PWR_WELL_CTL5 0x45410
4445#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4446#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4447#define HSW_PWR_WELL_FORCE_ON (1<<19)
4448#define HSW_PWR_WELL_CTL6 0x45414
4449
4450/* Per-pipe DDI Function Control */
4451#define TRANS_DDI_FUNC_CTL_A 0x60400
4452#define TRANS_DDI_FUNC_CTL_B 0x61400
4453#define TRANS_DDI_FUNC_CTL_C 0x62400
4454#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
4455#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
4456 TRANS_DDI_FUNC_CTL_B)
4457#define TRANS_DDI_FUNC_ENABLE (1<<31)
4458/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4459#define TRANS_DDI_PORT_MASK (7<<28)
4460#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
4461#define TRANS_DDI_PORT_NONE (0<<28)
4462#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
4463#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
4464#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
4465#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
4466#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
4467#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
4468#define TRANS_DDI_BPC_MASK (7<<20)
4469#define TRANS_DDI_BPC_8 (0<<20)
4470#define TRANS_DDI_BPC_10 (1<<20)
4471#define TRANS_DDI_BPC_6 (2<<20)
4472#define TRANS_DDI_BPC_12 (3<<20)
4473#define TRANS_DDI_PVSYNC (1<<17)
4474#define TRANS_DDI_PHSYNC (1<<16)
4475#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
4476#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
4477#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
4478#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
4479#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
4480#define TRANS_DDI_BFI_ENABLE (1<<4)
4481#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
4482#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
4483#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
4484
4485/* DisplayPort Transport Control */
4486#define DP_TP_CTL_A 0x64040
4487#define DP_TP_CTL_B 0x64140
4488#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
4489#define DP_TP_CTL_ENABLE (1<<31)
4490#define DP_TP_CTL_MODE_SST (0<<27)
4491#define DP_TP_CTL_MODE_MST (1<<27)
4492#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
4493#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
4494#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4495#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4496#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4497#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
4498#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
4499#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4500#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
4501
4502/* DisplayPort Transport Status */
4503#define DP_TP_STATUS_A 0x64044
4504#define DP_TP_STATUS_B 0x64144
4505#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
4506#define DP_TP_STATUS_IDLE_DONE (1<<25)
4507#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4508
4509/* DDI Buffer Control */
4510#define DDI_BUF_CTL_A 0x64000
4511#define DDI_BUF_CTL_B 0x64100
4512#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
4513#define DDI_BUF_CTL_ENABLE (1<<31)
4514#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4515#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4516#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4517#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
4518#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
4519#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
4520#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4521#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4522#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4523#define DDI_BUF_EMP_MASK (0xf<<24)
4524#define DDI_BUF_IS_IDLE (1<<7)
4525#define DDI_A_4_LANES (1<<4)
4526#define DDI_PORT_WIDTH_X1 (0<<1)
4527#define DDI_PORT_WIDTH_X2 (1<<1)
4528#define DDI_PORT_WIDTH_X4 (3<<1)
4529#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4530
4531/* DDI Buffer Translations */
4532#define DDI_BUF_TRANS_A 0x64E00
4533#define DDI_BUF_TRANS_B 0x64E60
4534#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
4535
4536/* Sideband Interface (SBI) is programmed indirectly, via
4537 * SBI_ADDR, which contains the register offset; and SBI_DATA,
4538 * which contains the payload */
4539#define SBI_ADDR 0xC6000
4540#define SBI_DATA 0xC6004
4541#define SBI_CTL_STAT 0xC6008
4542#define SBI_CTL_DEST_ICLK (0x0<<16)
4543#define SBI_CTL_DEST_MPHY (0x1<<16)
4544#define SBI_CTL_OP_IORD (0x2<<8)
4545#define SBI_CTL_OP_IOWR (0x3<<8)
4546#define SBI_CTL_OP_CRRD (0x6<<8)
4547#define SBI_CTL_OP_CRWR (0x7<<8)
4548#define SBI_RESPONSE_FAIL (0x1<<1)
4549#define SBI_RESPONSE_SUCCESS (0x0<<1)
4550#define SBI_BUSY (0x1<<0)
4551#define SBI_READY (0x0<<0)
4552
4553/* SBI offsets */
4554#define SBI_SSCDIVINTPHASE6 0x0600
4555#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
4556#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
4557#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
4558#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
4559#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
4560#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4561#define SBI_SSCCTL 0x020c
4562#define SBI_SSCCTL6 0x060C
4563#define SBI_SSCCTL_PATHALT (1<<3)
4564#define SBI_SSCCTL_DISABLE (1<<0)
4565#define SBI_SSCAUXDIV6 0x0610
4566#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4567#define SBI_DBUFF0 0x2a00
4568#define SBI_DBUFF0_ENABLE (1<<0)
4569
4570/* LPT PIXCLK_GATE */
4571#define PIXCLK_GATE 0xC6020
4572#define PIXCLK_GATE_UNGATE (1<<0)
4573#define PIXCLK_GATE_GATE (0<<0)
4574
4575/* SPLL */
4576#define SPLL_CTL 0x46020
4577#define SPLL_PLL_ENABLE (1<<31)
4578#define SPLL_PLL_SSC (1<<28)
4579#define SPLL_PLL_NON_SSC (2<<28)
4580#define SPLL_PLL_FREQ_810MHz (0<<26)
4581#define SPLL_PLL_FREQ_1350MHz (1<<26)
4582
4583/* WRPLL */
4584#define WRPLL_CTL1 0x46040
4585#define WRPLL_CTL2 0x46060
4586#define WRPLL_PLL_ENABLE (1<<31)
4587#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4588#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
4589#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4590/* WRPLL divider programming */
4591#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
4592#define WRPLL_DIVIDER_POST(x) ((x)<<8)
4593#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
4594
4595/* Port clock selection */
4596#define PORT_CLK_SEL_A 0x46100
4597#define PORT_CLK_SEL_B 0x46104
4598#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
4599#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
4600#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
4601#define PORT_CLK_SEL_LCPLL_810 (2<<29)
4602#define PORT_CLK_SEL_SPLL (3<<29)
4603#define PORT_CLK_SEL_WRPLL1 (4<<29)
4604#define PORT_CLK_SEL_WRPLL2 (5<<29)
4605#define PORT_CLK_SEL_NONE (7<<29)
4606
4607/* Transcoder clock selection */
4608#define TRANS_CLK_SEL_A 0x46140
4609#define TRANS_CLK_SEL_B 0x46144
4610#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
4611/* For each transcoder, we need to select the corresponding port clock */
4612#define TRANS_CLK_SEL_DISABLED (0x0<<29)
4613#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
4614
4615#define _TRANSA_MSA_MISC 0x60410
4616#define _TRANSB_MSA_MISC 0x61410
4617#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
4618 _TRANSB_MSA_MISC)
4619#define TRANS_MSA_SYNC_CLK (1<<0)
4620#define TRANS_MSA_6_BPC (0<<5)
4621#define TRANS_MSA_8_BPC (1<<5)
4622#define TRANS_MSA_10_BPC (2<<5)
4623#define TRANS_MSA_12_BPC (3<<5)
4624#define TRANS_MSA_16_BPC (4<<5)
4625
4626/* LCPLL Control */
4627#define LCPLL_CTL 0x130040
4628#define LCPLL_PLL_DISABLE (1<<31)
4629#define LCPLL_PLL_LOCK (1<<30)
4630#define LCPLL_CLK_FREQ_MASK (3<<26)
4631#define LCPLL_CLK_FREQ_450 (0<<26)
4632#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4633#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4634#define LCPLL_CD_SOURCE_FCLK (1<<21)
4635
4636/* Pipe WM_LINETIME - watermark line time */
4637#define PIPE_WM_LINETIME_A 0x45270
4638#define PIPE_WM_LINETIME_B 0x45274
4639#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
4640 PIPE_WM_LINETIME_B)
4641#define PIPE_WM_LINETIME_MASK (0x1ff)
4642#define PIPE_WM_LINETIME_TIME(x) ((x))
4643#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
4644#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
4645
4646/* SFUSE_STRAP */
4647#define SFUSE_STRAP 0xc2014
4648#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
4649#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4650#define SFUSE_STRAP_DDID_DETECTED (1<<0)
4651
4652#define WM_DBG 0x45280
4653#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
4654#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
4655#define WM_DBG_DISALLOW_SPRITE (1<<2)
4656
4657#endif /* _I915_REG_H_ */ 3486#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39d..9c7706a4c71 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,22 +24,18 @@
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */ 25 */
26 26
27#include <drm/drmP.h> 27#include "drmP.h"
28#include <drm/i915_drm.h> 28#include "drm.h"
29#include "i915_drm.h"
29#include "intel_drv.h" 30#include "intel_drv.h"
30#include "i915_reg.h"
31 31
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg; 35 u32 dpll_reg;
36 36
37 /* On IVB, 3rd pipe shares PLL with another one */
38 if (pipe > 1)
39 return false;
40
41 if (HAS_PCH_SPLIT(dev)) 37 if (HAS_PCH_SPLIT(dev))
42 dpll_reg = _PCH_DPLL(pipe); 38 dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
43 else 39 else
44 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 40 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
45 41
@@ -60,11 +56,11 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
60 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 56 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
61 57
62 if (pipe == PIPE_A) 58 if (pipe == PIPE_A)
63 array = dev_priv->regfile.save_palette_a; 59 array = dev_priv->save_palette_a;
64 else 60 else
65 array = dev_priv->regfile.save_palette_b; 61 array = dev_priv->save_palette_b;
66 62
67 for (i = 0; i < 256; i++) 63 for(i = 0; i < 256; i++)
68 array[i] = I915_READ(reg + (i << 2)); 64 array[i] = I915_READ(reg + (i << 2));
69} 65}
70 66
@@ -82,11 +78,11 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
82 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 78 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
83 79
84 if (pipe == PIPE_A) 80 if (pipe == PIPE_A)
85 array = dev_priv->regfile.save_palette_a; 81 array = dev_priv->save_palette_a;
86 else 82 else
87 array = dev_priv->regfile.save_palette_b; 83 array = dev_priv->save_palette_b;
88 84
89 for (i = 0; i < 256; i++) 85 for(i = 0; i < 256; i++)
90 I915_WRITE(reg + (i << 2), array[i]); 86 I915_WRITE(reg + (i << 2), array[i]);
91} 87}
92 88
@@ -131,11 +127,11 @@ static void i915_save_vga(struct drm_device *dev)
131 u16 cr_index, cr_data, st01; 127 u16 cr_index, cr_data, st01;
132 128
133 /* VGA color palette registers */ 129 /* VGA color palette registers */
134 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); 130 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
135 131
136 /* MSR bits */ 132 /* MSR bits */
137 dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); 133 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
138 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { 134 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
139 cr_index = VGA_CR_INDEX_CGA; 135 cr_index = VGA_CR_INDEX_CGA;
140 cr_data = VGA_CR_DATA_CGA; 136 cr_data = VGA_CR_DATA_CGA;
141 st01 = VGA_ST01_CGA; 137 st01 = VGA_ST01_CGA;
@@ -150,35 +146,35 @@ static void i915_save_vga(struct drm_device *dev)
150 i915_read_indexed(dev, cr_index, cr_data, 0x11) & 146 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
151 (~0x80)); 147 (~0x80));
152 for (i = 0; i <= 0x24; i++) 148 for (i = 0; i <= 0x24; i++)
153 dev_priv->regfile.saveCR[i] = 149 dev_priv->saveCR[i] =
154 i915_read_indexed(dev, cr_index, cr_data, i); 150 i915_read_indexed(dev, cr_index, cr_data, i);
155 /* Make sure we don't turn off CR group 0 writes */ 151 /* Make sure we don't turn off CR group 0 writes */
156 dev_priv->regfile.saveCR[0x11] &= ~0x80; 152 dev_priv->saveCR[0x11] &= ~0x80;
157 153
158 /* Attribute controller registers */ 154 /* Attribute controller registers */
159 I915_READ8(st01); 155 I915_READ8(st01);
160 dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); 156 dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
161 for (i = 0; i <= 0x14; i++) 157 for (i = 0; i <= 0x14; i++)
162 dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); 158 dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
163 I915_READ8(st01); 159 I915_READ8(st01);
164 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); 160 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
165 I915_READ8(st01); 161 I915_READ8(st01);
166 162
167 /* Graphics controller registers */ 163 /* Graphics controller registers */
168 for (i = 0; i < 9; i++) 164 for (i = 0; i < 9; i++)
169 dev_priv->regfile.saveGR[i] = 165 dev_priv->saveGR[i] =
170 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); 166 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
171 167
172 dev_priv->regfile.saveGR[0x10] = 168 dev_priv->saveGR[0x10] =
173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); 169 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
174 dev_priv->regfile.saveGR[0x11] = 170 dev_priv->saveGR[0x11] =
175 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); 171 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
176 dev_priv->regfile.saveGR[0x18] = 172 dev_priv->saveGR[0x18] =
177 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); 173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
178 174
179 /* Sequencer registers */ 175 /* Sequencer registers */
180 for (i = 0; i < 8; i++) 176 for (i = 0; i < 8; i++)
181 dev_priv->regfile.saveSR[i] = 177 dev_priv->saveSR[i] =
182 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); 178 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
183} 179}
184 180
@@ -189,8 +185,8 @@ static void i915_restore_vga(struct drm_device *dev)
189 u16 cr_index, cr_data, st01; 185 u16 cr_index, cr_data, st01;
190 186
191 /* MSR bits */ 187 /* MSR bits */
192 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); 188 I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
193 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { 189 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
194 cr_index = VGA_CR_INDEX_CGA; 190 cr_index = VGA_CR_INDEX_CGA;
195 cr_data = VGA_CR_DATA_CGA; 191 cr_data = VGA_CR_DATA_CGA;
196 st01 = VGA_ST01_CGA; 192 st01 = VGA_ST01_CGA;
@@ -203,36 +199,36 @@ static void i915_restore_vga(struct drm_device *dev)
203 /* Sequencer registers, don't write SR07 */ 199 /* Sequencer registers, don't write SR07 */
204 for (i = 0; i < 7; i++) 200 for (i = 0; i < 7; i++)
205 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, 201 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
206 dev_priv->regfile.saveSR[i]); 202 dev_priv->saveSR[i]);
207 203
208 /* CRT controller regs */ 204 /* CRT controller regs */
209 /* Enable CR group 0 writes */ 205 /* Enable CR group 0 writes */
210 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]); 206 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
211 for (i = 0; i <= 0x24; i++) 207 for (i = 0; i <= 0x24; i++)
212 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); 208 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
213 209
214 /* Graphics controller regs */ 210 /* Graphics controller regs */
215 for (i = 0; i < 9; i++) 211 for (i = 0; i < 9; i++)
216 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, 212 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
217 dev_priv->regfile.saveGR[i]); 213 dev_priv->saveGR[i]);
218 214
219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, 215 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
220 dev_priv->regfile.saveGR[0x10]); 216 dev_priv->saveGR[0x10]);
221 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, 217 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
222 dev_priv->regfile.saveGR[0x11]); 218 dev_priv->saveGR[0x11]);
223 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, 219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
224 dev_priv->regfile.saveGR[0x18]); 220 dev_priv->saveGR[0x18]);
225 221
226 /* Attribute controller registers */ 222 /* Attribute controller registers */
227 I915_READ8(st01); /* switch back to index mode */ 223 I915_READ8(st01); /* switch back to index mode */
228 for (i = 0; i <= 0x14; i++) 224 for (i = 0; i <= 0x14; i++)
229 i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); 225 i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
230 I915_READ8(st01); /* switch back to index mode */ 226 I915_READ8(st01); /* switch back to index mode */
231 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); 227 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
232 I915_READ8(st01); 228 I915_READ8(st01);
233 229
234 /* VGA color palette registers */ 230 /* VGA color palette registers */
235 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); 231 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
236} 232}
237 233
238static void i915_save_modeset_reg(struct drm_device *dev) 234static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,162 +240,156 @@ static void i915_save_modeset_reg(struct drm_device *dev)
244 return; 240 return;
245 241
246 /* Cursor state */ 242 /* Cursor state */
247 dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); 243 dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
248 dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); 244 dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
249 dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); 245 dev_priv->saveCURABASE = I915_READ(_CURABASE);
250 dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); 246 dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
251 dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); 247 dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
252 dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); 248 dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
253 if (IS_GEN2(dev)) 249 if (IS_GEN2(dev))
254 dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); 250 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
255 251
256 if (HAS_PCH_SPLIT(dev)) { 252 if (HAS_PCH_SPLIT(dev)) {
257 dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 253 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
258 dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 254 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
259 } 255 }
260 256
261 /* Pipe & plane A info */ 257 /* Pipe & plane A info */
262 dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); 258 dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
263 dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); 259 dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
264 if (HAS_PCH_SPLIT(dev)) { 260 if (HAS_PCH_SPLIT(dev)) {
265 dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); 261 dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
266 dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); 262 dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
267 dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); 263 dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
268 } else { 264 } else {
269 dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); 265 dev_priv->saveFPA0 = I915_READ(_FPA0);
270 dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); 266 dev_priv->saveFPA1 = I915_READ(_FPA1);
271 dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); 267 dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
272 } 268 }
273 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 269 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
274 dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); 270 dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
275 dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); 271 dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
276 dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); 272 dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
277 dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); 273 dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
278 dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); 274 dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
279 dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); 275 dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
280 dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); 276 dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
281 if (!HAS_PCH_SPLIT(dev)) 277 if (!HAS_PCH_SPLIT(dev))
282 dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); 278 dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
283 279
284 if (HAS_PCH_SPLIT(dev)) { 280 if (HAS_PCH_SPLIT(dev)) {
285 dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); 281 dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
286 dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); 282 dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
287 dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); 283 dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
288 dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); 284 dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
289 285
290 dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); 286 dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
291 dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); 287 dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
292 288
293 dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); 289 dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
294 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 290 dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
295 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 291 dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
296 292
297 dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); 293 dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
298 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 294 dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
299 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 295 dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
300 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 296 dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
301 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 297 dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
302 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 298 dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
303 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 299 dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
304 } 300 }
305 301
306 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); 302 dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
307 dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); 303 dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
308 dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); 304 dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
309 dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); 305 dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
310 dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); 306 dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
311 if (INTEL_INFO(dev)->gen >= 4) { 307 if (INTEL_INFO(dev)->gen >= 4) {
312 dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); 308 dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
313 dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 309 dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
314 } 310 }
315 i915_save_palette(dev, PIPE_A); 311 i915_save_palette(dev, PIPE_A);
316 dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); 312 dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
317 313
318 /* Pipe & plane B info */ 314 /* Pipe & plane B info */
319 dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); 315 dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
320 dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); 316 dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
321 if (HAS_PCH_SPLIT(dev)) { 317 if (HAS_PCH_SPLIT(dev)) {
322 dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); 318 dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
323 dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); 319 dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
324 dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); 320 dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
325 } else { 321 } else {
326 dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); 322 dev_priv->saveFPB0 = I915_READ(_FPB0);
327 dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); 323 dev_priv->saveFPB1 = I915_READ(_FPB1);
328 dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); 324 dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
329 } 325 }
330 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 326 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
331 dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); 327 dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
332 dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); 328 dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
333 dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); 329 dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
334 dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); 330 dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
335 dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); 331 dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
336 dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); 332 dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
337 dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); 333 dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
338 if (!HAS_PCH_SPLIT(dev)) 334 if (!HAS_PCH_SPLIT(dev))
339 dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); 335 dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
340 336
341 if (HAS_PCH_SPLIT(dev)) { 337 if (HAS_PCH_SPLIT(dev)) {
342 dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); 338 dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
343 dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); 339 dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
344 dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); 340 dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
345 dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); 341 dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
346 342
347 dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); 343 dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
348 dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); 344 dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
349 345
350 dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); 346 dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
351 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 347 dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
352 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 348 dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
353 349
354 dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); 350 dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
355 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 351 dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
356 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 352 dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
357 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 353 dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
358 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 354 dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
359 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 355 dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
360 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 356 dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
361 } 357 }
362 358
363 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); 359 dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
364 dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); 360 dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
365 dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); 361 dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
366 dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); 362 dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
367 dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); 363 dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
368 if (INTEL_INFO(dev)->gen >= 4) { 364 if (INTEL_INFO(dev)->gen >= 4) {
369 dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); 365 dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
370 dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); 366 dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
371 } 367 }
372 i915_save_palette(dev, PIPE_B); 368 i915_save_palette(dev, PIPE_B);
373 dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); 369 dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
374 370
375 /* Fences */ 371 /* Fences */
376 switch (INTEL_INFO(dev)->gen) { 372 switch (INTEL_INFO(dev)->gen) {
377 case 7: 373 case 7:
378 case 6: 374 case 6:
379 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
380 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
381 break; 377 break;
382 case 5: 378 case 5:
383 case 4: 379 case 4:
384 for (i = 0; i < 16; i++) 380 for (i = 0; i < 16; i++)
385 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 381 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
386 break; 382 break;
387 case 3: 383 case 3:
388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 384 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
389 for (i = 0; i < 8; i++) 385 for (i = 0; i < 8; i++)
390 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 386 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
391 case 2: 387 case 2:
392 for (i = 0; i < 8; i++) 388 for (i = 0; i < 8; i++)
393 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 389 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
394 break; 390 break;
395 } 391 }
396 392
397 /* CRT state */
398 if (HAS_PCH_SPLIT(dev))
399 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
400 else
401 dev_priv->regfile.saveADPA = I915_READ(ADPA);
402
403 return; 393 return;
404} 394}
405 395
@@ -418,20 +408,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
418 case 7: 408 case 7:
419 case 6: 409 case 6:
420 for (i = 0; i < 16; i++) 410 for (i = 0; i < 16; i++)
421 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 411 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
422 break; 412 break;
423 case 5: 413 case 5:
424 case 4: 414 case 4:
425 for (i = 0; i < 16; i++) 415 for (i = 0; i < 16; i++)
426 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 416 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
427 break; 417 break;
428 case 3: 418 case 3:
429 case 2: 419 case 2:
430 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 420 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
431 for (i = 0; i < 8; i++) 421 for (i = 0; i < 8; i++)
432 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); 422 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
433 for (i = 0; i < 8; i++) 423 for (i = 0; i < 8; i++)
434 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); 424 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
435 break; 425 break;
436 } 426 }
437 427
@@ -453,164 +443,158 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
453 } 443 }
454 444
455 if (HAS_PCH_SPLIT(dev)) { 445 if (HAS_PCH_SPLIT(dev)) {
456 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); 446 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
457 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); 447 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
458 } 448 }
459 449
460 /* Pipe & plane A info */ 450 /* Pipe & plane A info */
461 /* Prime the clock */ 451 /* Prime the clock */
462 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { 452 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
463 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & 453 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
464 ~DPLL_VCO_ENABLE); 454 ~DPLL_VCO_ENABLE);
465 POSTING_READ(dpll_a_reg); 455 POSTING_READ(dpll_a_reg);
466 udelay(150); 456 udelay(150);
467 } 457 }
468 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); 458 I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
469 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); 459 I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
470 /* Actually enable it */ 460 /* Actually enable it */
471 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); 461 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
472 POSTING_READ(dpll_a_reg); 462 POSTING_READ(dpll_a_reg);
473 udelay(150); 463 udelay(150);
474 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 464 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
475 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); 465 I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
476 POSTING_READ(_DPLL_A_MD); 466 POSTING_READ(_DPLL_A_MD);
477 } 467 }
478 udelay(150); 468 udelay(150);
479 469
480 /* Restore mode */ 470 /* Restore mode */
481 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); 471 I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
482 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); 472 I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
483 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); 473 I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
484 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); 474 I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
485 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); 475 I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
486 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); 476 I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
487 if (!HAS_PCH_SPLIT(dev)) 477 if (!HAS_PCH_SPLIT(dev))
488 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); 478 I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
489 479
490 if (HAS_PCH_SPLIT(dev)) { 480 if (HAS_PCH_SPLIT(dev)) {
491 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); 481 I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
492 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); 482 I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
493 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); 483 I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
494 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); 484 I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
495 485
496 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); 486 I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
497 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); 487 I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
498 488
499 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); 489 I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
500 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); 490 I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
501 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); 491 I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
502 492
503 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); 493 I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
504 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); 494 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
505 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); 495 I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
506 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); 496 I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
507 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); 497 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
508 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); 498 I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
509 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); 499 I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
510 } 500 }
511 501
512 /* Restore plane info */ 502 /* Restore plane info */
513 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); 503 I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
514 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); 504 I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
515 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); 505 I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
516 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); 506 I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
517 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); 507 I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
518 if (INTEL_INFO(dev)->gen >= 4) { 508 if (INTEL_INFO(dev)->gen >= 4) {
519 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); 509 I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
520 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); 510 I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
521 } 511 }
522 512
523 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); 513 I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
524 514
525 i915_restore_palette(dev, PIPE_A); 515 i915_restore_palette(dev, PIPE_A);
526 /* Enable the plane */ 516 /* Enable the plane */
527 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); 517 I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
528 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); 518 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
529 519
530 /* Pipe & plane B info */ 520 /* Pipe & plane B info */
531 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { 521 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
532 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & 522 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
533 ~DPLL_VCO_ENABLE); 523 ~DPLL_VCO_ENABLE);
534 POSTING_READ(dpll_b_reg); 524 POSTING_READ(dpll_b_reg);
535 udelay(150); 525 udelay(150);
536 } 526 }
537 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); 527 I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
538 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); 528 I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
539 /* Actually enable it */ 529 /* Actually enable it */
540 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); 530 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
541 POSTING_READ(dpll_b_reg); 531 POSTING_READ(dpll_b_reg);
542 udelay(150); 532 udelay(150);
543 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 533 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
544 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); 534 I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
545 POSTING_READ(_DPLL_B_MD); 535 POSTING_READ(_DPLL_B_MD);
546 } 536 }
547 udelay(150); 537 udelay(150);
548 538
549 /* Restore mode */ 539 /* Restore mode */
550 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); 540 I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
551 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); 541 I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
552 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); 542 I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
553 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); 543 I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
554 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); 544 I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
555 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); 545 I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
556 if (!HAS_PCH_SPLIT(dev)) 546 if (!HAS_PCH_SPLIT(dev))
557 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); 547 I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
558 548
559 if (HAS_PCH_SPLIT(dev)) { 549 if (HAS_PCH_SPLIT(dev)) {
560 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); 550 I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
561 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); 551 I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
562 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); 552 I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
563 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); 553 I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
564 554
565 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); 555 I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
566 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); 556 I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
567 557
568 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); 558 I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
569 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); 559 I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
570 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); 560 I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
571 561
572 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); 562 I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
573 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); 563 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
574 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); 564 I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
575 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); 565 I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
576 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); 566 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
577 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); 567 I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
578 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); 568 I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
579 } 569 }
580 570
581 /* Restore plane info */ 571 /* Restore plane info */
582 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); 572 I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
583 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); 573 I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
584 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); 574 I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
585 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); 575 I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
586 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); 576 I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
587 if (INTEL_INFO(dev)->gen >= 4) { 577 if (INTEL_INFO(dev)->gen >= 4) {
588 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); 578 I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
589 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); 579 I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
590 } 580 }
591 581
592 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); 582 I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
593 583
594 i915_restore_palette(dev, PIPE_B); 584 i915_restore_palette(dev, PIPE_B);
595 /* Enable the plane */ 585 /* Enable the plane */
596 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); 586 I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
597 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); 587 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
598 588
599 /* Cursor state */ 589 /* Cursor state */
600 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); 590 I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
601 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); 591 I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
602 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); 592 I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
603 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); 593 I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
604 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); 594 I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
605 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); 595 I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
606 if (IS_GEN2(dev)) 596 if (IS_GEN2(dev))
607 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); 597 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
608
609 /* CRT state */
610 if (HAS_PCH_SPLIT(dev))
611 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
612 else
613 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
614 598
615 return; 599 return;
616} 600}
@@ -620,84 +604,89 @@ static void i915_save_display(struct drm_device *dev)
620 struct drm_i915_private *dev_priv = dev->dev_private; 604 struct drm_i915_private *dev_priv = dev->dev_private;
621 605
622 /* Display arbitration control */ 606 /* Display arbitration control */
623 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 607 dev_priv->saveDSPARB = I915_READ(DSPARB);
624 608
625 /* This is only meaningful in non-KMS mode */ 609 /* This is only meaningful in non-KMS mode */
626 /* Don't regfile.save them in KMS mode */ 610 /* Don't save them in KMS mode */
627 i915_save_modeset_reg(dev); 611 i915_save_modeset_reg(dev);
628 612
613 /* CRT state */
614 if (HAS_PCH_SPLIT(dev)) {
615 dev_priv->saveADPA = I915_READ(PCH_ADPA);
616 } else {
617 dev_priv->saveADPA = I915_READ(ADPA);
618 }
619
629 /* LVDS state */ 620 /* LVDS state */
630 if (HAS_PCH_SPLIT(dev)) { 621 if (HAS_PCH_SPLIT(dev)) {
631 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 622 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
632 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 623 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
633 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 624 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
634 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); 625 dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
635 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 626 dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
636 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 627 dev_priv->saveLVDS = I915_READ(PCH_LVDS);
637 } else { 628 } else {
638 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 629 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
639 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 630 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
640 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 631 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
641 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 632 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
642 if (INTEL_INFO(dev)->gen >= 4) 633 if (INTEL_INFO(dev)->gen >= 4)
643 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 634 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
644 if (IS_MOBILE(dev) && !IS_I830(dev)) 635 if (IS_MOBILE(dev) && !IS_I830(dev))
645 dev_priv->regfile.saveLVDS = I915_READ(LVDS); 636 dev_priv->saveLVDS = I915_READ(LVDS);
646 } 637 }
647 638
648 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 639 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
649 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 640 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
650 641
651 if (HAS_PCH_SPLIT(dev)) { 642 if (HAS_PCH_SPLIT(dev)) {
652 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 643 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
653 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 644 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
654 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 645 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
655 } else { 646 } else {
656 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 647 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
657 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 648 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
658 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 649 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
659 } 650 }
660 651
661 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 652 /* Display Port state */
662 /* Display Port state */ 653 if (SUPPORTS_INTEGRATED_DP(dev)) {
663 if (SUPPORTS_INTEGRATED_DP(dev)) { 654 dev_priv->saveDP_B = I915_READ(DP_B);
664 dev_priv->regfile.saveDP_B = I915_READ(DP_B); 655 dev_priv->saveDP_C = I915_READ(DP_C);
665 dev_priv->regfile.saveDP_C = I915_READ(DP_C); 656 dev_priv->saveDP_D = I915_READ(DP_D);
666 dev_priv->regfile.saveDP_D = I915_READ(DP_D); 657 dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
667 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 658 dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
668 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 659 dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
669 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 660 dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
670 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 661 dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
671 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 662 dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
672 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 663 dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
673 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 664 dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
674 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 665 }
675 } 666 /* FIXME: save TV & SDVO state */
676 /* FIXME: regfile.save TV & SDVO state */ 667
677 } 668 /* Only save FBC state on the platform that supports FBC */
678
679 /* Only regfile.save FBC state on the platform that supports FBC */
680 if (I915_HAS_FBC(dev)) { 669 if (I915_HAS_FBC(dev)) {
681 if (HAS_PCH_SPLIT(dev)) { 670 if (HAS_PCH_SPLIT(dev)) {
682 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 671 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
683 } else if (IS_GM45(dev)) { 672 } else if (IS_GM45(dev)) {
684 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 673 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
685 } else { 674 } else {
686 dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 675 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
687 dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 676 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
688 dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 677 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
689 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); 678 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
690 } 679 }
691 } 680 }
692 681
693 /* VGA state */ 682 /* VGA state */
694 dev_priv->regfile.saveVGA0 = I915_READ(VGA0); 683 dev_priv->saveVGA0 = I915_READ(VGA0);
695 dev_priv->regfile.saveVGA1 = I915_READ(VGA1); 684 dev_priv->saveVGA1 = I915_READ(VGA1);
696 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); 685 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
697 if (HAS_PCH_SPLIT(dev)) 686 if (HAS_PCH_SPLIT(dev))
698 dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); 687 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
699 else 688 else
700 dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); 689 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
701 690
702 i915_save_vga(dev); 691 i915_save_vga(dev);
703} 692}
@@ -707,95 +696,94 @@ static void i915_restore_display(struct drm_device *dev)
707 struct drm_i915_private *dev_priv = dev->dev_private; 696 struct drm_i915_private *dev_priv = dev->dev_private;
708 697
709 /* Display arbitration */ 698 /* Display arbitration */
710 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 699 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
711 700
712 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 701 /* Display port ratios (must be done before clock is set) */
713 /* Display port ratios (must be done before clock is set) */ 702 if (SUPPORTS_INTEGRATED_DP(dev)) {
714 if (SUPPORTS_INTEGRATED_DP(dev)) { 703 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
715 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); 704 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
716 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); 705 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
717 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); 706 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
718 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); 707 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
719 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); 708 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
720 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); 709 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
721 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); 710 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
722 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
723 }
724 } 711 }
725 712
726 /* This is only meaningful in non-KMS mode */ 713 /* This is only meaningful in non-KMS mode */
727 /* Don't restore them in KMS mode */ 714 /* Don't restore them in KMS mode */
728 i915_restore_modeset_reg(dev); 715 i915_restore_modeset_reg(dev);
729 716
717 /* CRT state */
718 if (HAS_PCH_SPLIT(dev))
719 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
720 else
721 I915_WRITE(ADPA, dev_priv->saveADPA);
722
730 /* LVDS state */ 723 /* LVDS state */
731 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 724 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
732 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); 725 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
733 726
734 if (HAS_PCH_SPLIT(dev)) { 727 if (HAS_PCH_SPLIT(dev)) {
735 I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS); 728 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
736 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 729 } else if (IS_MOBILE(dev) && !IS_I830(dev))
737 I915_WRITE(LVDS, dev_priv->regfile.saveLVDS); 730 I915_WRITE(LVDS, dev_priv->saveLVDS);
738 731
739 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 732 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
740 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); 733 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
741 734
742 if (HAS_PCH_SPLIT(dev)) { 735 if (HAS_PCH_SPLIT(dev)) {
743 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); 736 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
744 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); 737 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
745 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; 738 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
746 * otherwise we get blank eDP screen after S3 on some machines 739 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
747 */ 740 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
748 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); 741 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
749 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); 742 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
750 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 743 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
751 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
752 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
753 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
754 I915_WRITE(RSTDBYCTL, 744 I915_WRITE(RSTDBYCTL,
755 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); 745 dev_priv->saveMCHBAR_RENDER_STANDBY);
756 } else { 746 } else {
757 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); 747 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
758 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); 748 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
759 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); 749 I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
760 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 750 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
761 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 751 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
762 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 752 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
763 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 753 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
764 }
765
766 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
767 /* Display Port state */
768 if (SUPPORTS_INTEGRATED_DP(dev)) {
769 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
770 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
771 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
772 }
773 /* FIXME: restore TV & SDVO state */
774 } 754 }
775 755
756 /* Display Port state */
757 if (SUPPORTS_INTEGRATED_DP(dev)) {
758 I915_WRITE(DP_B, dev_priv->saveDP_B);
759 I915_WRITE(DP_C, dev_priv->saveDP_C);
760 I915_WRITE(DP_D, dev_priv->saveDP_D);
761 }
762 /* FIXME: restore TV & SDVO state */
763
776 /* only restore FBC info on the platform that supports FBC*/ 764 /* only restore FBC info on the platform that supports FBC*/
777 intel_disable_fbc(dev); 765 intel_disable_fbc(dev);
778 if (I915_HAS_FBC(dev)) { 766 if (I915_HAS_FBC(dev)) {
779 if (HAS_PCH_SPLIT(dev)) { 767 if (HAS_PCH_SPLIT(dev)) {
780 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 768 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
781 } else if (IS_GM45(dev)) { 769 } else if (IS_GM45(dev)) {
782 I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 770 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
783 } else { 771 } else {
784 I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); 772 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
785 I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); 773 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
786 I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); 774 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
787 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 775 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
788 } 776 }
789 } 777 }
790 /* VGA state */ 778 /* VGA state */
791 if (HAS_PCH_SPLIT(dev)) 779 if (HAS_PCH_SPLIT(dev))
792 I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); 780 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
793 else 781 else
794 I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); 782 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
795 783
796 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); 784 I915_WRITE(VGA0, dev_priv->saveVGA0);
797 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); 785 I915_WRITE(VGA1, dev_priv->saveVGA1);
798 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); 786 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
799 POSTING_READ(VGA_PD); 787 POSTING_READ(VGA_PD);
800 udelay(150); 788 udelay(150);
801 789
@@ -807,45 +795,49 @@ int i915_save_state(struct drm_device *dev)
807 struct drm_i915_private *dev_priv = dev->dev_private; 795 struct drm_i915_private *dev_priv = dev->dev_private;
808 int i; 796 int i;
809 797
810 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); 798 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
811 799
812 mutex_lock(&dev->struct_mutex); 800 mutex_lock(&dev->struct_mutex);
813 801
802 /* Hardware status page */
803 dev_priv->saveHWS = I915_READ(HWS_PGA);
804
814 i915_save_display(dev); 805 i915_save_display(dev);
815 806
816 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 807 /* Interrupt state */
817 /* Interrupt state */ 808 if (HAS_PCH_SPLIT(dev)) {
818 if (HAS_PCH_SPLIT(dev)) { 809 dev_priv->saveDEIER = I915_READ(DEIER);
819 dev_priv->regfile.saveDEIER = I915_READ(DEIER); 810 dev_priv->saveDEIMR = I915_READ(DEIMR);
820 dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); 811 dev_priv->saveGTIER = I915_READ(GTIER);
821 dev_priv->regfile.saveGTIER = I915_READ(GTIER); 812 dev_priv->saveGTIMR = I915_READ(GTIMR);
822 dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); 813 dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
823 dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); 814 dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
824 dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); 815 dev_priv->saveMCHBAR_RENDER_STANDBY =
825 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = 816 I915_READ(RSTDBYCTL);
826 I915_READ(RSTDBYCTL); 817 dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
827 dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); 818 } else {
828 } else { 819 dev_priv->saveIER = I915_READ(IER);
829 dev_priv->regfile.saveIER = I915_READ(IER); 820 dev_priv->saveIMR = I915_READ(IMR);
830 dev_priv->regfile.saveIMR = I915_READ(IMR);
831 }
832 } 821 }
833 822
834 intel_disable_gt_powersave(dev); 823 if (IS_IRONLAKE_M(dev))
824 ironlake_disable_drps(dev);
825 if (IS_GEN6(dev))
826 gen6_disable_rps(dev);
835 827
836 /* Cache mode state */ 828 /* Cache mode state */
837 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 829 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
838 830
839 /* Memory Arbitration state */ 831 /* Memory Arbitration state */
840 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 832 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
841 833
842 /* Scratch space */ 834 /* Scratch space */
843 for (i = 0; i < 16; i++) { 835 for (i = 0; i < 16; i++) {
844 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 836 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
845 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 837 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
846 } 838 }
847 for (i = 0; i < 3; i++) 839 for (i = 0; i < 3; i++)
848 dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 840 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
849 841
850 mutex_unlock(&dev->struct_mutex); 842 mutex_unlock(&dev->struct_mutex);
851 843
@@ -857,40 +849,57 @@ int i915_restore_state(struct drm_device *dev)
857 struct drm_i915_private *dev_priv = dev->dev_private; 849 struct drm_i915_private *dev_priv = dev->dev_private;
858 int i; 850 int i;
859 851
860 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); 852 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
861 853
862 mutex_lock(&dev->struct_mutex); 854 mutex_lock(&dev->struct_mutex);
863 855
856 /* Hardware status page */
857 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
858
864 i915_restore_display(dev); 859 i915_restore_display(dev);
865 860
866 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 861 /* Interrupt state */
867 /* Interrupt state */ 862 if (HAS_PCH_SPLIT(dev)) {
868 if (HAS_PCH_SPLIT(dev)) { 863 I915_WRITE(DEIER, dev_priv->saveDEIER);
869 I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); 864 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
870 I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); 865 I915_WRITE(GTIER, dev_priv->saveGTIER);
871 I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); 866 I915_WRITE(GTIMR, dev_priv->saveGTIMR);
872 I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); 867 I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
873 I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); 868 I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
874 I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); 869 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
875 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); 870 } else {
876 } else { 871 I915_WRITE(IER, dev_priv->saveIER);
877 I915_WRITE(IER, dev_priv->regfile.saveIER); 872 I915_WRITE(IMR, dev_priv->saveIMR);
878 I915_WRITE(IMR, dev_priv->regfile.saveIMR);
879 }
880 } 873 }
874 mutex_unlock(&dev->struct_mutex);
875
876 if (drm_core_check_feature(dev, DRIVER_MODESET))
877 intel_init_clock_gating(dev);
878
879 if (IS_IRONLAKE_M(dev)) {
880 ironlake_enable_drps(dev);
881 intel_init_emon(dev);
882 }
883
884 if (IS_GEN6(dev)) {
885 gen6_enable_rps(dev_priv);
886 gen6_update_ring_freq(dev_priv);
887 }
888
889 mutex_lock(&dev->struct_mutex);
881 890
882 /* Cache mode state */ 891 /* Cache mode state */
883 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); 892 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
884 893
885 /* Memory arbitration state */ 894 /* Memory arbitration state */
886 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); 895 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
887 896
888 for (i = 0; i < 16; i++) { 897 for (i = 0; i < 16; i++) {
889 I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); 898 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
890 I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); 899 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
891 } 900 }
892 for (i = 0; i < 3; i++) 901 for (i = 0; i < 3; i++)
893 I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); 902 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
894 903
895 mutex_unlock(&dev->struct_mutex); 904 mutex_unlock(&dev->struct_mutex);
896 905
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
deleted file mode 100644
index 9462081b1e6..00000000000
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ /dev/null
@@ -1,403 +0,0 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 *
26 */
27
28#include <linux/device.h>
29#include <linux/module.h>
30#include <linux/stat.h>
31#include <linux/sysfs.h>
32#include "intel_drv.h"
33#include "i915_drv.h"
34
35#ifdef CONFIG_PM
36static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{
38 struct drm_i915_private *dev_priv = dev->dev_private;
39 u64 raw_time; /* 32b value may overflow during fixed point math */
40
41 if (!intel_enable_rc6(dev))
42 return 0;
43
44 raw_time = I915_READ(reg) * 128ULL;
45 return DIV_ROUND_UP_ULL(raw_time, 100000);
46}
47
48static ssize_t
49show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
50{
51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
52 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
53}
54
55static ssize_t
56show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
57{
58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
60 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
61}
62
63static ssize_t
64show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{
66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
68 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
69}
70
71static ssize_t
72show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{
74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
76 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
77}
78
79static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
80static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
81static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
82static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
83
84static struct attribute *rc6_attrs[] = {
85 &dev_attr_rc6_enable.attr,
86 &dev_attr_rc6_residency_ms.attr,
87 &dev_attr_rc6p_residency_ms.attr,
88 &dev_attr_rc6pp_residency_ms.attr,
89 NULL
90};
91
92static struct attribute_group rc6_attr_group = {
93 .name = power_group_name,
94 .attrs = rc6_attrs
95};
96#endif
97
98static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{
100 if (!HAS_L3_GPU_CACHE(dev))
101 return -EPERM;
102
103 if (offset % 4 != 0)
104 return -EINVAL;
105
106 if (offset >= GEN7_L3LOG_SIZE)
107 return -ENXIO;
108
109 return 0;
110}
111
112static ssize_t
113i915_l3_read(struct file *filp, struct kobject *kobj,
114 struct bin_attribute *attr, char *buf,
115 loff_t offset, size_t count)
116{
117 struct device *dev = container_of(kobj, struct device, kobj);
118 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
119 struct drm_device *drm_dev = dminor->dev;
120 struct drm_i915_private *dev_priv = drm_dev->dev_private;
121 uint32_t misccpctl;
122 int i, ret;
123
124 ret = l3_access_valid(drm_dev, offset);
125 if (ret)
126 return ret;
127
128 ret = i915_mutex_lock_interruptible(drm_dev);
129 if (ret)
130 return ret;
131
132 misccpctl = I915_READ(GEN7_MISCCPCTL);
133 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
134
135 for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
136 *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
137
138 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
139
140 mutex_unlock(&drm_dev->struct_mutex);
141
142 return i - offset;
143}
144
145static ssize_t
146i915_l3_write(struct file *filp, struct kobject *kobj,
147 struct bin_attribute *attr, char *buf,
148 loff_t offset, size_t count)
149{
150 struct device *dev = container_of(kobj, struct device, kobj);
151 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
152 struct drm_device *drm_dev = dminor->dev;
153 struct drm_i915_private *dev_priv = drm_dev->dev_private;
154 u32 *temp = NULL; /* Just here to make handling failures easy */
155 int ret;
156
157 ret = l3_access_valid(drm_dev, offset);
158 if (ret)
159 return ret;
160
161 ret = i915_mutex_lock_interruptible(drm_dev);
162 if (ret)
163 return ret;
164
165 if (!dev_priv->l3_parity.remap_info) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex);
169 return -ENOMEM;
170 }
171 }
172
173 ret = i915_gpu_idle(drm_dev);
174 if (ret) {
175 kfree(temp);
176 mutex_unlock(&drm_dev->struct_mutex);
177 return ret;
178 }
179
180 /* TODO: Ideally we really want a GPU reset here to make sure errors
181 * aren't propagated. Since I cannot find a stable way to reset the GPU
182 * at this point it is left as a TODO.
183 */
184 if (temp)
185 dev_priv->l3_parity.remap_info = temp;
186
187 memcpy(dev_priv->l3_parity.remap_info + (offset/4),
188 buf + (offset/4),
189 count);
190
191 i915_gem_l3_remap(drm_dev);
192
193 mutex_unlock(&drm_dev->struct_mutex);
194
195 return count;
196}
197
/* Binary sysfs attribute exposing the GEN7 L3 parity remap table as
 * "l3_parity" (root read/write). Reads pull the current remap log from
 * the hardware; writes install a new table and re-program the GPU. */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL
};
205
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
207 struct device_attribute *attr, char *buf)
208{
209 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
210 struct drm_device *dev = minor->dev;
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret;
213
214 mutex_lock(&dev_priv->rps.hw_lock);
215 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
216 mutex_unlock(&dev_priv->rps.hw_lock);
217
218 return snprintf(buf, PAGE_SIZE, "%d", ret);
219}
220
221static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
222{
223 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
224 struct drm_device *dev = minor->dev;
225 struct drm_i915_private *dev_priv = dev->dev_private;
226 int ret;
227
228 mutex_lock(&dev_priv->rps.hw_lock);
229 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
230 mutex_unlock(&dev_priv->rps.hw_lock);
231
232 return snprintf(buf, PAGE_SIZE, "%d", ret);
233}
234
235static ssize_t gt_max_freq_mhz_store(struct device *kdev,
236 struct device_attribute *attr,
237 const char *buf, size_t count)
238{
239 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
240 struct drm_device *dev = minor->dev;
241 struct drm_i915_private *dev_priv = dev->dev_private;
242 u32 val, rp_state_cap, hw_max, hw_min;
243 ssize_t ret;
244
245 ret = kstrtou32(buf, 0, &val);
246 if (ret)
247 return ret;
248
249 val /= GT_FREQUENCY_MULTIPLIER;
250
251 mutex_lock(&dev_priv->rps.hw_lock);
252
253 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
254 hw_max = (rp_state_cap & 0xff);
255 hw_min = ((rp_state_cap & 0xff0000) >> 16);
256
257 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
258 mutex_unlock(&dev_priv->rps.hw_lock);
259 return -EINVAL;
260 }
261
262 if (dev_priv->rps.cur_delay > val)
263 gen6_set_rps(dev_priv->dev, val);
264
265 dev_priv->rps.max_delay = val;
266
267 mutex_unlock(&dev_priv->rps.hw_lock);
268
269 return count;
270}
271
272static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
273{
274 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
275 struct drm_device *dev = minor->dev;
276 struct drm_i915_private *dev_priv = dev->dev_private;
277 int ret;
278
279 mutex_lock(&dev_priv->rps.hw_lock);
280 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
281 mutex_unlock(&dev_priv->rps.hw_lock);
282
283 return snprintf(buf, PAGE_SIZE, "%d", ret);
284}
285
286static ssize_t gt_min_freq_mhz_store(struct device *kdev,
287 struct device_attribute *attr,
288 const char *buf, size_t count)
289{
290 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
291 struct drm_device *dev = minor->dev;
292 struct drm_i915_private *dev_priv = dev->dev_private;
293 u32 val, rp_state_cap, hw_max, hw_min;
294 ssize_t ret;
295
296 ret = kstrtou32(buf, 0, &val);
297 if (ret)
298 return ret;
299
300 val /= GT_FREQUENCY_MULTIPLIER;
301
302 mutex_lock(&dev_priv->rps.hw_lock);
303
304 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
305 hw_max = (rp_state_cap & 0xff);
306 hw_min = ((rp_state_cap & 0xff0000) >> 16);
307
308 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
309 mutex_unlock(&dev_priv->rps.hw_lock);
310 return -EINVAL;
311 }
312
313 if (dev_priv->rps.cur_delay < val)
314 gen6_set_rps(dev_priv->dev, val);
315
316 dev_priv->rps.min_delay = val;
317
318 mutex_unlock(&dev_priv->rps.hw_lock);
319
320 return count;
321
322}
323
/* Per-device GT frequency controls: current frequency is read-only;
 * the max/min caps are additionally root-writable. */
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);


/* Forward declaration: a single show routine services all three RP
 * state attributes below, dispatching on the attribute pointer. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
333
334/* For now we have a static number of RP states */
335static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
336{
337 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
338 struct drm_device *dev = minor->dev;
339 struct drm_i915_private *dev_priv = dev->dev_private;
340 u32 val, rp_state_cap;
341 ssize_t ret;
342
343 ret = mutex_lock_interruptible(&dev->struct_mutex);
344 if (ret)
345 return ret;
346 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
347 mutex_unlock(&dev->struct_mutex);
348
349 if (attr == &dev_attr_gt_RP0_freq_mhz) {
350 val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
351 } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
352 val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
353 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
354 val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
355 } else {
356 BUG();
357 }
358 return snprintf(buf, PAGE_SIZE, "%d", val);
359}
360
/* Frequency attributes registered on gen6+ devices by i915_setup_sysfs()
 * (NULL-terminated, as sysfs_create_files() requires). */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};
370
/*
 * i915_setup_sysfs - register i915's sysfs interfaces on the DRM device.
 *
 * Registers, as applicable to the hardware: the RC6 residency group
 * (gen6+, CONFIG_PM only), the l3_parity binary attribute, and the GT
 * frequency attributes (gen6+). Each registration failure is logged
 * with DRM_ERROR but deliberately does not abort the remaining setup —
 * the driver keeps working without the affected sysfs files.
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	/* L3 parity remapping; gated on the GPU having an L3 cache. */
	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
		if (ret)
			DRM_ERROR("gen6 sysfs setup failed\n");
	}
}
395
/*
 * i915_teardown_sysfs - remove the sysfs files added by i915_setup_sysfs().
 *
 * Removal is unconditional (no gen/feature checks) and proceeds in the
 * reverse order of registration. NOTE(review): this assumes removing a
 * file that was never created is tolerated by the sysfs helpers —
 * matches the unconditional style used here, but worth confirming.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a681771..d623fefbfac 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,41 +214,43 @@ TRACE_EVENT(i915_gem_evict,
214); 214);
215 215
216TRACE_EVENT(i915_gem_evict_everything, 216TRACE_EVENT(i915_gem_evict_everything,
217 TP_PROTO(struct drm_device *dev), 217 TP_PROTO(struct drm_device *dev, bool purgeable),
218 TP_ARGS(dev), 218 TP_ARGS(dev, purgeable),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field(u32, dev) 221 __field(u32, dev)
222 __field(bool, purgeable)
222 ), 223 ),
223 224
224 TP_fast_assign( 225 TP_fast_assign(
225 __entry->dev = dev->primary->index; 226 __entry->dev = dev->primary->index;
227 __entry->purgeable = purgeable;
226 ), 228 ),
227 229
228 TP_printk("dev=%d", __entry->dev) 230 TP_printk("dev=%d%s",
231 __entry->dev,
232 __entry->purgeable ? ", purgeable only" : "")
229); 233);
230 234
231TRACE_EVENT(i915_gem_ring_dispatch, 235TRACE_EVENT(i915_gem_ring_dispatch,
232 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 236 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
233 TP_ARGS(ring, seqno, flags), 237 TP_ARGS(ring, seqno),
234 238
235 TP_STRUCT__entry( 239 TP_STRUCT__entry(
236 __field(u32, dev) 240 __field(u32, dev)
237 __field(u32, ring) 241 __field(u32, ring)
238 __field(u32, seqno) 242 __field(u32, seqno)
239 __field(u32, flags)
240 ), 243 ),
241 244
242 TP_fast_assign( 245 TP_fast_assign(
243 __entry->dev = ring->dev->primary->index; 246 __entry->dev = ring->dev->primary->index;
244 __entry->ring = ring->id; 247 __entry->ring = ring->id;
245 __entry->seqno = seqno; 248 __entry->seqno = seqno;
246 __entry->flags = flags;
247 i915_trace_irq_get(ring, seqno); 249 i915_trace_irq_get(ring, seqno);
248 ), 250 ),
249 251
250 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", 252 TP_printk("dev=%u, ring=%u, seqno=%u",
251 __entry->dev, __entry->ring, __entry->seqno, __entry->flags) 253 __entry->dev, __entry->ring, __entry->seqno)
252); 254);
253 255
254TRACE_EVENT(i915_gem_ring_flush, 256TRACE_EVENT(i915_gem_ring_flush,
@@ -309,33 +311,9 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
309 TP_ARGS(ring, seqno) 311 TP_ARGS(ring, seqno)
310); 312);
311 313
312TRACE_EVENT(i915_gem_request_wait_begin, 314DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
313 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 315 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
314 TP_ARGS(ring, seqno), 316 TP_ARGS(ring, seqno)
315
316 TP_STRUCT__entry(
317 __field(u32, dev)
318 __field(u32, ring)
319 __field(u32, seqno)
320 __field(bool, blocking)
321 ),
322
323 /* NB: the blocking information is racy since mutex_is_locked
324 * doesn't check that the current thread holds the lock. The only
325 * other option would be to pass the boolean information of whether
326 * or not the class was blocking down through the stack which is
327 * less desirable.
328 */
329 TP_fast_assign(
330 __entry->dev = ring->dev->primary->index;
331 __entry->ring = ring->id;
332 __entry->seqno = seqno;
333 __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
334 ),
335
336 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
337 __entry->dev, __entry->ring, __entry->seqno,
338 __entry->blocking ? "yes (NB)" : "no")
339); 317);
340 318
341DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, 319DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
@@ -407,44 +385,29 @@ TRACE_EVENT(i915_flip_complete,
407); 385);
408 386
409TRACE_EVENT(i915_reg_rw, 387TRACE_EVENT(i915_reg_rw,
410 TP_PROTO(bool write, u32 reg, u64 val, int len), 388 TP_PROTO(bool write, u32 reg, u64 val, int len),
411 389
412 TP_ARGS(write, reg, val, len), 390 TP_ARGS(write, reg, val, len),
413 391
414 TP_STRUCT__entry( 392 TP_STRUCT__entry(
415 __field(u64, val) 393 __field(u64, val)
416 __field(u32, reg) 394 __field(u32, reg)
417 __field(u16, write) 395 __field(u16, write)
418 __field(u16, len) 396 __field(u16, len)
419 ), 397 ),
420 398
421 TP_fast_assign( 399 TP_fast_assign(
422 __entry->val = (u64)val; 400 __entry->val = (u64)val;
423 __entry->reg = reg; 401 __entry->reg = reg;
424 __entry->write = write; 402 __entry->write = write;
425 __entry->len = len; 403 __entry->len = len;
426 ), 404 ),
427 405
428 TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", 406 TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
429 __entry->write ? "write" : "read", 407 __entry->write ? "write" : "read",
430 __entry->reg, __entry->len, 408 __entry->reg, __entry->len,
431 (u32)(__entry->val & 0xffffffff), 409 (u32)(__entry->val & 0xffffffff),
432 (u32)(__entry->val >> 32)) 410 (u32)(__entry->val >> 32))
433);
434
435TRACE_EVENT(intel_gpu_freq_change,
436 TP_PROTO(u32 freq),
437 TP_ARGS(freq),
438
439 TP_STRUCT__entry(
440 __field(u32, freq)
441 ),
442
443 TP_fast_assign(
444 __entry->freq = freq;
445 ),
446
447 TP_printk("new_freq=%u", __entry->freq)
448); 411);
449 412
450#endif /* _I915_TRACE_H_ */ 413#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index f1df2bd4ecf..ead876eb6ea 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,7 +7,5 @@
7 7
8#include "i915_drv.h" 8#include "i915_drv.h"
9 9
10#ifndef __CHECKER__
11#define CREATE_TRACE_POINTS 10#define CREATE_TRACE_POINTS
12#include "i915_trace.h" 11#include "i915_trace.h"
13#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bcbbaea2a78..2cb8e0b9f1e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -8,8 +8,7 @@
8#include <linux/vga_switcheroo.h> 8#include <linux/vga_switcheroo.h>
9#include <acpi/acpi_drivers.h> 9#include <acpi/acpi_drivers.h>
10 10
11#include <drm/drmP.h> 11#include "drmP.h"
12#include "i915_drv.h"
13 12
14#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ 13#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
15 14
@@ -65,7 +64,7 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
65 64
66 case ACPI_TYPE_BUFFER: 65 case ACPI_TYPE_BUFFER:
67 if (obj->buffer.length == 4) { 66 if (obj->buffer.length == 4) {
68 result = (obj->buffer.pointer[0] | 67 result =(obj->buffer.pointer[0] |
69 (obj->buffer.pointer[1] << 8) | 68 (obj->buffer.pointer[1] << 8) |
70 (obj->buffer.pointer[2] << 16) | 69 (obj->buffer.pointer[2] << 16) |
71 (obj->buffer.pointer[3] << 24)); 70 (obj->buffer.pointer[3] << 24));
@@ -183,6 +182,8 @@ static void intel_dsm_platform_mux_info(void)
183 DRM_DEBUG_DRIVER(" hpd mux info: %s\n", 182 DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
184 intel_dsm_mux_type(info->buffer.pointer[3])); 183 intel_dsm_mux_type(info->buffer.pointer[3]));
185 } 184 }
185 } else {
186 DRM_ERROR("MUX INFO call failed\n");
186 } 187 }
187 188
188out: 189out:
@@ -207,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
207 208
208 ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); 209 ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
209 if (ret < 0) { 210 if (ret < 0) {
210 DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); 211 DRM_ERROR("failed to get supported _DSM functions\n");
211 return false; 212 return false;
212 } 213 }
213 214
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1f581..61abef8a811 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright © 2006 Intel Corporation 2 * Copyright © 2006 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -24,10 +24,10 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 * 25 *
26 */ 26 */
27#include <linux/dmi.h>
28#include <drm/drm_dp_helper.h> 27#include <drm/drm_dp_helper.h>
29#include <drm/drmP.h> 28#include "drmP.h"
30#include <drm/i915_drm.h> 29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "intel_bios.h" 32#include "intel_bios.h"
33 33
@@ -173,28 +173,6 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
173 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); 173 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
174} 174}
175 175
176/* get lvds_fp_timing entry
177 * this function may return NULL if the corresponding entry is invalid
178 */
179static const struct lvds_fp_timing *
180get_lvds_fp_timing(const struct bdb_header *bdb,
181 const struct bdb_lvds_lfp_data *data,
182 const struct bdb_lvds_lfp_data_ptrs *ptrs,
183 int index)
184{
185 size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
186 u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
187 size_t ofs;
188
189 if (index >= ARRAY_SIZE(ptrs->ptr))
190 return NULL;
191 ofs = ptrs->ptr[index].fp_timing_offset;
192 if (ofs < data_ofs ||
193 ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
194 return NULL;
195 return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
196}
197
198/* Try to find integrated panel data */ 176/* Try to find integrated panel data */
199static void 177static void
200parse_lfp_panel_data(struct drm_i915_private *dev_priv, 178parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -204,7 +182,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
204 const struct bdb_lvds_lfp_data *lvds_lfp_data; 182 const struct bdb_lvds_lfp_data *lvds_lfp_data;
205 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 183 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
206 const struct lvds_dvo_timing *panel_dvo_timing; 184 const struct lvds_dvo_timing *panel_dvo_timing;
207 const struct lvds_fp_timing *fp_timing;
208 struct drm_display_mode *panel_fixed_mode; 185 struct drm_display_mode *panel_fixed_mode;
209 int i, downclock; 186 int i, downclock;
210 187
@@ -266,19 +243,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
266 "Normal Clock %dKHz, downclock %dKHz\n", 243 "Normal Clock %dKHz, downclock %dKHz\n",
267 panel_fixed_mode->clock, 10*downclock); 244 panel_fixed_mode->clock, 10*downclock);
268 } 245 }
269
270 fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
271 lvds_lfp_data_ptrs,
272 lvds_options->panel_type);
273 if (fp_timing) {
274 /* check the resolution, just to be sure */
275 if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
276 fp_timing->y_res == panel_fixed_mode->vdisplay) {
277 dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
278 DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
279 dev_priv->bios_lvds_val);
280 }
281 }
282} 246}
283 247
284/* Try to find sdvo panel data */ 248/* Try to find sdvo panel data */
@@ -291,11 +255,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
291 int index; 255 int index;
292 256
293 index = i915_vbt_sdvo_panel_type; 257 index = i915_vbt_sdvo_panel_type;
294 if (index == -2) {
295 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
296 return;
297 }
298
299 if (index == -1) { 258 if (index == -1) {
300 struct bdb_sdvo_lvds_options *sdvo_lvds_options; 259 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
301 260
@@ -350,13 +309,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
350 dev_priv->lvds_use_ssc = general->enable_ssc; 309 dev_priv->lvds_use_ssc = general->enable_ssc;
351 dev_priv->lvds_ssc_freq = 310 dev_priv->lvds_ssc_freq =
352 intel_bios_ssc_frequency(dev, general->ssc_freq); 311 intel_bios_ssc_frequency(dev, general->ssc_freq);
353 dev_priv->display_clock_mode = general->display_clock_mode;
354 DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
355 dev_priv->int_tv_support,
356 dev_priv->int_crt_support,
357 dev_priv->lvds_use_ssc,
358 dev_priv->lvds_ssc_freq,
359 dev_priv->display_clock_mode);
360 } 312 }
361} 313}
362 314
@@ -372,11 +324,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
372 if (block_size >= sizeof(*general)) { 324 if (block_size >= sizeof(*general)) {
373 int bus_pin = general->crt_ddc_gmbus_pin; 325 int bus_pin = general->crt_ddc_gmbus_pin;
374 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 326 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
375 if (intel_gmbus_is_port_valid(bus_pin)) 327 if (bus_pin >= 1 && bus_pin <= 6)
376 dev_priv->crt_ddc_pin = bus_pin; 328 dev_priv->crt_ddc_pin = bus_pin;
377 } else { 329 } else {
378 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 330 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
379 block_size); 331 block_size);
380 } 332 }
381 } 333 }
382} 334}
@@ -429,7 +381,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
429 if (p_child->dvo_port != DEVICE_PORT_DVOB && 381 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
430 p_child->dvo_port != DEVICE_PORT_DVOC) { 382 p_child->dvo_port != DEVICE_PORT_DVOC) {
431 /* skip the incorrect SDVO port */ 383 /* skip the incorrect SDVO port */
432 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 384 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
433 continue; 385 continue;
434 } 386 }
435 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 387 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
@@ -444,13 +396,15 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
444 p_mapping->dvo_wiring = p_child->dvo_wiring; 396 p_mapping->dvo_wiring = p_child->dvo_wiring;
445 p_mapping->ddc_pin = p_child->ddc_pin; 397 p_mapping->ddc_pin = p_child->ddc_pin;
446 p_mapping->i2c_pin = p_child->i2c_pin; 398 p_mapping->i2c_pin = p_child->i2c_pin;
399 p_mapping->i2c_speed = p_child->i2c_speed;
447 p_mapping->initialized = 1; 400 p_mapping->initialized = 1;
448 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 401 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
449 p_mapping->dvo_port, 402 p_mapping->dvo_port,
450 p_mapping->slave_addr, 403 p_mapping->slave_addr,
451 p_mapping->dvo_wiring, 404 p_mapping->dvo_wiring,
452 p_mapping->ddc_pin, 405 p_mapping->ddc_pin,
453 p_mapping->i2c_pin); 406 p_mapping->i2c_pin,
407 p_mapping->i2c_speed);
454 } else { 408 } else {
455 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 409 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
456 "two SDVO device.\n"); 410 "two SDVO device.\n");
@@ -499,8 +453,12 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
499 453
500 edp = find_section(bdb, BDB_EDP); 454 edp = find_section(bdb, BDB_EDP);
501 if (!edp) { 455 if (!edp) {
502 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) 456 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 457 DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
458 "supported, assume %dbpp panel color "
459 "depth.\n",
460 dev_priv->edp.bpp);
461 }
504 return; 462 return;
505 } 463 }
506 464
@@ -606,10 +564,10 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
606 count++; 564 count++;
607 } 565 }
608 if (!count) { 566 if (!count) {
609 DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); 567 DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
610 return; 568 return;
611 } 569 }
612 dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); 570 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
613 if (!dev_priv->child_dev) { 571 if (!dev_priv->child_dev) {
614 DRM_DEBUG_KMS("No memory space for child device\n"); 572 DRM_DEBUG_KMS("No memory space for child device\n");
615 return; 573 return;
@@ -652,29 +610,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
652 /* Default to using SSC */ 610 /* Default to using SSC */
653 dev_priv->lvds_use_ssc = 1; 611 dev_priv->lvds_use_ssc = 1;
654 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 612 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
655 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); 613 DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
656}
657 614
658static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 615 /* eDP data */
659{ 616 dev_priv->edp.bpp = 18;
660 DRM_DEBUG_KMS("Falling back to manually reading VBT from "
661 "VBIOS ROM for %s\n",
662 id->ident);
663 return 1;
664} 617}
665 618
666static const struct dmi_system_id intel_no_opregion_vbt[] = {
667 {
668 .callback = intel_no_opregion_vbt_callback,
669 .ident = "ThinkCentre A57",
670 .matches = {
671 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
672 DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
673 },
674 },
675 { }
676};
677
678/** 619/**
679 * intel_parse_bios - find VBT and initialize settings from the BIOS 620 * intel_parse_bios - find VBT and initialize settings from the BIOS
680 * @dev: DRM device 621 * @dev: DRM device
@@ -684,7 +625,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
684 * 625 *
685 * Returns 0 on success, nonzero on failure. 626 * Returns 0 on success, nonzero on failure.
686 */ 627 */
687int 628bool
688intel_parse_bios(struct drm_device *dev) 629intel_parse_bios(struct drm_device *dev)
689{ 630{
690 struct drm_i915_private *dev_priv = dev->dev_private; 631 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -695,10 +636,10 @@ intel_parse_bios(struct drm_device *dev)
695 init_vbt_defaults(dev_priv); 636 init_vbt_defaults(dev_priv);
696 637
697 /* XXX Should this validation be moved to intel_opregion.c? */ 638 /* XXX Should this validation be moved to intel_opregion.c? */
698 if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { 639 if (dev_priv->opregion.vbt) {
699 struct vbt_header *vbt = dev_priv->opregion.vbt; 640 struct vbt_header *vbt = dev_priv->opregion.vbt;
700 if (memcmp(vbt->signature, "$VBT", 4) == 0) { 641 if (memcmp(vbt->signature, "$VBT", 4) == 0) {
701 DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", 642 DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
702 vbt->signature); 643 vbt->signature);
703 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); 644 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
704 } else 645 } else
@@ -723,7 +664,7 @@ intel_parse_bios(struct drm_device *dev)
723 } 664 }
724 665
725 if (!vbt) { 666 if (!vbt) {
726 DRM_DEBUG_DRIVER("VBT signature missing\n"); 667 DRM_ERROR("VBT signature missing\n");
727 pci_unmap_rom(pdev, bios); 668 pci_unmap_rom(pdev, bios);
728 return -1; 669 return -1;
729 } 670 }
@@ -755,8 +696,7 @@ void intel_setup_bios(struct drm_device *dev)
755 struct drm_i915_private *dev_priv = dev->dev_private; 696 struct drm_i915_private *dev_priv = dev->dev_private;
756 697
757 /* Set the Panel Power On/Off timings if uninitialized. */ 698 /* Set the Panel Power On/Off timings if uninitialized. */
758 if (!HAS_PCH_SPLIT(dev) && 699 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
759 I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
760 /* Set T2 to 40ms and T5 to 200ms */ 700 /* Set T2 to 40ms and T5 to 200ms */
761 I915_WRITE(PP_ON_DELAYS, 0x019007d0); 701 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
762 702
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f93437..5f8e4edcbbb 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright © 2006 Intel Corporation 2 * Copyright © 2006 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -28,7 +28,7 @@
28#ifndef _I830_BIOS_H_ 28#ifndef _I830_BIOS_H_
29#define _I830_BIOS_H_ 29#define _I830_BIOS_H_
30 30
31#include <drm/drmP.h> 31#include "drmP.h"
32 32
33struct vbt_header { 33struct vbt_header {
34 u8 signature[20]; /**< Always starts with 'VBT$' */ 34 u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -120,9 +120,7 @@ struct bdb_general_features {
120 u8 ssc_freq:1; 120 u8 ssc_freq:1;
121 u8 enable_lfp_on_override:1; 121 u8 enable_lfp_on_override:1;
122 u8 disable_ssc_ddt:1; 122 u8 disable_ssc_ddt:1;
123 u8 rsvd7:1; 123 u8 rsvd8:3; /* finish byte */
124 u8 display_clock_mode:1;
125 u8 rsvd8:1; /* finish byte */
126 124
127 /* bits 3 */ 125 /* bits 3 */
128 u8 disable_smooth_vision:1; 126 u8 disable_smooth_vision:1;
@@ -135,10 +133,7 @@ struct bdb_general_features {
135 /* bits 5 */ 133 /* bits 5 */
136 u8 int_crt_support:1; 134 u8 int_crt_support:1;
137 u8 int_tv_support:1; 135 u8 int_tv_support:1;
138 u8 int_efp_support:1; 136 u8 rsvd11:6; /* finish byte */
139 u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
140 u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
141 u8 rsvd11:3; /* finish byte */
142} __attribute__((packed)); 137} __attribute__((packed));
143 138
144/* pre-915 */ 139/* pre-915 */
@@ -202,7 +197,8 @@ struct bdb_general_features {
202struct child_device_config { 197struct child_device_config {
203 u16 handle; 198 u16 handle;
204 u16 device_type; 199 u16 device_type;
205 u8 device_id[10]; /* ascii string */ 200 u8 i2c_speed;
201 u8 rsvd[9];
206 u16 addin_offset; 202 u16 addin_offset;
207 u8 dvo_port; /* See Device_PORT_* above */ 203 u8 dvo_port; /* See Device_PORT_* above */
208 u8 i2c_pin; 204 u8 i2c_pin;
@@ -244,7 +240,7 @@ struct bdb_general_definitions {
244 * And the device num is related with the size of general definition 240 * And the device num is related with the size of general definition
245 * block. It is obtained by using the following formula: 241 * block. It is obtained by using the following formula:
246 * number = (block_size - sizeof(bdb_general_definitions))/ 242 * number = (block_size - sizeof(bdb_general_definitions))/
247 * sizeof(child_device_config); 243 * sizeof(child_device_config);
248 */ 244 */
249 struct child_device_config devices[0]; 245 struct child_device_config devices[0];
250} __attribute__((packed)); 246} __attribute__((packed));
@@ -450,11 +446,11 @@ struct bdb_driver_features {
450#define EDP_VSWING_1_2V 3 446#define EDP_VSWING_1_2V 3
451 447
452struct edp_power_seq { 448struct edp_power_seq {
453 u16 t1_t3; 449 u16 t3;
454 u16 t8; 450 u16 t7;
455 u16 t9; 451 u16 t9;
456 u16 t10; 452 u16 t10;
457 u16 t11_t12; 453 u16 t12;
458} __attribute__ ((packed)); 454} __attribute__ ((packed));
459 455
460struct edp_link_params { 456struct edp_link_params {
@@ -467,16 +463,12 @@ struct edp_link_params {
467struct bdb_edp { 463struct bdb_edp {
468 struct edp_power_seq power_seqs[16]; 464 struct edp_power_seq power_seqs[16];
469 u32 color_depth; 465 u32 color_depth;
470 struct edp_link_params link_params[16];
471 u32 sdrrs_msa_timing_delay; 466 u32 sdrrs_msa_timing_delay;
472 467 struct edp_link_params link_params[16];
473 /* ith bit indicates enabled/disabled for (i+1)th panel */
474 u16 edp_s3d_feature;
475 u16 edp_t3_optimization;
476} __attribute__ ((packed)); 468} __attribute__ ((packed));
477 469
478void intel_setup_bios(struct drm_device *dev); 470void intel_setup_bios(struct drm_device *dev);
479int intel_parse_bios(struct drm_device *dev); 471bool intel_parse_bios(struct drm_device *dev);
480 472
481/* 473/*
482 * Driver<->VBIOS interaction occurs through scratch bits in 474 * Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7e..0979d887788 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -24,15 +24,15 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/dmi.h>
28#include <linux/i2c.h> 27#include <linux/i2c.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
30#include <drm/drmP.h> 29#include "drmP.h"
31#include <drm/drm_crtc.h> 30#include "drm.h"
32#include <drm/drm_crtc_helper.h> 31#include "drm_crtc.h"
33#include <drm/drm_edid.h> 32#include "drm_crtc_helper.h"
33#include "drm_edid.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include <drm/i915_drm.h> 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37 37
38/* Here's the desired hotplug mode */ 38/* Here's the desired hotplug mode */
@@ -46,7 +46,6 @@
46struct intel_crt { 46struct intel_crt {
47 struct intel_encoder base; 47 struct intel_encoder base;
48 bool force_hotplug_required; 48 bool force_hotplug_required;
49 u32 adpa_reg;
50}; 49};
51 50
52static struct intel_crt *intel_attached_crt(struct drm_connector *connector) 51static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,69 +54,22 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
55 struct intel_crt, base); 54 struct intel_crt, base);
56} 55}
57 56
58static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) 57static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
59{
60 return container_of(encoder, struct intel_crt, base);
61}
62
63static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
64 enum pipe *pipe)
65{ 58{
66 struct drm_device *dev = encoder->base.dev; 59 struct drm_device *dev = encoder->dev;
67 struct drm_i915_private *dev_priv = dev->dev_private; 60 struct drm_i915_private *dev_priv = dev->dev_private;
68 struct intel_crt *crt = intel_encoder_to_crt(encoder); 61 u32 temp, reg;
69 u32 tmp;
70
71 tmp = I915_READ(crt->adpa_reg);
72 62
73 if (!(tmp & ADPA_DAC_ENABLE)) 63 if (HAS_PCH_SPLIT(dev))
74 return false; 64 reg = PCH_ADPA;
75
76 if (HAS_PCH_CPT(dev))
77 *pipe = PORT_TO_PIPE_CPT(tmp);
78 else 65 else
79 *pipe = PORT_TO_PIPE(tmp); 66 reg = ADPA;
80 67
81 return true; 68 temp = I915_READ(reg);
82}
83
84static void intel_disable_crt(struct intel_encoder *encoder)
85{
86 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
87 struct intel_crt *crt = intel_encoder_to_crt(encoder);
88 u32 temp;
89
90 temp = I915_READ(crt->adpa_reg);
91 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 69 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
92 temp &= ~ADPA_DAC_ENABLE; 70 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp);
94}
95
96static void intel_enable_crt(struct intel_encoder *encoder)
97{
98 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
99 struct intel_crt *crt = intel_encoder_to_crt(encoder);
100 u32 temp;
101
102 temp = I915_READ(crt->adpa_reg);
103 temp |= ADPA_DAC_ENABLE;
104 I915_WRITE(crt->adpa_reg, temp);
105}
106 71
107/* Note: The caller is required to filter out dpms modes not supported by the 72 switch(mode) {
108 * platform. */
109static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
110{
111 struct drm_device *dev = encoder->base.dev;
112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_crt *crt = intel_encoder_to_crt(encoder);
114 u32 temp;
115
116 temp = I915_READ(crt->adpa_reg);
117 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
118 temp &= ~ADPA_DAC_ENABLE;
119
120 switch (mode) {
121 case DRM_MODE_DPMS_ON: 73 case DRM_MODE_DPMS_ON:
122 temp |= ADPA_DAC_ENABLE; 74 temp |= ADPA_DAC_ENABLE;
123 break; 75 break;
@@ -132,51 +84,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
132 break; 84 break;
133 } 85 }
134 86
135 I915_WRITE(crt->adpa_reg, temp); 87 I915_WRITE(reg, temp);
136}
137
138static void intel_crt_dpms(struct drm_connector *connector, int mode)
139{
140 struct drm_device *dev = connector->dev;
141 struct intel_encoder *encoder = intel_attached_encoder(connector);
142 struct drm_crtc *crtc;
143 int old_dpms;
144
145 /* PCH platforms and VLV only support on/off. */
146 if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
147 mode = DRM_MODE_DPMS_OFF;
148
149 if (mode == connector->dpms)
150 return;
151
152 old_dpms = connector->dpms;
153 connector->dpms = mode;
154
155 /* Only need to change hw state when actually enabled */
156 crtc = encoder->base.crtc;
157 if (!crtc) {
158 encoder->connectors_active = false;
159 return;
160 }
161
162 /* We need the pipe to run for anything but OFF. */
163 if (mode == DRM_MODE_DPMS_OFF)
164 encoder->connectors_active = false;
165 else
166 encoder->connectors_active = true;
167
168 if (mode < old_dpms) {
169 /* From off to on, enable the pipe first. */
170 intel_crtc_update_dpms(crtc);
171
172 intel_crt_set_dpms(encoder, mode);
173 } else {
174 intel_crt_set_dpms(encoder, mode);
175
176 intel_crtc_update_dpms(crtc);
177 }
178
179 intel_modeset_check_state(connector->dev);
180} 88}
181 89
182static int intel_crt_mode_valid(struct drm_connector *connector, 90static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -198,16 +106,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
198 if (mode->clock > max_clock) 106 if (mode->clock > max_clock)
199 return MODE_CLOCK_HIGH; 107 return MODE_CLOCK_HIGH;
200 108
201 /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
202 if (HAS_PCH_LPT(dev) &&
203 (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
204 return MODE_CLOCK_HIGH;
205
206 return MODE_OK; 109 return MODE_OK;
207} 110}
208 111
209static bool intel_crt_mode_fixup(struct drm_encoder *encoder, 112static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
210 const struct drm_display_mode *mode, 113 struct drm_display_mode *mode,
211 struct drm_display_mode *adjusted_mode) 114 struct drm_display_mode *adjusted_mode)
212{ 115{
213 return true; 116 return true;
@@ -220,36 +123,51 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
220 123
221 struct drm_device *dev = encoder->dev; 124 struct drm_device *dev = encoder->dev;
222 struct drm_crtc *crtc = encoder->crtc; 125 struct drm_crtc *crtc = encoder->crtc;
223 struct intel_crt *crt =
224 intel_encoder_to_crt(to_intel_encoder(encoder));
225 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 126 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
226 struct drm_i915_private *dev_priv = dev->dev_private; 127 struct drm_i915_private *dev_priv = dev->dev_private;
227 u32 adpa; 128 int dpll_md_reg;
129 u32 adpa, dpll_md;
130 u32 adpa_reg;
131
132 dpll_md_reg = DPLL_MD(intel_crtc->pipe);
228 133
229 if (HAS_PCH_SPLIT(dev)) 134 if (HAS_PCH_SPLIT(dev))
230 adpa = ADPA_HOTPLUG_BITS; 135 adpa_reg = PCH_ADPA;
231 else 136 else
232 adpa = 0; 137 adpa_reg = ADPA;
138
139 /*
140 * Disable separate mode multiplier used when cloning SDVO to CRT
141 * XXX this needs to be adjusted when we really are cloning
142 */
143 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
144 dpll_md = I915_READ(dpll_md_reg);
145 I915_WRITE(dpll_md_reg,
146 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
147 }
233 148
149 adpa = ADPA_HOTPLUG_BITS;
234 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 150 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
235 adpa |= ADPA_HSYNC_ACTIVE_HIGH; 151 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
236 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 152 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
237 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 153 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
238 154
239 /* For CPT allow 3 pipe config, for others just use A or B */ 155 if (intel_crtc->pipe == 0) {
240 if (HAS_PCH_LPT(dev)) 156 if (HAS_PCH_CPT(dev))
241 ; /* Those bits don't exist here */ 157 adpa |= PORT_TRANS_A_SEL_CPT;
242 else if (HAS_PCH_CPT(dev)) 158 else
243 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 159 adpa |= ADPA_PIPE_A_SELECT;
244 else if (intel_crtc->pipe == 0) 160 } else {
245 adpa |= ADPA_PIPE_A_SELECT; 161 if (HAS_PCH_CPT(dev))
246 else 162 adpa |= PORT_TRANS_B_SEL_CPT;
247 adpa |= ADPA_PIPE_B_SELECT; 163 else
164 adpa |= ADPA_PIPE_B_SELECT;
165 }
248 166
249 if (!HAS_PCH_SPLIT(dev)) 167 if (!HAS_PCH_SPLIT(dev))
250 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); 168 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
251 169
252 I915_WRITE(crt->adpa_reg, adpa); 170 I915_WRITE(adpa_reg, adpa);
253} 171}
254 172
255static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) 173static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -297,42 +215,6 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
297 return ret; 215 return ret;
298} 216}
299 217
300static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
301{
302 struct drm_device *dev = connector->dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 u32 adpa;
305 bool ret;
306 u32 save_adpa;
307
308 save_adpa = adpa = I915_READ(ADPA);
309 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
310
311 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
312
313 I915_WRITE(ADPA, adpa);
314
315 if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
316 1000)) {
317 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
318 I915_WRITE(ADPA, save_adpa);
319 }
320
321 /* Check the status to see if both blue and green are on now */
322 adpa = I915_READ(ADPA);
323 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
324 ret = true;
325 else
326 ret = false;
327
328 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
329
330 /* FIXME: debug force function and remove */
331 ret = true;
332
333 return ret;
334}
335
336/** 218/**
337 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. 219 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
338 * 220 *
@@ -352,9 +234,6 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
352 if (HAS_PCH_SPLIT(dev)) 234 if (HAS_PCH_SPLIT(dev))
353 return intel_ironlake_crt_detect_hotplug(connector); 235 return intel_ironlake_crt_detect_hotplug(connector);
354 236
355 if (IS_VALLEYVIEW(dev))
356 return valleyview_crt_detect_hotplug(connector);
357
358 /* 237 /*
359 * On 4 series desktop, CRT detect sequence need to be done twice 238 * On 4 series desktop, CRT detect sequence need to be done twice
360 * to get a reliable result. 239 * to get a reliable result.
@@ -390,72 +269,42 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
390 return ret; 269 return ret;
391} 270}
392 271
393static struct edid *intel_crt_get_edid(struct drm_connector *connector,
394 struct i2c_adapter *i2c)
395{
396 struct edid *edid;
397
398 edid = drm_get_edid(connector, i2c);
399
400 if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
401 DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
402 intel_gmbus_force_bit(i2c, true);
403 edid = drm_get_edid(connector, i2c);
404 intel_gmbus_force_bit(i2c, false);
405 }
406
407 return edid;
408}
409
410/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
411static int intel_crt_ddc_get_modes(struct drm_connector *connector,
412 struct i2c_adapter *adapter)
413{
414 struct edid *edid;
415 int ret;
416
417 edid = intel_crt_get_edid(connector, adapter);
418 if (!edid)
419 return 0;
420
421 ret = intel_connector_update_modes(connector, edid);
422 kfree(edid);
423
424 return ret;
425}
426
427static bool intel_crt_detect_ddc(struct drm_connector *connector) 272static bool intel_crt_detect_ddc(struct drm_connector *connector)
428{ 273{
429 struct intel_crt *crt = intel_attached_crt(connector); 274 struct intel_crt *crt = intel_attached_crt(connector);
430 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; 275 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
431 struct edid *edid;
432 struct i2c_adapter *i2c;
433
434 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
435 276
436 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 277 /* CRT should always be at 0, but check anyway */
437 edid = intel_crt_get_edid(connector, i2c); 278 if (crt->base.type != INTEL_OUTPUT_ANALOG)
279 return false;
438 280
439 if (edid) { 281 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
440 bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 282 struct edid *edid;
283 bool is_digital = false;
441 284
285 edid = drm_get_edid(connector,
286 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
442 /* 287 /*
443 * This may be a DVI-I connector with a shared DDC 288 * This may be a DVI-I connector with a shared DDC
444 * link between analog and digital outputs, so we 289 * link between analog and digital outputs, so we
445 * have to check the EDID input spec of the attached device. 290 * have to check the EDID input spec of the attached device.
291 *
292 * On the other hand, what should we do if it is a broken EDID?
446 */ 293 */
294 if (edid != NULL) {
295 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
296 connector->display_info.raw_edid = NULL;
297 kfree(edid);
298 }
299
447 if (!is_digital) { 300 if (!is_digital) {
448 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 301 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
449 return true; 302 return true;
303 } else {
304 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
450 } 305 }
451
452 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
453 } else {
454 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
455 } 306 }
456 307
457 kfree(edid);
458
459 return false; 308 return false;
460} 309}
461 310
@@ -584,43 +433,43 @@ intel_crt_detect(struct drm_connector *connector, bool force)
584{ 433{
585 struct drm_device *dev = connector->dev; 434 struct drm_device *dev = connector->dev;
586 struct intel_crt *crt = intel_attached_crt(connector); 435 struct intel_crt *crt = intel_attached_crt(connector);
436 struct drm_crtc *crtc;
587 enum drm_connector_status status; 437 enum drm_connector_status status;
588 struct intel_load_detect_pipe tmp;
589 438
590 if (I915_HAS_HOTPLUG(dev)) { 439 if (I915_HAS_HOTPLUG(dev)) {
591 /* We can not rely on the HPD pin always being correctly wired
592 * up, for example many KVM do not pass it through, and so
593 * only trust an assertion that the monitor is connected.
594 */
595 if (intel_crt_detect_hotplug(connector)) { 440 if (intel_crt_detect_hotplug(connector)) {
596 DRM_DEBUG_KMS("CRT detected via hotplug\n"); 441 DRM_DEBUG_KMS("CRT detected via hotplug\n");
597 return connector_status_connected; 442 return connector_status_connected;
598 } else 443 } else {
599 DRM_DEBUG_KMS("CRT not detected via hotplug\n"); 444 DRM_DEBUG_KMS("CRT not detected via hotplug\n");
445 return connector_status_disconnected;
446 }
600 } 447 }
601 448
602 if (intel_crt_detect_ddc(connector)) 449 if (intel_crt_detect_ddc(connector))
603 return connector_status_connected; 450 return connector_status_connected;
604 451
605 /* Load detection is broken on HPD capable machines. Whoever wants a
606 * broken monitor (without edid) to work behind a broken kvm (that fails
607 * to have the right resistors for HP detection) needs to fix this up.
608 * For now just bail out. */
609 if (I915_HAS_HOTPLUG(dev))
610 return connector_status_disconnected;
611
612 if (!force) 452 if (!force)
613 return connector->status; 453 return connector->status;
614 454
615 /* for pre-945g platforms use load detect */ 455 /* for pre-945g platforms use load detect */
616 if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { 456 crtc = crt->base.base.crtc;
617 if (intel_crt_detect_ddc(connector)) 457 if (crtc && crtc->enabled) {
618 status = connector_status_connected; 458 status = intel_crt_load_detect(crt);
619 else 459 } else {
620 status = intel_crt_load_detect(crt); 460 struct intel_load_detect_pipe tmp;
621 intel_release_load_detect_pipe(connector, &tmp); 461
622 } else 462 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
623 status = connector_status_unknown; 463 &tmp)) {
464 if (intel_crt_detect_ddc(connector))
465 status = connector_status_connected;
466 else
467 status = intel_crt_load_detect(crt);
468 intel_release_load_detect_pipe(&crt->base, connector,
469 &tmp);
470 } else
471 status = connector_status_unknown;
472 }
624 473
625 return status; 474 return status;
626} 475}
@@ -637,16 +486,15 @@ static int intel_crt_get_modes(struct drm_connector *connector)
637 struct drm_device *dev = connector->dev; 486 struct drm_device *dev = connector->dev;
638 struct drm_i915_private *dev_priv = dev->dev_private; 487 struct drm_i915_private *dev_priv = dev->dev_private;
639 int ret; 488 int ret;
640 struct i2c_adapter *i2c;
641 489
642 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); 490 ret = intel_ddc_get_modes(connector,
643 ret = intel_crt_ddc_get_modes(connector, i2c); 491 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
644 if (ret || !IS_G4X(dev)) 492 if (ret || !IS_G4X(dev))
645 return ret; 493 return ret;
646 494
647 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 495 /* Try to probe digital port for output in DVI-I -> VGA mode. */
648 i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); 496 return intel_ddc_get_modes(connector,
649 return intel_crt_ddc_get_modes(connector, i2c); 497 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
650} 498}
651 499
652static int intel_crt_set_property(struct drm_connector *connector, 500static int intel_crt_set_property(struct drm_connector *connector,
@@ -659,37 +507,27 @@ static int intel_crt_set_property(struct drm_connector *connector,
659static void intel_crt_reset(struct drm_connector *connector) 507static void intel_crt_reset(struct drm_connector *connector)
660{ 508{
661 struct drm_device *dev = connector->dev; 509 struct drm_device *dev = connector->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
663 struct intel_crt *crt = intel_attached_crt(connector); 510 struct intel_crt *crt = intel_attached_crt(connector);
664 511
665 if (HAS_PCH_SPLIT(dev)) { 512 if (HAS_PCH_SPLIT(dev))
666 u32 adpa;
667
668 adpa = I915_READ(PCH_ADPA);
669 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
670 adpa |= ADPA_HOTPLUG_BITS;
671 I915_WRITE(PCH_ADPA, adpa);
672 POSTING_READ(PCH_ADPA);
673
674 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
675 crt->force_hotplug_required = 1; 513 crt->force_hotplug_required = 1;
676 }
677
678} 514}
679 515
680/* 516/*
681 * Routines for controlling stuff on the analog port 517 * Routines for controlling stuff on the analog port
682 */ 518 */
683 519
684static const struct drm_encoder_helper_funcs crt_encoder_funcs = { 520static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
521 .dpms = intel_crt_dpms,
685 .mode_fixup = intel_crt_mode_fixup, 522 .mode_fixup = intel_crt_mode_fixup,
523 .prepare = intel_encoder_prepare,
524 .commit = intel_encoder_commit,
686 .mode_set = intel_crt_mode_set, 525 .mode_set = intel_crt_mode_set,
687 .disable = intel_encoder_noop,
688}; 526};
689 527
690static const struct drm_connector_funcs intel_crt_connector_funcs = { 528static const struct drm_connector_funcs intel_crt_connector_funcs = {
691 .reset = intel_crt_reset, 529 .reset = intel_crt_reset,
692 .dpms = intel_crt_dpms, 530 .dpms = drm_helper_connector_dpms,
693 .detect = intel_crt_detect, 531 .detect = intel_crt_detect,
694 .fill_modes = drm_helper_probe_single_connector_modes, 532 .fill_modes = drm_helper_probe_single_connector_modes,
695 .destroy = intel_crt_destroy, 533 .destroy = intel_crt_destroy,
@@ -706,24 +544,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
706 .destroy = intel_encoder_destroy, 544 .destroy = intel_encoder_destroy,
707}; 545};
708 546
709static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
710{
711 DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
712 return 1;
713}
714
715static const struct dmi_system_id intel_no_crt[] = {
716 {
717 .callback = intel_no_crt_dmi_callback,
718 .ident = "ACER ZGB",
719 .matches = {
720 DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
721 DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
722 },
723 },
724 { }
725};
726
727void intel_crt_init(struct drm_device *dev) 547void intel_crt_init(struct drm_device *dev)
728{ 548{
729 struct drm_connector *connector; 549 struct drm_connector *connector;
@@ -731,10 +551,6 @@ void intel_crt_init(struct drm_device *dev)
731 struct intel_connector *intel_connector; 551 struct intel_connector *intel_connector;
732 struct drm_i915_private *dev_priv = dev->dev_private; 552 struct drm_i915_private *dev_priv = dev->dev_private;
733 553
734 /* Skip machines without VGA that falsely report hotplug events */
735 if (dmi_check_system(intel_no_crt))
736 return;
737
738 crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); 554 crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
739 if (!crt) 555 if (!crt)
740 return; 556 return;
@@ -755,34 +571,14 @@ void intel_crt_init(struct drm_device *dev)
755 intel_connector_attach_encoder(intel_connector, &crt->base); 571 intel_connector_attach_encoder(intel_connector, &crt->base);
756 572
757 crt->base.type = INTEL_OUTPUT_ANALOG; 573 crt->base.type = INTEL_OUTPUT_ANALOG;
758 crt->base.cloneable = true; 574 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
759 if (IS_I830(dev)) 575 1 << INTEL_ANALOG_CLONE_BIT |
760 crt->base.crtc_mask = (1 << 0); 576 1 << INTEL_SDVO_LVDS_CLONE_BIT);
761 else 577 crt->base.crtc_mask = (1 << 0) | (1 << 1);
762 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 578 connector->interlace_allowed = 1;
763
764 if (IS_GEN2(dev))
765 connector->interlace_allowed = 0;
766 else
767 connector->interlace_allowed = 1;
768 connector->doublescan_allowed = 0; 579 connector->doublescan_allowed = 0;
769 580
770 if (HAS_PCH_SPLIT(dev)) 581 drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
771 crt->adpa_reg = PCH_ADPA;
772 else if (IS_VALLEYVIEW(dev))
773 crt->adpa_reg = VLV_ADPA;
774 else
775 crt->adpa_reg = ADPA;
776
777 crt->base.disable = intel_disable_crt;
778 crt->base.enable = intel_enable_crt;
779 if (IS_HASWELL(dev))
780 crt->base.get_hw_state = intel_ddi_get_hw_state;
781 else
782 crt->base.get_hw_state = intel_crt_get_hw_state;
783 intel_connector->get_hw_state = intel_connector_get_hw_state;
784
785 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
786 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 582 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
787 583
788 drm_sysfs_connector_add(connector); 584 drm_sysfs_connector_add(connector);
@@ -796,14 +592,18 @@ void intel_crt_init(struct drm_device *dev)
796 * Configure the automatic hotplug detection stuff 592 * Configure the automatic hotplug detection stuff
797 */ 593 */
798 crt->force_hotplug_required = 0; 594 crt->force_hotplug_required = 0;
595 if (HAS_PCH_SPLIT(dev)) {
596 u32 adpa;
799 597
800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 598 adpa = I915_READ(PCH_ADPA);
599 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
600 adpa |= ADPA_HOTPLUG_BITS;
601 I915_WRITE(PCH_ADPA, adpa);
602 POSTING_READ(PCH_ADPA);
801 603
802 /* 604 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
803 * TODO: find a proper way to discover whether we need to set the 605 crt->force_hotplug_required = 1;
804 * polarity reversal bit or not, instead of relying on the BIOS. 606 }
805 */ 607
806 if (HAS_PCH_LPT(dev)) 608 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
807 dev_priv->fdi_rx_polarity_reversed =
808 !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
809} 609}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
deleted file mode 100644
index 4bad0f72401..00000000000
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ /dev/null
@@ -1,1514 +0,0 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_drv.h"
30
31/* HDMI/DVI modes ignore everything but the last 2 items. So we share
32 * them for both DP and FDI transports, allowing those ports to
33 * automatically adapt to HDMI connections as well
34 */
35static const u32 hsw_ddi_translations_dp[] = {
36 0x00FFFFFF, 0x0006000E, /* DP parameters */
37 0x00D75FFF, 0x0005000A,
38 0x00C30FFF, 0x00040006,
39 0x80AAAFFF, 0x000B0000,
40 0x00FFFFFF, 0x0005000A,
41 0x00D75FFF, 0x000C0004,
42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46};
47
48static const u32 hsw_ddi_translations_fdi[] = {
49 0x00FFFFFF, 0x0007000E, /* FDI parameters */
50 0x00D75FFF, 0x000F000A,
51 0x00C30FFF, 0x00060006,
52 0x00AAAFFF, 0x001E0000,
53 0x00FFFFFF, 0x000F000A,
54 0x00D75FFF, 0x00160004,
55 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59};
60
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{
63 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type;
65
66 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
67 type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
68 struct intel_digital_port *intel_dig_port =
69 enc_to_dig_port(encoder);
70 return intel_dig_port->port;
71
72 } else if (type == INTEL_OUTPUT_ANALOG) {
73 return PORT_E;
74
75 } else {
76 DRM_ERROR("Invalid DDI encoder type %d\n", type);
77 BUG();
78 }
79}
80
81/* On Haswell, DDI port buffers must be programmed with correct values
82 * in advance. The buffer values are different for FDI and DP modes,
83 * but the HDMI/DVI fields are shared among those. So we program the DDI
84 * in either FDI or DP modes only, as HDMI connections will work with both
85 * of those
86 */
87void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
88{
89 struct drm_i915_private *dev_priv = dev->dev_private;
90 u32 reg;
91 int i;
92 const u32 *ddi_translations = ((use_fdi_mode) ?
93 hsw_ddi_translations_fdi :
94 hsw_ddi_translations_dp);
95
96 DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
97 port_name(port),
98 use_fdi_mode ? "FDI" : "DP");
99
100 WARN((use_fdi_mode && (port != PORT_E)),
101 "Programming port %c in FDI mode, this probably will not work.\n",
102 port_name(port));
103
104 for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
105 I915_WRITE(reg, ddi_translations[i]);
106 reg += 4;
107 }
108}
109
110/* Program DDI buffers translations for DP. By default, program ports A-D in DP
111 * mode and port E for FDI.
112 */
113void intel_prepare_ddi(struct drm_device *dev)
114{
115 int port;
116
117 if (IS_HASWELL(dev)) {
118 for (port = PORT_A; port < PORT_E; port++)
119 intel_prepare_ddi_buffers(dev, port, false);
120
121 /* DDI E is the suggested one to work in FDI mode, so program is as such by
122 * default. It will have to be re-programmed in case a digital DP output
123 * will be detected on it
124 */
125 intel_prepare_ddi_buffers(dev, PORT_E, true);
126 }
127}
128
129static const long hsw_ddi_buf_ctl_values[] = {
130 DDI_BUF_EMP_400MV_0DB_HSW,
131 DDI_BUF_EMP_400MV_3_5DB_HSW,
132 DDI_BUF_EMP_400MV_6DB_HSW,
133 DDI_BUF_EMP_400MV_9_5DB_HSW,
134 DDI_BUF_EMP_600MV_0DB_HSW,
135 DDI_BUF_EMP_600MV_3_5DB_HSW,
136 DDI_BUF_EMP_600MV_6DB_HSW,
137 DDI_BUF_EMP_800MV_0DB_HSW,
138 DDI_BUF_EMP_800MV_3_5DB_HSW
139};
140
141static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
142 enum port port)
143{
144 uint32_t reg = DDI_BUF_CTL(port);
145 int i;
146
147 for (i = 0; i < 8; i++) {
148 udelay(1);
149 if (I915_READ(reg) & DDI_BUF_IS_IDLE)
150 return;
151 }
152 DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
153}
154
155/* Starting with Haswell, different DDI ports can work in FDI mode for
156 * connection to the PCH-located connectors. For this, it is necessary to train
157 * both the DDI port and PCH receiver for the desired DDI buffer settings.
158 *
159 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
160 * please note that when FDI mode is active on DDI E, it shares 2 lines with
161 * DDI A (which is used for eDP)
162 */
163
164void hsw_fdi_link_train(struct drm_crtc *crtc)
165{
166 struct drm_device *dev = crtc->dev;
167 struct drm_i915_private *dev_priv = dev->dev_private;
168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
169 u32 temp, i, rx_ctl_val;
170
171 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
172 * mode set "sequence for CRT port" document:
173 * - TP1 to TP2 time with the default value
174 * - FDI delay to 90h
175 */
176 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
177 FDI_RX_PWRDN_LANE0_VAL(2) |
178 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
179
180 /* Enable the PCH Receiver FDI PLL */
181 rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
182 ((intel_crtc->fdi_lanes - 1) << 19);
183 if (dev_priv->fdi_rx_polarity_reversed)
184 rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
186 POSTING_READ(_FDI_RXA_CTL);
187 udelay(220);
188
189 /* Switch from Rawclk to PCDclk */
190 rx_ctl_val |= FDI_PCDCLK;
191 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
192
193 /* Configure Port Clock Select */
194 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
195
196 /* Start the training iterating through available voltages and emphasis,
197 * testing each value twice. */
198 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
199 /* Configure DP_TP_CTL with auto-training */
200 I915_WRITE(DP_TP_CTL(PORT_E),
201 DP_TP_CTL_FDI_AUTOTRAIN |
202 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
203 DP_TP_CTL_LINK_TRAIN_PAT1 |
204 DP_TP_CTL_ENABLE);
205
206 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
207 I915_WRITE(DDI_BUF_CTL(PORT_E),
208 DDI_BUF_CTL_ENABLE |
209 ((intel_crtc->fdi_lanes - 1) << 1) |
210 hsw_ddi_buf_ctl_values[i / 2]);
211 POSTING_READ(DDI_BUF_CTL(PORT_E));
212
213 udelay(600);
214
215 /* Program PCH FDI Receiver TU */
216 I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
217
218 /* Enable PCH FDI Receiver with auto-training */
219 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
220 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
221 POSTING_READ(_FDI_RXA_CTL);
222
223 /* Wait for FDI receiver lane calibration */
224 udelay(30);
225
226 /* Unset FDI_RX_MISC pwrdn lanes */
227 temp = I915_READ(_FDI_RXA_MISC);
228 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
229 I915_WRITE(_FDI_RXA_MISC, temp);
230 POSTING_READ(_FDI_RXA_MISC);
231
232 /* Wait for FDI auto training time */
233 udelay(5);
234
235 temp = I915_READ(DP_TP_STATUS(PORT_E));
236 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
237 DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
238
239 /* Enable normal pixel sending for FDI */
240 I915_WRITE(DP_TP_CTL(PORT_E),
241 DP_TP_CTL_FDI_AUTOTRAIN |
242 DP_TP_CTL_LINK_TRAIN_NORMAL |
243 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
244 DP_TP_CTL_ENABLE);
245
246 return;
247 }
248
249 temp = I915_READ(DDI_BUF_CTL(PORT_E));
250 temp &= ~DDI_BUF_CTL_ENABLE;
251 I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
252 POSTING_READ(DDI_BUF_CTL(PORT_E));
253
254 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
255 temp = I915_READ(DP_TP_CTL(PORT_E));
256 temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
257 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
258 I915_WRITE(DP_TP_CTL(PORT_E), temp);
259 POSTING_READ(DP_TP_CTL(PORT_E));
260
261 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
262
263 rx_ctl_val &= ~FDI_RX_ENABLE;
264 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
265 POSTING_READ(_FDI_RXA_CTL);
266
267 /* Reset FDI_RX_MISC pwrdn lanes */
268 temp = I915_READ(_FDI_RXA_MISC);
269 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
270 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
271 I915_WRITE(_FDI_RXA_MISC, temp);
272 POSTING_READ(_FDI_RXA_MISC);
273 }
274
275 DRM_ERROR("FDI link training failed!\n");
276}
277
/* WRPLL clock dividers.
 *
 * One entry of the TMDS clock lookup table below: for a given target
 * clock (in kHz, per the DRM_INFO format in intel_ddi_calculate_wrpll())
 * the three divider values to program into WRPLL_CTL.
 */
struct wrpll_tmds_clock {
	u32 clock;
	u16 p;		/* Post divider */
	u16 n2;		/* Feedback divider */
	u16 r2;		/* Reference divider */
};
285
/* Table of matching values for WRPLL clocks programming for each frequency.
 * Clocks are in kHz.  The lookup code in intel_ddi_calculate_wrpll() assumes
 * this table is sorted by ascending clock and picks the first entry whose
 * clock is >= the requested one (falling back to the last entry). */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
	{19750,	38,	25,	18},
	{20000,	48,	32,	18},
	{21000,	36,	21,	15},
	{21912,	42,	29,	17},
	{22000,	36,	22,	15},
	{23000,	36,	23,	15},
	{23500,	40,	40,	23},
	{23750,	26,	16,	14},
	{24000,	36,	24,	15},
	{25000,	36,	25,	15},
	{25175,	26,	40,	33},
	{25200,	30,	21,	15},
	{26000,	36,	26,	15},
	{27000,	30,	21,	14},
	{27027,	18,	100,	111},
	{27500,	30,	29,	19},
	{28000,	34,	30,	17},
	{28320,	26,	30,	22},
	{28322,	32,	42,	25},
	{28750,	24,	23,	18},
	{29000,	30,	29,	18},
	{29750,	32,	30,	17},
	{30000,	30,	25,	15},
	{30750,	30,	41,	24},
	{31000,	30,	31,	18},
	{31500,	30,	28,	16},
	{32000,	30,	32,	18},
	{32500,	28,	32,	19},
	{33000,	24,	22,	15},
	{34000,	28,	30,	17},
	{35000,	26,	32,	19},
	{35500,	24,	30,	19},
	{36000,	26,	26,	15},
	{36750,	26,	46,	26},
	{37000,	24,	23,	14},
	{37762,	22,	40,	26},
	{37800,	20,	21,	15},
	{38000,	24,	27,	16},
	{38250,	24,	34,	20},
	{39000,	24,	26,	15},
	{40000,	24,	32,	18},
	{40500,	20,	21,	14},
	{40541,	22,	147,	89},
	{40750,	18,	19,	14},
	{41000,	16,	17,	14},
	{41500,	22,	44,	26},
	{41540,	22,	44,	26},
	{42000,	18,	21,	15},
	{42500,	22,	45,	26},
	{43000,	20,	43,	27},
	{43163,	20,	24,	15},
	{44000,	18,	22,	15},
	{44900,	20,	108,	65},
	{45000,	20,	25,	15},
	{45250,	20,	52,	31},
	{46000,	18,	23,	15},
	{46750,	20,	45,	26},
	{47000,	20,	40,	23},
	{48000,	18,	24,	15},
	{49000,	18,	49,	30},
	{49500,	16,	22,	15},
	{50000,	18,	25,	15},
	{50500,	18,	32,	19},
	{51000,	18,	34,	20},
	{52000,	18,	26,	15},
	{52406,	14,	34,	25},
	{53000,	16,	22,	14},
	{54000,	16,	24,	15},
	{54054,	16,	173,	108},
	{54500,	14,	24,	17},
	{55000,	12,	22,	18},
	{56000,	14,	45,	31},
	{56250,	16,	25,	15},
	{56750,	14,	25,	17},
	{57000,	16,	27,	16},
	{58000,	16,	43,	25},
	{58250,	16,	38,	22},
	{58750,	16,	40,	23},
	{59000,	14,	26,	17},
	{59341,	14,	40,	26},
	{59400,	16,	44,	25},
	{60000,	16,	32,	18},
	{60500,	12,	39,	29},
	{61000,	14,	49,	31},
	{62000,	14,	37,	23},
	{62250,	14,	42,	26},
	{63000,	12,	21,	15},
	{63500,	14,	28,	17},
	{64000,	12,	27,	19},
	{65000,	14,	32,	19},
	{65250,	12,	29,	20},
	{65500,	12,	32,	22},
	{66000,	12,	22,	15},
	{66667,	14,	38,	22},
	{66750,	10,	21,	17},
	{67000,	14,	33,	19},
	{67750,	14,	58,	33},
	{68000,	14,	30,	17},
	{68179,	14,	46,	26},
	{68250,	14,	46,	26},
	{69000,	12,	23,	15},
	{70000,	12,	28,	18},
	{71000,	12,	30,	19},
	{72000,	12,	24,	15},
	{73000,	10,	23,	17},
	{74000,	12,	23,	14},
	{74176,	8,	100,	91},
	{74250,	10,	22,	16},
	{74481,	12,	43,	26},
	{74500,	10,	29,	21},
	{75000,	12,	25,	15},
	{75250,	10,	39,	28},
	{76000,	12,	27,	16},
	{77000,	12,	53,	31},
	{78000,	12,	26,	15},
	{78750,	12,	28,	16},
	{79000,	10,	38,	26},
	{79500,	10,	28,	19},
	{80000,	12,	32,	18},
	{81000,	10,	21,	14},
	{81081,	6,	100,	111},
	{81624,	8,	29,	24},
	{82000,	8,	17,	14},
	{83000,	10,	40,	26},
	{83950,	10,	28,	18},
	{84000,	10,	28,	18},
	{84750,	6,	16,	17},
	{85000,	6,	17,	18},
	{85250,	10,	30,	19},
	{85750,	10,	27,	17},
	{86000,	10,	43,	27},
	{87000,	10,	29,	18},
	{88000,	10,	44,	27},
	{88500,	10,	41,	25},
	{89000,	10,	28,	17},
	{89012,	6,	90,	91},
	{89100,	10,	33,	20},
	{90000,	10,	25,	15},
	{91000,	10,	32,	19},
	{92000,	10,	46,	27},
	{93000,	10,	31,	18},
	{94000,	10,	40,	23},
	{94500,	10,	28,	16},
	{95000,	10,	44,	25},
	{95654,	10,	39,	22},
	{95750,	10,	39,	22},
	{96000,	10,	32,	18},
	{97000,	8,	23,	16},
	{97750,	8,	42,	29},
	{98000,	8,	45,	31},
	{99000,	8,	22,	15},
	{99750,	8,	34,	23},
	{100000,	6,	20,	18},
	{100500,	6,	19,	17},
	{101000,	6,	37,	33},
	{101250,	8,	21,	14},
	{102000,	6,	17,	15},
	{102250,	6,	25,	22},
	{103000,	8,	29,	19},
	{104000,	8,	37,	24},
	{105000,	8,	28,	18},
	{106000,	8,	22,	14},
	{107000,	8,	46,	29},
	{107214,	8,	27,	17},
	{108000,	8,	24,	15},
	{108108,	8,	173,	108},
	{109000,	6,	23,	19},
	{110000,	6,	22,	18},
	{110013,	6,	22,	18},
	{110250,	8,	49,	30},
	{110500,	8,	36,	22},
	{111000,	8,	23,	14},
	{111264,	8,	150,	91},
	{111375,	8,	33,	20},
	{112000,	8,	63,	38},
	{112500,	8,	25,	15},
	{113100,	8,	57,	34},
	{113309,	8,	42,	25},
	{114000,	8,	27,	16},
	{115000,	6,	23,	18},
	{116000,	8,	43,	25},
	{117000,	8,	26,	15},
	{117500,	8,	40,	23},
	{118000,	6,	38,	29},
	{119000,	8,	30,	17},
	{119500,	8,	46,	26},
	{119651,	8,	39,	22},
	{120000,	8,	32,	18},
	{121000,	6,	39,	29},
	{121250,	6,	31,	23},
	{121750,	6,	23,	17},
	{122000,	6,	42,	31},
	{122614,	6,	30,	22},
	{123000,	6,	41,	30},
	{123379,	6,	37,	27},
	{124000,	6,	51,	37},
	{125000,	6,	25,	18},
	{125250,	4,	13,	14},
	{125750,	4,	27,	29},
	{126000,	6,	21,	15},
	{127000,	6,	24,	17},
	{127250,	6,	41,	29},
	{128000,	6,	27,	19},
	{129000,	6,	43,	30},
	{129859,	4,	25,	26},
	{130000,	6,	26,	18},
	{130250,	6,	42,	29},
	{131000,	6,	32,	22},
	{131500,	6,	38,	26},
	{131850,	6,	41,	28},
	{132000,	6,	22,	15},
	{132750,	6,	28,	19},
	{133000,	6,	34,	23},
	{133330,	6,	37,	25},
	{134000,	6,	61,	41},
	{135000,	6,	21,	14},
	{135250,	6,	167,	111},
	{136000,	6,	62,	41},
	{137000,	6,	35,	23},
	{138000,	6,	23,	15},
	{138500,	6,	40,	26},
	{138750,	6,	37,	24},
	{139000,	6,	34,	22},
	{139050,	6,	34,	22},
	{139054,	6,	34,	22},
	{140000,	6,	28,	18},
	{141000,	6,	36,	23},
	{141500,	6,	22,	14},
	{142000,	6,	30,	19},
	{143000,	6,	27,	17},
	{143472,	4,	17,	16},
	{144000,	6,	24,	15},
	{145000,	6,	29,	18},
	{146000,	6,	47,	29},
	{146250,	6,	26,	16},
	{147000,	6,	49,	30},
	{147891,	6,	23,	14},
	{148000,	6,	23,	14},
	{148250,	6,	28,	17},
	{148352,	4,	100,	91},
	{148500,	6,	33,	20},
	{149000,	6,	48,	29},
	{150000,	6,	25,	15},
	{151000,	4,	19,	17},
	{152000,	6,	27,	16},
	{152280,	6,	44,	26},
	{153000,	6,	34,	20},
	{154000,	6,	53,	31},
	{155000,	6,	31,	18},
	{155250,	6,	50,	29},
	{155750,	6,	45,	26},
	{156000,	6,	26,	15},
	{157000,	6,	61,	35},
	{157500,	6,	28,	16},
	{158000,	6,	65,	37},
	{158250,	6,	44,	25},
	{159000,	6,	53,	30},
	{159500,	6,	39,	22},
	{160000,	6,	32,	18},
	{161000,	4,	31,	26},
	{162000,	4,	18,	15},
	{162162,	4,	131,	109},
	{162500,	4,	53,	44},
	{163000,	4,	29,	24},
	{164000,	4,	17,	14},
	{165000,	4,	22,	18},
	{166000,	4,	32,	26},
	{167000,	4,	26,	21},
	{168000,	4,	46,	37},
	{169000,	4,	104,	83},
	{169128,	4,	64,	51},
	{169500,	4,	39,	31},
	{170000,	4,	34,	27},
	{171000,	4,	19,	15},
	{172000,	4,	51,	40},
	{172750,	4,	32,	25},
	{172800,	4,	32,	25},
	{173000,	4,	41,	32},
	{174000,	4,	49,	38},
	{174787,	4,	22,	17},
	{175000,	4,	35,	27},
	{176000,	4,	30,	23},
	{177000,	4,	38,	29},
	{178000,	4,	29,	22},
	{178500,	4,	37,	28},
	{179000,	4,	53,	40},
	{179500,	4,	73,	55},
	{180000,	4,	20,	15},
	{181000,	4,	55,	41},
	{182000,	4,	31,	23},
	{183000,	4,	42,	31},
	{184000,	4,	30,	22},
	{184750,	4,	26,	19},
	{185000,	4,	37,	27},
	{186000,	4,	51,	37},
	{187000,	4,	36,	26},
	{188000,	4,	32,	23},
	{189000,	4,	21,	15},
	{190000,	4,	38,	27},
	{190960,	4,	41,	29},
	{191000,	4,	41,	29},
	{192000,	4,	27,	19},
	{192250,	4,	37,	26},
	{193000,	4,	20,	14},
	{193250,	4,	53,	37},
	{194000,	4,	23,	16},
	{194208,	4,	23,	16},
	{195000,	4,	26,	18},
	{196000,	4,	45,	31},
	{197000,	4,	35,	24},
	{197750,	4,	41,	28},
	{198000,	4,	22,	15},
	{198500,	4,	25,	17},
	{199000,	4,	28,	19},
	{200000,	4,	37,	25},
	{201000,	4,	61,	41},
	{202000,	4,	112,	75},
	{202500,	4,	21,	14},
	{203000,	4,	146,	97},
	{204000,	4,	62,	41},
	{204750,	4,	44,	29},
	{205000,	4,	38,	25},
	{206000,	4,	29,	19},
	{207000,	4,	23,	15},
	{207500,	4,	40,	26},
	{208000,	4,	37,	24},
	{208900,	4,	48,	31},
	{209000,	4,	48,	31},
	{209250,	4,	31,	20},
	{210000,	4,	28,	18},
	{211000,	4,	25,	16},
	{212000,	4,	22,	14},
	{213000,	4,	30,	19},
	{213750,	4,	38,	24},
	{214000,	4,	46,	29},
	{214750,	4,	35,	22},
	{215000,	4,	43,	27},
	{216000,	4,	24,	15},
	{217000,	4,	37,	23},
	{218000,	4,	42,	26},
	{218250,	4,	42,	26},
	{218750,	4,	34,	21},
	{219000,	4,	47,	29},
	{220000,	4,	44,	27},
	{220640,	4,	49,	30},
	{220750,	4,	36,	22},
	{221000,	4,	36,	22},
	{222000,	4,	23,	14},
	{222525,	4,	28,	17},
	{222750,	4,	33,	20},
	{227000,	4,	37,	22},
	{230250,	4,	29,	17},
	{233500,	4,	38,	22},
	{235000,	4,	40,	23},
	{238000,	4,	30,	17},
	{241500,	2,	17,	19},
	{245250,	2,	20,	22},
	{247750,	2,	22,	24},
	{253250,	2,	15,	16},
	{256250,	2,	18,	19},
	{262500,	2,	31,	32},
	{267250,	2,	66,	67},
	{268500,	2,	94,	95},
	{270000,	2,	14,	14},
	{272500,	2,	77,	76},
	{273750,	2,	57,	56},
	{280750,	2,	24,	23},
	{281250,	2,	23,	22},
	{286000,	2,	17,	16},
	{291750,	2,	26,	24},
	{296703,	2,	56,	51},
	{297000,	2,	22,	20},
	{298000,	2,	21,	19},
};
663
/*
 * Encoder mode_set hook for Haswell DDI ports.
 *
 * For DP/eDP: precompute the DDI_BUF_CTL value in intel_dp->DP with the
 * port width matching the negotiated lane count, write the ELD when the
 * sink reports audio, and set up the DP link configuration.
 * For HDMI: write the ELD (if the sink has audio) and program infoframes;
 * full HDMI audio support is deferred (see comment below).
 */
static void intel_ddi_mode_set(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	int port = intel_ddi_get_encoder_port(intel_encoder);
	int pipe = intel_crtc->pipe;
	int type = intel_encoder->type;

	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
		      port_name(port), pipe_name(pipe));

	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* Start from enable + lowest swing/emphasis; the lane-width
		 * bits are ORed in below based on the trained lane count. */
		intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
		switch (intel_dp->lane_count) {
		case 1:
			intel_dp->DP |= DDI_PORT_WIDTH_X1;
			break;
		case 2:
			intel_dp->DP |= DDI_PORT_WIDTH_X2;
			break;
		case 4:
			intel_dp->DP |= DDI_PORT_WIDTH_X4;
			break;
		default:
			/* Fall back to the widest setting, but warn. */
			intel_dp->DP |= DDI_PORT_WIDTH_X4;
			WARN(1, "Unexpected DP lane count %d\n",
			     intel_dp->lane_count);
			break;
		}

		if (intel_dp->has_audio) {
			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
					 pipe_name(intel_crtc->pipe));

			/* write eld */
			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
			intel_write_eld(encoder, adjusted_mode);
		}

		intel_dp_init_link_config(intel_dp);

	} else if (type == INTEL_OUTPUT_HDMI) {
		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

		if (intel_hdmi->has_audio) {
			/* Proper support for digital audio needs a new logic
			 * and a new set of registers, so we leave it for future
			 * patch bombing.
			 */
			DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
					 pipe_name(intel_crtc->pipe));

			/* write eld */
			DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
			intel_write_eld(encoder, adjusted_mode);
		}

		intel_hdmi->set_infoframes(encoder, adjusted_mode);
	}
}
729
730static struct intel_encoder *
731intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
732{
733 struct drm_device *dev = crtc->dev;
734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
735 struct intel_encoder *intel_encoder, *ret = NULL;
736 int num_encoders = 0;
737
738 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
739 ret = intel_encoder;
740 num_encoders++;
741 }
742
743 if (num_encoders != 1)
744 WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
745 intel_crtc->pipe);
746
747 BUG_ON(ret == NULL);
748 return ret;
749}
750
/*
 * Drop this crtc's reference on the DDI PLL it was using and turn the PLL
 * off when the last user goes away.  Afterwards the crtc is marked as not
 * owning any PLL (PORT_CLK_SEL_NONE).  LCPLL-based selections take no
 * refcount, so only SPLL/WRPLL1/WRPLL2 are handled here.
 */
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t val;

	switch (intel_crtc->ddi_pll_sel) {
	case PORT_CLK_SEL_SPLL:
		plls->spll_refcount--;
		if (plls->spll_refcount == 0) {
			DRM_DEBUG_KMS("Disabling SPLL\n");
			val = I915_READ(SPLL_CTL);
			/* The PLL should still be on if we were counted as a user. */
			WARN_ON(!(val & SPLL_PLL_ENABLE));
			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
			POSTING_READ(SPLL_CTL);
		}
		break;
	case PORT_CLK_SEL_WRPLL1:
		plls->wrpll1_refcount--;
		if (plls->wrpll1_refcount == 0) {
			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
			val = I915_READ(WRPLL_CTL1);
			WARN_ON(!(val & WRPLL_PLL_ENABLE));
			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
			POSTING_READ(WRPLL_CTL1);
		}
		break;
	case PORT_CLK_SEL_WRPLL2:
		plls->wrpll2_refcount--;
		if (plls->wrpll2_refcount == 0) {
			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
			val = I915_READ(WRPLL_CTL2);
			WARN_ON(!(val & WRPLL_PLL_ENABLE));
			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
			POSTING_READ(WRPLL_CTL2);
		}
		break;
	}

	/* Refcounts going negative means put was called without a matching get. */
	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");

	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
797
798static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
799{
800 u32 i;
801
802 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
803 if (clock <= wrpll_tmds_clock_table[i].clock)
804 break;
805
806 if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
807 i--;
808
809 *p = wrpll_tmds_clock_table[i].p;
810 *n2 = wrpll_tmds_clock_table[i].n2;
811 *r2 = wrpll_tmds_clock_table[i].r2;
812
813 if (wrpll_tmds_clock_table[i].clock != clock)
814 DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
815 wrpll_tmds_clock_table[i].clock, clock);
816
817 DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
818 clock, *p, *n2, *r2);
819}
820
821bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
822{
823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
824 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
825 struct drm_encoder *encoder = &intel_encoder->base;
826 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
827 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
828 int type = intel_encoder->type;
829 enum pipe pipe = intel_crtc->pipe;
830 uint32_t reg, val;
831
832 /* TODO: reuse PLLs when possible (compare values) */
833
834 intel_ddi_put_crtc_pll(crtc);
835
836 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
837 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
838
839 switch (intel_dp->link_bw) {
840 case DP_LINK_BW_1_62:
841 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
842 break;
843 case DP_LINK_BW_2_7:
844 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
845 break;
846 case DP_LINK_BW_5_4:
847 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
848 break;
849 default:
850 DRM_ERROR("Link bandwidth %d unsupported\n",
851 intel_dp->link_bw);
852 return false;
853 }
854
855 /* We don't need to turn any PLL on because we'll use LCPLL. */
856 return true;
857
858 } else if (type == INTEL_OUTPUT_HDMI) {
859 int p, n2, r2;
860
861 if (plls->wrpll1_refcount == 0) {
862 DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
863 pipe_name(pipe));
864 plls->wrpll1_refcount++;
865 reg = WRPLL_CTL1;
866 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
867 } else if (plls->wrpll2_refcount == 0) {
868 DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
869 pipe_name(pipe));
870 plls->wrpll2_refcount++;
871 reg = WRPLL_CTL2;
872 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
873 } else {
874 DRM_ERROR("No WRPLLs available!\n");
875 return false;
876 }
877
878 WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
879 "WRPLL already enabled\n");
880
881 intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
882
883 val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
884 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
885 WRPLL_DIVIDER_POST(p);
886
887 } else if (type == INTEL_OUTPUT_ANALOG) {
888 if (plls->spll_refcount == 0) {
889 DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
890 pipe_name(pipe));
891 plls->spll_refcount++;
892 reg = SPLL_CTL;
893 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
894 }
895
896 WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
897 "SPLL already enabled\n");
898
899 val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
900
901 } else {
902 WARN(1, "Invalid DDI encoder type %d\n", type);
903 return false;
904 }
905
906 I915_WRITE(reg, val);
907 udelay(20);
908
909 return true;
910}
911
912void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
913{
914 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
916 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
917 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
918 int type = intel_encoder->type;
919 uint32_t temp;
920
921 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
922
923 temp = TRANS_MSA_SYNC_CLK;
924 switch (intel_crtc->bpp) {
925 case 18:
926 temp |= TRANS_MSA_6_BPC;
927 break;
928 case 24:
929 temp |= TRANS_MSA_8_BPC;
930 break;
931 case 30:
932 temp |= TRANS_MSA_10_BPC;
933 break;
934 case 36:
935 temp |= TRANS_MSA_12_BPC;
936 break;
937 default:
938 temp |= TRANS_MSA_8_BPC;
939 WARN(1, "%d bpp unsupported by DDI function\n",
940 intel_crtc->bpp);
941 }
942 I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
943 }
944}
945
/*
 * Program TRANS_DDI_FUNC_CTL for the crtc's transcoder: enable bit, port
 * select, BPC, sync polarities, the EDP transcoder's pipe input mapping,
 * and the mode-select/lane-width bits derived from the encoder type.
 */
void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;
	uint32_t temp;

	/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
	temp = TRANS_DDI_FUNC_ENABLE;
	temp |= TRANS_DDI_SELECT_PORT(port);

	/* BPC field from crtc bpp; unsupported values leave it zero (WARN). */
	switch (intel_crtc->bpp) {
	case 18:
		temp |= TRANS_DDI_BPC_6;
		break;
	case 24:
		temp |= TRANS_DDI_BPC_8;
		break;
	case 30:
		temp |= TRANS_DDI_BPC_10;
		break;
	case 36:
		temp |= TRANS_DDI_BPC_12;
		break;
	default:
		WARN(1, "%d bpp unsupported by transcoder DDI function\n",
		     intel_crtc->bpp);
	}

	/* Sync polarities come from the adjusted mode flags. */
	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
		temp |= TRANS_DDI_PVSYNC;
	if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
		temp |= TRANS_DDI_PHSYNC;

	/* The EDP transcoder can be fed by any pipe; select which one. */
	if (cpu_transcoder == TRANSCODER_EDP) {
		switch (pipe) {
		case PIPE_A:
			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
			break;
		case PIPE_B:
			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
			break;
		case PIPE_C:
			temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
			break;
		default:
			BUG();
			break;
		}
	}

	if (type == INTEL_OUTPUT_HDMI) {
		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

		/* DVI sinks get the same path but without HDMI extras. */
		if (intel_hdmi->has_hdmi_sink)
			temp |= TRANS_DDI_MODE_SELECT_HDMI;
		else
			temp |= TRANS_DDI_MODE_SELECT_DVI;

	} else if (type == INTEL_OUTPUT_ANALOG) {
		/* Analog goes over FDI (port E); encode the FDI lane count. */
		temp |= TRANS_DDI_MODE_SELECT_FDI;
		temp |= (intel_crtc->fdi_lanes - 1) << 1;

	} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
		   type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		temp |= TRANS_DDI_MODE_SELECT_DP_SST;

		switch (intel_dp->lane_count) {
		case 1:
			temp |= TRANS_DDI_PORT_WIDTH_X1;
			break;
		case 2:
			temp |= TRANS_DDI_PORT_WIDTH_X2;
			break;
		case 4:
			temp |= TRANS_DDI_PORT_WIDTH_X4;
			break;
		default:
			/* Fall back to x4, but warn. */
			temp |= TRANS_DDI_PORT_WIDTH_X4;
			WARN(1, "Unsupported lane count %d\n",
			     intel_dp->lane_count);
		}

	} else {
		WARN(1, "Invalid encoder type %d for pipe %d\n",
		     intel_encoder->type, pipe);
	}

	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
1043
1044void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1045 enum transcoder cpu_transcoder)
1046{
1047 uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1048 uint32_t val = I915_READ(reg);
1049
1050 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
1051 val |= TRANS_DDI_PORT_NONE;
1052 I915_WRITE(reg, val);
1053}
1054
/*
 * Return whether @intel_connector is actively driven by hardware: the
 * encoder must be on, and the transcoder's DDI mode-select must match the
 * connector type (HDMI/DVI <-> HDMIA, DP SST/MST <-> DisplayPort or eDP,
 * FDI <-> VGA).
 */
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	int type = intel_connector->base.connector_type;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	enum pipe pipe = 0;
	enum transcoder cpu_transcoder;
	uint32_t tmp;

	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
		return false;

	/* Port A is always driven through the dedicated EDP transcoder. */
	if (port == PORT_A)
		cpu_transcoder = TRANSCODER_EDP;
	else
		cpu_transcoder = pipe;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));

	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
	case TRANS_DDI_MODE_SELECT_HDMI:
	case TRANS_DDI_MODE_SELECT_DVI:
		return (type == DRM_MODE_CONNECTOR_HDMIA);

	case TRANS_DDI_MODE_SELECT_DP_SST:
		if (type == DRM_MODE_CONNECTOR_eDP)
			return true;
		/* fallthrough - SST also serves external DisplayPort */
	case TRANS_DDI_MODE_SELECT_DP_MST:
		return (type == DRM_MODE_CONNECTOR_DisplayPort);

	case TRANS_DDI_MODE_SELECT_FDI:
		return (type == DRM_MODE_CONNECTOR_VGA);

	default:
		return false;
	}
}
1094
/*
 * Read back whether the encoder's DDI buffer is enabled and, if so, which
 * pipe it is feeding (*pipe).  Port A is resolved through the EDP
 * transcoder's input select; other ports are found by scanning the
 * per-pipe transcoders for a matching port select.
 */
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
			    enum pipe *pipe)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_ddi_get_encoder_port(encoder);
	u32 tmp;
	int i;

	tmp = I915_READ(DDI_BUF_CTL(port));

	if (!(tmp & DDI_BUF_CTL_ENABLE))
		return false;

	if (port == PORT_A) {
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		case TRANS_DDI_EDP_INPUT_A_ON:
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			*pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			*pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			*pipe = PIPE_C;
			break;
		}

		return true;
	} else {
		for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
			tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));

			if ((tmp & TRANS_DDI_PORT_MASK)
			    == TRANS_DDI_SELECT_PORT(port)) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);

	/* NOTE(review): returns true even though no pipe matched and *pipe
	 * was left untouched — looks questionable; confirm against callers. */
	return true;
}
1142
1143static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
1144 enum pipe pipe)
1145{
1146 uint32_t temp, ret;
1147 enum port port;
1148 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1149 pipe);
1150 int i;
1151
1152 if (cpu_transcoder == TRANSCODER_EDP) {
1153 port = PORT_A;
1154 } else {
1155 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1156 temp &= TRANS_DDI_PORT_MASK;
1157
1158 for (i = PORT_B; i <= PORT_E; i++)
1159 if (temp == TRANS_DDI_SELECT_PORT(i))
1160 port = i;
1161 }
1162
1163 ret = I915_READ(PORT_CLK_SEL(port));
1164
1165 DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
1166 pipe_name(pipe), port_name(port), ret);
1167
1168 return ret;
1169}
1170
1171void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
1172{
1173 struct drm_i915_private *dev_priv = dev->dev_private;
1174 enum pipe pipe;
1175 struct intel_crtc *intel_crtc;
1176
1177 for_each_pipe(pipe) {
1178 intel_crtc =
1179 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1180
1181 if (!intel_crtc->active)
1182 continue;
1183
1184 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1185 pipe);
1186
1187 switch (intel_crtc->ddi_pll_sel) {
1188 case PORT_CLK_SEL_SPLL:
1189 dev_priv->ddi_plls.spll_refcount++;
1190 break;
1191 case PORT_CLK_SEL_WRPLL1:
1192 dev_priv->ddi_plls.wrpll1_refcount++;
1193 break;
1194 case PORT_CLK_SEL_WRPLL2:
1195 dev_priv->ddi_plls.wrpll2_refcount++;
1196 break;
1197 }
1198 }
1199}
1200
1201void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1202{
1203 struct drm_crtc *crtc = &intel_crtc->base;
1204 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1205 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1206 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1207 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1208
1209 if (cpu_transcoder != TRANSCODER_EDP)
1210 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1211 TRANS_CLK_SEL_PORT(port));
1212}
1213
1214void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1215{
1216 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1217 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1218
1219 if (cpu_transcoder != TRANSCODER_EDP)
1220 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1221 TRANS_CLK_SEL_DISABLED);
1222}
1223
/*
 * pre_enable hook: power up the eDP panel (if applicable), route the
 * crtc's selected PLL to the port, then run DP link training.  The order
 * matters: panel power before clock, clock before training.
 */
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;

	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		/* VDD on just long enough to run the panel power-on sequence. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ironlake_edp_panel_on(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, true);
	}

	/* The crtc must already own a PLL (set in intel_ddi_pll_mode_set). */
	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
	I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);

	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
1251
/*
 * post_disable hook: turn off the DDI buffer, reset DP_TP_CTL back to
 * training pattern 1, wait for the buffer to idle, power off the eDP
 * panel (if applicable), and finally disconnect the port clock.
 */
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	enum port port = intel_ddi_get_encoder_port(intel_encoder);
	int type = intel_encoder->type;
	uint32_t val;
	bool wait = false;

	val = I915_READ(DDI_BUF_CTL(port));
	if (val & DDI_BUF_CTL_ENABLE) {
		val &= ~DDI_BUF_CTL_ENABLE;
		I915_WRITE(DDI_BUF_CTL(port), val);
		/* Only need to wait for idle if we actually disabled it. */
		wait = true;
	}

	val = I915_READ(DP_TP_CTL(port));
	val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
	I915_WRITE(DP_TP_CTL(port), val);

	if (wait)
		intel_wait_ddi_buf_idle(dev_priv, port);

	if (type == INTEL_OUTPUT_EDP) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		ironlake_edp_panel_vdd_on(intel_dp);
		ironlake_edp_panel_off(intel_dp);
	}

	/* Disconnect the port from its clock source last. */
	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
1284
1285static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1286{
1287 struct drm_encoder *encoder = &intel_encoder->base;
1288 struct drm_device *dev = encoder->dev;
1289 struct drm_i915_private *dev_priv = dev->dev_private;
1290 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1291 int type = intel_encoder->type;
1292
1293 if (type == INTEL_OUTPUT_HDMI) {
1294 /* In HDMI/DVI mode, the port width, and swing/emphasis values
1295 * are ignored so nothing special needs to be done besides
1296 * enabling the port.
1297 */
1298 I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
1299 } else if (type == INTEL_OUTPUT_EDP) {
1300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1301
1302 ironlake_edp_backlight_on(intel_dp);
1303 }
1304}
1305
1306static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1307{
1308 struct drm_encoder *encoder = &intel_encoder->base;
1309 int type = intel_encoder->type;
1310
1311 if (type == INTEL_OUTPUT_EDP) {
1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313
1314 ironlake_edp_backlight_off(intel_dp);
1315 }
1316}
1317
1318int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1319{
1320 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1321 return 450;
1322 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
1323 LCPLL_CLK_FREQ_450)
1324 return 450;
1325 else if (IS_ULT(dev_priv->dev))
1326 return 338;
1327 else
1328 return 540;
1329}
1330
/*
 * Sanity-check the LCPLL state left by the BIOS at driver init: report
 * the CD clock and complain if the CD source is FCLK or the LCPLL is
 * disabled.  This never attempts to enable the PLL itself.
 */
void intel_ddi_pll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val = I915_READ(LCPLL_CTL);

	/* The LCPLL register should be turned on by the BIOS. For now let's
	 * just check its state and print errors in case something is wrong.
	 * Don't even try to turn it on.
	 */

	DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
		      intel_ddi_get_cdclk_freq(dev_priv));

	if (val & LCPLL_CD_SOURCE_FCLK)
		DRM_ERROR("CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		DRM_ERROR("LCPLL is disabled\n");
}
1350
1351void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1352{
1353 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
1354 struct intel_dp *intel_dp = &intel_dig_port->dp;
1355 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1356 enum port port = intel_dig_port->port;
1357 bool wait;
1358 uint32_t val;
1359
1360 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
1361 val = I915_READ(DDI_BUF_CTL(port));
1362 if (val & DDI_BUF_CTL_ENABLE) {
1363 val &= ~DDI_BUF_CTL_ENABLE;
1364 I915_WRITE(DDI_BUF_CTL(port), val);
1365 wait = true;
1366 }
1367
1368 val = I915_READ(DP_TP_CTL(port));
1369 val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
1370 val |= DP_TP_CTL_LINK_TRAIN_PAT1;
1371 I915_WRITE(DP_TP_CTL(port), val);
1372 POSTING_READ(DP_TP_CTL(port));
1373
1374 if (wait)
1375 intel_wait_ddi_buf_idle(dev_priv, port);
1376 }
1377
1378 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1379 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1380 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
1381 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1382 I915_WRITE(DP_TP_CTL(port), val);
1383 POSTING_READ(DP_TP_CTL(port));
1384
1385 intel_dp->DP |= DDI_BUF_CTL_ENABLE;
1386 I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
1387 POSTING_READ(DDI_BUF_CTL(port));
1388
1389 udelay(600);
1390}
1391
/*
 * Disable the FDI link used by the analog/CRT path: tear down the port
 * via the common post_disable, then step the FDI receiver down in order
 * (RX disable -> lane power-down -> PCDCLK off -> RX PLL off).  The
 * sequence of separate read-modify-writes is deliberate.
 */
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
	uint32_t val;

	intel_ddi_post_disable(intel_encoder);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_RX_ENABLE;
	I915_WRITE(_FDI_RXA_CTL, val);

	/* Power down the receiver lanes. */
	val = I915_READ(_FDI_RXA_MISC);
	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
	I915_WRITE(_FDI_RXA_MISC, val);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_PCDCLK;
	I915_WRITE(_FDI_RXA_CTL, val);

	val = I915_READ(_FDI_RXA_CTL);
	val &= ~FDI_RX_PLL_ENABLE;
	I915_WRITE(_FDI_RXA_CTL, val);
}
1417
1418static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1419{
1420 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
1421 int type = intel_encoder->type;
1422
1423 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
1424 intel_dp_check_link_status(intel_dp);
1425}
1426
/* Encoder destroy callback for DDI (covers both DP and HDMI paths). */
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
	/* HDMI has nothing special to destroy, so we can go with this. */
	intel_dp_encoder_destroy(encoder);
}
1432
1433static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
1434 const struct drm_display_mode *mode,
1435 struct drm_display_mode *adjusted_mode)
1436{
1437 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
1438 int type = intel_encoder->type;
1439
1440 WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
1441
1442 if (type == INTEL_OUTPUT_HDMI)
1443 return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
1444 else
1445 return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
1446}
1447
/* drm_encoder core callbacks for DDI encoders. */
static const struct drm_encoder_funcs intel_ddi_funcs = {
	.destroy = intel_ddi_destroy,
};
1451
/* drm_encoder helper callbacks; actual enable/disable happens via the
 * intel_encoder hooks, so the helper disable is a no-op. */
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
	.mode_fixup = intel_ddi_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};
1457
1458void intel_ddi_init(struct drm_device *dev, enum port port)
1459{
1460 struct intel_digital_port *intel_dig_port;
1461 struct intel_encoder *intel_encoder;
1462 struct drm_encoder *encoder;
1463 struct intel_connector *hdmi_connector = NULL;
1464 struct intel_connector *dp_connector = NULL;
1465
1466 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1467 if (!intel_dig_port)
1468 return;
1469
1470 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1471 if (!dp_connector) {
1472 kfree(intel_dig_port);
1473 return;
1474 }
1475
1476 if (port != PORT_A) {
1477 hdmi_connector = kzalloc(sizeof(struct intel_connector),
1478 GFP_KERNEL);
1479 if (!hdmi_connector) {
1480 kfree(dp_connector);
1481 kfree(intel_dig_port);
1482 return;
1483 }
1484 }
1485
1486 intel_encoder = &intel_dig_port->base;
1487 encoder = &intel_encoder->base;
1488
1489 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
1490 DRM_MODE_ENCODER_TMDS);
1491 drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
1492
1493 intel_encoder->enable = intel_enable_ddi;
1494 intel_encoder->pre_enable = intel_ddi_pre_enable;
1495 intel_encoder->disable = intel_disable_ddi;
1496 intel_encoder->post_disable = intel_ddi_post_disable;
1497 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1498
1499 intel_dig_port->port = port;
1500 if (hdmi_connector)
1501 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
1502 else
1503 intel_dig_port->hdmi.sdvox_reg = 0;
1504 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
1505
1506 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
1507 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1508 intel_encoder->cloneable = false;
1509 intel_encoder->hot_plug = intel_ddi_hot_plug;
1510
1511 if (hdmi_connector)
1512 intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
1513 intel_dp_init_connector(intel_dig_port, dp_connector);
1514}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da1ad9c80bb..07e7cf38068 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,92 +24,75 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/dmi.h> 27#include <linux/cpufreq.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/input.h> 29#include <linux/input.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/vgaarb.h> 33#include <linux/vgaarb.h>
34#include <drm/drm_edid.h> 34#include "drmP.h"
35#include <drm/drmP.h>
36#include "intel_drv.h" 35#include "intel_drv.h"
37#include <drm/i915_drm.h> 36#include "i915_drm.h"
38#include "i915_drv.h" 37#include "i915_drv.h"
39#include "i915_trace.h" 38#include "i915_trace.h"
40#include <drm/drm_dp_helper.h> 39#include "drm_dp_helper.h"
41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h>
43 40
44bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 41#include "drm_crtc_helper.h"
42
43#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
44
45bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
46static void intel_update_watermarks(struct drm_device *dev);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 47static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 49
48typedef struct { 50typedef struct {
49 /* given values */ 51 /* given values */
50 int n; 52 int n;
51 int m1, m2; 53 int m1, m2;
52 int p1, p2; 54 int p1, p2;
53 /* derived values */ 55 /* derived values */
54 int dot; 56 int dot;
55 int vco; 57 int vco;
56 int m; 58 int m;
57 int p; 59 int p;
58} intel_clock_t; 60} intel_clock_t;
59 61
60typedef struct { 62typedef struct {
61 int min, max; 63 int min, max;
62} intel_range_t; 64} intel_range_t;
63 65
64typedef struct { 66typedef struct {
65 int dot_limit; 67 int dot_limit;
66 int p2_slow, p2_fast; 68 int p2_slow, p2_fast;
67} intel_p2_t; 69} intel_p2_t;
68 70
69#define INTEL_P2_NUM 2 71#define INTEL_P2_NUM 2
70typedef struct intel_limit intel_limit_t; 72typedef struct intel_limit intel_limit_t;
71struct intel_limit { 73struct intel_limit {
72 intel_range_t dot, vco, n, m, m1, m2, p, p1; 74 intel_range_t dot, vco, n, m, m1, m2, p, p1;
73 intel_p2_t p2; 75 intel_p2_t p2;
74 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 76 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
75 int, int, intel_clock_t *, intel_clock_t *); 77 int, int, intel_clock_t *);
76}; 78};
77 79
78/* FDI */ 80/* FDI */
79#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 81#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
80 82
81int
82intel_pch_rawclk(struct drm_device *dev)
83{
84 struct drm_i915_private *dev_priv = dev->dev_private;
85
86 WARN_ON(!HAS_PCH_SPLIT(dev));
87
88 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
89}
90
91static bool 83static bool
92intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 84intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
93 int target, int refclk, intel_clock_t *match_clock, 85 int target, int refclk, intel_clock_t *best_clock);
94 intel_clock_t *best_clock);
95static bool 86static bool
96intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 87intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
97 int target, int refclk, intel_clock_t *match_clock, 88 int target, int refclk, intel_clock_t *best_clock);
98 intel_clock_t *best_clock);
99 89
100static bool 90static bool
101intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 91intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
102 int target, int refclk, intel_clock_t *match_clock, 92 int target, int refclk, intel_clock_t *best_clock);
103 intel_clock_t *best_clock);
104static bool 93static bool
105intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, 94intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
106 int target, int refclk, intel_clock_t *match_clock, 95 int target, int refclk, intel_clock_t *best_clock);
107 intel_clock_t *best_clock);
108
109static bool
110intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
111 int target, int refclk, intel_clock_t *match_clock,
112 intel_clock_t *best_clock);
113 96
114static inline u32 /* units of 100MHz */ 97static inline u32 /* units of 100MHz */
115intel_fdi_link_freq(struct drm_device *dev) 98intel_fdi_link_freq(struct drm_device *dev)
@@ -122,56 +105,56 @@ intel_fdi_link_freq(struct drm_device *dev)
122} 105}
123 106
124static const intel_limit_t intel_limits_i8xx_dvo = { 107static const intel_limit_t intel_limits_i8xx_dvo = {
125 .dot = { .min = 25000, .max = 350000 }, 108 .dot = { .min = 25000, .max = 350000 },
126 .vco = { .min = 930000, .max = 1400000 }, 109 .vco = { .min = 930000, .max = 1400000 },
127 .n = { .min = 3, .max = 16 }, 110 .n = { .min = 3, .max = 16 },
128 .m = { .min = 96, .max = 140 }, 111 .m = { .min = 96, .max = 140 },
129 .m1 = { .min = 18, .max = 26 }, 112 .m1 = { .min = 18, .max = 26 },
130 .m2 = { .min = 6, .max = 16 }, 113 .m2 = { .min = 6, .max = 16 },
131 .p = { .min = 4, .max = 128 }, 114 .p = { .min = 4, .max = 128 },
132 .p1 = { .min = 2, .max = 33 }, 115 .p1 = { .min = 2, .max = 33 },
133 .p2 = { .dot_limit = 165000, 116 .p2 = { .dot_limit = 165000,
134 .p2_slow = 4, .p2_fast = 2 }, 117 .p2_slow = 4, .p2_fast = 2 },
135 .find_pll = intel_find_best_PLL, 118 .find_pll = intel_find_best_PLL,
136}; 119};
137 120
138static const intel_limit_t intel_limits_i8xx_lvds = { 121static const intel_limit_t intel_limits_i8xx_lvds = {
139 .dot = { .min = 25000, .max = 350000 }, 122 .dot = { .min = 25000, .max = 350000 },
140 .vco = { .min = 930000, .max = 1400000 }, 123 .vco = { .min = 930000, .max = 1400000 },
141 .n = { .min = 3, .max = 16 }, 124 .n = { .min = 3, .max = 16 },
142 .m = { .min = 96, .max = 140 }, 125 .m = { .min = 96, .max = 140 },
143 .m1 = { .min = 18, .max = 26 }, 126 .m1 = { .min = 18, .max = 26 },
144 .m2 = { .min = 6, .max = 16 }, 127 .m2 = { .min = 6, .max = 16 },
145 .p = { .min = 4, .max = 128 }, 128 .p = { .min = 4, .max = 128 },
146 .p1 = { .min = 1, .max = 6 }, 129 .p1 = { .min = 1, .max = 6 },
147 .p2 = { .dot_limit = 165000, 130 .p2 = { .dot_limit = 165000,
148 .p2_slow = 14, .p2_fast = 7 }, 131 .p2_slow = 14, .p2_fast = 7 },
149 .find_pll = intel_find_best_PLL, 132 .find_pll = intel_find_best_PLL,
150}; 133};
151 134
152static const intel_limit_t intel_limits_i9xx_sdvo = { 135static const intel_limit_t intel_limits_i9xx_sdvo = {
153 .dot = { .min = 20000, .max = 400000 }, 136 .dot = { .min = 20000, .max = 400000 },
154 .vco = { .min = 1400000, .max = 2800000 }, 137 .vco = { .min = 1400000, .max = 2800000 },
155 .n = { .min = 1, .max = 6 }, 138 .n = { .min = 1, .max = 6 },
156 .m = { .min = 70, .max = 120 }, 139 .m = { .min = 70, .max = 120 },
157 .m1 = { .min = 10, .max = 22 }, 140 .m1 = { .min = 10, .max = 22 },
158 .m2 = { .min = 5, .max = 9 }, 141 .m2 = { .min = 5, .max = 9 },
159 .p = { .min = 5, .max = 80 }, 142 .p = { .min = 5, .max = 80 },
160 .p1 = { .min = 1, .max = 8 }, 143 .p1 = { .min = 1, .max = 8 },
161 .p2 = { .dot_limit = 200000, 144 .p2 = { .dot_limit = 200000,
162 .p2_slow = 10, .p2_fast = 5 }, 145 .p2_slow = 10, .p2_fast = 5 },
163 .find_pll = intel_find_best_PLL, 146 .find_pll = intel_find_best_PLL,
164}; 147};
165 148
166static const intel_limit_t intel_limits_i9xx_lvds = { 149static const intel_limit_t intel_limits_i9xx_lvds = {
167 .dot = { .min = 20000, .max = 400000 }, 150 .dot = { .min = 20000, .max = 400000 },
168 .vco = { .min = 1400000, .max = 2800000 }, 151 .vco = { .min = 1400000, .max = 2800000 },
169 .n = { .min = 1, .max = 6 }, 152 .n = { .min = 1, .max = 6 },
170 .m = { .min = 70, .max = 120 }, 153 .m = { .min = 70, .max = 120 },
171 .m1 = { .min = 10, .max = 22 }, 154 .m1 = { .min = 10, .max = 22 },
172 .m2 = { .min = 5, .max = 9 }, 155 .m2 = { .min = 5, .max = 9 },
173 .p = { .min = 7, .max = 98 }, 156 .p = { .min = 7, .max = 98 },
174 .p1 = { .min = 1, .max = 8 }, 157 .p1 = { .min = 1, .max = 8 },
175 .p2 = { .dot_limit = 112000, 158 .p2 = { .dot_limit = 112000,
176 .p2_slow = 14, .p2_fast = 7 }, 159 .p2_slow = 14, .p2_fast = 7 },
177 .find_pll = intel_find_best_PLL, 160 .find_pll = intel_find_best_PLL,
@@ -239,44 +222,44 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
239}; 222};
240 223
241static const intel_limit_t intel_limits_g4x_display_port = { 224static const intel_limit_t intel_limits_g4x_display_port = {
242 .dot = { .min = 161670, .max = 227000 }, 225 .dot = { .min = 161670, .max = 227000 },
243 .vco = { .min = 1750000, .max = 3500000}, 226 .vco = { .min = 1750000, .max = 3500000},
244 .n = { .min = 1, .max = 2 }, 227 .n = { .min = 1, .max = 2 },
245 .m = { .min = 97, .max = 108 }, 228 .m = { .min = 97, .max = 108 },
246 .m1 = { .min = 0x10, .max = 0x12 }, 229 .m1 = { .min = 0x10, .max = 0x12 },
247 .m2 = { .min = 0x05, .max = 0x06 }, 230 .m2 = { .min = 0x05, .max = 0x06 },
248 .p = { .min = 10, .max = 20 }, 231 .p = { .min = 10, .max = 20 },
249 .p1 = { .min = 1, .max = 2}, 232 .p1 = { .min = 1, .max = 2},
250 .p2 = { .dot_limit = 0, 233 .p2 = { .dot_limit = 0,
251 .p2_slow = 10, .p2_fast = 10 }, 234 .p2_slow = 10, .p2_fast = 10 },
252 .find_pll = intel_find_pll_g4x_dp, 235 .find_pll = intel_find_pll_g4x_dp,
253}; 236};
254 237
255static const intel_limit_t intel_limits_pineview_sdvo = { 238static const intel_limit_t intel_limits_pineview_sdvo = {
256 .dot = { .min = 20000, .max = 400000}, 239 .dot = { .min = 20000, .max = 400000},
257 .vco = { .min = 1700000, .max = 3500000 }, 240 .vco = { .min = 1700000, .max = 3500000 },
258 /* Pineview's Ncounter is a ring counter */ 241 /* Pineview's Ncounter is a ring counter */
259 .n = { .min = 3, .max = 6 }, 242 .n = { .min = 3, .max = 6 },
260 .m = { .min = 2, .max = 256 }, 243 .m = { .min = 2, .max = 256 },
261 /* Pineview only has one combined m divider, which we treat as m2. */ 244 /* Pineview only has one combined m divider, which we treat as m2. */
262 .m1 = { .min = 0, .max = 0 }, 245 .m1 = { .min = 0, .max = 0 },
263 .m2 = { .min = 0, .max = 254 }, 246 .m2 = { .min = 0, .max = 254 },
264 .p = { .min = 5, .max = 80 }, 247 .p = { .min = 5, .max = 80 },
265 .p1 = { .min = 1, .max = 8 }, 248 .p1 = { .min = 1, .max = 8 },
266 .p2 = { .dot_limit = 200000, 249 .p2 = { .dot_limit = 200000,
267 .p2_slow = 10, .p2_fast = 5 }, 250 .p2_slow = 10, .p2_fast = 5 },
268 .find_pll = intel_find_best_PLL, 251 .find_pll = intel_find_best_PLL,
269}; 252};
270 253
271static const intel_limit_t intel_limits_pineview_lvds = { 254static const intel_limit_t intel_limits_pineview_lvds = {
272 .dot = { .min = 20000, .max = 400000 }, 255 .dot = { .min = 20000, .max = 400000 },
273 .vco = { .min = 1700000, .max = 3500000 }, 256 .vco = { .min = 1700000, .max = 3500000 },
274 .n = { .min = 3, .max = 6 }, 257 .n = { .min = 3, .max = 6 },
275 .m = { .min = 2, .max = 256 }, 258 .m = { .min = 2, .max = 256 },
276 .m1 = { .min = 0, .max = 0 }, 259 .m1 = { .min = 0, .max = 0 },
277 .m2 = { .min = 0, .max = 254 }, 260 .m2 = { .min = 0, .max = 254 },
278 .p = { .min = 7, .max = 112 }, 261 .p = { .min = 7, .max = 112 },
279 .p1 = { .min = 1, .max = 8 }, 262 .p1 = { .min = 1, .max = 8 },
280 .p2 = { .dot_limit = 112000, 263 .p2 = { .dot_limit = 112000,
281 .p2_slow = 14, .p2_fast = 14 }, 264 .p2_slow = 14, .p2_fast = 14 },
282 .find_pll = intel_find_best_PLL, 265 .find_pll = intel_find_best_PLL,
@@ -338,7 +321,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
338 .m1 = { .min = 12, .max = 22 }, 321 .m1 = { .min = 12, .max = 22 },
339 .m2 = { .min = 5, .max = 9 }, 322 .m2 = { .min = 5, .max = 9 },
340 .p = { .min = 28, .max = 112 }, 323 .p = { .min = 28, .max = 112 },
341 .p1 = { .min = 2, .max = 8 }, 324 .p1 = { .min = 2,.max = 8 },
342 .p2 = { .dot_limit = 225000, 325 .p2 = { .dot_limit = 225000,
343 .p2_slow = 14, .p2_fast = 14 }, 326 .p2_slow = 14, .p2_fast = 14 },
344 .find_pll = intel_g4x_find_best_PLL, 327 .find_pll = intel_g4x_find_best_PLL,
@@ -352,172 +335,26 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
352 .m1 = { .min = 12, .max = 22 }, 335 .m1 = { .min = 12, .max = 22 },
353 .m2 = { .min = 5, .max = 9 }, 336 .m2 = { .min = 5, .max = 9 },
354 .p = { .min = 14, .max = 42 }, 337 .p = { .min = 14, .max = 42 },
355 .p1 = { .min = 2, .max = 6 }, 338 .p1 = { .min = 2,.max = 6 },
356 .p2 = { .dot_limit = 225000, 339 .p2 = { .dot_limit = 225000,
357 .p2_slow = 7, .p2_fast = 7 }, 340 .p2_slow = 7, .p2_fast = 7 },
358 .find_pll = intel_g4x_find_best_PLL, 341 .find_pll = intel_g4x_find_best_PLL,
359}; 342};
360 343
361static const intel_limit_t intel_limits_ironlake_display_port = { 344static const intel_limit_t intel_limits_ironlake_display_port = {
362 .dot = { .min = 25000, .max = 350000 }, 345 .dot = { .min = 25000, .max = 350000 },
363 .vco = { .min = 1760000, .max = 3510000}, 346 .vco = { .min = 1760000, .max = 3510000},
364 .n = { .min = 1, .max = 2 }, 347 .n = { .min = 1, .max = 2 },
365 .m = { .min = 81, .max = 90 }, 348 .m = { .min = 81, .max = 90 },
366 .m1 = { .min = 12, .max = 22 }, 349 .m1 = { .min = 12, .max = 22 },
367 .m2 = { .min = 5, .max = 9 }, 350 .m2 = { .min = 5, .max = 9 },
368 .p = { .min = 10, .max = 20 }, 351 .p = { .min = 10, .max = 20 },
369 .p1 = { .min = 1, .max = 2}, 352 .p1 = { .min = 1, .max = 2},
370 .p2 = { .dot_limit = 0, 353 .p2 = { .dot_limit = 0,
371 .p2_slow = 10, .p2_fast = 10 }, 354 .p2_slow = 10, .p2_fast = 10 },
372 .find_pll = intel_find_pll_ironlake_dp, 355 .find_pll = intel_find_pll_ironlake_dp,
373};
374
375static const intel_limit_t intel_limits_vlv_dac = {
376 .dot = { .min = 25000, .max = 270000 },
377 .vco = { .min = 4000000, .max = 6000000 },
378 .n = { .min = 1, .max = 7 },
379 .m = { .min = 22, .max = 450 }, /* guess */
380 .m1 = { .min = 2, .max = 3 },
381 .m2 = { .min = 11, .max = 156 },
382 .p = { .min = 10, .max = 30 },
383 .p1 = { .min = 2, .max = 3 },
384 .p2 = { .dot_limit = 270000,
385 .p2_slow = 2, .p2_fast = 20 },
386 .find_pll = intel_vlv_find_best_pll,
387};
388
389static const intel_limit_t intel_limits_vlv_hdmi = {
390 .dot = { .min = 20000, .max = 165000 },
391 .vco = { .min = 4000000, .max = 5994000},
392 .n = { .min = 1, .max = 7 },
393 .m = { .min = 60, .max = 300 }, /* guess */
394 .m1 = { .min = 2, .max = 3 },
395 .m2 = { .min = 11, .max = 156 },
396 .p = { .min = 10, .max = 30 },
397 .p1 = { .min = 2, .max = 3 },
398 .p2 = { .dot_limit = 270000,
399 .p2_slow = 2, .p2_fast = 20 },
400 .find_pll = intel_vlv_find_best_pll,
401}; 356};
402 357
403static const intel_limit_t intel_limits_vlv_dp = {
404 .dot = { .min = 25000, .max = 270000 },
405 .vco = { .min = 4000000, .max = 6000000 },
406 .n = { .min = 1, .max = 7 },
407 .m = { .min = 22, .max = 450 },
408 .m1 = { .min = 2, .max = 3 },
409 .m2 = { .min = 11, .max = 156 },
410 .p = { .min = 10, .max = 30 },
411 .p1 = { .min = 2, .max = 3 },
412 .p2 = { .dot_limit = 270000,
413 .p2_slow = 2, .p2_fast = 20 },
414 .find_pll = intel_vlv_find_best_pll,
415};
416
417u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
418{
419 unsigned long flags;
420 u32 val = 0;
421
422 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
423 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
424 DRM_ERROR("DPIO idle wait timed out\n");
425 goto out_unlock;
426 }
427
428 I915_WRITE(DPIO_REG, reg);
429 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
430 DPIO_BYTE);
431 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
432 DRM_ERROR("DPIO read wait timed out\n");
433 goto out_unlock;
434 }
435 val = I915_READ(DPIO_DATA);
436
437out_unlock:
438 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
439 return val;
440}
441
442static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
443 u32 val)
444{
445 unsigned long flags;
446
447 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
448 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
449 DRM_ERROR("DPIO idle wait timed out\n");
450 goto out_unlock;
451 }
452
453 I915_WRITE(DPIO_DATA, val);
454 I915_WRITE(DPIO_REG, reg);
455 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
456 DPIO_BYTE);
457 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
458 DRM_ERROR("DPIO write wait timed out\n");
459
460out_unlock:
461 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
462}
463
464static void vlv_init_dpio(struct drm_device *dev)
465{
466 struct drm_i915_private *dev_priv = dev->dev_private;
467
468 /* Reset the DPIO config */
469 I915_WRITE(DPIO_CTL, 0);
470 POSTING_READ(DPIO_CTL);
471 I915_WRITE(DPIO_CTL, 1);
472 POSTING_READ(DPIO_CTL);
473}
474
475static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
476{
477 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
478 return 1;
479}
480
481static const struct dmi_system_id intel_dual_link_lvds[] = {
482 {
483 .callback = intel_dual_link_lvds_callback,
484 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
485 .matches = {
486 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
487 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
488 },
489 },
490 { } /* terminating entry */
491};
492
493static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
494 unsigned int reg)
495{
496 unsigned int val;
497
498 /* use the module option value if specified */
499 if (i915_lvds_channel_mode > 0)
500 return i915_lvds_channel_mode == 2;
501
502 if (dmi_check_system(intel_dual_link_lvds))
503 return true;
504
505 if (dev_priv->lvds_val)
506 val = dev_priv->lvds_val;
507 else {
508 /* BIOS should set the proper LVDS register value at boot, but
509 * in reality, it doesn't set the value when the lid is closed;
510 * we need to check "the value to be set" in VBT when LVDS
511 * register is uninitialized.
512 */
513 val = I915_READ(reg);
514 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
515 val = dev_priv->bios_lvds_val;
516 dev_priv->lvds_val = val;
517 }
518 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
519}
520
521static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 358static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
522 int refclk) 359 int refclk)
523{ 360{
@@ -526,7 +363,8 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
526 const intel_limit_t *limit; 363 const intel_limit_t *limit;
527 364
528 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 365 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
529 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { 366 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
367 LVDS_CLKB_POWER_UP) {
530 /* LVDS dual channel */ 368 /* LVDS dual channel */
531 if (refclk == 100000) 369 if (refclk == 100000)
532 limit = &intel_limits_ironlake_dual_lvds_100m; 370 limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -539,7 +377,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
539 limit = &intel_limits_ironlake_single_lvds; 377 limit = &intel_limits_ironlake_single_lvds;
540 } 378 }
541 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 379 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
542 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 380 HAS_eDP)
543 limit = &intel_limits_ironlake_display_port; 381 limit = &intel_limits_ironlake_display_port;
544 else 382 else
545 limit = &intel_limits_ironlake_dac; 383 limit = &intel_limits_ironlake_dac;
@@ -554,7 +392,8 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
554 const intel_limit_t *limit; 392 const intel_limit_t *limit;
555 393
556 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 394 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
557 if (is_dual_link_lvds(dev_priv, LVDS)) 395 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
396 LVDS_CLKB_POWER_UP)
558 /* LVDS with dual channel */ 397 /* LVDS with dual channel */
559 limit = &intel_limits_g4x_dual_channel_lvds; 398 limit = &intel_limits_g4x_dual_channel_lvds;
560 else 399 else
@@ -565,7 +404,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
565 limit = &intel_limits_g4x_hdmi; 404 limit = &intel_limits_g4x_hdmi;
566 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 405 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
567 limit = &intel_limits_g4x_sdvo; 406 limit = &intel_limits_g4x_sdvo;
568 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 407 } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
569 limit = &intel_limits_g4x_display_port; 408 limit = &intel_limits_g4x_display_port;
570 } else /* The option is for other outputs */ 409 } else /* The option is for other outputs */
571 limit = &intel_limits_i9xx_sdvo; 410 limit = &intel_limits_i9xx_sdvo;
@@ -587,13 +426,6 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
587 limit = &intel_limits_pineview_lvds; 426 limit = &intel_limits_pineview_lvds;
588 else 427 else
589 limit = &intel_limits_pineview_sdvo; 428 limit = &intel_limits_pineview_sdvo;
590 } else if (IS_VALLEYVIEW(dev)) {
591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
592 limit = &intel_limits_vlv_dac;
593 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
594 limit = &intel_limits_vlv_hdmi;
595 else
596 limit = &intel_limits_vlv_dp;
597 } else if (!IS_GEN2(dev)) { 429 } else if (!IS_GEN2(dev)) {
598 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 430 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
599 limit = &intel_limits_i9xx_lvds; 431 limit = &intel_limits_i9xx_lvds;
@@ -635,10 +467,11 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
635bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 467bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
636{ 468{
637 struct drm_device *dev = crtc->dev; 469 struct drm_device *dev = crtc->dev;
470 struct drm_mode_config *mode_config = &dev->mode_config;
638 struct intel_encoder *encoder; 471 struct intel_encoder *encoder;
639 472
640 for_each_encoder_on_crtc(dev, crtc, encoder) 473 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
641 if (encoder->type == type) 474 if (encoder->base.crtc == crtc && encoder->type == type)
642 return true; 475 return true;
643 476
644 return false; 477 return false;
@@ -655,34 +488,33 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
655 const intel_clock_t *clock) 488 const intel_clock_t *clock)
656{ 489{
657 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 490 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
658 INTELPllInvalid("p1 out of range\n"); 491 INTELPllInvalid ("p1 out of range\n");
659 if (clock->p < limit->p.min || limit->p.max < clock->p) 492 if (clock->p < limit->p.min || limit->p.max < clock->p)
660 INTELPllInvalid("p out of range\n"); 493 INTELPllInvalid ("p out of range\n");
661 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 494 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
662 INTELPllInvalid("m2 out of range\n"); 495 INTELPllInvalid ("m2 out of range\n");
663 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 496 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
664 INTELPllInvalid("m1 out of range\n"); 497 INTELPllInvalid ("m1 out of range\n");
665 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 498 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
666 INTELPllInvalid("m1 <= m2\n"); 499 INTELPllInvalid ("m1 <= m2\n");
667 if (clock->m < limit->m.min || limit->m.max < clock->m) 500 if (clock->m < limit->m.min || limit->m.max < clock->m)
668 INTELPllInvalid("m out of range\n"); 501 INTELPllInvalid ("m out of range\n");
669 if (clock->n < limit->n.min || limit->n.max < clock->n) 502 if (clock->n < limit->n.min || limit->n.max < clock->n)
670 INTELPllInvalid("n out of range\n"); 503 INTELPllInvalid ("n out of range\n");
671 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 504 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
672 INTELPllInvalid("vco out of range\n"); 505 INTELPllInvalid ("vco out of range\n");
673 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 506 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
674 * connector, etc., rather than just a single range. 507 * connector, etc., rather than just a single range.
675 */ 508 */
676 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 509 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
677 INTELPllInvalid("dot out of range\n"); 510 INTELPllInvalid ("dot out of range\n");
678 511
679 return true; 512 return true;
680} 513}
681 514
682static bool 515static bool
683intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 516intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
684 int target, int refclk, intel_clock_t *match_clock, 517 int target, int refclk, intel_clock_t *best_clock)
685 intel_clock_t *best_clock)
686 518
687{ 519{
688 struct drm_device *dev = crtc->dev; 520 struct drm_device *dev = crtc->dev;
@@ -698,7 +530,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
698 * reliably set up different single/dual channel state, if we 530 * reliably set up different single/dual channel state, if we
699 * even can. 531 * even can.
700 */ 532 */
701 if (is_dual_link_lvds(dev_priv, LVDS)) 533 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
534 LVDS_CLKB_POWER_UP)
702 clock.p2 = limit->p2.p2_fast; 535 clock.p2 = limit->p2.p2_fast;
703 else 536 else
704 clock.p2 = limit->p2.p2_slow; 537 clock.p2 = limit->p2.p2_slow;
@@ -709,7 +542,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
709 clock.p2 = limit->p2.p2_fast; 542 clock.p2 = limit->p2.p2_fast;
710 } 543 }
711 544
712 memset(best_clock, 0, sizeof(*best_clock)); 545 memset (best_clock, 0, sizeof (*best_clock));
713 546
714 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 547 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
715 clock.m1++) { 548 clock.m1++) {
@@ -728,9 +561,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
728 if (!intel_PLL_is_valid(dev, limit, 561 if (!intel_PLL_is_valid(dev, limit,
729 &clock)) 562 &clock))
730 continue; 563 continue;
731 if (match_clock &&
732 clock.p != match_clock->p)
733 continue;
734 564
735 this_err = abs(clock.dot - target); 565 this_err = abs(clock.dot - target);
736 if (this_err < err) { 566 if (this_err < err) {
@@ -747,8 +577,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
747 577
748static bool 578static bool
749intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 579intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
750 int target, int refclk, intel_clock_t *match_clock, 580 int target, int refclk, intel_clock_t *best_clock)
751 intel_clock_t *best_clock)
752{ 581{
753 struct drm_device *dev = crtc->dev; 582 struct drm_device *dev = crtc->dev;
754 struct drm_i915_private *dev_priv = dev->dev_private; 583 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -795,9 +624,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
795 if (!intel_PLL_is_valid(dev, limit, 624 if (!intel_PLL_is_valid(dev, limit,
796 &clock)) 625 &clock))
797 continue; 626 continue;
798 if (match_clock &&
799 clock.p != match_clock->p)
800 continue;
801 627
802 this_err = abs(clock.dot - target); 628 this_err = abs(clock.dot - target);
803 if (this_err < err_most) { 629 if (this_err < err_most) {
@@ -815,8 +641,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
815 641
816static bool 642static bool
817intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 643intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
818 int target, int refclk, intel_clock_t *match_clock, 644 int target, int refclk, intel_clock_t *best_clock)
819 intel_clock_t *best_clock)
820{ 645{
821 struct drm_device *dev = crtc->dev; 646 struct drm_device *dev = crtc->dev;
822 intel_clock_t clock; 647 intel_clock_t clock;
@@ -842,8 +667,7 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
842/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 667/* DisplayPort has only two frequencies, 162MHz and 270MHz */
843static bool 668static bool
844intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 669intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
845 int target, int refclk, intel_clock_t *match_clock, 670 int target, int refclk, intel_clock_t *best_clock)
846 intel_clock_t *best_clock)
847{ 671{
848 intel_clock_t clock; 672 intel_clock_t clock;
849 if (target < 200000) { 673 if (target < 200000) {
@@ -866,94 +690,6 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
866 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 690 memcpy(best_clock, &clock, sizeof(intel_clock_t));
867 return true; 691 return true;
868} 692}
869static bool
870intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
871 int target, int refclk, intel_clock_t *match_clock,
872 intel_clock_t *best_clock)
873{
874 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
875 u32 m, n, fastclk;
876 u32 updrate, minupdate, fracbits, p;
877 unsigned long bestppm, ppm, absppm;
878 int dotclk, flag;
879
880 flag = 0;
881 dotclk = target * 1000;
882 bestppm = 1000000;
883 ppm = absppm = 0;
884 fastclk = dotclk / (2*100);
885 updrate = 0;
886 minupdate = 19200;
887 fracbits = 1;
888 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
889 bestm1 = bestm2 = bestp1 = bestp2 = 0;
890
891 /* based on hardware requirement, prefer smaller n to precision */
892 for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
893 updrate = refclk / n;
894 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
895 for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
896 if (p2 > 10)
897 p2 = p2 - 1;
898 p = p1 * p2;
899 /* based on hardware requirement, prefer bigger m1,m2 values */
900 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
901 m2 = (((2*(fastclk * p * n / m1 )) +
902 refclk) / (2*refclk));
903 m = m1 * m2;
904 vco = updrate * m;
905 if (vco >= limit->vco.min && vco < limit->vco.max) {
906 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
907 absppm = (ppm > 0) ? ppm : (-ppm);
908 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
909 bestppm = 0;
910 flag = 1;
911 }
912 if (absppm < bestppm - 10) {
913 bestppm = absppm;
914 flag = 1;
915 }
916 if (flag) {
917 bestn = n;
918 bestm1 = m1;
919 bestm2 = m2;
920 bestp1 = p1;
921 bestp2 = p2;
922 flag = 0;
923 }
924 }
925 }
926 }
927 }
928 }
929 best_clock->n = bestn;
930 best_clock->m1 = bestm1;
931 best_clock->m2 = bestm2;
932 best_clock->p1 = bestp1;
933 best_clock->p2 = bestp2;
934
935 return true;
936}
937
938enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
939 enum pipe pipe)
940{
941 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
943
944 return intel_crtc->cpu_transcoder;
945}
946
947static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
948{
949 struct drm_i915_private *dev_priv = dev->dev_private;
950 u32 frame, frame_reg = PIPEFRAME(pipe);
951
952 frame = I915_READ(frame_reg);
953
954 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
955 DRM_DEBUG_KMS("vblank wait timed out\n");
956}
957 693
958/** 694/**
959 * intel_wait_for_vblank - wait for vblank on a given pipe 695 * intel_wait_for_vblank - wait for vblank on a given pipe
@@ -968,11 +704,6 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
968 struct drm_i915_private *dev_priv = dev->dev_private; 704 struct drm_i915_private *dev_priv = dev->dev_private;
969 int pipestat_reg = PIPESTAT(pipe); 705 int pipestat_reg = PIPESTAT(pipe);
970 706
971 if (INTEL_INFO(dev)->gen >= 5) {
972 ironlake_wait_for_vblank(dev, pipe);
973 return;
974 }
975
976 /* Clear existing vblank status. Note this will clear any other 707 /* Clear existing vblank status. Note this will clear any other
977 * sticky status fields as well. 708 * sticky status fields as well.
978 * 709 *
@@ -1016,34 +747,27 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1016void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 747void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1017{ 748{
1018 struct drm_i915_private *dev_priv = dev->dev_private; 749 struct drm_i915_private *dev_priv = dev->dev_private;
1019 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1020 pipe);
1021 750
1022 if (INTEL_INFO(dev)->gen >= 4) { 751 if (INTEL_INFO(dev)->gen >= 4) {
1023 int reg = PIPECONF(cpu_transcoder); 752 int reg = PIPECONF(pipe);
1024 753
1025 /* Wait for the Pipe State to go off */ 754 /* Wait for the Pipe State to go off */
1026 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 755 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1027 100)) 756 100))
1028 WARN(1, "pipe_off wait timed out\n"); 757 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1029 } else { 758 } else {
1030 u32 last_line, line_mask; 759 u32 last_line;
1031 int reg = PIPEDSL(pipe); 760 int reg = PIPEDSL(pipe);
1032 unsigned long timeout = jiffies + msecs_to_jiffies(100); 761 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1033 762
1034 if (IS_GEN2(dev))
1035 line_mask = DSL_LINEMASK_GEN2;
1036 else
1037 line_mask = DSL_LINEMASK_GEN3;
1038
1039 /* Wait for the display line to settle */ 763 /* Wait for the display line to settle */
1040 do { 764 do {
1041 last_line = I915_READ(reg) & line_mask; 765 last_line = I915_READ(reg) & DSL_LINEMASK;
1042 mdelay(5); 766 mdelay(5);
1043 } while (((I915_READ(reg) & line_mask) != last_line) && 767 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
1044 time_after(timeout, jiffies)); 768 time_after(timeout, jiffies));
1045 if (time_after(jiffies, timeout)) 769 if (time_after(jiffies, timeout))
1046 WARN(1, "pipe_off wait timed out\n"); 770 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1047 } 771 }
1048} 772}
1049 773
@@ -1072,49 +796,21 @@ static void assert_pll(struct drm_i915_private *dev_priv,
1072 796
1073/* For ILK+ */ 797/* For ILK+ */
1074static void assert_pch_pll(struct drm_i915_private *dev_priv, 798static void assert_pch_pll(struct drm_i915_private *dev_priv,
1075 struct intel_pch_pll *pll, 799 enum pipe pipe, bool state)
1076 struct intel_crtc *crtc,
1077 bool state)
1078{ 800{
801 int reg;
1079 u32 val; 802 u32 val;
1080 bool cur_state; 803 bool cur_state;
1081 804
1082 if (HAS_PCH_LPT(dev_priv->dev)) { 805 reg = PCH_DPLL(pipe);
1083 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); 806 val = I915_READ(reg);
1084 return;
1085 }
1086
1087 if (WARN (!pll,
1088 "asserting PCH PLL %s with no PLL\n", state_string(state)))
1089 return;
1090
1091 val = I915_READ(pll->pll_reg);
1092 cur_state = !!(val & DPLL_VCO_ENABLE); 807 cur_state = !!(val & DPLL_VCO_ENABLE);
1093 WARN(cur_state != state, 808 WARN(cur_state != state,
1094 "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n", 809 "PCH PLL state assertion failure (expected %s, current %s)\n",
1095 pll->pll_reg, state_string(state), state_string(cur_state), val); 810 state_string(state), state_string(cur_state));
1096
1097 /* Make sure the selected PLL is correctly attached to the transcoder */
1098 if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
1099 u32 pch_dpll;
1100
1101 pch_dpll = I915_READ(PCH_DPLL_SEL);
1102 cur_state = pll->pll_reg == _PCH_DPLL_B;
1103 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1104 "PLL[%d] not attached to this transcoder %d: %08x\n",
1105 cur_state, crtc->pipe, pch_dpll)) {
1106 cur_state = !!(val >> (4*crtc->pipe + 3));
1107 WARN(cur_state != state,
1108 "PLL[%d] not %s on this transcoder %d: %08x\n",
1109 pll->pll_reg == _PCH_DPLL_B,
1110 state_string(state),
1111 crtc->pipe,
1112 val);
1113 }
1114 }
1115} 811}
1116#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) 812#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
1117#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) 813#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
1118 814
1119static void assert_fdi_tx(struct drm_i915_private *dev_priv, 815static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1120 enum pipe pipe, bool state) 816 enum pipe pipe, bool state)
@@ -1122,19 +818,10 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1122 int reg; 818 int reg;
1123 u32 val; 819 u32 val;
1124 bool cur_state; 820 bool cur_state;
1125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1126 pipe);
1127 821
1128 if (IS_HASWELL(dev_priv->dev)) { 822 reg = FDI_TX_CTL(pipe);
1129 /* On Haswell, DDI is used instead of FDI_TX_CTL */ 823 val = I915_READ(reg);
1130 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 824 cur_state = !!(val & FDI_TX_ENABLE);
1131 val = I915_READ(reg);
1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1133 } else {
1134 reg = FDI_TX_CTL(pipe);
1135 val = I915_READ(reg);
1136 cur_state = !!(val & FDI_TX_ENABLE);
1137 }
1138 WARN(cur_state != state, 825 WARN(cur_state != state,
1139 "FDI TX state assertion failure (expected %s, current %s)\n", 826 "FDI TX state assertion failure (expected %s, current %s)\n",
1140 state_string(state), state_string(cur_state)); 827 state_string(state), state_string(cur_state));
@@ -1169,10 +856,6 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1169 if (dev_priv->info->gen == 5) 856 if (dev_priv->info->gen == 5)
1170 return; 857 return;
1171 858
1172 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1173 if (IS_HASWELL(dev_priv->dev))
1174 return;
1175
1176 reg = FDI_TX_CTL(pipe); 859 reg = FDI_TX_CTL(pipe);
1177 val = I915_READ(reg); 860 val = I915_READ(reg);
1178 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 861 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -1218,45 +901,36 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1218 pipe_name(pipe)); 901 pipe_name(pipe));
1219} 902}
1220 903
1221void assert_pipe(struct drm_i915_private *dev_priv, 904static void assert_pipe(struct drm_i915_private *dev_priv,
1222 enum pipe pipe, bool state) 905 enum pipe pipe, bool state)
1223{ 906{
1224 int reg; 907 int reg;
1225 u32 val; 908 u32 val;
1226 bool cur_state; 909 bool cur_state;
1227 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1228 pipe);
1229 910
1230 /* if we need the pipe A quirk it must be always on */ 911 reg = PIPECONF(pipe);
1231 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1232 state = true;
1233
1234 reg = PIPECONF(cpu_transcoder);
1235 val = I915_READ(reg); 912 val = I915_READ(reg);
1236 cur_state = !!(val & PIPECONF_ENABLE); 913 cur_state = !!(val & PIPECONF_ENABLE);
1237 WARN(cur_state != state, 914 WARN(cur_state != state,
1238 "pipe %c assertion failure (expected %s, current %s)\n", 915 "pipe %c assertion failure (expected %s, current %s)\n",
1239 pipe_name(pipe), state_string(state), state_string(cur_state)); 916 pipe_name(pipe), state_string(state), state_string(cur_state));
1240} 917}
918#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
919#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
1241 920
1242static void assert_plane(struct drm_i915_private *dev_priv, 921static void assert_plane_enabled(struct drm_i915_private *dev_priv,
1243 enum plane plane, bool state) 922 enum plane plane)
1244{ 923{
1245 int reg; 924 int reg;
1246 u32 val; 925 u32 val;
1247 bool cur_state;
1248 926
1249 reg = DSPCNTR(plane); 927 reg = DSPCNTR(plane);
1250 val = I915_READ(reg); 928 val = I915_READ(reg);
1251 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 929 WARN(!(val & DISPLAY_PLANE_ENABLE),
1252 WARN(cur_state != state, 930 "plane %c assertion failure, should be active but is disabled\n",
1253 "plane %c assertion failure (expected %s, current %s)\n", 931 plane_name(plane));
1254 plane_name(plane), state_string(state), state_string(cur_state));
1255} 932}
1256 933
1257#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1258#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1259
1260static void assert_planes_disabled(struct drm_i915_private *dev_priv, 934static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1261 enum pipe pipe) 935 enum pipe pipe)
1262{ 936{
@@ -1265,14 +939,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1265 int cur_pipe; 939 int cur_pipe;
1266 940
1267 /* Planes are fixed to pipes on ILK+ */ 941 /* Planes are fixed to pipes on ILK+ */
1268 if (HAS_PCH_SPLIT(dev_priv->dev)) { 942 if (HAS_PCH_SPLIT(dev_priv->dev))
1269 reg = DSPCNTR(pipe);
1270 val = I915_READ(reg);
1271 WARN((val & DISPLAY_PLANE_ENABLE),
1272 "plane %c assertion failure, should be disabled but not\n",
1273 plane_name(pipe));
1274 return; 943 return;
1275 }
1276 944
1277 /* Need to check both planes against the pipe */ 945 /* Need to check both planes against the pipe */
1278 for (i = 0; i < 2; i++) { 946 for (i = 0; i < 2; i++) {
@@ -1291,11 +959,6 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1291 u32 val; 959 u32 val;
1292 bool enabled; 960 bool enabled;
1293 961
1294 if (HAS_PCH_LPT(dev_priv->dev)) {
1295 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1296 return;
1297 }
1298
1299 val = I915_READ(PCH_DREF_CONTROL); 962 val = I915_READ(PCH_DREF_CONTROL);
1300 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 963 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1301 DREF_SUPERSPREAD_SOURCE_MASK)); 964 DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1389,23 +1052,15 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1389 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1052 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1390 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1053 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1391 reg, pipe_name(pipe)); 1054 reg, pipe_name(pipe));
1392
1393 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1394 && (val & DP_PIPEB_SELECT),
1395 "IBX PCH dp port still using transcoder B\n");
1396} 1055}
1397 1056
1398static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1057static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1399 enum pipe pipe, int reg) 1058 enum pipe pipe, int reg)
1400{ 1059{
1401 u32 val = I915_READ(reg); 1060 u32 val = I915_READ(reg);
1402 WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1061 WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1403 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1062 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1404 reg, pipe_name(pipe)); 1063 reg, pipe_name(pipe));
1405
1406 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
1407 && (val & SDVO_PIPE_B_SELECT),
1408 "IBX PCH hdmi port still using transcoder B\n");
1409} 1064}
1410 1065
1411static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1066static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
@@ -1420,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1420 1075
1421 reg = PCH_ADPA; 1076 reg = PCH_ADPA;
1422 val = I915_READ(reg); 1077 val = I915_READ(reg);
1423 WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1078 WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1424 "PCH VGA enabled on transcoder %c, should be disabled\n", 1079 "PCH VGA enabled on transcoder %c, should be disabled\n",
1425 pipe_name(pipe)); 1080 pipe_name(pipe));
1426 1081
1427 reg = PCH_LVDS; 1082 reg = PCH_LVDS;
1428 val = I915_READ(reg); 1083 val = I915_READ(reg);
1429 WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1084 WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1430 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1085 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1431 pipe_name(pipe)); 1086 pipe_name(pipe));
1432 1087
@@ -1445,8 +1100,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1445 * protect mechanism may be enabled. 1100 * protect mechanism may be enabled.
1446 * 1101 *
1447 * Note! This is for pre-ILK only. 1102 * Note! This is for pre-ILK only.
1448 *
1449 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1450 */ 1103 */
1451static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1104static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1452{ 1105{
@@ -1454,7 +1107,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1454 u32 val; 1107 u32 val;
1455 1108
1456 /* No really, not for ILK+ */ 1109 /* No really, not for ILK+ */
1457 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); 1110 BUG_ON(dev_priv->info->gen >= 5);
1458 1111
1459 /* PLL is protected by panel, make sure we can write it */ 1112 /* PLL is protected by panel, make sure we can write it */
1460 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1113 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
@@ -1504,196 +1157,72 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1504 POSTING_READ(reg); 1157 POSTING_READ(reg);
1505} 1158}
1506 1159
1507/* SBI access */
1508static void
1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1510 enum intel_sbi_destination destination)
1511{
1512 unsigned long flags;
1513 u32 tmp;
1514
1515 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1516 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1517 DRM_ERROR("timeout waiting for SBI to become ready\n");
1518 goto out_unlock;
1519 }
1520
1521 I915_WRITE(SBI_ADDR, (reg << 16));
1522 I915_WRITE(SBI_DATA, value);
1523
1524 if (destination == SBI_ICLK)
1525 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1526 else
1527 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1528 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1529
1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1531 100)) {
1532 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1533 goto out_unlock;
1534 }
1535
1536out_unlock:
1537 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1538}
1539
1540static u32
1541intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1542 enum intel_sbi_destination destination)
1543{
1544 unsigned long flags;
1545 u32 value = 0;
1546
1547 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1548 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1549 DRM_ERROR("timeout waiting for SBI to become ready\n");
1550 goto out_unlock;
1551 }
1552
1553 I915_WRITE(SBI_ADDR, (reg << 16));
1554
1555 if (destination == SBI_ICLK)
1556 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1557 else
1558 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1559 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1560
1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1562 100)) {
1563 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1564 goto out_unlock;
1565 }
1566
1567 value = I915_READ(SBI_DATA);
1568
1569out_unlock:
1570 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1571 return value;
1572}
1573
1574/** 1160/**
1575 * ironlake_enable_pch_pll - enable PCH PLL 1161 * intel_enable_pch_pll - enable PCH PLL
1576 * @dev_priv: i915 private structure 1162 * @dev_priv: i915 private structure
1577 * @pipe: pipe PLL to enable 1163 * @pipe: pipe PLL to enable
1578 * 1164 *
1579 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1165 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1580 * drives the transcoder clock. 1166 * drives the transcoder clock.
1581 */ 1167 */
1582static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) 1168static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1169 enum pipe pipe)
1583{ 1170{
1584 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1585 struct intel_pch_pll *pll;
1586 int reg; 1171 int reg;
1587 u32 val; 1172 u32 val;
1588 1173
1589 /* PCH PLLs only available on ILK, SNB and IVB */ 1174 /* PCH only available on ILK+ */
1590 BUG_ON(dev_priv->info->gen < 5); 1175 BUG_ON(dev_priv->info->gen < 5);
1591 pll = intel_crtc->pch_pll;
1592 if (pll == NULL)
1593 return;
1594
1595 if (WARN_ON(pll->refcount == 0))
1596 return;
1597
1598 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1599 pll->pll_reg, pll->active, pll->on,
1600 intel_crtc->base.base.id);
1601 1176
1602 /* PCH refclock must be enabled first */ 1177 /* PCH refclock must be enabled first */
1603 assert_pch_refclk_enabled(dev_priv); 1178 assert_pch_refclk_enabled(dev_priv);
1604 1179
1605 if (pll->active++ && pll->on) { 1180 reg = PCH_DPLL(pipe);
1606 assert_pch_pll_enabled(dev_priv, pll, NULL);
1607 return;
1608 }
1609
1610 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1611
1612 reg = pll->pll_reg;
1613 val = I915_READ(reg); 1181 val = I915_READ(reg);
1614 val |= DPLL_VCO_ENABLE; 1182 val |= DPLL_VCO_ENABLE;
1615 I915_WRITE(reg, val); 1183 I915_WRITE(reg, val);
1616 POSTING_READ(reg); 1184 POSTING_READ(reg);
1617 udelay(200); 1185 udelay(200);
1618
1619 pll->on = true;
1620} 1186}
1621 1187
1622static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) 1188static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1189 enum pipe pipe)
1623{ 1190{
1624 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1625 struct intel_pch_pll *pll = intel_crtc->pch_pll;
1626 int reg; 1191 int reg;
1627 u32 val; 1192 u32 val;
1628 1193
1629 /* PCH only available on ILK+ */ 1194 /* PCH only available on ILK+ */
1630 BUG_ON(dev_priv->info->gen < 5); 1195 BUG_ON(dev_priv->info->gen < 5);
1631 if (pll == NULL)
1632 return;
1633
1634 if (WARN_ON(pll->refcount == 0))
1635 return;
1636
1637 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1638 pll->pll_reg, pll->active, pll->on,
1639 intel_crtc->base.base.id);
1640
1641 if (WARN_ON(pll->active == 0)) {
1642 assert_pch_pll_disabled(dev_priv, pll, NULL);
1643 return;
1644 }
1645
1646 if (--pll->active) {
1647 assert_pch_pll_enabled(dev_priv, pll, NULL);
1648 return;
1649 }
1650
1651 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1652 1196
1653 /* Make sure transcoder isn't still depending on us */ 1197 /* Make sure transcoder isn't still depending on us */
1654 assert_transcoder_disabled(dev_priv, intel_crtc->pipe); 1198 assert_transcoder_disabled(dev_priv, pipe);
1655 1199
1656 reg = pll->pll_reg; 1200 reg = PCH_DPLL(pipe);
1657 val = I915_READ(reg); 1201 val = I915_READ(reg);
1658 val &= ~DPLL_VCO_ENABLE; 1202 val &= ~DPLL_VCO_ENABLE;
1659 I915_WRITE(reg, val); 1203 I915_WRITE(reg, val);
1660 POSTING_READ(reg); 1204 POSTING_READ(reg);
1661 udelay(200); 1205 udelay(200);
1662
1663 pll->on = false;
1664} 1206}
1665 1207
1666static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1208static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1667 enum pipe pipe) 1209 enum pipe pipe)
1668{ 1210{
1669 struct drm_device *dev = dev_priv->dev; 1211 int reg;
1670 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1212 u32 val;
1671 uint32_t reg, val, pipeconf_val;
1672 1213
1673 /* PCH only available on ILK+ */ 1214 /* PCH only available on ILK+ */
1674 BUG_ON(dev_priv->info->gen < 5); 1215 BUG_ON(dev_priv->info->gen < 5);
1675 1216
1676 /* Make sure PCH DPLL is enabled */ 1217 /* Make sure PCH DPLL is enabled */
1677 assert_pch_pll_enabled(dev_priv, 1218 assert_pch_pll_enabled(dev_priv, pipe);
1678 to_intel_crtc(crtc)->pch_pll,
1679 to_intel_crtc(crtc));
1680 1219
1681 /* FDI must be feeding us bits for PCH ports */ 1220 /* FDI must be feeding us bits for PCH ports */
1682 assert_fdi_tx_enabled(dev_priv, pipe); 1221 assert_fdi_tx_enabled(dev_priv, pipe);
1683 assert_fdi_rx_enabled(dev_priv, pipe); 1222 assert_fdi_rx_enabled(dev_priv, pipe);
1684 1223
1685 if (HAS_PCH_CPT(dev)) {
1686 /* Workaround: Set the timing override bit before enabling the
1687 * pch transcoder. */
1688 reg = TRANS_CHICKEN2(pipe);
1689 val = I915_READ(reg);
1690 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1691 I915_WRITE(reg, val);
1692 }
1693
1694 reg = TRANSCONF(pipe); 1224 reg = TRANSCONF(pipe);
1695 val = I915_READ(reg); 1225 val = I915_READ(reg);
1696 pipeconf_val = I915_READ(PIPECONF(pipe));
1697 1226
1698 if (HAS_PCH_IBX(dev_priv->dev)) { 1227 if (HAS_PCH_IBX(dev_priv->dev)) {
1699 /* 1228 /*
@@ -1701,60 +1230,18 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1701 * that in pipeconf reg. 1230 * that in pipeconf reg.
1702 */ 1231 */
1703 val &= ~PIPE_BPC_MASK; 1232 val &= ~PIPE_BPC_MASK;
1704 val |= pipeconf_val & PIPE_BPC_MASK; 1233 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1705 } 1234 }
1706
1707 val &= ~TRANS_INTERLACE_MASK;
1708 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1709 if (HAS_PCH_IBX(dev_priv->dev) &&
1710 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1711 val |= TRANS_LEGACY_INTERLACED_ILK;
1712 else
1713 val |= TRANS_INTERLACED;
1714 else
1715 val |= TRANS_PROGRESSIVE;
1716
1717 I915_WRITE(reg, val | TRANS_ENABLE); 1235 I915_WRITE(reg, val | TRANS_ENABLE);
1718 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1236 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1719 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1237 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1720} 1238}
1721 1239
1722static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1240static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1723 enum transcoder cpu_transcoder) 1241 enum pipe pipe)
1724{
1725 u32 val, pipeconf_val;
1726
1727 /* PCH only available on ILK+ */
1728 BUG_ON(dev_priv->info->gen < 5);
1729
1730 /* FDI must be feeding us bits for PCH ports */
1731 assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
1732 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1733
1734 /* Workaround: set timing override bit. */
1735 val = I915_READ(_TRANSA_CHICKEN2);
1736 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1737 I915_WRITE(_TRANSA_CHICKEN2, val);
1738
1739 val = TRANS_ENABLE;
1740 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1741
1742 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1743 PIPECONF_INTERLACED_ILK)
1744 val |= TRANS_INTERLACED;
1745 else
1746 val |= TRANS_PROGRESSIVE;
1747
1748 I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1749 if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1750 DRM_ERROR("Failed to enable PCH transcoder\n");
1751}
1752
1753static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1754 enum pipe pipe)
1755{ 1242{
1756 struct drm_device *dev = dev_priv->dev; 1243 int reg;
1757 uint32_t reg, val; 1244 u32 val;
1758 1245
1759 /* FDI relies on the transcoder */ 1246 /* FDI relies on the transcoder */
1760 assert_fdi_tx_disabled(dev_priv, pipe); 1247 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1769,32 +1256,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1769 I915_WRITE(reg, val); 1256 I915_WRITE(reg, val);
1770 /* wait for PCH transcoder off, transcoder state */ 1257 /* wait for PCH transcoder off, transcoder state */
1771 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1258 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1772 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1259 DRM_ERROR("failed to disable transcoder\n");
1773
1774 if (!HAS_PCH_IBX(dev)) {
1775 /* Workaround: Clear the timing override chicken bit again. */
1776 reg = TRANS_CHICKEN2(pipe);
1777 val = I915_READ(reg);
1778 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1779 I915_WRITE(reg, val);
1780 }
1781}
1782
1783static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1784{
1785 u32 val;
1786
1787 val = I915_READ(_TRANSACONF);
1788 val &= ~TRANS_ENABLE;
1789 I915_WRITE(_TRANSACONF, val);
1790 /* wait for PCH transcoder off, transcoder state */
1791 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1792 DRM_ERROR("Failed to disable PCH transcoder\n");
1793
1794 /* Workaround: clear timing override bit. */
1795 val = I915_READ(_TRANSA_CHICKEN2);
1796 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1797 I915_WRITE(_TRANSA_CHICKEN2, val);
1798} 1260}
1799 1261
1800/** 1262/**
@@ -1814,17 +1276,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1814static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1276static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1815 bool pch_port) 1277 bool pch_port)
1816{ 1278{
1817 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1818 pipe);
1819 enum transcoder pch_transcoder;
1820 int reg; 1279 int reg;
1821 u32 val; 1280 u32 val;
1822 1281
1823 if (IS_HASWELL(dev_priv->dev))
1824 pch_transcoder = TRANSCODER_A;
1825 else
1826 pch_transcoder = pipe;
1827
1828 /* 1282 /*
1829 * A pipe without a PLL won't actually be able to drive bits from 1283 * A pipe without a PLL won't actually be able to drive bits from
1830 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1284 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1835,13 +1289,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1835 else { 1289 else {
1836 if (pch_port) { 1290 if (pch_port) {
1837 /* if driving the PCH, we need FDI enabled */ 1291 /* if driving the PCH, we need FDI enabled */
1838 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1292 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1839 assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); 1293 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1840 } 1294 }
1841 /* FIXME: assert CPU port conditions for SNB+ */ 1295 /* FIXME: assert CPU port conditions for SNB+ */
1842 } 1296 }
1843 1297
1844 reg = PIPECONF(cpu_transcoder); 1298 reg = PIPECONF(pipe);
1845 val = I915_READ(reg); 1299 val = I915_READ(reg);
1846 if (val & PIPECONF_ENABLE) 1300 if (val & PIPECONF_ENABLE)
1847 return; 1301 return;
@@ -1865,8 +1319,6 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1865static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1319static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1866 enum pipe pipe) 1320 enum pipe pipe)
1867{ 1321{
1868 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1869 pipe);
1870 int reg; 1322 int reg;
1871 u32 val; 1323 u32 val;
1872 1324
@@ -1880,7 +1332,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1880 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1332 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1881 return; 1333 return;
1882 1334
1883 reg = PIPECONF(cpu_transcoder); 1335 reg = PIPECONF(pipe);
1884 val = I915_READ(reg); 1336 val = I915_READ(reg);
1885 if ((val & PIPECONF_ENABLE) == 0) 1337 if ((val & PIPECONF_ENABLE) == 0)
1886 return; 1338 return;
@@ -1893,13 +1345,11 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1893 * Plane regs are double buffered, going from enabled->disabled needs a 1345 * Plane regs are double buffered, going from enabled->disabled needs a
1894 * trigger in order to latch. The display address reg provides this. 1346 * trigger in order to latch. The display address reg provides this.
1895 */ 1347 */
1896void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1348static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1897 enum plane plane) 1349 enum plane plane)
1898{ 1350{
1899 if (dev_priv->info->gen >= 4) 1351 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1900 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1352 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1901 else
1902 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1903} 1353}
1904 1354
1905/** 1355/**
@@ -1953,6 +1403,543 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
1953 intel_wait_for_vblank(dev_priv->dev, pipe); 1403 intel_wait_for_vblank(dev_priv->dev, pipe);
1954} 1404}
1955 1405
1406static void disable_pch_dp(struct drm_i915_private *dev_priv,
1407 enum pipe pipe, int reg, u32 port_sel)
1408{
1409 u32 val = I915_READ(reg);
1410 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1411 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1412 I915_WRITE(reg, val & ~DP_PORT_EN);
1413 }
1414}
1415
1416static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1417 enum pipe pipe, int reg)
1418{
1419 u32 val = I915_READ(reg);
1420 if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1421 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1422 reg, pipe);
1423 I915_WRITE(reg, val & ~PORT_ENABLE);
1424 }
1425}
1426
1427/* Disable any ports connected to this transcoder */
1428static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1429 enum pipe pipe)
1430{
1431 u32 reg, val;
1432
1433 val = I915_READ(PCH_PP_CONTROL);
1434 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1435
1436 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1437 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1438 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1439
1440 reg = PCH_ADPA;
1441 val = I915_READ(reg);
1442 if (adpa_pipe_enabled(dev_priv, val, pipe))
1443 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1444
1445 reg = PCH_LVDS;
1446 val = I915_READ(reg);
1447 if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1448 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1449 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1450 POSTING_READ(reg);
1451 udelay(100);
1452 }
1453
1454 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1455 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1456 disable_pch_hdmi(dev_priv, pipe, HDMID);
1457}
1458
1459static void i8xx_disable_fbc(struct drm_device *dev)
1460{
1461 struct drm_i915_private *dev_priv = dev->dev_private;
1462 u32 fbc_ctl;
1463
1464 /* Disable compression */
1465 fbc_ctl = I915_READ(FBC_CONTROL);
1466 if ((fbc_ctl & FBC_CTL_EN) == 0)
1467 return;
1468
1469 fbc_ctl &= ~FBC_CTL_EN;
1470 I915_WRITE(FBC_CONTROL, fbc_ctl);
1471
1472 /* Wait for compressing bit to clear */
1473 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1474 DRM_DEBUG_KMS("FBC idle timed out\n");
1475 return;
1476 }
1477
1478 DRM_DEBUG_KMS("disabled FBC\n");
1479}
1480
1481static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1482{
1483 struct drm_device *dev = crtc->dev;
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 struct drm_framebuffer *fb = crtc->fb;
1486 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1487 struct drm_i915_gem_object *obj = intel_fb->obj;
1488 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1489 int cfb_pitch;
1490 int plane, i;
1491 u32 fbc_ctl, fbc_ctl2;
1492
1493 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1494 if (fb->pitch < cfb_pitch)
1495 cfb_pitch = fb->pitch;
1496
1497 /* FBC_CTL wants 64B units */
1498 cfb_pitch = (cfb_pitch / 64) - 1;
1499 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1500
1501 /* Clear old tags */
1502 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1503 I915_WRITE(FBC_TAG + (i * 4), 0);
1504
1505 /* Set it up... */
1506 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1507 fbc_ctl2 |= plane;
1508 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1509 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1510
1511 /* enable it... */
1512 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1513 if (IS_I945GM(dev))
1514 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1515 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1516 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1517 fbc_ctl |= obj->fence_reg;
1518 I915_WRITE(FBC_CONTROL, fbc_ctl);
1519
1520 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1521 cfb_pitch, crtc->y, intel_crtc->plane);
1522}
1523
1524static bool i8xx_fbc_enabled(struct drm_device *dev)
1525{
1526 struct drm_i915_private *dev_priv = dev->dev_private;
1527
1528 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1529}
1530
1531static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1532{
1533 struct drm_device *dev = crtc->dev;
1534 struct drm_i915_private *dev_priv = dev->dev_private;
1535 struct drm_framebuffer *fb = crtc->fb;
1536 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1537 struct drm_i915_gem_object *obj = intel_fb->obj;
1538 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1539 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1540 unsigned long stall_watermark = 200;
1541 u32 dpfc_ctl;
1542
1543 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1544 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1545 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1546
1547 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1548 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1549 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1550 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1551
1552 /* enable it... */
1553 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1554
1555 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1556}
1557
1558static void g4x_disable_fbc(struct drm_device *dev)
1559{
1560 struct drm_i915_private *dev_priv = dev->dev_private;
1561 u32 dpfc_ctl;
1562
1563 /* Disable compression */
1564 dpfc_ctl = I915_READ(DPFC_CONTROL);
1565 if (dpfc_ctl & DPFC_CTL_EN) {
1566 dpfc_ctl &= ~DPFC_CTL_EN;
1567 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1568
1569 DRM_DEBUG_KMS("disabled FBC\n");
1570 }
1571}
1572
1573static bool g4x_fbc_enabled(struct drm_device *dev)
1574{
1575 struct drm_i915_private *dev_priv = dev->dev_private;
1576
1577 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1578}
1579
1580static void sandybridge_blit_fbc_update(struct drm_device *dev)
1581{
1582 struct drm_i915_private *dev_priv = dev->dev_private;
1583 u32 blt_ecoskpd;
1584
1585 /* Make sure blitter notifies FBC of writes */
1586 gen6_gt_force_wake_get(dev_priv);
1587 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1588 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1589 GEN6_BLITTER_LOCK_SHIFT;
1590 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1591 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1592 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1593 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1594 GEN6_BLITTER_LOCK_SHIFT);
1595 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1596 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1597 gen6_gt_force_wake_put(dev_priv);
1598}
1599
1600static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1601{
1602 struct drm_device *dev = crtc->dev;
1603 struct drm_i915_private *dev_priv = dev->dev_private;
1604 struct drm_framebuffer *fb = crtc->fb;
1605 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1606 struct drm_i915_gem_object *obj = intel_fb->obj;
1607 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1608 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1609 unsigned long stall_watermark = 200;
1610 u32 dpfc_ctl;
1611
1612 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1613 dpfc_ctl &= DPFC_RESERVED;
1614 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1615 /* Set persistent mode for front-buffer rendering, ala X. */
1616 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1617 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1618 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1619
1620 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1621 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1622 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1623 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1624 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1625 /* enable it... */
1626 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1627
1628 if (IS_GEN6(dev)) {
1629 I915_WRITE(SNB_DPFC_CTL_SA,
1630 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1631 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1632 sandybridge_blit_fbc_update(dev);
1633 }
1634
1635 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1636}
1637
1638static void ironlake_disable_fbc(struct drm_device *dev)
1639{
1640 struct drm_i915_private *dev_priv = dev->dev_private;
1641 u32 dpfc_ctl;
1642
1643 /* Disable compression */
1644 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1645 if (dpfc_ctl & DPFC_CTL_EN) {
1646 dpfc_ctl &= ~DPFC_CTL_EN;
1647 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1648
1649 DRM_DEBUG_KMS("disabled FBC\n");
1650 }
1651}
1652
1653static bool ironlake_fbc_enabled(struct drm_device *dev)
1654{
1655 struct drm_i915_private *dev_priv = dev->dev_private;
1656
1657 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1658}
1659
1660bool intel_fbc_enabled(struct drm_device *dev)
1661{
1662 struct drm_i915_private *dev_priv = dev->dev_private;
1663
1664 if (!dev_priv->display.fbc_enabled)
1665 return false;
1666
1667 return dev_priv->display.fbc_enabled(dev);
1668}
1669
1670static void intel_fbc_work_fn(struct work_struct *__work)
1671{
1672 struct intel_fbc_work *work =
1673 container_of(to_delayed_work(__work),
1674 struct intel_fbc_work, work);
1675 struct drm_device *dev = work->crtc->dev;
1676 struct drm_i915_private *dev_priv = dev->dev_private;
1677
1678 mutex_lock(&dev->struct_mutex);
1679 if (work == dev_priv->fbc_work) {
1680 /* Double check that we haven't switched fb without cancelling
1681 * the prior work.
1682 */
1683 if (work->crtc->fb == work->fb) {
1684 dev_priv->display.enable_fbc(work->crtc,
1685 work->interval);
1686
1687 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1688 dev_priv->cfb_fb = work->crtc->fb->base.id;
1689 dev_priv->cfb_y = work->crtc->y;
1690 }
1691
1692 dev_priv->fbc_work = NULL;
1693 }
1694 mutex_unlock(&dev->struct_mutex);
1695
1696 kfree(work);
1697}
1698
1699static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1700{
1701 if (dev_priv->fbc_work == NULL)
1702 return;
1703
1704 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1705
1706 /* Synchronisation is provided by struct_mutex and checking of
1707 * dev_priv->fbc_work, so we can perform the cancellation
1708 * entirely asynchronously.
1709 */
1710 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1711 /* tasklet was killed before being run, clean up */
1712 kfree(dev_priv->fbc_work);
1713
1714 /* Mark the work as no longer wanted so that if it does
1715 * wake-up (because the work was already running and waiting
1716 * for our mutex), it will discover that is no longer
1717 * necessary to run.
1718 */
1719 dev_priv->fbc_work = NULL;
1720}
1721
1722static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1723{
1724 struct intel_fbc_work *work;
1725 struct drm_device *dev = crtc->dev;
1726 struct drm_i915_private *dev_priv = dev->dev_private;
1727
1728 if (!dev_priv->display.enable_fbc)
1729 return;
1730
1731 intel_cancel_fbc_work(dev_priv);
1732
1733 work = kzalloc(sizeof *work, GFP_KERNEL);
1734 if (work == NULL) {
1735 dev_priv->display.enable_fbc(crtc, interval);
1736 return;
1737 }
1738
1739 work->crtc = crtc;
1740 work->fb = crtc->fb;
1741 work->interval = interval;
1742 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1743
1744 dev_priv->fbc_work = work;
1745
1746 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1747
1748 /* Delay the actual enabling to let pageflipping cease and the
1749 * display to settle before starting the compression. Note that
1750 * this delay also serves a second purpose: it allows for a
1751 * vblank to pass after disabling the FBC before we attempt
1752 * to modify the control registers.
1753 *
1754 * A more complicated solution would involve tracking vblanks
1755 * following the termination of the page-flipping sequence
1756 * and indeed performing the enable as a co-routine and not
1757 * waiting synchronously upon the vblank.
1758 */
1759 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1760}
1761
1762void intel_disable_fbc(struct drm_device *dev)
1763{
1764 struct drm_i915_private *dev_priv = dev->dev_private;
1765
1766 intel_cancel_fbc_work(dev_priv);
1767
1768 if (!dev_priv->display.disable_fbc)
1769 return;
1770
1771 dev_priv->display.disable_fbc(dev);
1772 dev_priv->cfb_plane = -1;
1773}
1774
1775/**
1776 * intel_update_fbc - enable/disable FBC as needed
1777 * @dev: the drm_device
1778 *
1779 * Set up the framebuffer compression hardware at mode set time. We
1780 * enable it if possible:
1781 * - plane A only (on pre-965)
1782 * - no pixel mulitply/line duplication
1783 * - no alpha buffer discard
1784 * - no dual wide
1785 * - framebuffer <= 2048 in width, 1536 in height
1786 *
1787 * We can't assume that any compression will take place (worst case),
1788 * so the compressed buffer has to be the same size as the uncompressed
1789 * one. It also must reside (along with the line length buffer) in
1790 * stolen memory.
1791 *
1792 * We need to enable/disable FBC on a global basis.
1793 */
1794static void intel_update_fbc(struct drm_device *dev)
1795{
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 struct drm_crtc *crtc = NULL, *tmp_crtc;
1798 struct intel_crtc *intel_crtc;
1799 struct drm_framebuffer *fb;
1800 struct intel_framebuffer *intel_fb;
1801 struct drm_i915_gem_object *obj;
1802 int enable_fbc;
1803
1804 DRM_DEBUG_KMS("\n");
1805
1806 if (!i915_powersave)
1807 return;
1808
1809 if (!I915_HAS_FBC(dev))
1810 return;
1811
1812 /*
1813 * If FBC is already on, we just have to verify that we can
1814 * keep it that way...
1815 * Need to disable if:
1816 * - more than one pipe is active
1817 * - changing FBC params (stride, fence, mode)
1818 * - new fb is too large to fit in compressed buffer
1819 * - going to an unsupported config (interlace, pixel multiply, etc.)
1820 */
1821 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1822 if (tmp_crtc->enabled && tmp_crtc->fb) {
1823 if (crtc) {
1824 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1825 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1826 goto out_disable;
1827 }
1828 crtc = tmp_crtc;
1829 }
1830 }
1831
1832 if (!crtc || crtc->fb == NULL) {
1833 DRM_DEBUG_KMS("no output, disabling\n");
1834 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1835 goto out_disable;
1836 }
1837
1838 intel_crtc = to_intel_crtc(crtc);
1839 fb = crtc->fb;
1840 intel_fb = to_intel_framebuffer(fb);
1841 obj = intel_fb->obj;
1842
1843 enable_fbc = i915_enable_fbc;
1844 if (enable_fbc < 0) {
1845 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1846 enable_fbc = 1;
1847 if (INTEL_INFO(dev)->gen <= 5)
1848 enable_fbc = 0;
1849 }
1850 if (!enable_fbc) {
1851 DRM_DEBUG_KMS("fbc disabled per module param\n");
1852 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1853 goto out_disable;
1854 }
1855 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1856 DRM_DEBUG_KMS("framebuffer too large, disabling "
1857 "compression\n");
1858 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1859 goto out_disable;
1860 }
1861 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1862 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1863 DRM_DEBUG_KMS("mode incompatible with compression, "
1864 "disabling\n");
1865 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1866 goto out_disable;
1867 }
1868 if ((crtc->mode.hdisplay > 2048) ||
1869 (crtc->mode.vdisplay > 1536)) {
1870 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1871 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1872 goto out_disable;
1873 }
1874 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1875 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1876 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1877 goto out_disable;
1878 }
1879
1880 /* The use of a CPU fence is mandatory in order to detect writes
1881 * by the CPU to the scanout and trigger updates to the FBC.
1882 */
1883 if (obj->tiling_mode != I915_TILING_X ||
1884 obj->fence_reg == I915_FENCE_REG_NONE) {
1885 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1886 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1887 goto out_disable;
1888 }
1889
1890 /* If the kernel debugger is active, always disable compression */
1891 if (in_dbg_master())
1892 goto out_disable;
1893
1894 /* If the scanout has not changed, don't modify the FBC settings.
1895 * Note that we make the fundamental assumption that the fb->obj
1896 * cannot be unpinned (and have its GTT offset and fence revoked)
1897 * without first being decoupled from the scanout and FBC disabled.
1898 */
1899 if (dev_priv->cfb_plane == intel_crtc->plane &&
1900 dev_priv->cfb_fb == fb->base.id &&
1901 dev_priv->cfb_y == crtc->y)
1902 return;
1903
1904 if (intel_fbc_enabled(dev)) {
1905 /* We update FBC along two paths, after changing fb/crtc
1906 * configuration (modeswitching) and after page-flipping
1907 * finishes. For the latter, we know that not only did
1908 * we disable the FBC at the start of the page-flip
1909 * sequence, but also more than one vblank has passed.
1910 *
1911 * For the former case of modeswitching, it is possible
1912 * to switch between two FBC valid configurations
1913 * instantaneously so we do need to disable the FBC
1914 * before we can modify its control registers. We also
1915 * have to wait for the next vblank for that to take
1916 * effect. However, since we delay enabling FBC we can
1917 * assume that a vblank has passed since disabling and
1918 * that we can safely alter the registers in the deferred
1919 * callback.
1920 *
1921 * In the scenario that we go from a valid to invalid
1922 * and then back to valid FBC configuration we have
1923 * no strict enforcement that a vblank occurred since
1924 * disabling the FBC. However, along all current pipe
1925 * disabling paths we do need to wait for a vblank at
1926 * some point. And we wait before enabling FBC anyway.
1927 */
1928 DRM_DEBUG_KMS("disabling active FBC for update\n");
1929 intel_disable_fbc(dev);
1930 }
1931
1932 intel_enable_fbc(crtc, 500);
1933 return;
1934
1935out_disable:
1936 /* Multiple disables should be harmless */
1937 if (intel_fbc_enabled(dev)) {
1938 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1939 intel_disable_fbc(dev);
1940 }
1941}
1942
1956int 1943int
1957intel_pin_and_fence_fb_obj(struct drm_device *dev, 1944intel_pin_and_fence_fb_obj(struct drm_device *dev,
1958 struct drm_i915_gem_object *obj, 1945 struct drm_i915_gem_object *obj,
@@ -1993,11 +1980,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1993 * framebuffer compression. For simplicity, we always install 1980 * framebuffer compression. For simplicity, we always install
1994 * a fence as the cost is not that onerous. 1981 * a fence as the cost is not that onerous.
1995 */ 1982 */
1996 ret = i915_gem_object_get_fence(obj); 1983 if (obj->tiling_mode != I915_TILING_NONE) {
1997 if (ret) 1984 ret = i915_gem_object_get_fence(obj, pipelined);
1998 goto err_unpin; 1985 if (ret)
1999 1986 goto err_unpin;
2000 i915_gem_object_pin_fence(obj); 1987 }
2001 1988
2002 dev_priv->mm.interruptible = true; 1989 dev_priv->mm.interruptible = true;
2003 return 0; 1990 return 0;
@@ -2009,28 +1996,6 @@ err_interruptible:
2009 return ret; 1996 return ret;
2010} 1997}
2011 1998
2012void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2013{
2014 i915_gem_object_unpin_fence(obj);
2015 i915_gem_object_unpin(obj);
2016}
2017
2018/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2019 * is assumed to be a power-of-two. */
2020unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
2021 unsigned int bpp,
2022 unsigned int pitch)
2023{
2024 int tile_rows, tiles;
2025
2026 tile_rows = *y / 8;
2027 *y %= 8;
2028 tiles = *x / (512/bpp);
2029 *x %= 512/bpp;
2030
2031 return tile_rows * pitch * 8 + tiles * 4096;
2032}
2033
2034static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, 1999static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2035 int x, int y) 2000 int x, int y)
2036{ 2001{
@@ -2040,7 +2005,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2040 struct intel_framebuffer *intel_fb; 2005 struct intel_framebuffer *intel_fb;
2041 struct drm_i915_gem_object *obj; 2006 struct drm_i915_gem_object *obj;
2042 int plane = intel_crtc->plane; 2007 int plane = intel_crtc->plane;
2043 unsigned long linear_offset; 2008 unsigned long Start, Offset;
2044 u32 dspcntr; 2009 u32 dspcntr;
2045 u32 reg; 2010 u32 reg;
2046 2011
@@ -2060,38 +2025,24 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2060 dspcntr = I915_READ(reg); 2025 dspcntr = I915_READ(reg);
2061 /* Mask out pixel format bits in case we change it */ 2026 /* Mask out pixel format bits in case we change it */
2062 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2027 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2063 switch (fb->pixel_format) { 2028 switch (fb->bits_per_pixel) {
2064 case DRM_FORMAT_C8: 2029 case 8:
2065 dspcntr |= DISPPLANE_8BPP; 2030 dspcntr |= DISPPLANE_8BPP;
2066 break; 2031 break;
2067 case DRM_FORMAT_XRGB1555: 2032 case 16:
2068 case DRM_FORMAT_ARGB1555: 2033 if (fb->depth == 15)
2069 dspcntr |= DISPPLANE_BGRX555; 2034 dspcntr |= DISPPLANE_15_16BPP;
2070 break; 2035 else
2071 case DRM_FORMAT_RGB565: 2036 dspcntr |= DISPPLANE_16BPP;
2072 dspcntr |= DISPPLANE_BGRX565;
2073 break;
2074 case DRM_FORMAT_XRGB8888:
2075 case DRM_FORMAT_ARGB8888:
2076 dspcntr |= DISPPLANE_BGRX888;
2077 break;
2078 case DRM_FORMAT_XBGR8888:
2079 case DRM_FORMAT_ABGR8888:
2080 dspcntr |= DISPPLANE_RGBX888;
2081 break;
2082 case DRM_FORMAT_XRGB2101010:
2083 case DRM_FORMAT_ARGB2101010:
2084 dspcntr |= DISPPLANE_BGRX101010;
2085 break; 2037 break;
2086 case DRM_FORMAT_XBGR2101010: 2038 case 24:
2087 case DRM_FORMAT_ABGR2101010: 2039 case 32:
2088 dspcntr |= DISPPLANE_RGBX101010; 2040 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2089 break; 2041 break;
2090 default: 2042 default:
2091 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); 2043 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2092 return -EINVAL; 2044 return -EINVAL;
2093 } 2045 }
2094
2095 if (INTEL_INFO(dev)->gen >= 4) { 2046 if (INTEL_INFO(dev)->gen >= 4) {
2096 if (obj->tiling_mode != I915_TILING_NONE) 2047 if (obj->tiling_mode != I915_TILING_NONE)
2097 dspcntr |= DISPPLANE_TILED; 2048 dspcntr |= DISPPLANE_TILED;
@@ -2101,28 +2052,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2101 2052
2102 I915_WRITE(reg, dspcntr); 2053 I915_WRITE(reg, dspcntr);
2103 2054
2104 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2055 Start = obj->gtt_offset;
2056 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2105 2057
2058 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2059 Start, Offset, x, y, fb->pitch);
2060 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2106 if (INTEL_INFO(dev)->gen >= 4) { 2061 if (INTEL_INFO(dev)->gen >= 4) {
2107 intel_crtc->dspaddr_offset = 2062 I915_WRITE(DSPSURF(plane), Start);
2108 intel_gen4_compute_offset_xtiled(&x, &y,
2109 fb->bits_per_pixel / 8,
2110 fb->pitches[0]);
2111 linear_offset -= intel_crtc->dspaddr_offset;
2112 } else {
2113 intel_crtc->dspaddr_offset = linear_offset;
2114 }
2115
2116 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2117 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2118 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2119 if (INTEL_INFO(dev)->gen >= 4) {
2120 I915_MODIFY_DISPBASE(DSPSURF(plane),
2121 obj->gtt_offset + intel_crtc->dspaddr_offset);
2122 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2063 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2123 I915_WRITE(DSPLINOFF(plane), linear_offset); 2064 I915_WRITE(DSPADDR(plane), Offset);
2124 } else 2065 } else
2125 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); 2066 I915_WRITE(DSPADDR(plane), Start + Offset);
2126 POSTING_READ(reg); 2067 POSTING_READ(reg);
2127 2068
2128 return 0; 2069 return 0;
@@ -2137,14 +2078,13 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2137 struct intel_framebuffer *intel_fb; 2078 struct intel_framebuffer *intel_fb;
2138 struct drm_i915_gem_object *obj; 2079 struct drm_i915_gem_object *obj;
2139 int plane = intel_crtc->plane; 2080 int plane = intel_crtc->plane;
2140 unsigned long linear_offset; 2081 unsigned long Start, Offset;
2141 u32 dspcntr; 2082 u32 dspcntr;
2142 u32 reg; 2083 u32 reg;
2143 2084
2144 switch (plane) { 2085 switch (plane) {
2145 case 0: 2086 case 0:
2146 case 1: 2087 case 1:
2147 case 2:
2148 break; 2088 break;
2149 default: 2089 default:
2150 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 2090 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
@@ -2158,31 +2098,27 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2158 dspcntr = I915_READ(reg); 2098 dspcntr = I915_READ(reg);
2159 /* Mask out pixel format bits in case we change it */ 2099 /* Mask out pixel format bits in case we change it */
2160 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2100 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2161 switch (fb->pixel_format) { 2101 switch (fb->bits_per_pixel) {
2162 case DRM_FORMAT_C8: 2102 case 8:
2163 dspcntr |= DISPPLANE_8BPP; 2103 dspcntr |= DISPPLANE_8BPP;
2164 break; 2104 break;
2165 case DRM_FORMAT_RGB565: 2105 case 16:
2166 dspcntr |= DISPPLANE_BGRX565; 2106 if (fb->depth != 16)
2167 break; 2107 return -EINVAL;
2168 case DRM_FORMAT_XRGB8888: 2108
2169 case DRM_FORMAT_ARGB8888: 2109 dspcntr |= DISPPLANE_16BPP;
2170 dspcntr |= DISPPLANE_BGRX888;
2171 break;
2172 case DRM_FORMAT_XBGR8888:
2173 case DRM_FORMAT_ABGR8888:
2174 dspcntr |= DISPPLANE_RGBX888;
2175 break;
2176 case DRM_FORMAT_XRGB2101010:
2177 case DRM_FORMAT_ARGB2101010:
2178 dspcntr |= DISPPLANE_BGRX101010;
2179 break; 2110 break;
2180 case DRM_FORMAT_XBGR2101010: 2111 case 24:
2181 case DRM_FORMAT_ABGR2101010: 2112 case 32:
2182 dspcntr |= DISPPLANE_RGBX101010; 2113 if (fb->depth == 24)
2114 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2115 else if (fb->depth == 30)
2116 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2117 else
2118 return -EINVAL;
2183 break; 2119 break;
2184 default: 2120 default:
2185 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); 2121 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2186 return -EINVAL; 2122 return -EINVAL;
2187 } 2123 }
2188 2124
@@ -2196,24 +2132,15 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2196 2132
2197 I915_WRITE(reg, dspcntr); 2133 I915_WRITE(reg, dspcntr);
2198 2134
2199 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2135 Start = obj->gtt_offset;
2200 intel_crtc->dspaddr_offset = 2136 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2201 intel_gen4_compute_offset_xtiled(&x, &y, 2137
2202 fb->bits_per_pixel / 8, 2138 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2203 fb->pitches[0]); 2139 Start, Offset, x, y, fb->pitch);
2204 linear_offset -= intel_crtc->dspaddr_offset; 2140 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2205 2141 I915_WRITE(DSPSURF(plane), Start);
2206 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2142 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2207 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2143 I915_WRITE(DSPADDR(plane), Offset);
2208 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2209 I915_MODIFY_DISPBASE(DSPSURF(plane),
2210 obj->gtt_offset + intel_crtc->dspaddr_offset);
2211 if (IS_HASWELL(dev)) {
2212 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2213 } else {
2214 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2215 I915_WRITE(DSPLINOFF(plane), linear_offset);
2216 }
2217 POSTING_READ(reg); 2144 POSTING_READ(reg);
2218 2145
2219 return 0; 2146 return 0;
@@ -2226,94 +2153,45 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2226{ 2153{
2227 struct drm_device *dev = crtc->dev; 2154 struct drm_device *dev = crtc->dev;
2228 struct drm_i915_private *dev_priv = dev->dev_private; 2155 struct drm_i915_private *dev_priv = dev->dev_private;
2229
2230 if (dev_priv->display.disable_fbc)
2231 dev_priv->display.disable_fbc(dev);
2232 intel_increase_pllclock(crtc);
2233
2234 return dev_priv->display.update_plane(crtc, fb, x, y);
2235}
2236
2237static int
2238intel_finish_fb(struct drm_framebuffer *old_fb)
2239{
2240 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2241 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2242 bool was_interruptible = dev_priv->mm.interruptible;
2243 int ret; 2156 int ret;
2244 2157
2245 wait_event(dev_priv->pending_flip_queue, 2158 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2246 atomic_read(&dev_priv->mm.wedged) || 2159 if (ret)
2247 atomic_read(&obj->pending_flip) == 0); 2160 return ret;
2248
2249 /* Big Hammer, we also need to ensure that any pending
2250 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2251 * current scanout is retired before unpinning the old
2252 * framebuffer.
2253 *
2254 * This should only fail upon a hung GPU, in which case we
2255 * can safely continue.
2256 */
2257 dev_priv->mm.interruptible = false;
2258 ret = i915_gem_object_finish_gpu(obj);
2259 dev_priv->mm.interruptible = was_interruptible;
2260
2261 return ret;
2262}
2263
2264static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2265{
2266 struct drm_device *dev = crtc->dev;
2267 struct drm_i915_master_private *master_priv;
2268 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2269
2270 if (!dev->primary->master)
2271 return;
2272 2161
2273 master_priv = dev->primary->master->driver_priv; 2162 intel_update_fbc(dev);
2274 if (!master_priv->sarea_priv) 2163 intel_increase_pllclock(crtc);
2275 return;
2276 2164
2277 switch (intel_crtc->pipe) { 2165 return 0;
2278 case 0:
2279 master_priv->sarea_priv->pipeA_x = x;
2280 master_priv->sarea_priv->pipeA_y = y;
2281 break;
2282 case 1:
2283 master_priv->sarea_priv->pipeB_x = x;
2284 master_priv->sarea_priv->pipeB_y = y;
2285 break;
2286 default:
2287 break;
2288 }
2289} 2166}
2290 2167
2291static int 2168static int
2292intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2169intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2293 struct drm_framebuffer *fb) 2170 struct drm_framebuffer *old_fb)
2294{ 2171{
2295 struct drm_device *dev = crtc->dev; 2172 struct drm_device *dev = crtc->dev;
2296 struct drm_i915_private *dev_priv = dev->dev_private; 2173 struct drm_i915_master_private *master_priv;
2297 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2298 struct drm_framebuffer *old_fb;
2299 int ret; 2175 int ret;
2300 2176
2301 /* no fb bound */ 2177 /* no fb bound */
2302 if (!fb) { 2178 if (!crtc->fb) {
2303 DRM_ERROR("No FB bound\n"); 2179 DRM_ERROR("No FB bound\n");
2304 return 0; 2180 return 0;
2305 } 2181 }
2306 2182
2307 if(intel_crtc->plane > dev_priv->num_pipe) { 2183 switch (intel_crtc->plane) {
2308 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", 2184 case 0:
2309 intel_crtc->plane, 2185 case 1:
2310 dev_priv->num_pipe); 2186 break;
2187 default:
2188 DRM_ERROR("no plane for crtc\n");
2311 return -EINVAL; 2189 return -EINVAL;
2312 } 2190 }
2313 2191
2314 mutex_lock(&dev->struct_mutex); 2192 mutex_lock(&dev->struct_mutex);
2315 ret = intel_pin_and_fence_fb_obj(dev, 2193 ret = intel_pin_and_fence_fb_obj(dev,
2316 to_intel_framebuffer(fb)->obj, 2194 to_intel_framebuffer(crtc->fb)->obj,
2317 NULL); 2195 NULL);
2318 if (ret != 0) { 2196 if (ret != 0) {
2319 mutex_unlock(&dev->struct_mutex); 2197 mutex_unlock(&dev->struct_mutex);
@@ -2321,31 +2199,56 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2321 return ret; 2199 return ret;
2322 } 2200 }
2323 2201
2324 if (crtc->fb) 2202 if (old_fb) {
2325 intel_finish_fb(crtc->fb); 2203 struct drm_i915_private *dev_priv = dev->dev_private;
2204 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2326 2205
2327 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2206 wait_event(dev_priv->pending_flip_queue,
2207 atomic_read(&dev_priv->mm.wedged) ||
2208 atomic_read(&obj->pending_flip) == 0);
2209
2210 /* Big Hammer, we also need to ensure that any pending
2211 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2212 * current scanout is retired before unpinning the old
2213 * framebuffer.
2214 *
2215 * This should only fail upon a hung GPU, in which case we
2216 * can safely continue.
2217 */
2218 ret = i915_gem_object_finish_gpu(obj);
2219 (void) ret;
2220 }
2221
2222 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2223 LEAVE_ATOMIC_MODE_SET);
2328 if (ret) { 2224 if (ret) {
2329 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); 2225 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2330 mutex_unlock(&dev->struct_mutex); 2226 mutex_unlock(&dev->struct_mutex);
2331 DRM_ERROR("failed to update base address\n"); 2227 DRM_ERROR("failed to update base address\n");
2332 return ret; 2228 return ret;
2333 } 2229 }
2334 2230
2335 old_fb = crtc->fb;
2336 crtc->fb = fb;
2337 crtc->x = x;
2338 crtc->y = y;
2339
2340 if (old_fb) { 2231 if (old_fb) {
2341 intel_wait_for_vblank(dev, intel_crtc->pipe); 2232 intel_wait_for_vblank(dev, intel_crtc->pipe);
2342 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2233 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
2343 } 2234 }
2344 2235
2345 intel_update_fbc(dev);
2346 mutex_unlock(&dev->struct_mutex); 2236 mutex_unlock(&dev->struct_mutex);
2347 2237
2348 intel_crtc_update_sarea_pos(crtc, x, y); 2238 if (!dev->primary->master)
2239 return 0;
2240
2241 master_priv = dev->primary->master->driver_priv;
2242 if (!master_priv->sarea_priv)
2243 return 0;
2244
2245 if (intel_crtc->pipe) {
2246 master_priv->sarea_priv->pipeB_x = x;
2247 master_priv->sarea_priv->pipeB_y = y;
2248 } else {
2249 master_priv->sarea_priv->pipeA_x = x;
2250 master_priv->sarea_priv->pipeA_y = y;
2251 }
2349 2252
2350 return 0; 2253 return 0;
2351} 2254}
@@ -2428,27 +2331,16 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2428 FDI_FE_ERRC_ENABLE); 2331 FDI_FE_ERRC_ENABLE);
2429} 2332}
2430 2333
2431static void ivb_modeset_global_resources(struct drm_device *dev) 2334static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2432{ 2335{
2433 struct drm_i915_private *dev_priv = dev->dev_private; 2336 struct drm_i915_private *dev_priv = dev->dev_private;
2434 struct intel_crtc *pipe_B_crtc = 2337 u32 flags = I915_READ(SOUTH_CHICKEN1);
2435 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2436 struct intel_crtc *pipe_C_crtc =
2437 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2438 uint32_t temp;
2439
2440 /* When everything is off disable fdi C so that we could enable fdi B
2441 * with all lanes. XXX: This misses the case where a pipe is not using
2442 * any pch resources and so doesn't need any fdi lanes. */
2443 if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2444 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2445 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2446 2338
2447 temp = I915_READ(SOUTH_CHICKEN1); 2339 flags |= FDI_PHASE_SYNC_OVR(pipe);
2448 temp &= ~FDI_BC_BIFURCATION_SELECT; 2340 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2449 DRM_DEBUG_KMS("disabling fdi C rx\n"); 2341 flags |= FDI_PHASE_SYNC_EN(pipe);
2450 I915_WRITE(SOUTH_CHICKEN1, temp); 2342 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2451 } 2343 POSTING_READ(SOUTH_CHICKEN1);
2452} 2344}
2453 2345
2454/* The FDI link training functions for ILK/Ibexpeak. */ 2346/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2494,9 +2386,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2494 udelay(150); 2386 udelay(150);
2495 2387
2496 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2388 /* Ironlake workaround, enable clock pointer after FDI enable*/
2497 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2389 if (HAS_PCH_IBX(dev)) {
2498 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2390 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2499 FDI_RX_PHASE_SYNC_POINTER_EN); 2391 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2392 FDI_RX_PHASE_SYNC_POINTER_EN);
2393 }
2500 2394
2501 reg = FDI_RX_IIR(pipe); 2395 reg = FDI_RX_IIR(pipe);
2502 for (tries = 0; tries < 5; tries++) { 2396 for (tries = 0; tries < 5; tries++) {
@@ -2546,7 +2440,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2546 2440
2547} 2441}
2548 2442
2549static const int snb_b_fdi_train_param[] = { 2443static const int snb_b_fdi_train_param [] = {
2550 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2444 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2551 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2445 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2552 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2446 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2560,7 +2454,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2560 struct drm_i915_private *dev_priv = dev->dev_private; 2454 struct drm_i915_private *dev_priv = dev->dev_private;
2561 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2455 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2562 int pipe = intel_crtc->pipe; 2456 int pipe = intel_crtc->pipe;
2563 u32 reg, temp, i, retry; 2457 u32 reg, temp, i;
2564 2458
2565 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2459 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2566 for train result */ 2460 for train result */
@@ -2585,9 +2479,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2585 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2479 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2586 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2480 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2587 2481
2588 I915_WRITE(FDI_RX_MISC(pipe),
2589 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2590
2591 reg = FDI_RX_CTL(pipe); 2482 reg = FDI_RX_CTL(pipe);
2592 temp = I915_READ(reg); 2483 temp = I915_READ(reg);
2593 if (HAS_PCH_CPT(dev)) { 2484 if (HAS_PCH_CPT(dev)) {
@@ -2602,7 +2493,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2602 POSTING_READ(reg); 2493 POSTING_READ(reg);
2603 udelay(150); 2494 udelay(150);
2604 2495
2605 for (i = 0; i < 4; i++) { 2496 if (HAS_PCH_CPT(dev))
2497 cpt_phase_pointer_enable(dev, pipe);
2498
2499 for (i = 0; i < 4; i++ ) {
2606 reg = FDI_TX_CTL(pipe); 2500 reg = FDI_TX_CTL(pipe);
2607 temp = I915_READ(reg); 2501 temp = I915_READ(reg);
2608 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2502 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2612,19 +2506,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2612 POSTING_READ(reg); 2506 POSTING_READ(reg);
2613 udelay(500); 2507 udelay(500);
2614 2508
2615 for (retry = 0; retry < 5; retry++) { 2509 reg = FDI_RX_IIR(pipe);
2616 reg = FDI_RX_IIR(pipe); 2510 temp = I915_READ(reg);
2617 temp = I915_READ(reg); 2511 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2618 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2512
2619 if (temp & FDI_RX_BIT_LOCK) { 2513 if (temp & FDI_RX_BIT_LOCK) {
2620 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2514 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2621 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2515 DRM_DEBUG_KMS("FDI train 1 done.\n");
2622 break;
2623 }
2624 udelay(50);
2625 }
2626 if (retry < 5)
2627 break; 2516 break;
2517 }
2628 } 2518 }
2629 if (i == 4) 2519 if (i == 4)
2630 DRM_ERROR("FDI train 1 fail!\n"); 2520 DRM_ERROR("FDI train 1 fail!\n");
@@ -2655,7 +2545,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2655 POSTING_READ(reg); 2545 POSTING_READ(reg);
2656 udelay(150); 2546 udelay(150);
2657 2547
2658 for (i = 0; i < 4; i++) { 2548 for (i = 0; i < 4; i++ ) {
2659 reg = FDI_TX_CTL(pipe); 2549 reg = FDI_TX_CTL(pipe);
2660 temp = I915_READ(reg); 2550 temp = I915_READ(reg);
2661 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2551 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2665,19 +2555,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2665 POSTING_READ(reg); 2555 POSTING_READ(reg);
2666 udelay(500); 2556 udelay(500);
2667 2557
2668 for (retry = 0; retry < 5; retry++) { 2558 reg = FDI_RX_IIR(pipe);
2669 reg = FDI_RX_IIR(pipe); 2559 temp = I915_READ(reg);
2670 temp = I915_READ(reg); 2560 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2671 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2561
2672 if (temp & FDI_RX_SYMBOL_LOCK) { 2562 if (temp & FDI_RX_SYMBOL_LOCK) {
2673 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2563 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2674 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2564 DRM_DEBUG_KMS("FDI train 2 done.\n");
2675 break;
2676 }
2677 udelay(50);
2678 }
2679 if (retry < 5)
2680 break; 2565 break;
2566 }
2681 } 2567 }
2682 if (i == 4) 2568 if (i == 4)
2683 DRM_ERROR("FDI train 2 fail!\n"); 2569 DRM_ERROR("FDI train 2 fail!\n");
@@ -2705,9 +2591,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2705 POSTING_READ(reg); 2591 POSTING_READ(reg);
2706 udelay(150); 2592 udelay(150);
2707 2593
2708 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2709 I915_READ(FDI_RX_IIR(pipe)));
2710
2711 /* enable CPU FDI TX and PCH FDI RX */ 2594 /* enable CPU FDI TX and PCH FDI RX */
2712 reg = FDI_TX_CTL(pipe); 2595 reg = FDI_TX_CTL(pipe);
2713 temp = I915_READ(reg); 2596 temp = I915_READ(reg);
@@ -2720,9 +2603,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2720 temp |= FDI_COMPOSITE_SYNC; 2603 temp |= FDI_COMPOSITE_SYNC;
2721 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2604 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2722 2605
2723 I915_WRITE(FDI_RX_MISC(pipe),
2724 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2725
2726 reg = FDI_RX_CTL(pipe); 2606 reg = FDI_RX_CTL(pipe);
2727 temp = I915_READ(reg); 2607 temp = I915_READ(reg);
2728 temp &= ~FDI_LINK_TRAIN_AUTO; 2608 temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2734,7 +2614,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2734 POSTING_READ(reg); 2614 POSTING_READ(reg);
2735 udelay(150); 2615 udelay(150);
2736 2616
2737 for (i = 0; i < 4; i++) { 2617 if (HAS_PCH_CPT(dev))
2618 cpt_phase_pointer_enable(dev, pipe);
2619
2620 for (i = 0; i < 4; i++ ) {
2738 reg = FDI_TX_CTL(pipe); 2621 reg = FDI_TX_CTL(pipe);
2739 temp = I915_READ(reg); 2622 temp = I915_READ(reg);
2740 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2623 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2751,7 +2634,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2751 if (temp & FDI_RX_BIT_LOCK || 2634 if (temp & FDI_RX_BIT_LOCK ||
2752 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2635 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2753 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2636 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2754 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); 2637 DRM_DEBUG_KMS("FDI train 1 done.\n");
2755 break; 2638 break;
2756 } 2639 }
2757 } 2640 }
@@ -2776,7 +2659,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2776 POSTING_READ(reg); 2659 POSTING_READ(reg);
2777 udelay(150); 2660 udelay(150);
2778 2661
2779 for (i = 0; i < 4; i++) { 2662 for (i = 0; i < 4; i++ ) {
2780 reg = FDI_TX_CTL(pipe); 2663 reg = FDI_TX_CTL(pipe);
2781 temp = I915_READ(reg); 2664 temp = I915_READ(reg);
2782 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2665 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2792,7 +2675,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2792 2675
2793 if (temp & FDI_RX_SYMBOL_LOCK) { 2676 if (temp & FDI_RX_SYMBOL_LOCK) {
2794 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2677 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2795 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); 2678 DRM_DEBUG_KMS("FDI train 2 done.\n");
2796 break; 2679 break;
2797 } 2680 }
2798 } 2681 }
@@ -2802,13 +2685,17 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2802 DRM_DEBUG_KMS("FDI train done.\n"); 2685 DRM_DEBUG_KMS("FDI train done.\n");
2803} 2686}
2804 2687
2805static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 2688static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2806{ 2689{
2807 struct drm_device *dev = intel_crtc->base.dev; 2690 struct drm_device *dev = crtc->dev;
2808 struct drm_i915_private *dev_priv = dev->dev_private; 2691 struct drm_i915_private *dev_priv = dev->dev_private;
2692 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2809 int pipe = intel_crtc->pipe; 2693 int pipe = intel_crtc->pipe;
2810 u32 reg, temp; 2694 u32 reg, temp;
2811 2695
2696 /* Write the TU size bits so error detection works */
2697 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2698 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2812 2699
2813 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2700 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2814 reg = FDI_RX_CTL(pipe); 2701 reg = FDI_RX_CTL(pipe);
@@ -2828,50 +2715,28 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2828 POSTING_READ(reg); 2715 POSTING_READ(reg);
2829 udelay(200); 2716 udelay(200);
2830 2717
2831 /* On Haswell, the PLL configuration for ports and pipes is handled 2718 /* Enable CPU FDI TX PLL, always on for Ironlake */
2832 * separately, as part of DDI setup */ 2719 reg = FDI_TX_CTL(pipe);
2833 if (!IS_HASWELL(dev)) { 2720 temp = I915_READ(reg);
2834 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2721 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2835 reg = FDI_TX_CTL(pipe); 2722 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2836 temp = I915_READ(reg);
2837 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2838 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2839 2723
2840 POSTING_READ(reg); 2724 POSTING_READ(reg);
2841 udelay(100); 2725 udelay(100);
2842 }
2843 } 2726 }
2844} 2727}
2845 2728
2846static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 2729static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2847{ 2730{
2848 struct drm_device *dev = intel_crtc->base.dev;
2849 struct drm_i915_private *dev_priv = dev->dev_private; 2731 struct drm_i915_private *dev_priv = dev->dev_private;
2850 int pipe = intel_crtc->pipe; 2732 u32 flags = I915_READ(SOUTH_CHICKEN1);
2851 u32 reg, temp;
2852
2853 /* Switch from PCDclk to Rawclk */
2854 reg = FDI_RX_CTL(pipe);
2855 temp = I915_READ(reg);
2856 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2857
2858 /* Disable CPU FDI TX PLL */
2859 reg = FDI_TX_CTL(pipe);
2860 temp = I915_READ(reg);
2861 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2862
2863 POSTING_READ(reg);
2864 udelay(100);
2865 2733
2866 reg = FDI_RX_CTL(pipe); 2734 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2867 temp = I915_READ(reg); 2735 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2868 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2736 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2869 2737 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2870 /* Wait for the clocks to turn off. */ 2738 POSTING_READ(SOUTH_CHICKEN1);
2871 POSTING_READ(reg);
2872 udelay(100);
2873} 2739}
2874
2875static void ironlake_fdi_disable(struct drm_crtc *crtc) 2740static void ironlake_fdi_disable(struct drm_crtc *crtc)
2876{ 2741{
2877 struct drm_device *dev = crtc->dev; 2742 struct drm_device *dev = crtc->dev;
@@ -2898,6 +2763,11 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2898 /* Ironlake workaround, disable clock pointer after downing FDI */ 2763 /* Ironlake workaround, disable clock pointer after downing FDI */
2899 if (HAS_PCH_IBX(dev)) { 2764 if (HAS_PCH_IBX(dev)) {
2900 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2765 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2766 I915_WRITE(FDI_RX_CHICKEN(pipe),
2767 I915_READ(FDI_RX_CHICKEN(pipe) &
2768 ~FDI_RX_PHASE_SYNC_POINTER_EN));
2769 } else if (HAS_PCH_CPT(dev)) {
2770 cpt_phase_pointer_disable(dev, pipe);
2901 } 2771 }
2902 2772
2903 /* still set train pattern 1 */ 2773 /* still set train pattern 1 */
@@ -2925,52 +2795,57 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2925 udelay(100); 2795 udelay(100);
2926} 2796}
2927 2797
2928static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 2798/*
2799 * When we disable a pipe, we need to clear any pending scanline wait events
2800 * to avoid hanging the ring, which we assume we are waiting on.
2801 */
2802static void intel_clear_scanline_wait(struct drm_device *dev)
2929{ 2803{
2930 struct drm_device *dev = crtc->dev;
2931 struct drm_i915_private *dev_priv = dev->dev_private; 2804 struct drm_i915_private *dev_priv = dev->dev_private;
2932 unsigned long flags; 2805 struct intel_ring_buffer *ring;
2933 bool pending; 2806 u32 tmp;
2934
2935 if (atomic_read(&dev_priv->mm.wedged))
2936 return false;
2937 2807
2938 spin_lock_irqsave(&dev->event_lock, flags); 2808 if (IS_GEN2(dev))
2939 pending = to_intel_crtc(crtc)->unpin_work != NULL; 2809 /* Can't break the hang on i8xx */
2940 spin_unlock_irqrestore(&dev->event_lock, flags); 2810 return;
2941 2811
2942 return pending; 2812 ring = LP_RING(dev_priv);
2813 tmp = I915_READ_CTL(ring);
2814 if (tmp & RING_WAIT)
2815 I915_WRITE_CTL(ring, tmp);
2943} 2816}
2944 2817
2945static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2818static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2946{ 2819{
2947 struct drm_device *dev = crtc->dev; 2820 struct drm_i915_gem_object *obj;
2948 struct drm_i915_private *dev_priv = dev->dev_private; 2821 struct drm_i915_private *dev_priv;
2949 2822
2950 if (crtc->fb == NULL) 2823 if (crtc->fb == NULL)
2951 return; 2824 return;
2952 2825
2826 obj = to_intel_framebuffer(crtc->fb)->obj;
2827 dev_priv = crtc->dev->dev_private;
2953 wait_event(dev_priv->pending_flip_queue, 2828 wait_event(dev_priv->pending_flip_queue,
2954 !intel_crtc_has_pending_flip(crtc)); 2829 atomic_read(&obj->pending_flip) == 0);
2955
2956 mutex_lock(&dev->struct_mutex);
2957 intel_finish_fb(crtc->fb);
2958 mutex_unlock(&dev->struct_mutex);
2959} 2830}
2960 2831
2961static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) 2832static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2962{ 2833{
2963 struct drm_device *dev = crtc->dev; 2834 struct drm_device *dev = crtc->dev;
2964 struct intel_encoder *intel_encoder; 2835 struct drm_mode_config *mode_config = &dev->mode_config;
2836 struct intel_encoder *encoder;
2965 2837
2966 /* 2838 /*
2967 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2839 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2968 * must be driven by its own crtc; no sharing is possible. 2840 * must be driven by its own crtc; no sharing is possible.
2969 */ 2841 */
2970 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2842 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2971 switch (intel_encoder->type) { 2843 if (encoder->base.crtc != crtc)
2844 continue;
2845
2846 switch (encoder->type) {
2972 case INTEL_OUTPUT_EDP: 2847 case INTEL_OUTPUT_EDP:
2973 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 2848 if (!intel_encoder_is_pch_edp(&encoder->base))
2974 return false; 2849 return false;
2975 continue; 2850 continue;
2976 } 2851 }
@@ -2979,95 +2854,6 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
2979 return true; 2854 return true;
2980} 2855}
2981 2856
2982static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2983{
2984 return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2985}
2986
2987/* Program iCLKIP clock to the desired frequency */
2988static void lpt_program_iclkip(struct drm_crtc *crtc)
2989{
2990 struct drm_device *dev = crtc->dev;
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2993 u32 temp;
2994
2995 /* It is necessary to ungate the pixclk gate prior to programming
2996 * the divisors, and gate it back when it is done.
2997 */
2998 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2999
3000 /* Disable SSCCTL */
3001 intel_sbi_write(dev_priv, SBI_SSCCTL6,
3002 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3003 SBI_SSCCTL_DISABLE,
3004 SBI_ICLK);
3005
3006 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3007 if (crtc->mode.clock == 20000) {
3008 auxdiv = 1;
3009 divsel = 0x41;
3010 phaseinc = 0x20;
3011 } else {
3012 /* The iCLK virtual clock root frequency is in MHz,
3013 * but the crtc->mode.clock in in KHz. To get the divisors,
3014 * it is necessary to divide one by another, so we
3015 * convert the virtual clock precision to KHz here for higher
3016 * precision.
3017 */
3018 u32 iclk_virtual_root_freq = 172800 * 1000;
3019 u32 iclk_pi_range = 64;
3020 u32 desired_divisor, msb_divisor_value, pi_value;
3021
3022 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
3023 msb_divisor_value = desired_divisor / iclk_pi_range;
3024 pi_value = desired_divisor % iclk_pi_range;
3025
3026 auxdiv = 0;
3027 divsel = msb_divisor_value - 2;
3028 phaseinc = pi_value;
3029 }
3030
3031 /* This should not happen with any sane values */
3032 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3033 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3034 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3035 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3036
3037 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3038 crtc->mode.clock,
3039 auxdiv,
3040 divsel,
3041 phasedir,
3042 phaseinc);
3043
3044 /* Program SSCDIVINTPHASE6 */
3045 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3046 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3047 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3048 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3049 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3050 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3051 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3052 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3053
3054 /* Program SSCAUXDIV */
3055 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3056 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3057 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3058 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3059
3060 /* Enable modulator and associated divider */
3061 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3062 temp &= ~SBI_SSCCTL_DISABLE;
3063 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3064
3065 /* Wait for initialization time */
3066 udelay(24);
3067
3068 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3069}
3070
3071/* 2857/*
3072 * Enable PCH resources required for PCH ports: 2858 * Enable PCH resources required for PCH ports:
3073 * - PCH PLLs 2859 * - PCH PLLs
@@ -3084,48 +2870,18 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3084 int pipe = intel_crtc->pipe; 2870 int pipe = intel_crtc->pipe;
3085 u32 reg, temp; 2871 u32 reg, temp;
3086 2872
3087 assert_transcoder_disabled(dev_priv, pipe);
3088
3089 /* Write the TU size bits before fdi link training, so that error
3090 * detection works. */
3091 I915_WRITE(FDI_RX_TUSIZE1(pipe),
3092 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3093
3094 /* For PCH output, training FDI link */ 2873 /* For PCH output, training FDI link */
3095 dev_priv->display.fdi_link_train(crtc); 2874 dev_priv->display.fdi_link_train(crtc);
3096 2875
3097 /* XXX: pch pll's can be enabled any time before we enable the PCH 2876 intel_enable_pch_pll(dev_priv, pipe);
3098 * transcoder, and we actually should do this to not upset any PCH
3099 * transcoder that already use the clock when we share it.
3100 *
3101 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3102 * unconditionally resets the pll - we need that to have the right LVDS
3103 * enable sequence. */
3104 ironlake_enable_pch_pll(intel_crtc);
3105 2877
3106 if (HAS_PCH_CPT(dev)) { 2878 if (HAS_PCH_CPT(dev)) {
3107 u32 sel; 2879 /* Be sure PCH DPLL SEL is set */
3108
3109 temp = I915_READ(PCH_DPLL_SEL); 2880 temp = I915_READ(PCH_DPLL_SEL);
3110 switch (pipe) { 2881 if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
3111 default: 2882 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3112 case 0: 2883 else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
3113 temp |= TRANSA_DPLL_ENABLE; 2884 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3114 sel = TRANSA_DPLLB_SEL;
3115 break;
3116 case 1:
3117 temp |= TRANSB_DPLL_ENABLE;
3118 sel = TRANSB_DPLLB_SEL;
3119 break;
3120 case 2:
3121 temp |= TRANSC_DPLL_ENABLE;
3122 sel = TRANSC_DPLLB_SEL;
3123 break;
3124 }
3125 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3126 temp |= sel;
3127 else
3128 temp &= ~sel;
3129 I915_WRITE(PCH_DPLL_SEL, temp); 2885 I915_WRITE(PCH_DPLL_SEL, temp);
3130 } 2886 }
3131 2887
@@ -3138,14 +2894,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3138 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); 2894 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3139 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2895 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3140 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2896 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
3141 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3142 2897
3143 intel_fdi_normal_train(crtc); 2898 intel_fdi_normal_train(crtc);
3144 2899
3145 /* For PCH DP, enable TRANS_DP_CTL */ 2900 /* For PCH DP, enable TRANS_DP_CTL */
3146 if (HAS_PCH_CPT(dev) && 2901 if (HAS_PCH_CPT(dev) &&
3147 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 2902 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
3148 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3149 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2903 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3150 reg = TRANS_DP_CTL(pipe); 2904 reg = TRANS_DP_CTL(pipe);
3151 temp = I915_READ(reg); 2905 temp = I915_READ(reg);
@@ -3172,138 +2926,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3172 temp |= TRANS_DP_PORT_SEL_D; 2926 temp |= TRANS_DP_PORT_SEL_D;
3173 break; 2927 break;
3174 default: 2928 default:
3175 BUG(); 2929 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2930 temp |= TRANS_DP_PORT_SEL_B;
2931 break;
3176 } 2932 }
3177 2933
3178 I915_WRITE(reg, temp); 2934 I915_WRITE(reg, temp);
3179 } 2935 }
3180 2936
3181 ironlake_enable_pch_transcoder(dev_priv, pipe); 2937 intel_enable_transcoder(dev_priv, pipe);
3182}
3183
3184static void lpt_pch_enable(struct drm_crtc *crtc)
3185{
3186 struct drm_device *dev = crtc->dev;
3187 struct drm_i915_private *dev_priv = dev->dev_private;
3188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3189 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3190
3191 assert_transcoder_disabled(dev_priv, TRANSCODER_A);
3192
3193 lpt_program_iclkip(crtc);
3194
3195 /* Set transcoder timing. */
3196 I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3197 I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3198 I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
3199
3200 I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3201 I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3202 I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
3203 I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3204
3205 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3206}
3207
3208static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3209{
3210 struct intel_pch_pll *pll = intel_crtc->pch_pll;
3211
3212 if (pll == NULL)
3213 return;
3214
3215 if (pll->refcount == 0) {
3216 WARN(1, "bad PCH PLL refcount\n");
3217 return;
3218 }
3219
3220 --pll->refcount;
3221 intel_crtc->pch_pll = NULL;
3222}
3223
3224static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3225{
3226 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3227 struct intel_pch_pll *pll;
3228 int i;
3229
3230 pll = intel_crtc->pch_pll;
3231 if (pll) {
3232 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3233 intel_crtc->base.base.id, pll->pll_reg);
3234 goto prepare;
3235 }
3236
3237 if (HAS_PCH_IBX(dev_priv->dev)) {
3238 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3239 i = intel_crtc->pipe;
3240 pll = &dev_priv->pch_plls[i];
3241
3242 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3243 intel_crtc->base.base.id, pll->pll_reg);
3244
3245 goto found;
3246 }
3247
3248 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3249 pll = &dev_priv->pch_plls[i];
3250
3251 /* Only want to check enabled timings first */
3252 if (pll->refcount == 0)
3253 continue;
3254
3255 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3256 fp == I915_READ(pll->fp0_reg)) {
3257 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3258 intel_crtc->base.base.id,
3259 pll->pll_reg, pll->refcount, pll->active);
3260
3261 goto found;
3262 }
3263 }
3264
3265 /* Ok no matching timings, maybe there's a free one? */
3266 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3267 pll = &dev_priv->pch_plls[i];
3268 if (pll->refcount == 0) {
3269 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3270 intel_crtc->base.base.id, pll->pll_reg);
3271 goto found;
3272 }
3273 }
3274
3275 return NULL;
3276
3277found:
3278 intel_crtc->pch_pll = pll;
3279 pll->refcount++;
3280 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3281prepare: /* separate function? */
3282 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3283
3284 /* Wait for the clocks to stabilize before rewriting the regs */
3285 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3286 POSTING_READ(pll->pll_reg);
3287 udelay(150);
3288
3289 I915_WRITE(pll->fp0_reg, fp);
3290 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3291 pll->on = false;
3292 return pll;
3293}
3294
3295void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3296{
3297 struct drm_i915_private *dev_priv = dev->dev_private;
3298 int dslreg = PIPEDSL(pipe);
3299 u32 temp;
3300
3301 temp = I915_READ(dslreg);
3302 udelay(500);
3303 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3304 if (wait_for(I915_READ(dslreg) != temp, 5))
3305 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3306 }
3307} 2938}
3308 2939
3309static void ironlake_crtc_enable(struct drm_crtc *crtc) 2940static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -3311,14 +2942,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3311 struct drm_device *dev = crtc->dev; 2942 struct drm_device *dev = crtc->dev;
3312 struct drm_i915_private *dev_priv = dev->dev_private; 2943 struct drm_i915_private *dev_priv = dev->dev_private;
3313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2944 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3314 struct intel_encoder *encoder;
3315 int pipe = intel_crtc->pipe; 2945 int pipe = intel_crtc->pipe;
3316 int plane = intel_crtc->plane; 2946 int plane = intel_crtc->plane;
3317 u32 temp; 2947 u32 temp;
3318 bool is_pch_port; 2948 bool is_pch_port;
3319 2949
3320 WARN_ON(!crtc->enabled);
3321
3322 if (intel_crtc->active) 2950 if (intel_crtc->active)
3323 return; 2951 return;
3324 2952
@@ -3331,35 +2959,21 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3331 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2959 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3332 } 2960 }
3333 2961
3334 is_pch_port = ironlake_crtc_driving_pch(crtc); 2962 is_pch_port = intel_crtc_driving_pch(crtc);
3335
3336 if (is_pch_port) {
3337 /* Note: FDI PLL enabling _must_ be done before we enable the
3338 * cpu pipes, hence this is separate from all the other fdi/pch
3339 * enabling. */
3340 ironlake_fdi_pll_enable(intel_crtc);
3341 } else {
3342 assert_fdi_tx_disabled(dev_priv, pipe);
3343 assert_fdi_rx_disabled(dev_priv, pipe);
3344 }
3345 2963
3346 for_each_encoder_on_crtc(dev, crtc, encoder) 2964 if (is_pch_port)
3347 if (encoder->pre_enable) 2965 ironlake_fdi_pll_enable(crtc);
3348 encoder->pre_enable(encoder); 2966 else
2967 ironlake_fdi_disable(crtc);
3349 2968
3350 /* Enable panel fitting for LVDS */ 2969 /* Enable panel fitting for LVDS */
3351 if (dev_priv->pch_pf_size && 2970 if (dev_priv->pch_pf_size &&
3352 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2971 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3353 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3354 /* Force use of hard-coded filter coefficients 2972 /* Force use of hard-coded filter coefficients
3355 * as some pre-programmed values are broken, 2973 * as some pre-programmed values are broken,
3356 * e.g. x201. 2974 * e.g. x201.
3357 */ 2975 */
3358 if (IS_IVYBRIDGE(dev)) 2976 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3359 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3360 PF_PIPE_SEL_IVB(pipe));
3361 else
3362 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3363 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 2977 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3364 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2978 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3365 } 2979 }
@@ -3381,99 +2995,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3381 mutex_unlock(&dev->struct_mutex); 2995 mutex_unlock(&dev->struct_mutex);
3382 2996
3383 intel_crtc_update_cursor(crtc, true); 2997 intel_crtc_update_cursor(crtc, true);
3384
3385 for_each_encoder_on_crtc(dev, crtc, encoder)
3386 encoder->enable(encoder);
3387
3388 if (HAS_PCH_CPT(dev))
3389 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3390
3391 /*
3392 * There seems to be a race in PCH platform hw (at least on some
3393 * outputs) where an enabled pipe still completes any pageflip right
3394 * away (as if the pipe is off) instead of waiting for vblank. As soon
3395 * as the first vblank happend, everything works as expected. Hence just
3396 * wait for one vblank before returning to avoid strange things
3397 * happening.
3398 */
3399 intel_wait_for_vblank(dev, intel_crtc->pipe);
3400}
3401
3402static void haswell_crtc_enable(struct drm_crtc *crtc)
3403{
3404 struct drm_device *dev = crtc->dev;
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3406 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3407 struct intel_encoder *encoder;
3408 int pipe = intel_crtc->pipe;
3409 int plane = intel_crtc->plane;
3410 bool is_pch_port;
3411
3412 WARN_ON(!crtc->enabled);
3413
3414 if (intel_crtc->active)
3415 return;
3416
3417 intel_crtc->active = true;
3418 intel_update_watermarks(dev);
3419
3420 is_pch_port = haswell_crtc_driving_pch(crtc);
3421
3422 if (is_pch_port)
3423 dev_priv->display.fdi_link_train(crtc);
3424
3425 for_each_encoder_on_crtc(dev, crtc, encoder)
3426 if (encoder->pre_enable)
3427 encoder->pre_enable(encoder);
3428
3429 intel_ddi_enable_pipe_clock(intel_crtc);
3430
3431 /* Enable panel fitting for eDP */
3432 if (dev_priv->pch_pf_size &&
3433 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3434 /* Force use of hard-coded filter coefficients
3435 * as some pre-programmed values are broken,
3436 * e.g. x201.
3437 */
3438 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3439 PF_PIPE_SEL_IVB(pipe));
3440 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3441 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3442 }
3443
3444 /*
3445 * On ILK+ LUT must be loaded before the pipe is running but with
3446 * clocks enabled
3447 */
3448 intel_crtc_load_lut(crtc);
3449
3450 intel_ddi_set_pipe_settings(crtc);
3451 intel_ddi_enable_pipe_func(crtc);
3452
3453 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3454 intel_enable_plane(dev_priv, plane, pipe);
3455
3456 if (is_pch_port)
3457 lpt_pch_enable(crtc);
3458
3459 mutex_lock(&dev->struct_mutex);
3460 intel_update_fbc(dev);
3461 mutex_unlock(&dev->struct_mutex);
3462
3463 intel_crtc_update_cursor(crtc, true);
3464
3465 for_each_encoder_on_crtc(dev, crtc, encoder)
3466 encoder->enable(encoder);
3467
3468 /*
3469 * There seems to be a race in PCH platform hw (at least on some
3470 * outputs) where an enabled pipe still completes any pageflip right
3471 * away (as if the pipe is off) instead of waiting for vblank. As soon
3472 * as the first vblank happend, everything works as expected. Hence just
3473 * wait for one vblank before returning to avoid strange things
3474 * happening.
3475 */
3476 intel_wait_for_vblank(dev, intel_crtc->pipe);
3477} 2998}
3478 2999
3479static void ironlake_crtc_disable(struct drm_crtc *crtc) 3000static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3481,18 +3002,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3481 struct drm_device *dev = crtc->dev; 3002 struct drm_device *dev = crtc->dev;
3482 struct drm_i915_private *dev_priv = dev->dev_private; 3003 struct drm_i915_private *dev_priv = dev->dev_private;
3483 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3004 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3484 struct intel_encoder *encoder;
3485 int pipe = intel_crtc->pipe; 3005 int pipe = intel_crtc->pipe;
3486 int plane = intel_crtc->plane; 3006 int plane = intel_crtc->plane;
3487 u32 reg, temp; 3007 u32 reg, temp;
3488 3008
3489
3490 if (!intel_crtc->active) 3009 if (!intel_crtc->active)
3491 return; 3010 return;
3492 3011
3493 for_each_encoder_on_crtc(dev, crtc, encoder)
3494 encoder->disable(encoder);
3495
3496 intel_crtc_wait_for_pending_flips(crtc); 3012 intel_crtc_wait_for_pending_flips(crtc);
3497 drm_vblank_off(dev, pipe); 3013 drm_vblank_off(dev, pipe);
3498 intel_crtc_update_cursor(crtc, false); 3014 intel_crtc_update_cursor(crtc, false);
@@ -3508,13 +3024,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3508 I915_WRITE(PF_CTL(pipe), 0); 3024 I915_WRITE(PF_CTL(pipe), 0);
3509 I915_WRITE(PF_WIN_SZ(pipe), 0); 3025 I915_WRITE(PF_WIN_SZ(pipe), 0);
3510 3026
3511 for_each_encoder_on_crtc(dev, crtc, encoder)
3512 if (encoder->post_disable)
3513 encoder->post_disable(encoder);
3514
3515 ironlake_fdi_disable(crtc); 3027 ironlake_fdi_disable(crtc);
3516 3028
3517 ironlake_disable_pch_transcoder(dev_priv, pipe); 3029 /* This is a horrible layering violation; we should be doing this in
3030 * the connector/encoder ->prepare instead, but we don't always have
3031 * enough information there about the config to know whether it will
3032 * actually be necessary or just cause undesired flicker.
3033 */
3034 intel_disable_pch_ports(dev_priv, pipe);
3035
3036 intel_disable_transcoder(dev_priv, pipe);
3518 3037
3519 if (HAS_PCH_CPT(dev)) { 3038 if (HAS_PCH_CPT(dev)) {
3520 /* disable TRANS_DP_CTL */ 3039 /* disable TRANS_DP_CTL */
@@ -3528,13 +3047,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3528 temp = I915_READ(PCH_DPLL_SEL); 3047 temp = I915_READ(PCH_DPLL_SEL);
3529 switch (pipe) { 3048 switch (pipe) {
3530 case 0: 3049 case 0:
3531 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 3050 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3532 break; 3051 break;
3533 case 1: 3052 case 1:
3534 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 3053 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3535 break; 3054 break;
3536 case 2: 3055 case 2:
3537 /* C shares PLL A or B */ 3056 /* FIXME: manage transcoder PLLs? */
3538 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 3057 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3539 break; 3058 break;
3540 default: 3059 default:
@@ -3544,88 +3063,60 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3544 } 3063 }
3545 3064
3546 /* disable PCH DPLL */ 3065 /* disable PCH DPLL */
3547 intel_disable_pch_pll(intel_crtc); 3066 intel_disable_pch_pll(dev_priv, pipe);
3548
3549 ironlake_fdi_pll_disable(intel_crtc);
3550
3551 intel_crtc->active = false;
3552 intel_update_watermarks(dev);
3553
3554 mutex_lock(&dev->struct_mutex);
3555 intel_update_fbc(dev);
3556 mutex_unlock(&dev->struct_mutex);
3557}
3558
3559static void haswell_crtc_disable(struct drm_crtc *crtc)
3560{
3561 struct drm_device *dev = crtc->dev;
3562 struct drm_i915_private *dev_priv = dev->dev_private;
3563 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3564 struct intel_encoder *encoder;
3565 int pipe = intel_crtc->pipe;
3566 int plane = intel_crtc->plane;
3567 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3568 bool is_pch_port;
3569
3570 if (!intel_crtc->active)
3571 return;
3572
3573 is_pch_port = haswell_crtc_driving_pch(crtc);
3574
3575 for_each_encoder_on_crtc(dev, crtc, encoder)
3576 encoder->disable(encoder);
3577
3578 intel_crtc_wait_for_pending_flips(crtc);
3579 drm_vblank_off(dev, pipe);
3580 intel_crtc_update_cursor(crtc, false);
3581 3067
3582 intel_disable_plane(dev_priv, plane, pipe); 3068 /* Switch from PCDclk to Rawclk */
3583 3069 reg = FDI_RX_CTL(pipe);
3584 if (dev_priv->cfb_plane == plane) 3070 temp = I915_READ(reg);
3585 intel_disable_fbc(dev); 3071 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3586
3587 intel_disable_pipe(dev_priv, pipe);
3588
3589 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3590 3072
3591 /* Disable PF */ 3073 /* Disable CPU FDI TX PLL */
3592 I915_WRITE(PF_CTL(pipe), 0); 3074 reg = FDI_TX_CTL(pipe);
3593 I915_WRITE(PF_WIN_SZ(pipe), 0); 3075 temp = I915_READ(reg);
3076 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3594 3077
3595 intel_ddi_disable_pipe_clock(intel_crtc); 3078 POSTING_READ(reg);
3079 udelay(100);
3596 3080
3597 for_each_encoder_on_crtc(dev, crtc, encoder) 3081 reg = FDI_RX_CTL(pipe);
3598 if (encoder->post_disable) 3082 temp = I915_READ(reg);
3599 encoder->post_disable(encoder); 3083 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3600 3084
3601 if (is_pch_port) { 3085 /* Wait for the clocks to turn off. */
3602 lpt_disable_pch_transcoder(dev_priv); 3086 POSTING_READ(reg);
3603 intel_ddi_fdi_disable(crtc); 3087 udelay(100);
3604 }
3605 3088
3606 intel_crtc->active = false; 3089 intel_crtc->active = false;
3607 intel_update_watermarks(dev); 3090 intel_update_watermarks(dev);
3608 3091
3609 mutex_lock(&dev->struct_mutex); 3092 mutex_lock(&dev->struct_mutex);
3610 intel_update_fbc(dev); 3093 intel_update_fbc(dev);
3094 intel_clear_scanline_wait(dev);
3611 mutex_unlock(&dev->struct_mutex); 3095 mutex_unlock(&dev->struct_mutex);
3612} 3096}
3613 3097
3614static void ironlake_crtc_off(struct drm_crtc *crtc) 3098static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3615{
3616 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3617 intel_put_pch_pll(intel_crtc);
3618}
3619
3620static void haswell_crtc_off(struct drm_crtc *crtc)
3621{ 3099{
3622 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3101 int pipe = intel_crtc->pipe;
3102 int plane = intel_crtc->plane;
3623 3103
3624 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might 3104 /* XXX: When our outputs are all unaware of DPMS modes other than off
3625 * start using it. */ 3105 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3626 intel_crtc->cpu_transcoder = intel_crtc->pipe; 3106 */
3107 switch (mode) {
3108 case DRM_MODE_DPMS_ON:
3109 case DRM_MODE_DPMS_STANDBY:
3110 case DRM_MODE_DPMS_SUSPEND:
3111 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3112 ironlake_crtc_enable(crtc);
3113 break;
3627 3114
3628 intel_ddi_put_crtc_pll(crtc); 3115 case DRM_MODE_DPMS_OFF:
3116 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3117 ironlake_crtc_disable(crtc);
3118 break;
3119 }
3629} 3120}
3630 3121
3631static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 3122static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
@@ -3651,12 +3142,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3651 struct drm_device *dev = crtc->dev; 3142 struct drm_device *dev = crtc->dev;
3652 struct drm_i915_private *dev_priv = dev->dev_private; 3143 struct drm_i915_private *dev_priv = dev->dev_private;
3653 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3144 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3654 struct intel_encoder *encoder;
3655 int pipe = intel_crtc->pipe; 3145 int pipe = intel_crtc->pipe;
3656 int plane = intel_crtc->plane; 3146 int plane = intel_crtc->plane;
3657 3147
3658 WARN_ON(!crtc->enabled);
3659
3660 if (intel_crtc->active) 3148 if (intel_crtc->active)
3661 return; 3149 return;
3662 3150
@@ -3673,9 +3161,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3673 /* Give the overlay scaler a chance to enable if it's on this pipe */ 3161 /* Give the overlay scaler a chance to enable if it's on this pipe */
3674 intel_crtc_dpms_overlay(intel_crtc, true); 3162 intel_crtc_dpms_overlay(intel_crtc, true);
3675 intel_crtc_update_cursor(crtc, true); 3163 intel_crtc_update_cursor(crtc, true);
3676
3677 for_each_encoder_on_crtc(dev, crtc, encoder)
3678 encoder->enable(encoder);
3679} 3164}
3680 3165
3681static void i9xx_crtc_disable(struct drm_crtc *crtc) 3166static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3683,17 +3168,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3683 struct drm_device *dev = crtc->dev; 3168 struct drm_device *dev = crtc->dev;
3684 struct drm_i915_private *dev_priv = dev->dev_private; 3169 struct drm_i915_private *dev_priv = dev->dev_private;
3685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3170 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3686 struct intel_encoder *encoder;
3687 int pipe = intel_crtc->pipe; 3171 int pipe = intel_crtc->pipe;
3688 int plane = intel_crtc->plane; 3172 int plane = intel_crtc->plane;
3689 3173
3690
3691 if (!intel_crtc->active) 3174 if (!intel_crtc->active)
3692 return; 3175 return;
3693 3176
3694 for_each_encoder_on_crtc(dev, crtc, encoder)
3695 encoder->disable(encoder);
3696
3697 /* Give the overlay scaler a chance to disable if it's on this pipe */ 3177 /* Give the overlay scaler a chance to disable if it's on this pipe */
3698 intel_crtc_wait_for_pending_flips(crtc); 3178 intel_crtc_wait_for_pending_flips(crtc);
3699 drm_vblank_off(dev, pipe); 3179 drm_vblank_off(dev, pipe);
@@ -3710,19 +3190,44 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3710 intel_crtc->active = false; 3190 intel_crtc->active = false;
3711 intel_update_fbc(dev); 3191 intel_update_fbc(dev);
3712 intel_update_watermarks(dev); 3192 intel_update_watermarks(dev);
3193 intel_clear_scanline_wait(dev);
3713} 3194}
3714 3195
3715static void i9xx_crtc_off(struct drm_crtc *crtc) 3196static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3716{ 3197{
3198 /* XXX: When our outputs are all unaware of DPMS modes other than off
3199 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3200 */
3201 switch (mode) {
3202 case DRM_MODE_DPMS_ON:
3203 case DRM_MODE_DPMS_STANDBY:
3204 case DRM_MODE_DPMS_SUSPEND:
3205 i9xx_crtc_enable(crtc);
3206 break;
3207 case DRM_MODE_DPMS_OFF:
3208 i9xx_crtc_disable(crtc);
3209 break;
3210 }
3717} 3211}
3718 3212
3719static void intel_crtc_update_sarea(struct drm_crtc *crtc, 3213/**
3720 bool enabled) 3214 * Sets the power management mode of the pipe and plane.
3215 */
3216static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3721{ 3217{
3722 struct drm_device *dev = crtc->dev; 3218 struct drm_device *dev = crtc->dev;
3219 struct drm_i915_private *dev_priv = dev->dev_private;
3723 struct drm_i915_master_private *master_priv; 3220 struct drm_i915_master_private *master_priv;
3724 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3221 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3725 int pipe = intel_crtc->pipe; 3222 int pipe = intel_crtc->pipe;
3223 bool enabled;
3224
3225 if (intel_crtc->dpms_mode == mode)
3226 return;
3227
3228 intel_crtc->dpms_mode = mode;
3229
3230 dev_priv->display.dpms(crtc, mode);
3726 3231
3727 if (!dev->primary->master) 3232 if (!dev->primary->master)
3728 return; 3233 return;
@@ -3731,6 +3236,8 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3731 if (!master_priv->sarea_priv) 3236 if (!master_priv->sarea_priv)
3732 return; 3237 return;
3733 3238
3239 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3240
3734 switch (pipe) { 3241 switch (pipe) {
3735 case 0: 3242 case 0:
3736 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 3243 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3746,177 +3253,72 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3746 } 3253 }
3747} 3254}
3748 3255
3749/**
3750 * Sets the power management mode of the pipe and plane.
3751 */
3752void intel_crtc_update_dpms(struct drm_crtc *crtc)
3753{
3754 struct drm_device *dev = crtc->dev;
3755 struct drm_i915_private *dev_priv = dev->dev_private;
3756 struct intel_encoder *intel_encoder;
3757 bool enable = false;
3758
3759 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3760 enable |= intel_encoder->connectors_active;
3761
3762 if (enable)
3763 dev_priv->display.crtc_enable(crtc);
3764 else
3765 dev_priv->display.crtc_disable(crtc);
3766
3767 intel_crtc_update_sarea(crtc, enable);
3768}
3769
3770static void intel_crtc_noop(struct drm_crtc *crtc)
3771{
3772}
3773
3774static void intel_crtc_disable(struct drm_crtc *crtc) 3256static void intel_crtc_disable(struct drm_crtc *crtc)
3775{ 3257{
3258 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3776 struct drm_device *dev = crtc->dev; 3259 struct drm_device *dev = crtc->dev;
3777 struct drm_connector *connector;
3778 struct drm_i915_private *dev_priv = dev->dev_private;
3779 3260
3780 /* crtc should still be enabled when we disable it. */ 3261 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3781 WARN_ON(!crtc->enabled);
3782
3783 dev_priv->display.crtc_disable(crtc);
3784 intel_crtc_update_sarea(crtc, false);
3785 dev_priv->display.off(crtc);
3786
3787 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3788 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3789 3262
3790 if (crtc->fb) { 3263 if (crtc->fb) {
3791 mutex_lock(&dev->struct_mutex); 3264 mutex_lock(&dev->struct_mutex);
3792 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 3265 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
3793 mutex_unlock(&dev->struct_mutex); 3266 mutex_unlock(&dev->struct_mutex);
3794 crtc->fb = NULL;
3795 }
3796
3797 /* Update computed state. */
3798 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3799 if (!connector->encoder || !connector->encoder->crtc)
3800 continue;
3801
3802 if (connector->encoder->crtc != crtc)
3803 continue;
3804
3805 connector->dpms = DRM_MODE_DPMS_OFF;
3806 to_intel_encoder(connector->encoder)->connectors_active = false;
3807 } 3267 }
3808} 3268}
3809 3269
3810void intel_modeset_disable(struct drm_device *dev) 3270/* Prepare for a mode set.
3271 *
3272 * Note we could be a lot smarter here. We need to figure out which outputs
3273 * will be enabled, which disabled (in short, how the config will changes)
3274 * and perform the minimum necessary steps to accomplish that, e.g. updating
3275 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3276 * panel fitting is in the proper state, etc.
3277 */
3278static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3811{ 3279{
3812 struct drm_crtc *crtc; 3280 i9xx_crtc_disable(crtc);
3813
3814 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3815 if (crtc->enabled)
3816 intel_crtc_disable(crtc);
3817 }
3818} 3281}
3819 3282
3820void intel_encoder_noop(struct drm_encoder *encoder) 3283static void i9xx_crtc_commit(struct drm_crtc *crtc)
3821{ 3284{
3285 i9xx_crtc_enable(crtc);
3822} 3286}
3823 3287
3824void intel_encoder_destroy(struct drm_encoder *encoder) 3288static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3825{ 3289{
3826 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3290 ironlake_crtc_disable(crtc);
3827
3828 drm_encoder_cleanup(encoder);
3829 kfree(intel_encoder);
3830} 3291}
3831 3292
3832/* Simple dpms helper for encodres with just one connector, no cloning and only 3293static void ironlake_crtc_commit(struct drm_crtc *crtc)
3833 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3834 * state of the entire output pipe. */
3835void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3836{ 3294{
3837 if (mode == DRM_MODE_DPMS_ON) { 3295 ironlake_crtc_enable(crtc);
3838 encoder->connectors_active = true;
3839
3840 intel_crtc_update_dpms(encoder->base.crtc);
3841 } else {
3842 encoder->connectors_active = false;
3843
3844 intel_crtc_update_dpms(encoder->base.crtc);
3845 }
3846} 3296}
3847 3297
3848/* Cross check the actual hw state with our own modeset state tracking (and it's 3298void intel_encoder_prepare (struct drm_encoder *encoder)
3849 * internal consistency). */
3850static void intel_connector_check_state(struct intel_connector *connector)
3851{ 3299{
3852 if (connector->get_hw_state(connector)) { 3300 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3853 struct intel_encoder *encoder = connector->encoder; 3301 /* lvds has its own version of prepare see intel_lvds_prepare */
3854 struct drm_crtc *crtc; 3302 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3855 bool encoder_enabled;
3856 enum pipe pipe;
3857
3858 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3859 connector->base.base.id,
3860 drm_get_connector_name(&connector->base));
3861
3862 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3863 "wrong connector dpms state\n");
3864 WARN(connector->base.encoder != &encoder->base,
3865 "active connector not linked to encoder\n");
3866 WARN(!encoder->connectors_active,
3867 "encoder->connectors_active not set\n");
3868
3869 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3870 WARN(!encoder_enabled, "encoder not enabled\n");
3871 if (WARN_ON(!encoder->base.crtc))
3872 return;
3873
3874 crtc = encoder->base.crtc;
3875
3876 WARN(!crtc->enabled, "crtc not enabled\n");
3877 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3878 WARN(pipe != to_intel_crtc(crtc)->pipe,
3879 "encoder active on the wrong pipe\n");
3880 }
3881} 3303}
3882 3304
3883/* Even simpler default implementation, if there's really no special case to 3305void intel_encoder_commit (struct drm_encoder *encoder)
3884 * consider. */
3885void intel_connector_dpms(struct drm_connector *connector, int mode)
3886{ 3306{
3887 struct intel_encoder *encoder = intel_attached_encoder(connector); 3307 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3888 3308 /* lvds has its own version of commit see intel_lvds_commit */
3889 /* All the simple cases only support two dpms states. */ 3309 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3890 if (mode != DRM_MODE_DPMS_ON)
3891 mode = DRM_MODE_DPMS_OFF;
3892
3893 if (mode == connector->dpms)
3894 return;
3895
3896 connector->dpms = mode;
3897
3898 /* Only need to change hw state when actually enabled */
3899 if (encoder->base.crtc)
3900 intel_encoder_dpms(encoder, mode);
3901 else
3902 WARN_ON(encoder->connectors_active != false);
3903
3904 intel_modeset_check_state(connector->dev);
3905} 3310}
3906 3311
3907/* Simple connector->get_hw_state implementation for encoders that support only 3312void intel_encoder_destroy(struct drm_encoder *encoder)
3908 * one connector and no cloning and hence the encoder state determines the state
3909 * of the connector. */
3910bool intel_connector_get_hw_state(struct intel_connector *connector)
3911{ 3313{
3912 enum pipe pipe = 0; 3314 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3913 struct intel_encoder *encoder = connector->encoder;
3914 3315
3915 return encoder->get_hw_state(encoder, &pipe); 3316 drm_encoder_cleanup(encoder);
3317 kfree(intel_encoder);
3916} 3318}
3917 3319
3918static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, 3320static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3919 const struct drm_display_mode *mode, 3321 struct drm_display_mode *mode,
3920 struct drm_display_mode *adjusted_mode) 3322 struct drm_display_mode *adjusted_mode)
3921{ 3323{
3922 struct drm_device *dev = crtc->dev; 3324 struct drm_device *dev = crtc->dev;
@@ -3927,27 +3329,15 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3927 return false; 3329 return false;
3928 } 3330 }
3929 3331
3930 /* All interlaced capable intel hw wants timings in frames. Note though 3332 /* XXX some encoders set the crtcinfo, others don't.
3931 * that intel_lvds_mode_fixup does some funny tricks with the crtc 3333 * Obviously we need some form of conflict resolution here...
3932 * timings, so we need to be careful not to clobber these.*/
3933 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3934 drm_mode_set_crtcinfo(adjusted_mode, 0);
3935
3936 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3937 * with a hsync front porch of 0.
3938 */ 3334 */
3939 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 3335 if (adjusted_mode->crtc_htotal == 0)
3940 adjusted_mode->hsync_start == adjusted_mode->hdisplay) 3336 drm_mode_set_crtcinfo(adjusted_mode, 0);
3941 return false;
3942 3337
3943 return true; 3338 return true;
3944} 3339}
3945 3340
3946static int valleyview_get_display_clock_speed(struct drm_device *dev)
3947{
3948 return 400000; /* FIXME */
3949}
3950
3951static int i945_get_display_clock_speed(struct drm_device *dev) 3341static int i945_get_display_clock_speed(struct drm_device *dev)
3952{ 3342{
3953 return 400000; 3343 return 400000;
@@ -4045,633 +3435,1297 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
4045 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 3435 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
4046} 3436}
4047 3437
4048static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 3438
4049{ 3439struct intel_watermark_params {
4050 if (i915_panel_use_ssc >= 0) 3440 unsigned long fifo_size;
4051 return i915_panel_use_ssc != 0; 3441 unsigned long max_wm;
4052 return dev_priv->lvds_use_ssc 3442 unsigned long default_wm;
4053 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 3443 unsigned long guard_size;
4054} 3444 unsigned long cacheline_size;
3445};
3446
3447/* Pineview has different values for various configs */
3448static const struct intel_watermark_params pineview_display_wm = {
3449 PINEVIEW_DISPLAY_FIFO,
3450 PINEVIEW_MAX_WM,
3451 PINEVIEW_DFT_WM,
3452 PINEVIEW_GUARD_WM,
3453 PINEVIEW_FIFO_LINE_SIZE
3454};
3455static const struct intel_watermark_params pineview_display_hplloff_wm = {
3456 PINEVIEW_DISPLAY_FIFO,
3457 PINEVIEW_MAX_WM,
3458 PINEVIEW_DFT_HPLLOFF_WM,
3459 PINEVIEW_GUARD_WM,
3460 PINEVIEW_FIFO_LINE_SIZE
3461};
3462static const struct intel_watermark_params pineview_cursor_wm = {
3463 PINEVIEW_CURSOR_FIFO,
3464 PINEVIEW_CURSOR_MAX_WM,
3465 PINEVIEW_CURSOR_DFT_WM,
3466 PINEVIEW_CURSOR_GUARD_WM,
3467 PINEVIEW_FIFO_LINE_SIZE,
3468};
3469static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3470 PINEVIEW_CURSOR_FIFO,
3471 PINEVIEW_CURSOR_MAX_WM,
3472 PINEVIEW_CURSOR_DFT_WM,
3473 PINEVIEW_CURSOR_GUARD_WM,
3474 PINEVIEW_FIFO_LINE_SIZE
3475};
3476static const struct intel_watermark_params g4x_wm_info = {
3477 G4X_FIFO_SIZE,
3478 G4X_MAX_WM,
3479 G4X_MAX_WM,
3480 2,
3481 G4X_FIFO_LINE_SIZE,
3482};
3483static const struct intel_watermark_params g4x_cursor_wm_info = {
3484 I965_CURSOR_FIFO,
3485 I965_CURSOR_MAX_WM,
3486 I965_CURSOR_DFT_WM,
3487 2,
3488 G4X_FIFO_LINE_SIZE,
3489};
3490static const struct intel_watermark_params i965_cursor_wm_info = {
3491 I965_CURSOR_FIFO,
3492 I965_CURSOR_MAX_WM,
3493 I965_CURSOR_DFT_WM,
3494 2,
3495 I915_FIFO_LINE_SIZE,
3496};
3497static const struct intel_watermark_params i945_wm_info = {
3498 I945_FIFO_SIZE,
3499 I915_MAX_WM,
3500 1,
3501 2,
3502 I915_FIFO_LINE_SIZE
3503};
3504static const struct intel_watermark_params i915_wm_info = {
3505 I915_FIFO_SIZE,
3506 I915_MAX_WM,
3507 1,
3508 2,
3509 I915_FIFO_LINE_SIZE
3510};
3511static const struct intel_watermark_params i855_wm_info = {
3512 I855GM_FIFO_SIZE,
3513 I915_MAX_WM,
3514 1,
3515 2,
3516 I830_FIFO_LINE_SIZE
3517};
3518static const struct intel_watermark_params i830_wm_info = {
3519 I830_FIFO_SIZE,
3520 I915_MAX_WM,
3521 1,
3522 2,
3523 I830_FIFO_LINE_SIZE
3524};
3525
3526static const struct intel_watermark_params ironlake_display_wm_info = {
3527 ILK_DISPLAY_FIFO,
3528 ILK_DISPLAY_MAXWM,
3529 ILK_DISPLAY_DFTWM,
3530 2,
3531 ILK_FIFO_LINE_SIZE
3532};
3533static const struct intel_watermark_params ironlake_cursor_wm_info = {
3534 ILK_CURSOR_FIFO,
3535 ILK_CURSOR_MAXWM,
3536 ILK_CURSOR_DFTWM,
3537 2,
3538 ILK_FIFO_LINE_SIZE
3539};
3540static const struct intel_watermark_params ironlake_display_srwm_info = {
3541 ILK_DISPLAY_SR_FIFO,
3542 ILK_DISPLAY_MAX_SRWM,
3543 ILK_DISPLAY_DFT_SRWM,
3544 2,
3545 ILK_FIFO_LINE_SIZE
3546};
3547static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3548 ILK_CURSOR_SR_FIFO,
3549 ILK_CURSOR_MAX_SRWM,
3550 ILK_CURSOR_DFT_SRWM,
3551 2,
3552 ILK_FIFO_LINE_SIZE
3553};
3554
3555static const struct intel_watermark_params sandybridge_display_wm_info = {
3556 SNB_DISPLAY_FIFO,
3557 SNB_DISPLAY_MAXWM,
3558 SNB_DISPLAY_DFTWM,
3559 2,
3560 SNB_FIFO_LINE_SIZE
3561};
3562static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3563 SNB_CURSOR_FIFO,
3564 SNB_CURSOR_MAXWM,
3565 SNB_CURSOR_DFTWM,
3566 2,
3567 SNB_FIFO_LINE_SIZE
3568};
3569static const struct intel_watermark_params sandybridge_display_srwm_info = {
3570 SNB_DISPLAY_SR_FIFO,
3571 SNB_DISPLAY_MAX_SRWM,
3572 SNB_DISPLAY_DFT_SRWM,
3573 2,
3574 SNB_FIFO_LINE_SIZE
3575};
3576static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3577 SNB_CURSOR_SR_FIFO,
3578 SNB_CURSOR_MAX_SRWM,
3579 SNB_CURSOR_DFT_SRWM,
3580 2,
3581 SNB_FIFO_LINE_SIZE
3582};
3583
4055 3584
4056/** 3585/**
4057 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send 3586 * intel_calculate_wm - calculate watermark level
4058 * @crtc: CRTC structure 3587 * @clock_in_khz: pixel clock
4059 * @mode: requested mode 3588 * @wm: chip FIFO params
3589 * @pixel_size: display pixel size
3590 * @latency_ns: memory latency for the platform
4060 * 3591 *
4061 * A pipe may be connected to one or more outputs. Based on the depth of the 3592 * Calculate the watermark level (the level at which the display plane will
4062 * attached framebuffer, choose a good color depth to use on the pipe. 3593 * start fetching from memory again). Each chip has a different display
4063 * 3594 * FIFO size and allocation, so the caller needs to figure that out and pass
4064 * If possible, match the pipe depth to the fb depth. In some cases, this 3595 * in the correct intel_watermark_params structure.
4065 * isn't ideal, because the connected output supports a lesser or restricted
4066 * set of depths. Resolve that here:
4067 * LVDS typically supports only 6bpc, so clamp down in that case
4068 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4069 * Displays may support a restricted set as well, check EDID and clamp as
4070 * appropriate.
4071 * DP may want to dither down to 6bpc to fit larger modes
4072 * 3596 *
4073 * RETURNS: 3597 * As the pixel clock runs, the FIFO will be drained at a rate that depends
4074 * Dithering requirement (i.e. false if display bpc and pipe bpc match, 3598 * on the pixel size. When it reaches the watermark level, it'll start
4075 * true if they don't match). 3599 * fetching FIFO line sized based chunks from memory until the FIFO fills
3600 * past the watermark point. If the FIFO drains completely, a FIFO underrun
3601 * will occur, and a display engine hang could result.
4076 */ 3602 */
4077static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 3603static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
4078 struct drm_framebuffer *fb, 3604 const struct intel_watermark_params *wm,
4079 unsigned int *pipe_bpp, 3605 int fifo_size,
4080 struct drm_display_mode *mode) 3606 int pixel_size,
3607 unsigned long latency_ns)
4081{ 3608{
4082 struct drm_device *dev = crtc->dev; 3609 long entries_required, wm_size;
4083 struct drm_i915_private *dev_priv = dev->dev_private;
4084 struct drm_connector *connector;
4085 struct intel_encoder *intel_encoder;
4086 unsigned int display_bpc = UINT_MAX, bpc;
4087 3610
4088 /* Walk the encoders & connectors on this crtc, get min bpc */ 3611 /*
4089 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 3612 * Note: we need to make sure we don't overflow for various clock &
3613 * latency values.
3614 * clocks go from a few thousand to several hundred thousand.
3615 * latency is usually a few thousand
3616 */
3617 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3618 1000;
3619 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
4090 3620
4091 if (intel_encoder->type == INTEL_OUTPUT_LVDS) { 3621 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
4092 unsigned int lvds_bpc;
4093 3622
4094 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == 3623 wm_size = fifo_size - (entries_required + wm->guard_size);
4095 LVDS_A3_POWER_UP)
4096 lvds_bpc = 8;
4097 else
4098 lvds_bpc = 6;
4099 3624
4100 if (lvds_bpc < display_bpc) { 3625 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
4101 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4102 display_bpc = lvds_bpc;
4103 }
4104 continue;
4105 }
4106 3626
4107 /* Not one of the known troublemakers, check the EDID */ 3627 /* Don't promote wm_size to unsigned... */
4108 list_for_each_entry(connector, &dev->mode_config.connector_list, 3628 if (wm_size > (long)wm->max_wm)
4109 head) { 3629 wm_size = wm->max_wm;
4110 if (connector->encoder != &intel_encoder->base) 3630 if (wm_size <= 0)
4111 continue; 3631 wm_size = wm->default_wm;
3632 return wm_size;
3633}
4112 3634
4113 /* Don't use an invalid EDID bpc value */ 3635struct cxsr_latency {
4114 if (connector->display_info.bpc && 3636 int is_desktop;
4115 connector->display_info.bpc < display_bpc) { 3637 int is_ddr3;
4116 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 3638 unsigned long fsb_freq;
4117 display_bpc = connector->display_info.bpc; 3639 unsigned long mem_freq;
4118 } 3640 unsigned long display_sr;
4119 } 3641 unsigned long display_hpll_disable;
3642 unsigned long cursor_sr;
3643 unsigned long cursor_hpll_disable;
3644};
4120 3645
4121 if (intel_encoder->type == INTEL_OUTPUT_EDP) { 3646static const struct cxsr_latency cxsr_latency_table[] = {
4122 /* Use VBT settings if we have an eDP panel */ 3647 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
4123 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 3648 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
3649 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
3650 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
3651 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
3652
3653 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
3654 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
3655 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
3656 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
3657 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
3658
3659 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
3660 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
3661 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
3662 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
3663 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
3664
3665 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
3666 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
3667 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
3668 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
3669 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
3670
3671 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
3672 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
3673 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
3674 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
3675 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
3676
3677 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
3678 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
3679 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
3680 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
3681 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
3682};
4124 3683
4125 if (edp_bpc && edp_bpc < display_bpc) { 3684static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
4126 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 3685 int is_ddr3,
4127 display_bpc = edp_bpc; 3686 int fsb,
4128 } 3687 int mem)
4129 continue; 3688{
4130 } 3689 const struct cxsr_latency *latency;
3690 int i;
4131 3691
4132 /* 3692 if (fsb == 0 || mem == 0)
4133 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 3693 return NULL;
4134 * through, clamp it down. (Note: >12bpc will be caught below.)
4135 */
4136 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4137 if (display_bpc > 8 && display_bpc < 12) {
4138 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4139 display_bpc = 12;
4140 } else {
4141 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4142 display_bpc = 8;
4143 }
4144 }
4145 }
4146 3694
4147 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 3695 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
4148 DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); 3696 latency = &cxsr_latency_table[i];
4149 display_bpc = 6; 3697 if (is_desktop == latency->is_desktop &&
3698 is_ddr3 == latency->is_ddr3 &&
3699 fsb == latency->fsb_freq && mem == latency->mem_freq)
3700 return latency;
4150 } 3701 }
4151 3702
4152 /* 3703 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
4153 * We could just drive the pipe at the highest bpc all the time and
4154 * enable dithering as needed, but that costs bandwidth. So choose
4155 * the minimum value that expresses the full color range of the fb but
4156 * also stays within the max display bpc discovered above.
4157 */
4158 3704
4159 switch (fb->depth) { 3705 return NULL;
4160 case 8: 3706}
4161 bpc = 8; /* since we go through a colormap */
4162 break;
4163 case 15:
4164 case 16:
4165 bpc = 6; /* min is 18bpp */
4166 break;
4167 case 24:
4168 bpc = 8;
4169 break;
4170 case 30:
4171 bpc = 10;
4172 break;
4173 case 48:
4174 bpc = 12;
4175 break;
4176 default:
4177 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4178 bpc = min((unsigned int)8, display_bpc);
4179 break;
4180 }
4181 3707
4182 display_bpc = min(display_bpc, bpc); 3708static void pineview_disable_cxsr(struct drm_device *dev)
3709{
3710 struct drm_i915_private *dev_priv = dev->dev_private;
4183 3711
4184 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", 3712 /* deactivate cxsr */
4185 bpc, display_bpc); 3713 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3714}
4186 3715
4187 *pipe_bpp = display_bpc * 3; 3716/*
3717 * Latency for FIFO fetches is dependent on several factors:
3718 * - memory configuration (speed, channels)
3719 * - chipset
3720 * - current MCH state
3721 * It can be fairly high in some situations, so here we assume a fairly
3722 * pessimal value. It's a tradeoff between extra memory fetches (if we
3723 * set this value too high, the FIFO will fetch frequently to stay full)
3724 * and power consumption (set it too low to save power and we might see
3725 * FIFO underruns and display "flicker").
3726 *
3727 * A value of 5us seems to be a good balance; safe for very low end
3728 * platforms but not overly aggressive on lower latency configs.
3729 */
3730static const int latency_ns = 5000;
4188 3731
4189 return display_bpc != bpc; 3732static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3733{
3734 struct drm_i915_private *dev_priv = dev->dev_private;
3735 uint32_t dsparb = I915_READ(DSPARB);
3736 int size;
3737
3738 size = dsparb & 0x7f;
3739 if (plane)
3740 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3741
3742 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3743 plane ? "B" : "A", size);
3744
3745 return size;
4190} 3746}
4191 3747
4192static int vlv_get_refclk(struct drm_crtc *crtc) 3748static int i85x_get_fifo_size(struct drm_device *dev, int plane)
4193{ 3749{
4194 struct drm_device *dev = crtc->dev;
4195 struct drm_i915_private *dev_priv = dev->dev_private; 3750 struct drm_i915_private *dev_priv = dev->dev_private;
4196 int refclk = 27000; /* for DP & HDMI */ 3751 uint32_t dsparb = I915_READ(DSPARB);
3752 int size;
4197 3753
4198 return 100000; /* only one validated so far */ 3754 size = dsparb & 0x1ff;
3755 if (plane)
3756 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3757 size >>= 1; /* Convert to cachelines */
4199 3758
4200 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 3759 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4201 refclk = 96000; 3760 plane ? "B" : "A", size);
4202 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4203 if (intel_panel_use_ssc(dev_priv))
4204 refclk = 100000;
4205 else
4206 refclk = 96000;
4207 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4208 refclk = 100000;
4209 }
4210 3761
4211 return refclk; 3762 return size;
4212} 3763}
4213 3764
4214static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 3765static int i845_get_fifo_size(struct drm_device *dev, int plane)
4215{ 3766{
4216 struct drm_device *dev = crtc->dev;
4217 struct drm_i915_private *dev_priv = dev->dev_private; 3767 struct drm_i915_private *dev_priv = dev->dev_private;
4218 int refclk; 3768 uint32_t dsparb = I915_READ(DSPARB);
3769 int size;
4219 3770
4220 if (IS_VALLEYVIEW(dev)) { 3771 size = dsparb & 0x7f;
4221 refclk = vlv_get_refclk(crtc); 3772 size >>= 2; /* Convert to cachelines */
4222 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4223 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4224 refclk = dev_priv->lvds_ssc_freq * 1000;
4225 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4226 refclk / 1000);
4227 } else if (!IS_GEN2(dev)) {
4228 refclk = 96000;
4229 } else {
4230 refclk = 48000;
4231 }
4232 3773
4233 return refclk; 3774 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3775 plane ? "B" : "A",
3776 size);
3777
3778 return size;
4234} 3779}
4235 3780
4236static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, 3781static int i830_get_fifo_size(struct drm_device *dev, int plane)
4237 intel_clock_t *clock)
4238{ 3782{
4239 /* SDVO TV has fixed PLL values depend on its clock range,
4240 this mirrors vbios setting. */
4241 if (adjusted_mode->clock >= 100000
4242 && adjusted_mode->clock < 140500) {
4243 clock->p1 = 2;
4244 clock->p2 = 10;
4245 clock->n = 3;
4246 clock->m1 = 16;
4247 clock->m2 = 8;
4248 } else if (adjusted_mode->clock >= 140500
4249 && adjusted_mode->clock <= 200000) {
4250 clock->p1 = 1;
4251 clock->p2 = 10;
4252 clock->n = 6;
4253 clock->m1 = 12;
4254 clock->m2 = 8;
4255 }
4256}
4257
4258static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4259 intel_clock_t *clock,
4260 intel_clock_t *reduced_clock)
4261{
4262 struct drm_device *dev = crtc->dev;
4263 struct drm_i915_private *dev_priv = dev->dev_private; 3783 struct drm_i915_private *dev_priv = dev->dev_private;
4264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3784 uint32_t dsparb = I915_READ(DSPARB);
4265 int pipe = intel_crtc->pipe; 3785 int size;
4266 u32 fp, fp2 = 0;
4267 3786
4268 if (IS_PINEVIEW(dev)) { 3787 size = dsparb & 0x7f;
4269 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; 3788 size >>= 1; /* Convert to cachelines */
4270 if (reduced_clock)
4271 fp2 = (1 << reduced_clock->n) << 16 |
4272 reduced_clock->m1 << 8 | reduced_clock->m2;
4273 } else {
4274 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
4275 if (reduced_clock)
4276 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
4277 reduced_clock->m2;
4278 }
4279 3789
4280 I915_WRITE(FP0(pipe), fp); 3790 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3791 plane ? "B" : "A", size);
4281 3792
4282 intel_crtc->lowfreq_avail = false; 3793 return size;
4283 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 3794}
4284 reduced_clock && i915_powersave) { 3795
4285 I915_WRITE(FP1(pipe), fp2); 3796static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
4286 intel_crtc->lowfreq_avail = true; 3797{
4287 } else { 3798 struct drm_crtc *crtc, *enabled = NULL;
4288 I915_WRITE(FP1(pipe), fp); 3799
3800 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3801 if (crtc->enabled && crtc->fb) {
3802 if (enabled)
3803 return NULL;
3804 enabled = crtc;
3805 }
4289 } 3806 }
3807
3808 return enabled;
4290} 3809}
4291 3810
4292static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, 3811static void pineview_update_wm(struct drm_device *dev)
4293 struct drm_display_mode *adjusted_mode)
4294{ 3812{
4295 struct drm_device *dev = crtc->dev;
4296 struct drm_i915_private *dev_priv = dev->dev_private; 3813 struct drm_i915_private *dev_priv = dev->dev_private;
4297 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3814 struct drm_crtc *crtc;
4298 int pipe = intel_crtc->pipe; 3815 const struct cxsr_latency *latency;
4299 u32 temp; 3816 u32 reg;
3817 unsigned long wm;
4300 3818
4301 temp = I915_READ(LVDS); 3819 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
4302 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 3820 dev_priv->fsb_freq, dev_priv->mem_freq);
4303 if (pipe == 1) { 3821 if (!latency) {
4304 temp |= LVDS_PIPEB_SELECT; 3822 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3823 pineview_disable_cxsr(dev);
3824 return;
3825 }
3826
3827 crtc = single_enabled_crtc(dev);
3828 if (crtc) {
3829 int clock = crtc->mode.clock;
3830 int pixel_size = crtc->fb->bits_per_pixel / 8;
3831
3832 /* Display SR */
3833 wm = intel_calculate_wm(clock, &pineview_display_wm,
3834 pineview_display_wm.fifo_size,
3835 pixel_size, latency->display_sr);
3836 reg = I915_READ(DSPFW1);
3837 reg &= ~DSPFW_SR_MASK;
3838 reg |= wm << DSPFW_SR_SHIFT;
3839 I915_WRITE(DSPFW1, reg);
3840 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3841
3842 /* cursor SR */
3843 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3844 pineview_display_wm.fifo_size,
3845 pixel_size, latency->cursor_sr);
3846 reg = I915_READ(DSPFW3);
3847 reg &= ~DSPFW_CURSOR_SR_MASK;
3848 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3849 I915_WRITE(DSPFW3, reg);
3850
3851 /* Display HPLL off SR */
3852 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3853 pineview_display_hplloff_wm.fifo_size,
3854 pixel_size, latency->display_hpll_disable);
3855 reg = I915_READ(DSPFW3);
3856 reg &= ~DSPFW_HPLL_SR_MASK;
3857 reg |= wm & DSPFW_HPLL_SR_MASK;
3858 I915_WRITE(DSPFW3, reg);
3859
3860 /* cursor HPLL off SR */
3861 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
3862 pineview_display_hplloff_wm.fifo_size,
3863 pixel_size, latency->cursor_hpll_disable);
3864 reg = I915_READ(DSPFW3);
3865 reg &= ~DSPFW_HPLL_CURSOR_MASK;
3866 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
3867 I915_WRITE(DSPFW3, reg);
3868 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
3869
3870 /* activate cxsr */
3871 I915_WRITE(DSPFW3,
3872 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
3873 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4305 } else { 3874 } else {
4306 temp &= ~LVDS_PIPEB_SELECT; 3875 pineview_disable_cxsr(dev);
3876 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4307 } 3877 }
4308 /* set the corresponsding LVDS_BORDER bit */ 3878}
4309 temp |= dev_priv->lvds_border_bits;
4310 /* Set the B0-B3 data pairs corresponding to whether we're going to
4311 * set the DPLLs for dual-channel mode or not.
4312 */
4313 if (clock->p2 == 7)
4314 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4315 else
4316 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4317 3879
4318 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 3880static bool g4x_compute_wm0(struct drm_device *dev,
4319 * appropriately here, but we need to look more thoroughly into how 3881 int plane,
4320 * panels behave in the two modes. 3882 const struct intel_watermark_params *display,
4321 */ 3883 int display_latency_ns,
4322 /* set the dithering flag on LVDS as needed */ 3884 const struct intel_watermark_params *cursor,
4323 if (INTEL_INFO(dev)->gen >= 4) { 3885 int cursor_latency_ns,
4324 if (dev_priv->lvds_dither) 3886 int *plane_wm,
4325 temp |= LVDS_ENABLE_DITHER; 3887 int *cursor_wm)
4326 else 3888{
4327 temp &= ~LVDS_ENABLE_DITHER; 3889 struct drm_crtc *crtc;
3890 int htotal, hdisplay, clock, pixel_size;
3891 int line_time_us, line_count;
3892 int entries, tlb_miss;
3893
3894 crtc = intel_get_crtc_for_plane(dev, plane);
3895 if (crtc->fb == NULL || !crtc->enabled) {
3896 *cursor_wm = cursor->guard_size;
3897 *plane_wm = display->guard_size;
3898 return false;
4328 } 3899 }
4329 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 3900
4330 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 3901 htotal = crtc->mode.htotal;
4331 temp |= LVDS_HSYNC_POLARITY; 3902 hdisplay = crtc->mode.hdisplay;
4332 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 3903 clock = crtc->mode.clock;
4333 temp |= LVDS_VSYNC_POLARITY; 3904 pixel_size = crtc->fb->bits_per_pixel / 8;
4334 I915_WRITE(LVDS, temp); 3905
3906 /* Use the small buffer method to calculate plane watermark */
3907 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3908 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
3909 if (tlb_miss > 0)
3910 entries += tlb_miss;
3911 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3912 *plane_wm = entries + display->guard_size;
3913 if (*plane_wm > (int)display->max_wm)
3914 *plane_wm = display->max_wm;
3915
3916 /* Use the large buffer method to calculate cursor watermark */
3917 line_time_us = ((htotal * 1000) / clock);
3918 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3919 entries = line_count * 64 * pixel_size;
3920 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
3921 if (tlb_miss > 0)
3922 entries += tlb_miss;
3923 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3924 *cursor_wm = entries + cursor->guard_size;
3925 if (*cursor_wm > (int)cursor->max_wm)
3926 *cursor_wm = (int)cursor->max_wm;
3927
3928 return true;
4335} 3929}
4336 3930
4337static void vlv_update_pll(struct drm_crtc *crtc, 3931/*
4338 struct drm_display_mode *mode, 3932 * Check the wm result.
4339 struct drm_display_mode *adjusted_mode, 3933 *
4340 intel_clock_t *clock, intel_clock_t *reduced_clock, 3934 * If any calculated watermark values is larger than the maximum value that
4341 int num_connectors) 3935 * can be programmed into the associated watermark register, that watermark
3936 * must be disabled.
3937 */
3938static bool g4x_check_srwm(struct drm_device *dev,
3939 int display_wm, int cursor_wm,
3940 const struct intel_watermark_params *display,
3941 const struct intel_watermark_params *cursor)
4342{ 3942{
4343 struct drm_device *dev = crtc->dev; 3943 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4344 struct drm_i915_private *dev_priv = dev->dev_private; 3944 display_wm, cursor_wm);
4345 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4346 int pipe = intel_crtc->pipe;
4347 u32 dpll, mdiv, pdiv;
4348 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4349 bool is_sdvo;
4350 u32 temp;
4351 3945
4352 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 3946 if (display_wm > display->max_wm) {
4353 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 3947 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3948 display_wm, display->max_wm);
3949 return false;
3950 }
4354 3951
4355 dpll = DPLL_VGA_MODE_DIS; 3952 if (cursor_wm > cursor->max_wm) {
4356 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; 3953 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4357 dpll |= DPLL_REFA_CLK_ENABLE_VLV; 3954 cursor_wm, cursor->max_wm);
4358 dpll |= DPLL_INTEGRATED_CLOCK_VLV; 3955 return false;
3956 }
4359 3957
4360 I915_WRITE(DPLL(pipe), dpll); 3958 if (!(display_wm || cursor_wm)) {
4361 POSTING_READ(DPLL(pipe)); 3959 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3960 return false;
3961 }
4362 3962
4363 bestn = clock->n; 3963 return true;
4364 bestm1 = clock->m1; 3964}
4365 bestm2 = clock->m2;
4366 bestp1 = clock->p1;
4367 bestp2 = clock->p2;
4368 3965
4369 /* 3966static bool g4x_compute_srwm(struct drm_device *dev,
4370 * In Valleyview PLL and program lane counter registers are exposed 3967 int plane,
4371 * through DPIO interface 3968 int latency_ns,
4372 */ 3969 const struct intel_watermark_params *display,
4373 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 3970 const struct intel_watermark_params *cursor,
4374 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 3971 int *display_wm, int *cursor_wm)
4375 mdiv |= ((bestn << DPIO_N_SHIFT)); 3972{
4376 mdiv |= (1 << DPIO_POST_DIV_SHIFT); 3973 struct drm_crtc *crtc;
4377 mdiv |= (1 << DPIO_K_SHIFT); 3974 int hdisplay, htotal, pixel_size, clock;
4378 mdiv |= DPIO_ENABLE_CALIBRATION; 3975 unsigned long line_time_us;
4379 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 3976 int line_count, line_size;
3977 int small, large;
3978 int entries;
3979
3980 if (!latency_ns) {
3981 *display_wm = *cursor_wm = 0;
3982 return false;
3983 }
4380 3984
4381 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); 3985 crtc = intel_get_crtc_for_plane(dev, plane);
3986 hdisplay = crtc->mode.hdisplay;
3987 htotal = crtc->mode.htotal;
3988 clock = crtc->mode.clock;
3989 pixel_size = crtc->fb->bits_per_pixel / 8;
4382 3990
4383 pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | 3991 line_time_us = (htotal * 1000) / clock;
4384 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | 3992 line_count = (latency_ns / line_time_us + 1000) / 1000;
4385 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | 3993 line_size = hdisplay * pixel_size;
4386 (5 << DPIO_CLK_BIAS_CTL_SHIFT);
4387 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4388 3994
4389 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); 3995 /* Use the minimum of the small and large buffer method for primary */
3996 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3997 large = line_count * line_size;
4390 3998
4391 dpll |= DPLL_VCO_ENABLE; 3999 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4392 I915_WRITE(DPLL(pipe), dpll); 4000 *display_wm = entries + display->guard_size;
4393 POSTING_READ(DPLL(pipe));
4394 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4395 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4396 4001
4397 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); 4002 /* calculate the self-refresh watermark for display cursor */
4003 entries = line_count * pixel_size * 64;
4004 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4005 *cursor_wm = entries + cursor->guard_size;
4398 4006
4399 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4007 return g4x_check_srwm(dev,
4400 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4008 *display_wm, *cursor_wm,
4009 display, cursor);
4010}
4401 4011
4402 I915_WRITE(DPLL(pipe), dpll); 4012#define single_plane_enabled(mask) is_power_of_2(mask)
4403 4013
4404 /* Wait for the clocks to stabilize. */ 4014static void g4x_update_wm(struct drm_device *dev)
4405 POSTING_READ(DPLL(pipe)); 4015{
4406 udelay(150); 4016 static const int sr_latency_ns = 12000;
4017 struct drm_i915_private *dev_priv = dev->dev_private;
4018 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4019 int plane_sr, cursor_sr;
4020 unsigned int enabled = 0;
4021
4022 if (g4x_compute_wm0(dev, 0,
4023 &g4x_wm_info, latency_ns,
4024 &g4x_cursor_wm_info, latency_ns,
4025 &planea_wm, &cursora_wm))
4026 enabled |= 1;
4027
4028 if (g4x_compute_wm0(dev, 1,
4029 &g4x_wm_info, latency_ns,
4030 &g4x_cursor_wm_info, latency_ns,
4031 &planeb_wm, &cursorb_wm))
4032 enabled |= 2;
4033
4034 plane_sr = cursor_sr = 0;
4035 if (single_plane_enabled(enabled) &&
4036 g4x_compute_srwm(dev, ffs(enabled) - 1,
4037 sr_latency_ns,
4038 &g4x_wm_info,
4039 &g4x_cursor_wm_info,
4040 &plane_sr, &cursor_sr))
4041 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4042 else
4043 I915_WRITE(FW_BLC_SELF,
4044 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4045
4046 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4047 planea_wm, cursora_wm,
4048 planeb_wm, cursorb_wm,
4049 plane_sr, cursor_sr);
4050
4051 I915_WRITE(DSPFW1,
4052 (plane_sr << DSPFW_SR_SHIFT) |
4053 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4054 (planeb_wm << DSPFW_PLANEB_SHIFT) |
4055 planea_wm);
4056 I915_WRITE(DSPFW2,
4057 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4058 (cursora_wm << DSPFW_CURSORA_SHIFT));
4059 /* HPLL off in SR has some issues on G4x... disable it */
4060 I915_WRITE(DSPFW3,
4061 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4062 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4063}
4064
4065static void i965_update_wm(struct drm_device *dev)
4066{
4067 struct drm_i915_private *dev_priv = dev->dev_private;
4068 struct drm_crtc *crtc;
4069 int srwm = 1;
4070 int cursor_sr = 16;
4071
4072 /* Calc sr entries for one plane configs */
4073 crtc = single_enabled_crtc(dev);
4074 if (crtc) {
4075 /* self-refresh has much higher latency */
4076 static const int sr_latency_ns = 12000;
4077 int clock = crtc->mode.clock;
4078 int htotal = crtc->mode.htotal;
4079 int hdisplay = crtc->mode.hdisplay;
4080 int pixel_size = crtc->fb->bits_per_pixel / 8;
4081 unsigned long line_time_us;
4082 int entries;
4083
4084 line_time_us = ((htotal * 1000) / clock);
4085
4086 /* Use ns/us then divide to preserve precision */
4087 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4088 pixel_size * hdisplay;
4089 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4090 srwm = I965_FIFO_SIZE - entries;
4091 if (srwm < 0)
4092 srwm = 1;
4093 srwm &= 0x1ff;
4094 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4095 entries, srwm);
4096
4097 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4098 pixel_size * 64;
4099 entries = DIV_ROUND_UP(entries,
4100 i965_cursor_wm_info.cacheline_size);
4101 cursor_sr = i965_cursor_wm_info.fifo_size -
4102 (entries + i965_cursor_wm_info.guard_size);
4103
4104 if (cursor_sr > i965_cursor_wm_info.max_wm)
4105 cursor_sr = i965_cursor_wm_info.max_wm;
4106
4107 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4108 "cursor %d\n", srwm, cursor_sr);
4109
4110 if (IS_CRESTLINE(dev))
4111 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4112 } else {
4113 /* Turn off self refresh if both pipes are enabled */
4114 if (IS_CRESTLINE(dev))
4115 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4116 & ~FW_BLC_SELF_EN);
4117 }
4407 4118
4408 temp = 0; 4119 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4409 if (is_sdvo) { 4120 srwm);
4410 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4121
4411 if (temp > 1) 4122 /* 965 has limitations... */
4412 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4123 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4124 (8 << 16) | (8 << 8) | (8 << 0));
4125 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4126 /* update cursor SR watermark */
4127 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4128}
4129
4130static void i9xx_update_wm(struct drm_device *dev)
4131{
4132 struct drm_i915_private *dev_priv = dev->dev_private;
4133 const struct intel_watermark_params *wm_info;
4134 uint32_t fwater_lo;
4135 uint32_t fwater_hi;
4136 int cwm, srwm = 1;
4137 int fifo_size;
4138 int planea_wm, planeb_wm;
4139 struct drm_crtc *crtc, *enabled = NULL;
4140
4141 if (IS_I945GM(dev))
4142 wm_info = &i945_wm_info;
4143 else if (!IS_GEN2(dev))
4144 wm_info = &i915_wm_info;
4145 else
4146 wm_info = &i855_wm_info;
4147
4148 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4149 crtc = intel_get_crtc_for_plane(dev, 0);
4150 if (crtc->enabled && crtc->fb) {
4151 planea_wm = intel_calculate_wm(crtc->mode.clock,
4152 wm_info, fifo_size,
4153 crtc->fb->bits_per_pixel / 8,
4154 latency_ns);
4155 enabled = crtc;
4156 } else
4157 planea_wm = fifo_size - wm_info->guard_size;
4158
4159 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4160 crtc = intel_get_crtc_for_plane(dev, 1);
4161 if (crtc->enabled && crtc->fb) {
4162 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4163 wm_info, fifo_size,
4164 crtc->fb->bits_per_pixel / 8,
4165 latency_ns);
4166 if (enabled == NULL)
4167 enabled = crtc;
4413 else 4168 else
4414 temp = 0; 4169 enabled = NULL;
4415 } 4170 } else
4416 I915_WRITE(DPLL_MD(pipe), temp); 4171 planeb_wm = fifo_size - wm_info->guard_size;
4417 POSTING_READ(DPLL_MD(pipe));
4418 4172
4419 /* Now program lane control registers */ 4173 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4420 if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) 4174
4421 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 4175 /*
4422 { 4176 * Overlay gets an aggressive default since video jitter is bad.
4423 temp = 0x1000C4; 4177 */
4424 if(pipe == 1) 4178 cwm = 2;
4425 temp |= (1 << 21); 4179
4426 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); 4180 /* Play safe and disable self-refresh before adjusting watermarks. */
4427 } 4181 if (IS_I945G(dev) || IS_I945GM(dev))
4428 if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) 4182 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4429 { 4183 else if (IS_I915GM(dev))
4430 temp = 0x1000C4; 4184 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4431 if(pipe == 1) 4185
4432 temp |= (1 << 21); 4186 /* Calc sr entries for one plane configs */
4433 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); 4187 if (HAS_FW_BLC(dev) && enabled) {
4188 /* self-refresh has much higher latency */
4189 static const int sr_latency_ns = 6000;
4190 int clock = enabled->mode.clock;
4191 int htotal = enabled->mode.htotal;
4192 int hdisplay = enabled->mode.hdisplay;
4193 int pixel_size = enabled->fb->bits_per_pixel / 8;
4194 unsigned long line_time_us;
4195 int entries;
4196
4197 line_time_us = (htotal * 1000) / clock;
4198
4199 /* Use ns/us then divide to preserve precision */
4200 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4201 pixel_size * hdisplay;
4202 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4203 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4204 srwm = wm_info->fifo_size - entries;
4205 if (srwm < 0)
4206 srwm = 1;
4207
4208 if (IS_I945G(dev) || IS_I945GM(dev))
4209 I915_WRITE(FW_BLC_SELF,
4210 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4211 else if (IS_I915GM(dev))
4212 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4213 }
4214
4215 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4216 planea_wm, planeb_wm, cwm, srwm);
4217
4218 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4219 fwater_hi = (cwm & 0x1f);
4220
4221 /* Set request length to 8 cachelines per fetch */
4222 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4223 fwater_hi = fwater_hi | (1 << 8);
4224
4225 I915_WRITE(FW_BLC, fwater_lo);
4226 I915_WRITE(FW_BLC2, fwater_hi);
4227
4228 if (HAS_FW_BLC(dev)) {
4229 if (enabled) {
4230 if (IS_I945G(dev) || IS_I945GM(dev))
4231 I915_WRITE(FW_BLC_SELF,
4232 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4233 else if (IS_I915GM(dev))
4234 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4235 DRM_DEBUG_KMS("memory self refresh enabled\n");
4236 } else
4237 DRM_DEBUG_KMS("memory self refresh disabled\n");
4434 } 4238 }
4435} 4239}
4436 4240
4437static void i9xx_update_pll(struct drm_crtc *crtc, 4241static void i830_update_wm(struct drm_device *dev)
4438 struct drm_display_mode *mode,
4439 struct drm_display_mode *adjusted_mode,
4440 intel_clock_t *clock, intel_clock_t *reduced_clock,
4441 int num_connectors)
4442{ 4242{
4443 struct drm_device *dev = crtc->dev;
4444 struct drm_i915_private *dev_priv = dev->dev_private; 4243 struct drm_i915_private *dev_priv = dev->dev_private;
4445 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4244 struct drm_crtc *crtc;
4446 int pipe = intel_crtc->pipe; 4245 uint32_t fwater_lo;
4447 u32 dpll; 4246 int planea_wm;
4448 bool is_sdvo;
4449 4247
4450 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4248 crtc = single_enabled_crtc(dev);
4249 if (crtc == NULL)
4250 return;
4451 4251
4452 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4252 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4453 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4253 dev_priv->display.get_fifo_size(dev, 0),
4254 crtc->fb->bits_per_pixel / 8,
4255 latency_ns);
4256 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4257 fwater_lo |= (3<<8) | planea_wm;
4454 4258
4455 dpll = DPLL_VGA_MODE_DIS; 4259 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4456 4260
4457 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4261 I915_WRITE(FW_BLC, fwater_lo);
4458 dpll |= DPLLB_MODE_LVDS; 4262}
4459 else 4263
4460 dpll |= DPLLB_MODE_DAC_SERIAL; 4264#define ILK_LP0_PLANE_LATENCY 700
4461 if (is_sdvo) { 4265#define ILK_LP0_CURSOR_LATENCY 1300
4462 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4266
4463 if (pixel_multiplier > 1) { 4267/*
4464 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4268 * Check the wm result.
4465 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4269 *
4466 } 4270 * If any calculated watermark values is larger than the maximum value that
4467 dpll |= DPLL_DVO_HIGH_SPEED; 4271 * can be programmed into the associated watermark register, that watermark
4272 * must be disabled.
4273 */
4274static bool ironlake_check_srwm(struct drm_device *dev, int level,
4275 int fbc_wm, int display_wm, int cursor_wm,
4276 const struct intel_watermark_params *display,
4277 const struct intel_watermark_params *cursor)
4278{
4279 struct drm_i915_private *dev_priv = dev->dev_private;
4280
4281 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4282 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4283
4284 if (fbc_wm > SNB_FBC_MAX_SRWM) {
4285 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4286 fbc_wm, SNB_FBC_MAX_SRWM, level);
4287
4288 /* fbc has it's own way to disable FBC WM */
4289 I915_WRITE(DISP_ARB_CTL,
4290 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4291 return false;
4468 } 4292 }
4469 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4470 dpll |= DPLL_DVO_HIGH_SPEED;
4471 4293
4472 /* compute bitmask from p1 value */ 4294 if (display_wm > display->max_wm) {
4473 if (IS_PINEVIEW(dev)) 4295 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4474 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 4296 display_wm, SNB_DISPLAY_MAX_SRWM, level);
4475 else { 4297 return false;
4476 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4477 if (IS_G4X(dev) && reduced_clock)
4478 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4479 } 4298 }
4480 switch (clock->p2) { 4299
4481 case 5: 4300 if (cursor_wm > cursor->max_wm) {
4482 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 4301 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4483 break; 4302 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4484 case 7: 4303 return false;
4485 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4486 break;
4487 case 10:
4488 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4489 break;
4490 case 14:
4491 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4492 break;
4493 } 4304 }
4494 if (INTEL_INFO(dev)->gen >= 4)
4495 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4496 4305
4497 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4306 if (!(fbc_wm || display_wm || cursor_wm)) {
4498 dpll |= PLL_REF_INPUT_TVCLKINBC; 4307 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4499 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4308 return false;
4500 /* XXX: just matching BIOS for now */ 4309 }
4501 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4502 dpll |= 3;
4503 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4504 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4505 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4506 else
4507 dpll |= PLL_REF_INPUT_DREFCLK;
4508 4310
4509 dpll |= DPLL_VCO_ENABLE; 4311 return true;
4510 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4312}
4511 POSTING_READ(DPLL(pipe));
4512 udelay(150);
4513 4313
4514 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4314/*
4515 * This is an exception to the general rule that mode_set doesn't turn 4315 * Compute watermark values of WM[1-3],
4516 * things on. 4316 */
4517 */ 4317static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4518 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4318 int latency_ns,
4519 intel_update_lvds(crtc, clock, adjusted_mode); 4319 const struct intel_watermark_params *display,
4320 const struct intel_watermark_params *cursor,
4321 int *fbc_wm, int *display_wm, int *cursor_wm)
4322{
4323 struct drm_crtc *crtc;
4324 unsigned long line_time_us;
4325 int hdisplay, htotal, pixel_size, clock;
4326 int line_count, line_size;
4327 int small, large;
4328 int entries;
4329
4330 if (!latency_ns) {
4331 *fbc_wm = *display_wm = *cursor_wm = 0;
4332 return false;
4333 }
4520 4334
4521 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4335 crtc = intel_get_crtc_for_plane(dev, plane);
4522 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4336 hdisplay = crtc->mode.hdisplay;
4337 htotal = crtc->mode.htotal;
4338 clock = crtc->mode.clock;
4339 pixel_size = crtc->fb->bits_per_pixel / 8;
4523 4340
4524 I915_WRITE(DPLL(pipe), dpll); 4341 line_time_us = (htotal * 1000) / clock;
4342 line_count = (latency_ns / line_time_us + 1000) / 1000;
4343 line_size = hdisplay * pixel_size;
4525 4344
4526 /* Wait for the clocks to stabilize. */ 4345 /* Use the minimum of the small and large buffer method for primary */
4527 POSTING_READ(DPLL(pipe)); 4346 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4528 udelay(150); 4347 large = line_count * line_size;
4529 4348
4530 if (INTEL_INFO(dev)->gen >= 4) { 4349 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4531 u32 temp = 0; 4350 *display_wm = entries + display->guard_size;
4532 if (is_sdvo) { 4351
4533 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4352 /*
4534 if (temp > 1) 4353 * Spec says:
4535 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4354 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4536 else 4355 */
4537 temp = 0; 4356 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4538 } 4357
4539 I915_WRITE(DPLL_MD(pipe), temp); 4358 /* calculate the self-refresh watermark for display cursor */
4540 } else { 4359 entries = line_count * pixel_size * 64;
4541 /* The pixel multiplier can only be updated once the 4360 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4542 * DPLL is enabled and the clocks are stable. 4361 *cursor_wm = entries + cursor->guard_size;
4543 * 4362
4544 * So write it again. 4363 return ironlake_check_srwm(dev, level,
4545 */ 4364 *fbc_wm, *display_wm, *cursor_wm,
4546 I915_WRITE(DPLL(pipe), dpll); 4365 display, cursor);
4547 }
4548} 4366}
4549 4367
4550static void i8xx_update_pll(struct drm_crtc *crtc, 4368static void ironlake_update_wm(struct drm_device *dev)
4551 struct drm_display_mode *adjusted_mode,
4552 intel_clock_t *clock, intel_clock_t *reduced_clock,
4553 int num_connectors)
4554{ 4369{
4555 struct drm_device *dev = crtc->dev;
4556 struct drm_i915_private *dev_priv = dev->dev_private; 4370 struct drm_i915_private *dev_priv = dev->dev_private;
4557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4371 int fbc_wm, plane_wm, cursor_wm;
4558 int pipe = intel_crtc->pipe; 4372 unsigned int enabled;
4559 u32 dpll; 4373
4560 4374 enabled = 0;
4561 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4375 if (g4x_compute_wm0(dev, 0,
4376 &ironlake_display_wm_info,
4377 ILK_LP0_PLANE_LATENCY,
4378 &ironlake_cursor_wm_info,
4379 ILK_LP0_CURSOR_LATENCY,
4380 &plane_wm, &cursor_wm)) {
4381 I915_WRITE(WM0_PIPEA_ILK,
4382 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4383 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4384 " plane %d, " "cursor: %d\n",
4385 plane_wm, cursor_wm);
4386 enabled |= 1;
4387 }
4388
4389 if (g4x_compute_wm0(dev, 1,
4390 &ironlake_display_wm_info,
4391 ILK_LP0_PLANE_LATENCY,
4392 &ironlake_cursor_wm_info,
4393 ILK_LP0_CURSOR_LATENCY,
4394 &plane_wm, &cursor_wm)) {
4395 I915_WRITE(WM0_PIPEB_ILK,
4396 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4397 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4398 " plane %d, cursor: %d\n",
4399 plane_wm, cursor_wm);
4400 enabled |= 2;
4401 }
4562 4402
4563 dpll = DPLL_VGA_MODE_DIS; 4403 /*
4404 * Calculate and update the self-refresh watermark only when one
4405 * display plane is used.
4406 */
4407 I915_WRITE(WM3_LP_ILK, 0);
4408 I915_WRITE(WM2_LP_ILK, 0);
4409 I915_WRITE(WM1_LP_ILK, 0);
4564 4410
4565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 4411 if (!single_plane_enabled(enabled))
4566 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4412 return;
4567 } else { 4413 enabled = ffs(enabled) - 1;
4568 if (clock->p1 == 2) 4414
4569 dpll |= PLL_P1_DIVIDE_BY_TWO; 4415 /* WM1 */
4570 else 4416 if (!ironlake_compute_srwm(dev, 1, enabled,
4571 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4417 ILK_READ_WM1_LATENCY() * 500,
4572 if (clock->p2 == 4) 4418 &ironlake_display_srwm_info,
4573 dpll |= PLL_P2_DIVIDE_BY_4; 4419 &ironlake_cursor_srwm_info,
4574 } 4420 &fbc_wm, &plane_wm, &cursor_wm))
4421 return;
4575 4422
4576 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4423 I915_WRITE(WM1_LP_ILK,
4577 /* XXX: just matching BIOS for now */ 4424 WM1_LP_SR_EN |
4578 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4425 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4579 dpll |= 3; 4426 (fbc_wm << WM1_LP_FBC_SHIFT) |
4580 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4427 (plane_wm << WM1_LP_SR_SHIFT) |
4581 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4428 cursor_wm);
4582 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4429
4583 else 4430 /* WM2 */
4584 dpll |= PLL_REF_INPUT_DREFCLK; 4431 if (!ironlake_compute_srwm(dev, 2, enabled,
4432 ILK_READ_WM2_LATENCY() * 500,
4433 &ironlake_display_srwm_info,
4434 &ironlake_cursor_srwm_info,
4435 &fbc_wm, &plane_wm, &cursor_wm))
4436 return;
4585 4437
4586 dpll |= DPLL_VCO_ENABLE; 4438 I915_WRITE(WM2_LP_ILK,
4587 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4439 WM2_LP_EN |
4588 POSTING_READ(DPLL(pipe)); 4440 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4589 udelay(150); 4441 (fbc_wm << WM1_LP_FBC_SHIFT) |
4442 (plane_wm << WM1_LP_SR_SHIFT) |
4443 cursor_wm);
4590 4444
4591 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4445 /*
4592 * This is an exception to the general rule that mode_set doesn't turn 4446 * WM3 is unsupported on ILK, probably because we don't have latency
4593 * things on. 4447 * data for that power state
4594 */ 4448 */
4595 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4449}
4596 intel_update_lvds(crtc, clock, adjusted_mode);
4597
4598 I915_WRITE(DPLL(pipe), dpll);
4599 4450
4600 /* Wait for the clocks to stabilize. */ 4451static void sandybridge_update_wm(struct drm_device *dev)
4601 POSTING_READ(DPLL(pipe)); 4452{
4602 udelay(150); 4453 struct drm_i915_private *dev_priv = dev->dev_private;
4454 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4455 int fbc_wm, plane_wm, cursor_wm;
4456 unsigned int enabled;
4457
4458 enabled = 0;
4459 if (g4x_compute_wm0(dev, 0,
4460 &sandybridge_display_wm_info, latency,
4461 &sandybridge_cursor_wm_info, latency,
4462 &plane_wm, &cursor_wm)) {
4463 I915_WRITE(WM0_PIPEA_ILK,
4464 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4465 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4466 " plane %d, " "cursor: %d\n",
4467 plane_wm, cursor_wm);
4468 enabled |= 1;
4469 }
4470
4471 if (g4x_compute_wm0(dev, 1,
4472 &sandybridge_display_wm_info, latency,
4473 &sandybridge_cursor_wm_info, latency,
4474 &plane_wm, &cursor_wm)) {
4475 I915_WRITE(WM0_PIPEB_ILK,
4476 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4477 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4478 " plane %d, cursor: %d\n",
4479 plane_wm, cursor_wm);
4480 enabled |= 2;
4481 }
4603 4482
4604 /* The pixel multiplier can only be updated once the 4483 /*
4605 * DPLL is enabled and the clocks are stable. 4484 * Calculate and update the self-refresh watermark only when one
4485 * display plane is used.
4486 *
4487 * SNB support 3 levels of watermark.
4488 *
4489 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4490 * and disabled in the descending order
4606 * 4491 *
4607 * So write it again.
4608 */ 4492 */
4609 I915_WRITE(DPLL(pipe), dpll); 4493 I915_WRITE(WM3_LP_ILK, 0);
4494 I915_WRITE(WM2_LP_ILK, 0);
4495 I915_WRITE(WM1_LP_ILK, 0);
4496
4497 if (!single_plane_enabled(enabled))
4498 return;
4499 enabled = ffs(enabled) - 1;
4500
4501 /* WM1 */
4502 if (!ironlake_compute_srwm(dev, 1, enabled,
4503 SNB_READ_WM1_LATENCY() * 500,
4504 &sandybridge_display_srwm_info,
4505 &sandybridge_cursor_srwm_info,
4506 &fbc_wm, &plane_wm, &cursor_wm))
4507 return;
4508
4509 I915_WRITE(WM1_LP_ILK,
4510 WM1_LP_SR_EN |
4511 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4512 (fbc_wm << WM1_LP_FBC_SHIFT) |
4513 (plane_wm << WM1_LP_SR_SHIFT) |
4514 cursor_wm);
4515
4516 /* WM2 */
4517 if (!ironlake_compute_srwm(dev, 2, enabled,
4518 SNB_READ_WM2_LATENCY() * 500,
4519 &sandybridge_display_srwm_info,
4520 &sandybridge_cursor_srwm_info,
4521 &fbc_wm, &plane_wm, &cursor_wm))
4522 return;
4523
4524 I915_WRITE(WM2_LP_ILK,
4525 WM2_LP_EN |
4526 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4527 (fbc_wm << WM1_LP_FBC_SHIFT) |
4528 (plane_wm << WM1_LP_SR_SHIFT) |
4529 cursor_wm);
4530
4531 /* WM3 */
4532 if (!ironlake_compute_srwm(dev, 3, enabled,
4533 SNB_READ_WM3_LATENCY() * 500,
4534 &sandybridge_display_srwm_info,
4535 &sandybridge_cursor_srwm_info,
4536 &fbc_wm, &plane_wm, &cursor_wm))
4537 return;
4538
4539 I915_WRITE(WM3_LP_ILK,
4540 WM3_LP_EN |
4541 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4542 (fbc_wm << WM1_LP_FBC_SHIFT) |
4543 (plane_wm << WM1_LP_SR_SHIFT) |
4544 cursor_wm);
4610} 4545}
4611 4546
4612static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, 4547/**
4613 struct drm_display_mode *mode, 4548 * intel_update_watermarks - update FIFO watermark values based on current modes
4614 struct drm_display_mode *adjusted_mode) 4549 *
4550 * Calculate watermark values for the various WM regs based on current mode
4551 * and plane configuration.
4552 *
4553 * There are several cases to deal with here:
4554 * - normal (i.e. non-self-refresh)
4555 * - self-refresh (SR) mode
4556 * - lines are large relative to FIFO size (buffer can hold up to 2)
4557 * - lines are small relative to FIFO size (buffer can hold more than 2
4558 * lines), so need to account for TLB latency
4559 *
4560 * The normal calculation is:
4561 * watermark = dotclock * bytes per pixel * latency
4562 * where latency is platform & configuration dependent (we assume pessimal
4563 * values here).
4564 *
4565 * The SR calculation is:
4566 * watermark = (trunc(latency/line time)+1) * surface width *
4567 * bytes per pixel
4568 * where
4569 * line time = htotal / dotclock
4570 * surface width = hdisplay for normal plane and 64 for cursor
4571 * and latency is assumed to be high, as above.
4572 *
4573 * The final value programmed to the register should always be rounded up,
4574 * and include an extra 2 entries to account for clock crossings.
4575 *
4576 * We don't use the sprite, so we can ignore that. And on Crestline we have
4577 * to set the non-SR watermarks to 8.
4578 */
4579static void intel_update_watermarks(struct drm_device *dev)
4615{ 4580{
4616 struct drm_device *dev = intel_crtc->base.dev;
4617 struct drm_i915_private *dev_priv = dev->dev_private; 4581 struct drm_i915_private *dev_priv = dev->dev_private;
4618 enum pipe pipe = intel_crtc->pipe;
4619 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
4620 uint32_t vsyncshift;
4621 4582
4622 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4583 if (dev_priv->display.update_wm)
4623 /* the chip adds 2 halflines automatically */ 4584 dev_priv->display.update_wm(dev);
4624 adjusted_mode->crtc_vtotal -= 1; 4585}
4625 adjusted_mode->crtc_vblank_end -= 1;
4626 vsyncshift = adjusted_mode->crtc_hsync_start
4627 - adjusted_mode->crtc_htotal / 2;
4628 } else {
4629 vsyncshift = 0;
4630 }
4631 4586
4632 if (INTEL_INFO(dev)->gen > 3) 4587static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4633 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 4588{
4589 return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4590 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4591}
4634 4592
4635 I915_WRITE(HTOTAL(cpu_transcoder), 4593/**
4636 (adjusted_mode->crtc_hdisplay - 1) | 4594 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4637 ((adjusted_mode->crtc_htotal - 1) << 16)); 4595 * @crtc: CRTC structure
4638 I915_WRITE(HBLANK(cpu_transcoder), 4596 *
4639 (adjusted_mode->crtc_hblank_start - 1) | 4597 * A pipe may be connected to one or more outputs. Based on the depth of the
4640 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 4598 * attached framebuffer, choose a good color depth to use on the pipe.
4641 I915_WRITE(HSYNC(cpu_transcoder), 4599 *
4642 (adjusted_mode->crtc_hsync_start - 1) | 4600 * If possible, match the pipe depth to the fb depth. In some cases, this
4643 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 4601 * isn't ideal, because the connected output supports a lesser or restricted
4602 * set of depths. Resolve that here:
4603 * LVDS typically supports only 6bpc, so clamp down in that case
4604 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4605 * Displays may support a restricted set as well, check EDID and clamp as
4606 * appropriate.
4607 *
4608 * RETURNS:
4609 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4610 * true if they don't match).
4611 */
4612static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4613 unsigned int *pipe_bpp)
4614{
4615 struct drm_device *dev = crtc->dev;
4616 struct drm_i915_private *dev_priv = dev->dev_private;
4617 struct drm_encoder *encoder;
4618 struct drm_connector *connector;
4619 unsigned int display_bpc = UINT_MAX, bpc;
4644 4620
4645 I915_WRITE(VTOTAL(cpu_transcoder), 4621 /* Walk the encoders & connectors on this crtc, get min bpc */
4646 (adjusted_mode->crtc_vdisplay - 1) | 4622 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4647 ((adjusted_mode->crtc_vtotal - 1) << 16)); 4623 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4648 I915_WRITE(VBLANK(cpu_transcoder),
4649 (adjusted_mode->crtc_vblank_start - 1) |
4650 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4651 I915_WRITE(VSYNC(cpu_transcoder),
4652 (adjusted_mode->crtc_vsync_start - 1) |
4653 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4654 4624
4655 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 4625 if (encoder->crtc != crtc)
4656 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 4626 continue;
4657 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4658 * bits. */
4659 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4660 (pipe == PIPE_B || pipe == PIPE_C))
4661 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4662 4627
4663 /* pipesrc controls the size that is scaled from, which should 4628 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4664 * always be the user's requested size. 4629 unsigned int lvds_bpc;
4630
4631 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4632 LVDS_A3_POWER_UP)
4633 lvds_bpc = 8;
4634 else
4635 lvds_bpc = 6;
4636
4637 if (lvds_bpc < display_bpc) {
4638 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4639 display_bpc = lvds_bpc;
4640 }
4641 continue;
4642 }
4643
4644 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4645 /* Use VBT settings if we have an eDP panel */
4646 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4647
4648 if (edp_bpc < display_bpc) {
4649 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4650 display_bpc = edp_bpc;
4651 }
4652 continue;
4653 }
4654
4655 /* Not one of the known troublemakers, check the EDID */
4656 list_for_each_entry(connector, &dev->mode_config.connector_list,
4657 head) {
4658 if (connector->encoder != encoder)
4659 continue;
4660
4661 /* Don't use an invalid EDID bpc value */
4662 if (connector->display_info.bpc &&
4663 connector->display_info.bpc < display_bpc) {
4664 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4665 display_bpc = connector->display_info.bpc;
4666 }
4667 }
4668
4669 /*
4670 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4671 * through, clamp it down. (Note: >12bpc will be caught below.)
4672 */
4673 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4674 if (display_bpc > 8 && display_bpc < 12) {
4675 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
4676 display_bpc = 12;
4677 } else {
4678 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
4679 display_bpc = 8;
4680 }
4681 }
4682 }
4683
4684 /*
4685 * We could just drive the pipe at the highest bpc all the time and
4686 * enable dithering as needed, but that costs bandwidth. So choose
4687 * the minimum value that expresses the full color range of the fb but
4688 * also stays within the max display bpc discovered above.
4665 */ 4689 */
4666 I915_WRITE(PIPESRC(pipe), 4690
4667 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4691 switch (crtc->fb->depth) {
4692 case 8:
4693 bpc = 8; /* since we go through a colormap */
4694 break;
4695 case 15:
4696 case 16:
4697 bpc = 6; /* min is 18bpp */
4698 break;
4699 case 24:
4700 bpc = 8;
4701 break;
4702 case 30:
4703 bpc = 10;
4704 break;
4705 case 48:
4706 bpc = 12;
4707 break;
4708 default:
4709 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4710 bpc = min((unsigned int)8, display_bpc);
4711 break;
4712 }
4713
4714 display_bpc = min(display_bpc, bpc);
4715
4716 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4717 bpc, display_bpc);
4718
4719 *pipe_bpp = display_bpc * 3;
4720
4721 return display_bpc != bpc;
4668} 4722}
4669 4723
4670static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4724static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4671 struct drm_display_mode *mode, 4725 struct drm_display_mode *mode,
4672 struct drm_display_mode *adjusted_mode, 4726 struct drm_display_mode *adjusted_mode,
4673 int x, int y, 4727 int x, int y,
4674 struct drm_framebuffer *fb) 4728 struct drm_framebuffer *old_fb)
4675{ 4729{
4676 struct drm_device *dev = crtc->dev; 4730 struct drm_device *dev = crtc->dev;
4677 struct drm_i915_private *dev_priv = dev->dev_private; 4731 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4680,14 +4734,20 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4680 int plane = intel_crtc->plane; 4734 int plane = intel_crtc->plane;
4681 int refclk, num_connectors = 0; 4735 int refclk, num_connectors = 0;
4682 intel_clock_t clock, reduced_clock; 4736 intel_clock_t clock, reduced_clock;
4683 u32 dspcntr, pipeconf; 4737 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4684 bool ok, has_reduced_clock = false, is_sdvo = false; 4738 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4685 bool is_lvds = false, is_tv = false, is_dp = false; 4739 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4740 struct drm_mode_config *mode_config = &dev->mode_config;
4686 struct intel_encoder *encoder; 4741 struct intel_encoder *encoder;
4687 const intel_limit_t *limit; 4742 const intel_limit_t *limit;
4688 int ret; 4743 int ret;
4744 u32 temp;
4745 u32 lvds_sync = 0;
4746
4747 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4748 if (encoder->base.crtc != crtc)
4749 continue;
4689 4750
4690 for_each_encoder_on_crtc(dev, crtc, encoder) {
4691 switch (encoder->type) { 4751 switch (encoder->type) {
4692 case INTEL_OUTPUT_LVDS: 4752 case INTEL_OUTPUT_LVDS:
4693 is_lvds = true; 4753 is_lvds = true;
@@ -4698,9 +4758,15 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4698 if (encoder->needs_tv_clock) 4758 if (encoder->needs_tv_clock)
4699 is_tv = true; 4759 is_tv = true;
4700 break; 4760 break;
4761 case INTEL_OUTPUT_DVO:
4762 is_dvo = true;
4763 break;
4701 case INTEL_OUTPUT_TVOUT: 4764 case INTEL_OUTPUT_TVOUT:
4702 is_tv = true; 4765 is_tv = true;
4703 break; 4766 break;
4767 case INTEL_OUTPUT_ANALOG:
4768 is_crt = true;
4769 break;
4704 case INTEL_OUTPUT_DISPLAYPORT: 4770 case INTEL_OUTPUT_DISPLAYPORT:
4705 is_dp = true; 4771 is_dp = true;
4706 break; 4772 break;
@@ -4709,7 +4775,15 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4709 num_connectors++; 4775 num_connectors++;
4710 } 4776 }
4711 4777
4712 refclk = i9xx_get_refclk(crtc, num_connectors); 4778 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4779 refclk = dev_priv->lvds_ssc_freq * 1000;
4780 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4781 refclk / 1000);
4782 } else if (!IS_GEN2(dev)) {
4783 refclk = 96000;
4784 } else {
4785 refclk = 48000;
4786 }
4713 4787
4714 /* 4788 /*
4715 * Returns a set of divisors for the desired target clock with the given 4789 * Returns a set of divisors for the desired target clock with the given
@@ -4717,8 +4791,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4717 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 4791 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4718 */ 4792 */
4719 limit = intel_limit(crtc, refclk); 4793 limit = intel_limit(crtc, refclk);
4720 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, 4794 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4721 &clock);
4722 if (!ok) { 4795 if (!ok) {
4723 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4796 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4724 return -EINVAL; 4797 return -EINVAL;
@@ -4728,34 +4801,119 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4728 intel_crtc_update_cursor(crtc, true); 4801 intel_crtc_update_cursor(crtc, true);
4729 4802
4730 if (is_lvds && dev_priv->lvds_downclock_avail) { 4803 if (is_lvds && dev_priv->lvds_downclock_avail) {
4731 /*
4732 * Ensure we match the reduced clock's P to the target clock.
4733 * If the clocks don't match, we can't switch the display clock
4734 * by using the FP0/FP1. In such case we will disable the LVDS
4735 * downclock feature.
4736 */
4737 has_reduced_clock = limit->find_pll(limit, crtc, 4804 has_reduced_clock = limit->find_pll(limit, crtc,
4738 dev_priv->lvds_downclock, 4805 dev_priv->lvds_downclock,
4739 refclk, 4806 refclk,
4740 &clock,
4741 &reduced_clock); 4807 &reduced_clock);
4808 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4809 /*
4810 * If the different P is found, it means that we can't
4811 * switch the display clock by using the FP0/FP1.
4812 * In such case we will disable the LVDS downclock
4813 * feature.
4814 */
4815 DRM_DEBUG_KMS("Different P is found for "
4816 "LVDS clock/downclock\n");
4817 has_reduced_clock = 0;
4818 }
4819 }
4820 /* SDVO TV has fixed PLL values depend on its clock range,
4821 this mirrors vbios setting. */
4822 if (is_sdvo && is_tv) {
4823 if (adjusted_mode->clock >= 100000
4824 && adjusted_mode->clock < 140500) {
4825 clock.p1 = 2;
4826 clock.p2 = 10;
4827 clock.n = 3;
4828 clock.m1 = 16;
4829 clock.m2 = 8;
4830 } else if (adjusted_mode->clock >= 140500
4831 && adjusted_mode->clock <= 200000) {
4832 clock.p1 = 1;
4833 clock.p2 = 10;
4834 clock.n = 6;
4835 clock.m1 = 12;
4836 clock.m2 = 8;
4837 }
4742 } 4838 }
4743 4839
4744 if (is_sdvo && is_tv) 4840 if (IS_PINEVIEW(dev)) {
4745 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); 4841 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4842 if (has_reduced_clock)
4843 fp2 = (1 << reduced_clock.n) << 16 |
4844 reduced_clock.m1 << 8 | reduced_clock.m2;
4845 } else {
4846 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4847 if (has_reduced_clock)
4848 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4849 reduced_clock.m2;
4850 }
4746 4851
4747 if (IS_GEN2(dev)) 4852 dpll = DPLL_VGA_MODE_DIS;
4748 i8xx_update_pll(crtc, adjusted_mode, &clock, 4853
4749 has_reduced_clock ? &reduced_clock : NULL, 4854 if (!IS_GEN2(dev)) {
4750 num_connectors); 4855 if (is_lvds)
4751 else if (IS_VALLEYVIEW(dev)) 4856 dpll |= DPLLB_MODE_LVDS;
4752 vlv_update_pll(crtc, mode, adjusted_mode, &clock, 4857 else
4753 has_reduced_clock ? &reduced_clock : NULL, 4858 dpll |= DPLLB_MODE_DAC_SERIAL;
4754 num_connectors); 4859 if (is_sdvo) {
4860 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4861 if (pixel_multiplier > 1) {
4862 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4863 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4864 }
4865 dpll |= DPLL_DVO_HIGH_SPEED;
4866 }
4867 if (is_dp)
4868 dpll |= DPLL_DVO_HIGH_SPEED;
4869
4870 /* compute bitmask from p1 value */
4871 if (IS_PINEVIEW(dev))
4872 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4873 else {
4874 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4875 if (IS_G4X(dev) && has_reduced_clock)
4876 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4877 }
4878 switch (clock.p2) {
4879 case 5:
4880 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4881 break;
4882 case 7:
4883 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4884 break;
4885 case 10:
4886 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4887 break;
4888 case 14:
4889 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4890 break;
4891 }
4892 if (INTEL_INFO(dev)->gen >= 4)
4893 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4894 } else {
4895 if (is_lvds) {
4896 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4897 } else {
4898 if (clock.p1 == 2)
4899 dpll |= PLL_P1_DIVIDE_BY_TWO;
4900 else
4901 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4902 if (clock.p2 == 4)
4903 dpll |= PLL_P2_DIVIDE_BY_4;
4904 }
4905 }
4906
4907 if (is_sdvo && is_tv)
4908 dpll |= PLL_REF_INPUT_TVCLKINBC;
4909 else if (is_tv)
4910 /* XXX: just matching BIOS for now */
4911 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4912 dpll |= 3;
4913 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4914 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4755 else 4915 else
4756 i9xx_update_pll(crtc, mode, adjusted_mode, &clock, 4916 dpll |= PLL_REF_INPUT_DREFCLK;
4757 has_reduced_clock ? &reduced_clock : NULL,
4758 num_connectors);
4759 4917
4760 /* setup pipeconf */ 4918 /* setup pipeconf */
4761 pipeconf = I915_READ(PIPECONF(pipe)); 4919 pipeconf = I915_READ(PIPECONF(pipe));
@@ -4763,6 +4921,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4763 /* Set up the display plane register */ 4921 /* Set up the display plane register */
4764 dspcntr = DISPPLANE_GAMMA_ENABLE; 4922 dspcntr = DISPPLANE_GAMMA_ENABLE;
4765 4923
4924 /* Ironlake's plane is forced to pipe, bit 24 is to
4925 enable color space conversion */
4766 if (pipe == 0) 4926 if (pipe == 0)
4767 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4927 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4768 else 4928 else
@@ -4782,45 +4942,145 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4782 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4942 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4783 } 4943 }
4784 4944
4785 /* default to 8bpc */ 4945 dpll |= DPLL_VCO_ENABLE;
4786 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); 4946
4787 if (is_dp) { 4947 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4788 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4948 drm_mode_debug_printmodeline(mode);
4789 pipeconf |= PIPECONF_BPP_6 | 4949
4790 PIPECONF_DITHER_EN | 4950 I915_WRITE(FP0(pipe), fp);
4791 PIPECONF_DITHER_TYPE_SP; 4951 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4952
4953 POSTING_READ(DPLL(pipe));
4954 udelay(150);
4955
4956 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4957 * This is an exception to the general rule that mode_set doesn't turn
4958 * things on.
4959 */
4960 if (is_lvds) {
4961 temp = I915_READ(LVDS);
4962 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4963 if (pipe == 1) {
4964 temp |= LVDS_PIPEB_SELECT;
4965 } else {
4966 temp &= ~LVDS_PIPEB_SELECT;
4792 } 4967 }
4793 } 4968 /* set the corresponsding LVDS_BORDER bit */
4969 temp |= dev_priv->lvds_border_bits;
4970 /* Set the B0-B3 data pairs corresponding to whether we're going to
4971 * set the DPLLs for dual-channel mode or not.
4972 */
4973 if (clock.p2 == 7)
4974 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4975 else
4976 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4794 4977
4795 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 4978 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4796 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4979 * appropriately here, but we need to look more thoroughly into how
4797 pipeconf |= PIPECONF_BPP_6 | 4980 * panels behave in the two modes.
4798 PIPECONF_ENABLE | 4981 */
4799 I965_PIPECONF_ACTIVE; 4982 /* set the dithering flag on LVDS as needed */
4983 if (INTEL_INFO(dev)->gen >= 4) {
4984 if (dev_priv->lvds_dither)
4985 temp |= LVDS_ENABLE_DITHER;
4986 else
4987 temp &= ~LVDS_ENABLE_DITHER;
4800 } 4988 }
4989 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4990 lvds_sync |= LVDS_HSYNC_POLARITY;
4991 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4992 lvds_sync |= LVDS_VSYNC_POLARITY;
4993 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4994 != lvds_sync) {
4995 char flags[2] = "-+";
4996 DRM_INFO("Changing LVDS panel from "
4997 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4998 flags[!(temp & LVDS_HSYNC_POLARITY)],
4999 flags[!(temp & LVDS_VSYNC_POLARITY)],
5000 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5001 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5002 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5003 temp |= lvds_sync;
5004 }
5005 I915_WRITE(LVDS, temp);
4801 } 5006 }
4802 5007
4803 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 5008 if (is_dp) {
4804 drm_mode_debug_printmodeline(mode); 5009 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5010 }
5011
5012 I915_WRITE(DPLL(pipe), dpll);
5013
5014 /* Wait for the clocks to stabilize. */
5015 POSTING_READ(DPLL(pipe));
5016 udelay(150);
5017
5018 if (INTEL_INFO(dev)->gen >= 4) {
5019 temp = 0;
5020 if (is_sdvo) {
5021 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5022 if (temp > 1)
5023 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5024 else
5025 temp = 0;
5026 }
5027 I915_WRITE(DPLL_MD(pipe), temp);
5028 } else {
5029 /* The pixel multiplier can only be updated once the
5030 * DPLL is enabled and the clocks are stable.
5031 *
5032 * So write it again.
5033 */
5034 I915_WRITE(DPLL(pipe), dpll);
5035 }
4805 5036
4806 if (HAS_PIPE_CXSR(dev)) { 5037 intel_crtc->lowfreq_avail = false;
4807 if (intel_crtc->lowfreq_avail) { 5038 if (is_lvds && has_reduced_clock && i915_powersave) {
5039 I915_WRITE(FP1(pipe), fp2);
5040 intel_crtc->lowfreq_avail = true;
5041 if (HAS_PIPE_CXSR(dev)) {
4808 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5042 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4809 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 5043 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4810 } else { 5044 }
5045 } else {
5046 I915_WRITE(FP1(pipe), fp);
5047 if (HAS_PIPE_CXSR(dev)) {
4811 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 5048 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4812 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 5049 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4813 } 5050 }
4814 } 5051 }
4815 5052
4816 pipeconf &= ~PIPECONF_INTERLACE_MASK; 5053 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4817 if (!IS_GEN2(dev) &&
4818 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4819 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5054 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4820 else 5055 /* the chip adds 2 halflines automatically */
4821 pipeconf |= PIPECONF_PROGRESSIVE; 5056 adjusted_mode->crtc_vdisplay -= 1;
5057 adjusted_mode->crtc_vtotal -= 1;
5058 adjusted_mode->crtc_vblank_start -= 1;
5059 adjusted_mode->crtc_vblank_end -= 1;
5060 adjusted_mode->crtc_vsync_end -= 1;
5061 adjusted_mode->crtc_vsync_start -= 1;
5062 } else
5063 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4822 5064
4823 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5065 I915_WRITE(HTOTAL(pipe),
5066 (adjusted_mode->crtc_hdisplay - 1) |
5067 ((adjusted_mode->crtc_htotal - 1) << 16));
5068 I915_WRITE(HBLANK(pipe),
5069 (adjusted_mode->crtc_hblank_start - 1) |
5070 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5071 I915_WRITE(HSYNC(pipe),
5072 (adjusted_mode->crtc_hsync_start - 1) |
5073 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5074
5075 I915_WRITE(VTOTAL(pipe),
5076 (adjusted_mode->crtc_vdisplay - 1) |
5077 ((adjusted_mode->crtc_vtotal - 1) << 16));
5078 I915_WRITE(VBLANK(pipe),
5079 (adjusted_mode->crtc_vblank_start - 1) |
5080 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5081 I915_WRITE(VSYNC(pipe),
5082 (adjusted_mode->crtc_vsync_start - 1) |
5083 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4824 5084
4825 /* pipesrc and dspsize control the size that is scaled from, 5085 /* pipesrc and dspsize control the size that is scaled from,
4826 * which should always be the user's requested size. 5086 * which should always be the user's requested size.
@@ -4829,6 +5089,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4829 ((mode->vdisplay - 1) << 16) | 5089 ((mode->vdisplay - 1) << 16) |
4830 (mode->hdisplay - 1)); 5090 (mode->hdisplay - 1));
4831 I915_WRITE(DSPPOS(plane), 0); 5091 I915_WRITE(DSPPOS(plane), 0);
5092 I915_WRITE(PIPESRC(pipe),
5093 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4832 5094
4833 I915_WRITE(PIPECONF(pipe), pipeconf); 5095 I915_WRITE(PIPECONF(pipe), pipeconf);
4834 POSTING_READ(PIPECONF(pipe)); 5096 POSTING_READ(PIPECONF(pipe));
@@ -4838,56 +5100,44 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4838 5100
4839 I915_WRITE(DSPCNTR(plane), dspcntr); 5101 I915_WRITE(DSPCNTR(plane), dspcntr);
4840 POSTING_READ(DSPCNTR(plane)); 5102 POSTING_READ(DSPCNTR(plane));
5103 intel_enable_plane(dev_priv, plane, pipe);
4841 5104
4842 ret = intel_pipe_set_base(crtc, x, y, fb); 5105 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4843 5106
4844 intel_update_watermarks(dev); 5107 intel_update_watermarks(dev);
4845 5108
4846 return ret; 5109 return ret;
4847} 5110}
4848 5111
4849static void ironlake_init_pch_refclk(struct drm_device *dev) 5112static void ironlake_update_pch_refclk(struct drm_device *dev)
4850{ 5113{
4851 struct drm_i915_private *dev_priv = dev->dev_private; 5114 struct drm_i915_private *dev_priv = dev->dev_private;
4852 struct drm_mode_config *mode_config = &dev->mode_config; 5115 struct drm_mode_config *mode_config = &dev->mode_config;
5116 struct drm_crtc *crtc;
4853 struct intel_encoder *encoder; 5117 struct intel_encoder *encoder;
5118 struct intel_encoder *has_edp_encoder = NULL;
4854 u32 temp; 5119 u32 temp;
4855 bool has_lvds = false; 5120 bool has_lvds = false;
4856 bool has_cpu_edp = false;
4857 bool has_pch_edp = false;
4858 bool has_panel = false;
4859 bool has_ck505 = false;
4860 bool can_ssc = false;
4861 5121
4862 /* We need to take the global config into account */ 5122 /* We need to take the global config into account */
4863 list_for_each_entry(encoder, &mode_config->encoder_list, 5123 list_for_each_entry(crtc, &mode_config->crtc_list, head) {
4864 base.head) { 5124 if (!crtc->enabled)
4865 switch (encoder->type) { 5125 continue;
4866 case INTEL_OUTPUT_LVDS:
4867 has_panel = true;
4868 has_lvds = true;
4869 break;
4870 case INTEL_OUTPUT_EDP:
4871 has_panel = true;
4872 if (intel_encoder_is_pch_edp(&encoder->base))
4873 has_pch_edp = true;
4874 else
4875 has_cpu_edp = true;
4876 break;
4877 }
4878 }
4879 5126
4880 if (HAS_PCH_IBX(dev)) { 5127 list_for_each_entry(encoder, &mode_config->encoder_list,
4881 has_ck505 = dev_priv->display_clock_mode; 5128 base.head) {
4882 can_ssc = has_ck505; 5129 if (encoder->base.crtc != crtc)
4883 } else { 5130 continue;
4884 has_ck505 = false;
4885 can_ssc = true;
4886 }
4887 5131
4888 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", 5132 switch (encoder->type) {
4889 has_panel, has_lvds, has_pch_edp, has_cpu_edp, 5133 case INTEL_OUTPUT_LVDS:
4890 has_ck505); 5134 has_lvds = true;
5135 case INTEL_OUTPUT_EDP:
5136 has_edp_encoder = encoder;
5137 break;
5138 }
5139 }
5140 }
4891 5141
4892 /* Ironlake: try to setup display ref clock before DPLL 5142 /* Ironlake: try to setup display ref clock before DPLL
4893 * enabling. This is only under driver's control after 5143 * enabling. This is only under driver's control after
@@ -4897,375 +5147,112 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
4897 temp = I915_READ(PCH_DREF_CONTROL); 5147 temp = I915_READ(PCH_DREF_CONTROL);
4898 /* Always enable nonspread source */ 5148 /* Always enable nonspread source */
4899 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 5149 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5150 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5151 temp &= ~DREF_SSC_SOURCE_MASK;
5152 temp |= DREF_SSC_SOURCE_ENABLE;
5153 I915_WRITE(PCH_DREF_CONTROL, temp);
4900 5154
4901 if (has_ck505) 5155 POSTING_READ(PCH_DREF_CONTROL);
4902 temp |= DREF_NONSPREAD_CK505_ENABLE; 5156 udelay(200);
4903 else
4904 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4905
4906 if (has_panel) {
4907 temp &= ~DREF_SSC_SOURCE_MASK;
4908 temp |= DREF_SSC_SOURCE_ENABLE;
4909 5157
4910 /* SSC must be turned on before enabling the CPU output */ 5158 if (has_edp_encoder) {
4911 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 5159 if (intel_panel_use_ssc(dev_priv)) {
4912 DRM_DEBUG_KMS("Using SSC on panel\n");
4913 temp |= DREF_SSC1_ENABLE; 5160 temp |= DREF_SSC1_ENABLE;
4914 } else 5161 I915_WRITE(PCH_DREF_CONTROL, temp);
4915 temp &= ~DREF_SSC1_ENABLE;
4916
4917 /* Get SSC going before enabling the outputs */
4918 I915_WRITE(PCH_DREF_CONTROL, temp);
4919 POSTING_READ(PCH_DREF_CONTROL);
4920 udelay(200);
4921 5162
5163 POSTING_READ(PCH_DREF_CONTROL);
5164 udelay(200);
5165 }
4922 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 5166 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4923 5167
4924 /* Enable CPU source on CPU attached eDP */ 5168 /* Enable CPU source on CPU attached eDP */
4925 if (has_cpu_edp) { 5169 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4926 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 5170 if (intel_panel_use_ssc(dev_priv))
4927 DRM_DEBUG_KMS("Using SSC on eDP\n");
4928 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 5171 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4929 }
4930 else 5172 else
4931 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 5173 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4932 } else 5174 } else {
4933 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 5175 /* Enable SSC on PCH eDP if needed */
4934 5176 if (intel_panel_use_ssc(dev_priv)) {
4935 I915_WRITE(PCH_DREF_CONTROL, temp); 5177 DRM_ERROR("enabling SSC on PCH\n");
4936 POSTING_READ(PCH_DREF_CONTROL); 5178 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4937 udelay(200); 5179 }
4938 } else { 5180 }
4939 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4940
4941 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4942
4943 /* Turn off CPU output */
4944 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4945
4946 I915_WRITE(PCH_DREF_CONTROL, temp);
4947 POSTING_READ(PCH_DREF_CONTROL);
4948 udelay(200);
4949
4950 /* Turn off the SSC source */
4951 temp &= ~DREF_SSC_SOURCE_MASK;
4952 temp |= DREF_SSC_SOURCE_DISABLE;
4953
4954 /* Turn off SSC1 */
4955 temp &= ~ DREF_SSC1_ENABLE;
4956
4957 I915_WRITE(PCH_DREF_CONTROL, temp); 5181 I915_WRITE(PCH_DREF_CONTROL, temp);
4958 POSTING_READ(PCH_DREF_CONTROL); 5182 POSTING_READ(PCH_DREF_CONTROL);
4959 udelay(200); 5183 udelay(200);
4960 } 5184 }
4961} 5185}
4962 5186
4963/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ 5187static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4964static void lpt_init_pch_refclk(struct drm_device *dev) 5188 struct drm_display_mode *mode,
5189 struct drm_display_mode *adjusted_mode,
5190 int x, int y,
5191 struct drm_framebuffer *old_fb)
4965{ 5192{
5193 struct drm_device *dev = crtc->dev;
4966 struct drm_i915_private *dev_priv = dev->dev_private; 5194 struct drm_i915_private *dev_priv = dev->dev_private;
5195 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5196 int pipe = intel_crtc->pipe;
5197 int plane = intel_crtc->plane;
5198 int refclk, num_connectors = 0;
5199 intel_clock_t clock, reduced_clock;
5200 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5201 bool ok, has_reduced_clock = false, is_sdvo = false;
5202 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5203 struct intel_encoder *has_edp_encoder = NULL;
4967 struct drm_mode_config *mode_config = &dev->mode_config; 5204 struct drm_mode_config *mode_config = &dev->mode_config;
4968 struct intel_encoder *encoder; 5205 struct intel_encoder *encoder;
4969 bool has_vga = false; 5206 const intel_limit_t *limit;
4970 bool is_sdv = false; 5207 int ret;
4971 u32 tmp; 5208 struct fdi_m_n m_n = {0};
5209 u32 temp;
5210 u32 lvds_sync = 0;
5211 int target_clock, pixel_multiplier, lane, link_bw, factor;
5212 unsigned int pipe_bpp;
5213 bool dither;
4972 5214
4973 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5215 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4974 switch (encoder->type) { 5216 if (encoder->base.crtc != crtc)
4975 case INTEL_OUTPUT_ANALOG: 5217 continue;
4976 has_vga = true;
4977 break;
4978 }
4979 }
4980
4981 if (!has_vga)
4982 return;
4983
4984 /* XXX: Rip out SDV support once Haswell ships for real. */
4985 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
4986 is_sdv = true;
4987
4988 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4989 tmp &= ~SBI_SSCCTL_DISABLE;
4990 tmp |= SBI_SSCCTL_PATHALT;
4991 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4992
4993 udelay(24);
4994
4995 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4996 tmp &= ~SBI_SSCCTL_PATHALT;
4997 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4998
4999 if (!is_sdv) {
5000 tmp = I915_READ(SOUTH_CHICKEN2);
5001 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5002 I915_WRITE(SOUTH_CHICKEN2, tmp);
5003
5004 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5005 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5006 DRM_ERROR("FDI mPHY reset assert timeout\n");
5007
5008 tmp = I915_READ(SOUTH_CHICKEN2);
5009 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5010 I915_WRITE(SOUTH_CHICKEN2, tmp);
5011
5012 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5013 FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
5014 100))
5015 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5016 }
5017
5018 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5019 tmp &= ~(0xFF << 24);
5020 tmp |= (0x12 << 24);
5021 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5022
5023 if (!is_sdv) {
5024 tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
5025 tmp &= ~(0x3 << 6);
5026 tmp |= (1 << 6) | (1 << 0);
5027 intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
5028 }
5029
5030 if (is_sdv) {
5031 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
5032 tmp |= 0x7FFF;
5033 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
5034 }
5035
5036 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5037 tmp |= (1 << 11);
5038 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5039
5040 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5041 tmp |= (1 << 11);
5042 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5043
5044 if (is_sdv) {
5045 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
5046 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5047 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
5048
5049 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
5050 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5051 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
5052
5053 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
5054 tmp |= (0x3F << 8);
5055 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
5056
5057 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
5058 tmp |= (0x3F << 8);
5059 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
5060 }
5061
5062 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5063 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5064 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5065
5066 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5067 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5068 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5069
5070 if (!is_sdv) {
5071 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5072 tmp &= ~(7 << 13);
5073 tmp |= (5 << 13);
5074 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5075
5076 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5077 tmp &= ~(7 << 13);
5078 tmp |= (5 << 13);
5079 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5080 }
5081
5082 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5083 tmp &= ~0xFF;
5084 tmp |= 0x1C;
5085 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5086
5087 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5088 tmp &= ~0xFF;
5089 tmp |= 0x1C;
5090 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5091
5092 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5093 tmp &= ~(0xFF << 16);
5094 tmp |= (0x1C << 16);
5095 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5096
5097 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5098 tmp &= ~(0xFF << 16);
5099 tmp |= (0x1C << 16);
5100 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5101
5102 if (!is_sdv) {
5103 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5104 tmp |= (1 << 27);
5105 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5106
5107 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5108 tmp |= (1 << 27);
5109 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5110
5111 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5112 tmp &= ~(0xF << 28);
5113 tmp |= (4 << 28);
5114 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5115
5116 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5117 tmp &= ~(0xF << 28);
5118 tmp |= (4 << 28);
5119 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5120 }
5121
5122 /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
5123 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
5124 tmp |= SBI_DBUFF0_ENABLE;
5125 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
5126}
5127
5128/*
5129 * Initialize reference clocks when the driver loads
5130 */
5131void intel_init_pch_refclk(struct drm_device *dev)
5132{
5133 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5134 ironlake_init_pch_refclk(dev);
5135 else if (HAS_PCH_LPT(dev))
5136 lpt_init_pch_refclk(dev);
5137}
5138
5139static int ironlake_get_refclk(struct drm_crtc *crtc)
5140{
5141 struct drm_device *dev = crtc->dev;
5142 struct drm_i915_private *dev_priv = dev->dev_private;
5143 struct intel_encoder *encoder;
5144 struct intel_encoder *edp_encoder = NULL;
5145 int num_connectors = 0;
5146 bool is_lvds = false;
5147 5218
5148 for_each_encoder_on_crtc(dev, crtc, encoder) {
5149 switch (encoder->type) { 5219 switch (encoder->type) {
5150 case INTEL_OUTPUT_LVDS: 5220 case INTEL_OUTPUT_LVDS:
5151 is_lvds = true; 5221 is_lvds = true;
5152 break; 5222 break;
5153 case INTEL_OUTPUT_EDP:
5154 edp_encoder = encoder;
5155 break;
5156 }
5157 num_connectors++;
5158 }
5159
5160 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5161 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5162 dev_priv->lvds_ssc_freq);
5163 return dev_priv->lvds_ssc_freq * 1000;
5164 }
5165
5166 return 120000;
5167}
5168
5169static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5170 struct drm_display_mode *adjusted_mode,
5171 bool dither)
5172{
5173 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5175 int pipe = intel_crtc->pipe;
5176 uint32_t val;
5177
5178 val = I915_READ(PIPECONF(pipe));
5179
5180 val &= ~PIPE_BPC_MASK;
5181 switch (intel_crtc->bpp) {
5182 case 18:
5183 val |= PIPE_6BPC;
5184 break;
5185 case 24:
5186 val |= PIPE_8BPC;
5187 break;
5188 case 30:
5189 val |= PIPE_10BPC;
5190 break;
5191 case 36:
5192 val |= PIPE_12BPC;
5193 break;
5194 default:
5195 /* Case prevented by intel_choose_pipe_bpp_dither. */
5196 BUG();
5197 }
5198
5199 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5200 if (dither)
5201 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5202
5203 val &= ~PIPECONF_INTERLACE_MASK;
5204 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5205 val |= PIPECONF_INTERLACED_ILK;
5206 else
5207 val |= PIPECONF_PROGRESSIVE;
5208
5209 I915_WRITE(PIPECONF(pipe), val);
5210 POSTING_READ(PIPECONF(pipe));
5211}
5212
5213static void haswell_set_pipeconf(struct drm_crtc *crtc,
5214 struct drm_display_mode *adjusted_mode,
5215 bool dither)
5216{
5217 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5218 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5219 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5220 uint32_t val;
5221
5222 val = I915_READ(PIPECONF(cpu_transcoder));
5223
5224 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5225 if (dither)
5226 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5227
5228 val &= ~PIPECONF_INTERLACE_MASK_HSW;
5229 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5230 val |= PIPECONF_INTERLACED_ILK;
5231 else
5232 val |= PIPECONF_PROGRESSIVE;
5233
5234 I915_WRITE(PIPECONF(cpu_transcoder), val);
5235 POSTING_READ(PIPECONF(cpu_transcoder));
5236}
5237
5238static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5239 struct drm_display_mode *adjusted_mode,
5240 intel_clock_t *clock,
5241 bool *has_reduced_clock,
5242 intel_clock_t *reduced_clock)
5243{
5244 struct drm_device *dev = crtc->dev;
5245 struct drm_i915_private *dev_priv = dev->dev_private;
5246 struct intel_encoder *intel_encoder;
5247 int refclk;
5248 const intel_limit_t *limit;
5249 bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
5250
5251 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5252 switch (intel_encoder->type) {
5253 case INTEL_OUTPUT_LVDS:
5254 is_lvds = true;
5255 break;
5256 case INTEL_OUTPUT_SDVO: 5223 case INTEL_OUTPUT_SDVO:
5257 case INTEL_OUTPUT_HDMI: 5224 case INTEL_OUTPUT_HDMI:
5258 is_sdvo = true; 5225 is_sdvo = true;
5259 if (intel_encoder->needs_tv_clock) 5226 if (encoder->needs_tv_clock)
5260 is_tv = true; 5227 is_tv = true;
5261 break; 5228 break;
5262 case INTEL_OUTPUT_TVOUT: 5229 case INTEL_OUTPUT_TVOUT:
5263 is_tv = true; 5230 is_tv = true;
5264 break; 5231 break;
5232 case INTEL_OUTPUT_ANALOG:
5233 is_crt = true;
5234 break;
5235 case INTEL_OUTPUT_DISPLAYPORT:
5236 is_dp = true;
5237 break;
5238 case INTEL_OUTPUT_EDP:
5239 has_edp_encoder = encoder;
5240 break;
5265 } 5241 }
5242
5243 num_connectors++;
5266 } 5244 }
5267 5245
5268 refclk = ironlake_get_refclk(crtc); 5246 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5247 refclk = dev_priv->lvds_ssc_freq * 1000;
5248 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5249 refclk / 1000);
5250 } else {
5251 refclk = 96000;
5252 if (!has_edp_encoder ||
5253 intel_encoder_is_pch_edp(&has_edp_encoder->base))
5254 refclk = 120000; /* 120Mhz refclk */
5255 }
5269 5256
5270 /* 5257 /*
5271 * Returns a set of divisors for the desired target clock with the given 5258 * Returns a set of divisors for the desired target clock with the given
@@ -5273,148 +5260,49 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5273 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 5260 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5274 */ 5261 */
5275 limit = intel_limit(crtc, refclk); 5262 limit = intel_limit(crtc, refclk);
5276 ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, 5263 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
5277 clock); 5264 if (!ok) {
5278 if (!ret) 5265 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5279 return false; 5266 return -EINVAL;
5280
5281 if (is_lvds && dev_priv->lvds_downclock_avail) {
5282 /*
5283 * Ensure we match the reduced clock's P to the target clock.
5284 * If the clocks don't match, we can't switch the display clock
5285 * by using the FP0/FP1. In such case we will disable the LVDS
5286 * downclock feature.
5287 */
5288 *has_reduced_clock = limit->find_pll(limit, crtc,
5289 dev_priv->lvds_downclock,
5290 refclk,
5291 clock,
5292 reduced_clock);
5293 }
5294
5295 if (is_sdvo && is_tv)
5296 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
5297
5298 return true;
5299}
5300
5301static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5302{
5303 struct drm_i915_private *dev_priv = dev->dev_private;
5304 uint32_t temp;
5305
5306 temp = I915_READ(SOUTH_CHICKEN1);
5307 if (temp & FDI_BC_BIFURCATION_SELECT)
5308 return;
5309
5310 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5311 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5312
5313 temp |= FDI_BC_BIFURCATION_SELECT;
5314 DRM_DEBUG_KMS("enabling fdi C rx\n");
5315 I915_WRITE(SOUTH_CHICKEN1, temp);
5316 POSTING_READ(SOUTH_CHICKEN1);
5317}
5318
5319static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5320{
5321 struct drm_device *dev = intel_crtc->base.dev;
5322 struct drm_i915_private *dev_priv = dev->dev_private;
5323 struct intel_crtc *pipe_B_crtc =
5324 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5325
5326 DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
5327 intel_crtc->pipe, intel_crtc->fdi_lanes);
5328 if (intel_crtc->fdi_lanes > 4) {
5329 DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
5330 intel_crtc->pipe, intel_crtc->fdi_lanes);
5331 /* Clamp lanes to avoid programming the hw with bogus values. */
5332 intel_crtc->fdi_lanes = 4;
5333
5334 return false;
5335 } 5267 }
5336 5268
5337 if (dev_priv->num_pipe == 2) 5269 /* Ensure that the cursor is valid for the new mode before changing... */
5338 return true; 5270 intel_crtc_update_cursor(crtc, true);
5339
5340 switch (intel_crtc->pipe) {
5341 case PIPE_A:
5342 return true;
5343 case PIPE_B:
5344 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5345 intel_crtc->fdi_lanes > 2) {
5346 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5347 intel_crtc->pipe, intel_crtc->fdi_lanes);
5348 /* Clamp lanes to avoid programming the hw with bogus values. */
5349 intel_crtc->fdi_lanes = 2;
5350
5351 return false;
5352 }
5353
5354 if (intel_crtc->fdi_lanes > 2)
5355 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5356 else
5357 cpt_enable_fdi_bc_bifurcation(dev);
5358
5359 return true;
5360 case PIPE_C:
5361 if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
5362 if (intel_crtc->fdi_lanes > 2) {
5363 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5364 intel_crtc->pipe, intel_crtc->fdi_lanes);
5365 /* Clamp lanes to avoid programming the hw with bogus values. */
5366 intel_crtc->fdi_lanes = 2;
5367 5271
5368 return false; 5272 if (is_lvds && dev_priv->lvds_downclock_avail) {
5369 } 5273 has_reduced_clock = limit->find_pll(limit, crtc,
5370 } else { 5274 dev_priv->lvds_downclock,
5371 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 5275 refclk,
5372 return false; 5276 &reduced_clock);
5277 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
5278 /*
5279 * If the different P is found, it means that we can't
5280 * switch the display clock by using the FP0/FP1.
5281 * In such case we will disable the LVDS downclock
5282 * feature.
5283 */
5284 DRM_DEBUG_KMS("Different P is found for "
5285 "LVDS clock/downclock\n");
5286 has_reduced_clock = 0;
5373 } 5287 }
5374
5375 cpt_enable_fdi_bc_bifurcation(dev);
5376
5377 return true;
5378 default:
5379 BUG();
5380 } 5288 }
5381} 5289 /* SDVO TV has fixed PLL values depend on its clock range,
5382 5290 this mirrors vbios setting. */
5383int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5291 if (is_sdvo && is_tv) {
5384{ 5292 if (adjusted_mode->clock >= 100000
5385 /* 5293 && adjusted_mode->clock < 140500) {
5386 * Account for spread spectrum to avoid 5294 clock.p1 = 2;
5387 * oversubscribing the link. Max center spread 5295 clock.p2 = 10;
5388 * is 2.5%; use 5% for safety's sake. 5296 clock.n = 3;
5389 */ 5297 clock.m1 = 16;
5390 u32 bps = target_clock * bpp * 21 / 20; 5298 clock.m2 = 8;
5391 return bps / (link_bw * 8) + 1; 5299 } else if (adjusted_mode->clock >= 140500
5392} 5300 && adjusted_mode->clock <= 200000) {
5393 5301 clock.p1 = 1;
5394static void ironlake_set_m_n(struct drm_crtc *crtc, 5302 clock.p2 = 10;
5395 struct drm_display_mode *mode, 5303 clock.n = 6;
5396 struct drm_display_mode *adjusted_mode) 5304 clock.m1 = 12;
5397{ 5305 clock.m2 = 8;
5398 struct drm_device *dev = crtc->dev;
5399 struct drm_i915_private *dev_priv = dev->dev_private;
5400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5401 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5402 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
5403 struct fdi_m_n m_n = {0};
5404 int target_clock, pixel_multiplier, lane, link_bw;
5405 bool is_dp = false, is_cpu_edp = false;
5406
5407 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5408 switch (intel_encoder->type) {
5409 case INTEL_OUTPUT_DISPLAYPORT:
5410 is_dp = true;
5411 break;
5412 case INTEL_OUTPUT_EDP:
5413 is_dp = true;
5414 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5415 is_cpu_edp = true;
5416 edp_encoder = intel_encoder;
5417 break;
5418 } 5306 }
5419 } 5307 }
5420 5308
@@ -5423,9 +5311,19 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5423 lane = 0; 5311 lane = 0;
5424 /* CPU eDP doesn't require FDI link, so just set DP M/N 5312 /* CPU eDP doesn't require FDI link, so just set DP M/N
5425 according to current link config */ 5313 according to current link config */
5426 if (is_cpu_edp) { 5314 if (has_edp_encoder &&
5427 intel_edp_link_config(edp_encoder, &lane, &link_bw); 5315 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5316 target_clock = mode->clock;
5317 intel_edp_link_config(has_edp_encoder,
5318 &lane, &link_bw);
5428 } else { 5319 } else {
5320 /* [e]DP over FDI requires target mode clock
5321 instead of link clock */
5322 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5323 target_clock = mode->clock;
5324 else
5325 target_clock = adjusted_mode->clock;
5326
5429 /* FDI is a binary signal running at ~2.7GHz, encoding 5327 /* FDI is a binary signal running at ~2.7GHz, encoding
5430 * each output octet as 10 bits. The actual frequency 5328 * each output octet as 10 bits. The actual frequency
5431 * is stored as a divider into a 100MHz clock, and the 5329 * is stored as a divider into a 100MHz clock, and the
@@ -5436,17 +5334,43 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5436 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 5334 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5437 } 5335 }
5438 5336
5439 /* [e]DP over FDI requires target mode clock instead of link clock. */ 5337 /* determine panel color depth */
5440 if (edp_encoder) 5338 temp = I915_READ(PIPECONF(pipe));
5441 target_clock = intel_edp_target_clock(edp_encoder, mode); 5339 temp &= ~PIPE_BPC_MASK;
5442 else if (is_dp) 5340 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
5443 target_clock = mode->clock; 5341 switch (pipe_bpp) {
5444 else 5342 case 18:
5445 target_clock = adjusted_mode->clock; 5343 temp |= PIPE_6BPC;
5344 break;
5345 case 24:
5346 temp |= PIPE_8BPC;
5347 break;
5348 case 30:
5349 temp |= PIPE_10BPC;
5350 break;
5351 case 36:
5352 temp |= PIPE_12BPC;
5353 break;
5354 default:
5355 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5356 pipe_bpp);
5357 temp |= PIPE_8BPC;
5358 pipe_bpp = 24;
5359 break;
5360 }
5361
5362 intel_crtc->bpp = pipe_bpp;
5363 I915_WRITE(PIPECONF(pipe), temp);
5446 5364
5447 if (!lane) 5365 if (!lane) {
5448 lane = ironlake_get_lanes_required(target_clock, link_bw, 5366 /*
5449 intel_crtc->bpp); 5367 * Account for spread spectrum to avoid
5368 * oversubscribing the link. Max center spread
5369 * is 2.5%; use 5% for safety's sake.
5370 */
5371 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5372 lane = bps / (link_bw * 8) + 1;
5373 }
5450 5374
5451 intel_crtc->fdi_lanes = lane; 5375 intel_crtc->fdi_lanes = lane;
5452 5376
@@ -5455,51 +5379,12 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5455 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5379 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5456 &m_n); 5380 &m_n);
5457 5381
5458 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); 5382 ironlake_update_pch_refclk(dev);
5459 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5460 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
5461 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
5462}
5463
5464static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5465 struct drm_display_mode *adjusted_mode,
5466 intel_clock_t *clock, u32 fp)
5467{
5468 struct drm_crtc *crtc = &intel_crtc->base;
5469 struct drm_device *dev = crtc->dev;
5470 struct drm_i915_private *dev_priv = dev->dev_private;
5471 struct intel_encoder *intel_encoder;
5472 uint32_t dpll;
5473 int factor, pixel_multiplier, num_connectors = 0;
5474 bool is_lvds = false, is_sdvo = false, is_tv = false;
5475 bool is_dp = false, is_cpu_edp = false;
5476
5477 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5478 switch (intel_encoder->type) {
5479 case INTEL_OUTPUT_LVDS:
5480 is_lvds = true;
5481 break;
5482 case INTEL_OUTPUT_SDVO:
5483 case INTEL_OUTPUT_HDMI:
5484 is_sdvo = true;
5485 if (intel_encoder->needs_tv_clock)
5486 is_tv = true;
5487 break;
5488 case INTEL_OUTPUT_TVOUT:
5489 is_tv = true;
5490 break;
5491 case INTEL_OUTPUT_DISPLAYPORT:
5492 is_dp = true;
5493 break;
5494 case INTEL_OUTPUT_EDP:
5495 is_dp = true;
5496 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5497 is_cpu_edp = true;
5498 break;
5499 }
5500 5383
5501 num_connectors++; 5384 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5502 } 5385 if (has_reduced_clock)
5386 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5387 reduced_clock.m2;
5503 5388
5504 /* Enable autotuning of the PLL clock (if permissible) */ 5389 /* Enable autotuning of the PLL clock (if permissible) */
5505 factor = 21; 5390 factor = 21;
@@ -5511,7 +5396,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5511 } else if (is_sdvo && is_tv) 5396 } else if (is_sdvo && is_tv)
5512 factor = 20; 5397 factor = 20;
5513 5398
5514 if (clock->m < factor * clock->n) 5399 if (clock.m < factor * clock.n)
5515 fp |= FP_CB_TUNE; 5400 fp |= FP_CB_TUNE;
5516 5401
5517 dpll = 0; 5402 dpll = 0;
@@ -5521,21 +5406,21 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5521 else 5406 else
5522 dpll |= DPLLB_MODE_DAC_SERIAL; 5407 dpll |= DPLLB_MODE_DAC_SERIAL;
5523 if (is_sdvo) { 5408 if (is_sdvo) {
5524 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5409 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5525 if (pixel_multiplier > 1) { 5410 if (pixel_multiplier > 1) {
5526 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5411 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5527 } 5412 }
5528 dpll |= DPLL_DVO_HIGH_SPEED; 5413 dpll |= DPLL_DVO_HIGH_SPEED;
5529 } 5414 }
5530 if (is_dp && !is_cpu_edp) 5415 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5531 dpll |= DPLL_DVO_HIGH_SPEED; 5416 dpll |= DPLL_DVO_HIGH_SPEED;
5532 5417
5533 /* compute bitmask from p1 value */ 5418 /* compute bitmask from p1 value */
5534 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5419 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5535 /* also FPA1 */ 5420 /* also FPA1 */
5536 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5421 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5537 5422
5538 switch (clock->p2) { 5423 switch (clock.p2) {
5539 case 5: 5424 case 5:
5540 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 5425 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5541 break; 5426 break;
@@ -5561,90 +5446,47 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5561 else 5446 else
5562 dpll |= PLL_REF_INPUT_DREFCLK; 5447 dpll |= PLL_REF_INPUT_DREFCLK;
5563 5448
5564 return dpll; 5449 /* setup pipeconf */
5565} 5450 pipeconf = I915_READ(PIPECONF(pipe));
5566 5451
5567static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5452 /* Set up the display plane register */
5568 struct drm_display_mode *mode, 5453 dspcntr = DISPPLANE_GAMMA_ENABLE;
5569 struct drm_display_mode *adjusted_mode,
5570 int x, int y,
5571 struct drm_framebuffer *fb)
5572{
5573 struct drm_device *dev = crtc->dev;
5574 struct drm_i915_private *dev_priv = dev->dev_private;
5575 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5576 int pipe = intel_crtc->pipe;
5577 int plane = intel_crtc->plane;
5578 int num_connectors = 0;
5579 intel_clock_t clock, reduced_clock;
5580 u32 dpll, fp = 0, fp2 = 0;
5581 bool ok, has_reduced_clock = false;
5582 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5583 struct intel_encoder *encoder;
5584 u32 temp;
5585 int ret;
5586 bool dither, fdi_config_ok;
5587 5454
5588 for_each_encoder_on_crtc(dev, crtc, encoder) { 5455 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5589 switch (encoder->type) { 5456 drm_mode_debug_printmodeline(mode);
5590 case INTEL_OUTPUT_LVDS: 5457
5591 is_lvds = true; 5458 /* PCH eDP needs FDI, but CPU eDP does not */
5459 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5460 I915_WRITE(PCH_FP0(pipe), fp);
5461 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5462
5463 POSTING_READ(PCH_DPLL(pipe));
5464 udelay(150);
5465 }
5466
5467 /* enable transcoder DPLL */
5468 if (HAS_PCH_CPT(dev)) {
5469 temp = I915_READ(PCH_DPLL_SEL);
5470 switch (pipe) {
5471 case 0:
5472 temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
5592 break; 5473 break;
5593 case INTEL_OUTPUT_DISPLAYPORT: 5474 case 1:
5594 is_dp = true; 5475 temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
5595 break; 5476 break;
5596 case INTEL_OUTPUT_EDP: 5477 case 2:
5597 is_dp = true; 5478 /* FIXME: manage transcoder PLLs? */
5598 if (!intel_encoder_is_pch_edp(&encoder->base)) 5479 temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
5599 is_cpu_edp = true;
5600 break; 5480 break;
5481 default:
5482 BUG();
5601 } 5483 }
5484 I915_WRITE(PCH_DPLL_SEL, temp);
5602 5485
5603 num_connectors++; 5486 POSTING_READ(PCH_DPLL_SEL);
5604 } 5487 udelay(150);
5605
5606 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5607 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5608
5609 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5610 &has_reduced_clock, &reduced_clock);
5611 if (!ok) {
5612 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5613 return -EINVAL;
5614 } 5488 }
5615 5489
5616 /* Ensure that the cursor is valid for the new mode before changing... */
5617 intel_crtc_update_cursor(crtc, true);
5618
5619 /* determine panel color depth */
5620 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5621 adjusted_mode);
5622 if (is_lvds && dev_priv->lvds_dither)
5623 dither = true;
5624
5625 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5626 if (has_reduced_clock)
5627 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5628 reduced_clock.m2;
5629
5630 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
5631
5632 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5633 drm_mode_debug_printmodeline(mode);
5634
5635 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5636 if (!is_cpu_edp) {
5637 struct intel_pch_pll *pll;
5638
5639 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5640 if (pll == NULL) {
5641 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5642 pipe);
5643 return -EINVAL;
5644 }
5645 } else
5646 intel_put_pch_pll(intel_crtc);
5647
5648 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 5490 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5649 * This is an exception to the general rule that mode_set doesn't turn 5491 * This is an exception to the general rule that mode_set doesn't turn
5650 * things on. 5492 * things on.
@@ -5652,16 +5494,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5652 if (is_lvds) { 5494 if (is_lvds) {
5653 temp = I915_READ(PCH_LVDS); 5495 temp = I915_READ(PCH_LVDS);
5654 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5496 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5655 if (HAS_PCH_CPT(dev)) { 5497 if (pipe == 1) {
5656 temp &= ~PORT_TRANS_SEL_MASK; 5498 if (HAS_PCH_CPT(dev))
5657 temp |= PORT_TRANS_SEL_CPT(pipe); 5499 temp |= PORT_TRANS_B_SEL_CPT;
5658 } else { 5500 else
5659 if (pipe == 1)
5660 temp |= LVDS_PIPEB_SELECT; 5501 temp |= LVDS_PIPEB_SELECT;
5502 } else {
5503 if (HAS_PCH_CPT(dev))
5504 temp &= ~PORT_TRANS_SEL_MASK;
5661 else 5505 else
5662 temp &= ~LVDS_PIPEB_SELECT; 5506 temp &= ~LVDS_PIPEB_SELECT;
5663 } 5507 }
5664
5665 /* set the corresponsding LVDS_BORDER bit */ 5508 /* set the corresponsding LVDS_BORDER bit */
5666 temp |= dev_priv->lvds_border_bits; 5509 temp |= dev_priv->lvds_border_bits;
5667 /* Set the B0-B3 data pairs corresponding to whether we're going to 5510 /* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -5676,15 +5519,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5676 * appropriately here, but we need to look more thoroughly into how 5519 * appropriately here, but we need to look more thoroughly into how
5677 * panels behave in the two modes. 5520 * panels behave in the two modes.
5678 */ 5521 */
5679 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5680 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5522 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5681 temp |= LVDS_HSYNC_POLARITY; 5523 lvds_sync |= LVDS_HSYNC_POLARITY;
5682 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5524 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5683 temp |= LVDS_VSYNC_POLARITY; 5525 lvds_sync |= LVDS_VSYNC_POLARITY;
5526 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5527 != lvds_sync) {
5528 char flags[2] = "-+";
5529 DRM_INFO("Changing LVDS panel from "
5530 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5531 flags[!(temp & LVDS_HSYNC_POLARITY)],
5532 flags[!(temp & LVDS_VSYNC_POLARITY)],
5533 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5534 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5535 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5536 temp |= lvds_sync;
5537 }
5684 I915_WRITE(PCH_LVDS, temp); 5538 I915_WRITE(PCH_LVDS, temp);
5685 } 5539 }
5686 5540
5687 if (is_dp && !is_cpu_edp) { 5541 pipeconf &= ~PIPECONF_DITHER_EN;
5542 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5543 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5544 pipeconf |= PIPECONF_DITHER_EN;
5545 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5546 }
5547 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5688 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5548 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5689 } else { 5549 } else {
5690 /* For non-DP output, clear any trans DP clock recovery setting.*/ 5550 /* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -5694,11 +5554,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5694 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5554 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5695 } 5555 }
5696 5556
5697 if (intel_crtc->pch_pll) { 5557 if (!has_edp_encoder ||
5698 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5558 intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5559 I915_WRITE(PCH_DPLL(pipe), dpll);
5699 5560
5700 /* Wait for the clocks to stabilize. */ 5561 /* Wait for the clocks to stabilize. */
5701 POSTING_READ(intel_crtc->pch_pll->pll_reg); 5562 POSTING_READ(PCH_DPLL(pipe));
5702 udelay(150); 5563 udelay(150);
5703 5564
5704 /* The pixel multiplier can only be updated once the 5565 /* The pixel multiplier can only be updated once the
@@ -5706,255 +5567,91 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5706 * 5567 *
5707 * So write it again. 5568 * So write it again.
5708 */ 5569 */
5709 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5570 I915_WRITE(PCH_DPLL(pipe), dpll);
5710 } 5571 }
5711 5572
5712 intel_crtc->lowfreq_avail = false; 5573 intel_crtc->lowfreq_avail = false;
5713 if (intel_crtc->pch_pll) { 5574 if (is_lvds && has_reduced_clock && i915_powersave) {
5714 if (is_lvds && has_reduced_clock && i915_powersave) { 5575 I915_WRITE(PCH_FP1(pipe), fp2);
5715 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); 5576 intel_crtc->lowfreq_avail = true;
5716 intel_crtc->lowfreq_avail = true; 5577 if (HAS_PIPE_CXSR(dev)) {
5717 } else { 5578 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5718 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); 5579 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5719 }
5720 }
5721
5722 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5723
5724 /* Note, this also computes intel_crtc->fdi_lanes which is used below in
5725 * ironlake_check_fdi_lanes. */
5726 ironlake_set_m_n(crtc, mode, adjusted_mode);
5727
5728 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5729
5730 if (is_cpu_edp)
5731 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5732
5733 ironlake_set_pipeconf(crtc, adjusted_mode, dither);
5734
5735 intel_wait_for_vblank(dev, pipe);
5736
5737 /* Set up the display plane register */
5738 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5739 POSTING_READ(DSPCNTR(plane));
5740
5741 ret = intel_pipe_set_base(crtc, x, y, fb);
5742
5743 intel_update_watermarks(dev);
5744
5745 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5746
5747 return fdi_config_ok ? ret : -EINVAL;
5748}
5749
5750static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5751 struct drm_display_mode *mode,
5752 struct drm_display_mode *adjusted_mode,
5753 int x, int y,
5754 struct drm_framebuffer *fb)
5755{
5756 struct drm_device *dev = crtc->dev;
5757 struct drm_i915_private *dev_priv = dev->dev_private;
5758 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5759 int pipe = intel_crtc->pipe;
5760 int plane = intel_crtc->plane;
5761 int num_connectors = 0;
5762 intel_clock_t clock, reduced_clock;
5763 u32 dpll = 0, fp = 0, fp2 = 0;
5764 bool ok, has_reduced_clock = false;
5765 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5766 struct intel_encoder *encoder;
5767 u32 temp;
5768 int ret;
5769 bool dither;
5770
5771 for_each_encoder_on_crtc(dev, crtc, encoder) {
5772 switch (encoder->type) {
5773 case INTEL_OUTPUT_LVDS:
5774 is_lvds = true;
5775 break;
5776 case INTEL_OUTPUT_DISPLAYPORT:
5777 is_dp = true;
5778 break;
5779 case INTEL_OUTPUT_EDP:
5780 is_dp = true;
5781 if (!intel_encoder_is_pch_edp(&encoder->base))
5782 is_cpu_edp = true;
5783 break;
5784 } 5580 }
5785 5581 } else {
5786 num_connectors++; 5582 I915_WRITE(PCH_FP1(pipe), fp);
5787 } 5583 if (HAS_PIPE_CXSR(dev)) {
5788 5584 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5789 if (is_cpu_edp) 5585 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5790 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5791 else
5792 intel_crtc->cpu_transcoder = pipe;
5793
5794 /* We are not sure yet this won't happen. */
5795 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5796 INTEL_PCH_TYPE(dev));
5797
5798 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5799 num_connectors, pipe_name(pipe));
5800
5801 WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
5802 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5803
5804 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5805
5806 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5807 return -EINVAL;
5808
5809 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5810 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5811 &has_reduced_clock,
5812 &reduced_clock);
5813 if (!ok) {
5814 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5815 return -EINVAL;
5816 } 5586 }
5817 } 5587 }
5818 5588
5819 /* Ensure that the cursor is valid for the new mode before changing... */ 5589 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5820 intel_crtc_update_cursor(crtc, true); 5590 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5821 5591 /* the chip adds 2 halflines automatically */
5822 /* determine panel color depth */ 5592 adjusted_mode->crtc_vdisplay -= 1;
5823 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5593 adjusted_mode->crtc_vtotal -= 1;
5824 adjusted_mode); 5594 adjusted_mode->crtc_vblank_start -= 1;
5825 if (is_lvds && dev_priv->lvds_dither) 5595 adjusted_mode->crtc_vblank_end -= 1;
5826 dither = true; 5596 adjusted_mode->crtc_vsync_end -= 1;
5827 5597 adjusted_mode->crtc_vsync_start -= 1;
5828 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5598 } else
5829 drm_mode_debug_printmodeline(mode); 5599 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5830
5831 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5832 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5833 if (has_reduced_clock)
5834 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5835 reduced_clock.m2;
5836
5837 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
5838 fp);
5839 5600
5840 /* CPU eDP is the only output that doesn't need a PCH PLL of its 5601 I915_WRITE(HTOTAL(pipe),
5841 * own on pre-Haswell/LPT generation */ 5602 (adjusted_mode->crtc_hdisplay - 1) |
5842 if (!is_cpu_edp) { 5603 ((adjusted_mode->crtc_htotal - 1) << 16));
5843 struct intel_pch_pll *pll; 5604 I915_WRITE(HBLANK(pipe),
5605 (adjusted_mode->crtc_hblank_start - 1) |
5606 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5607 I915_WRITE(HSYNC(pipe),
5608 (adjusted_mode->crtc_hsync_start - 1) |
5609 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5844 5610
5845 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5611 I915_WRITE(VTOTAL(pipe),
5846 if (pll == NULL) { 5612 (adjusted_mode->crtc_vdisplay - 1) |
5847 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", 5613 ((adjusted_mode->crtc_vtotal - 1) << 16));
5848 pipe); 5614 I915_WRITE(VBLANK(pipe),
5849 return -EINVAL; 5615 (adjusted_mode->crtc_vblank_start - 1) |
5850 } 5616 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5851 } else 5617 I915_WRITE(VSYNC(pipe),
5852 intel_put_pch_pll(intel_crtc); 5618 (adjusted_mode->crtc_vsync_start - 1) |
5619 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5853 5620
5854 /* The LVDS pin pair needs to be on before the DPLLs are 5621 /* pipesrc controls the size that is scaled from, which should
5855 * enabled. This is an exception to the general rule that 5622 * always be the user's requested size.
5856 * mode_set doesn't turn things on. 5623 */
5857 */ 5624 I915_WRITE(PIPESRC(pipe),
5858 if (is_lvds) { 5625 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5859 temp = I915_READ(PCH_LVDS);
5860 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5861 if (HAS_PCH_CPT(dev)) {
5862 temp &= ~PORT_TRANS_SEL_MASK;
5863 temp |= PORT_TRANS_SEL_CPT(pipe);
5864 } else {
5865 if (pipe == 1)
5866 temp |= LVDS_PIPEB_SELECT;
5867 else
5868 temp &= ~LVDS_PIPEB_SELECT;
5869 }
5870 5626
5871 /* set the corresponsding LVDS_BORDER bit */ 5627 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5872 temp |= dev_priv->lvds_border_bits; 5628 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5873 /* Set the B0-B3 data pairs corresponding to whether 5629 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5874 * we're going to set the DPLLs for dual-channel mode or 5630 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5875 * not.
5876 */
5877 if (clock.p2 == 7)
5878 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5879 else
5880 temp &= ~(LVDS_B0B3_POWER_UP |
5881 LVDS_CLKB_POWER_UP);
5882 5631
5883 /* It would be nice to set 24 vs 18-bit mode 5632 if (has_edp_encoder &&
5884 * (LVDS_A3_POWER_UP) appropriately here, but we need to 5633 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5885 * look more thoroughly into how panels behave in the 5634 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5886 * two modes.
5887 */
5888 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5889 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5890 temp |= LVDS_HSYNC_POLARITY;
5891 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5892 temp |= LVDS_VSYNC_POLARITY;
5893 I915_WRITE(PCH_LVDS, temp);
5894 }
5895 } 5635 }
5896 5636
5897 if (is_dp && !is_cpu_edp) { 5637 I915_WRITE(PIPECONF(pipe), pipeconf);
5898 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5638 POSTING_READ(PIPECONF(pipe));
5899 } else {
5900 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5901 /* For non-DP output, clear any trans DP clock recovery
5902 * setting.*/
5903 I915_WRITE(TRANSDATA_M1(pipe), 0);
5904 I915_WRITE(TRANSDATA_N1(pipe), 0);
5905 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5906 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5907 }
5908 }
5909 5639
5910 intel_crtc->lowfreq_avail = false; 5640 intel_wait_for_vblank(dev, pipe);
5911 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5912 if (intel_crtc->pch_pll) {
5913 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5914
5915 /* Wait for the clocks to stabilize. */
5916 POSTING_READ(intel_crtc->pch_pll->pll_reg);
5917 udelay(150);
5918
5919 /* The pixel multiplier can only be updated once the
5920 * DPLL is enabled and the clocks are stable.
5921 *
5922 * So write it again.
5923 */
5924 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5925 }
5926 5641
5927 if (intel_crtc->pch_pll) { 5642 if (IS_GEN5(dev)) {
5928 if (is_lvds && has_reduced_clock && i915_powersave) { 5643 /* enable address swizzle for tiling buffer */
5929 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); 5644 temp = I915_READ(DISP_ARB_CTL);
5930 intel_crtc->lowfreq_avail = true; 5645 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5931 } else {
5932 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5933 }
5934 }
5935 } 5646 }
5936 5647
5937 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5648 I915_WRITE(DSPCNTR(plane), dspcntr);
5938
5939 if (!is_dp || is_cpu_edp)
5940 ironlake_set_m_n(crtc, mode, adjusted_mode);
5941
5942 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5943 if (is_cpu_edp)
5944 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5945
5946 haswell_set_pipeconf(crtc, adjusted_mode, dither);
5947
5948 /* Set up the display plane register */
5949 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5950 POSTING_READ(DSPCNTR(plane)); 5649 POSTING_READ(DSPCNTR(plane));
5951 5650
5952 ret = intel_pipe_set_base(crtc, x, y, fb); 5651 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5953 5652
5954 intel_update_watermarks(dev); 5653 intel_update_watermarks(dev);
5955 5654
5956 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5957
5958 return ret; 5655 return ret;
5959} 5656}
5960 5657
@@ -5962,12 +5659,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5962 struct drm_display_mode *mode, 5659 struct drm_display_mode *mode,
5963 struct drm_display_mode *adjusted_mode, 5660 struct drm_display_mode *adjusted_mode,
5964 int x, int y, 5661 int x, int y,
5965 struct drm_framebuffer *fb) 5662 struct drm_framebuffer *old_fb)
5966{ 5663{
5967 struct drm_device *dev = crtc->dev; 5664 struct drm_device *dev = crtc->dev;
5968 struct drm_i915_private *dev_priv = dev->dev_private; 5665 struct drm_i915_private *dev_priv = dev->dev_private;
5969 struct drm_encoder_helper_funcs *encoder_funcs;
5970 struct intel_encoder *encoder;
5971 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5666 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5972 int pipe = intel_crtc->pipe; 5667 int pipe = intel_crtc->pipe;
5973 int ret; 5668 int ret;
@@ -5975,275 +5670,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5975 drm_vblank_pre_modeset(dev, pipe); 5670 drm_vblank_pre_modeset(dev, pipe);
5976 5671
5977 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5672 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5978 x, y, fb); 5673 x, y, old_fb);
5979 drm_vblank_post_modeset(dev, pipe);
5980
5981 if (ret != 0)
5982 return ret;
5983
5984 for_each_encoder_on_crtc(dev, crtc, encoder) {
5985 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
5986 encoder->base.base.id,
5987 drm_get_encoder_name(&encoder->base),
5988 mode->base.id, mode->name);
5989 encoder_funcs = encoder->base.helper_private;
5990 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5991 }
5992
5993 return 0;
5994}
5995
5996static bool intel_eld_uptodate(struct drm_connector *connector,
5997 int reg_eldv, uint32_t bits_eldv,
5998 int reg_elda, uint32_t bits_elda,
5999 int reg_edid)
6000{
6001 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6002 uint8_t *eld = connector->eld;
6003 uint32_t i;
6004
6005 i = I915_READ(reg_eldv);
6006 i &= bits_eldv;
6007
6008 if (!eld[0])
6009 return !i;
6010
6011 if (!i)
6012 return false;
6013
6014 i = I915_READ(reg_elda);
6015 i &= ~bits_elda;
6016 I915_WRITE(reg_elda, i);
6017
6018 for (i = 0; i < eld[2]; i++)
6019 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6020 return false;
6021
6022 return true;
6023}
6024
6025static void g4x_write_eld(struct drm_connector *connector,
6026 struct drm_crtc *crtc)
6027{
6028 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6029 uint8_t *eld = connector->eld;
6030 uint32_t eldv;
6031 uint32_t len;
6032 uint32_t i;
6033
6034 i = I915_READ(G4X_AUD_VID_DID);
6035
6036 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6037 eldv = G4X_ELDV_DEVCL_DEVBLC;
6038 else
6039 eldv = G4X_ELDV_DEVCTG;
6040
6041 if (intel_eld_uptodate(connector,
6042 G4X_AUD_CNTL_ST, eldv,
6043 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6044 G4X_HDMIW_HDMIEDID))
6045 return;
6046
6047 i = I915_READ(G4X_AUD_CNTL_ST);
6048 i &= ~(eldv | G4X_ELD_ADDR);
6049 len = (i >> 9) & 0x1f; /* ELD buffer size */
6050 I915_WRITE(G4X_AUD_CNTL_ST, i);
6051
6052 if (!eld[0])
6053 return;
6054
6055 len = min_t(uint8_t, eld[2], len);
6056 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6057 for (i = 0; i < len; i++)
6058 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6059
6060 i = I915_READ(G4X_AUD_CNTL_ST);
6061 i |= eldv;
6062 I915_WRITE(G4X_AUD_CNTL_ST, i);
6063}
6064
6065static void haswell_write_eld(struct drm_connector *connector,
6066 struct drm_crtc *crtc)
6067{
6068 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6069 uint8_t *eld = connector->eld;
6070 struct drm_device *dev = crtc->dev;
6071 uint32_t eldv;
6072 uint32_t i;
6073 int len;
6074 int pipe = to_intel_crtc(crtc)->pipe;
6075 int tmp;
6076
6077 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
6078 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
6079 int aud_config = HSW_AUD_CFG(pipe);
6080 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
6081
6082
6083 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
6084
6085 /* Audio output enable */
6086 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
6087 tmp = I915_READ(aud_cntrl_st2);
6088 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
6089 I915_WRITE(aud_cntrl_st2, tmp);
6090
6091 /* Wait for 1 vertical blank */
6092 intel_wait_for_vblank(dev, pipe);
6093
6094 /* Set ELD valid state */
6095 tmp = I915_READ(aud_cntrl_st2);
6096 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
6097 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6098 I915_WRITE(aud_cntrl_st2, tmp);
6099 tmp = I915_READ(aud_cntrl_st2);
6100 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
6101
6102 /* Enable HDMI mode */
6103 tmp = I915_READ(aud_config);
6104 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
6105 /* clear N_programing_enable and N_value_index */
6106 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6107 I915_WRITE(aud_config, tmp);
6108
6109 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6110
6111 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
6112
6113 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6114 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6115 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6116 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6117 } else
6118 I915_WRITE(aud_config, 0);
6119
6120 if (intel_eld_uptodate(connector,
6121 aud_cntrl_st2, eldv,
6122 aud_cntl_st, IBX_ELD_ADDRESS,
6123 hdmiw_hdmiedid))
6124 return;
6125
6126 i = I915_READ(aud_cntrl_st2);
6127 i &= ~eldv;
6128 I915_WRITE(aud_cntrl_st2, i);
6129
6130 if (!eld[0])
6131 return;
6132
6133 i = I915_READ(aud_cntl_st);
6134 i &= ~IBX_ELD_ADDRESS;
6135 I915_WRITE(aud_cntl_st, i);
6136 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
6137 DRM_DEBUG_DRIVER("port num:%d\n", i);
6138
6139 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
6140 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6141 for (i = 0; i < len; i++)
6142 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6143
6144 i = I915_READ(aud_cntrl_st2);
6145 i |= eldv;
6146 I915_WRITE(aud_cntrl_st2, i);
6147
6148}
6149
6150static void ironlake_write_eld(struct drm_connector *connector,
6151 struct drm_crtc *crtc)
6152{
6153 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6154 uint8_t *eld = connector->eld;
6155 uint32_t eldv;
6156 uint32_t i;
6157 int len;
6158 int hdmiw_hdmiedid;
6159 int aud_config;
6160 int aud_cntl_st;
6161 int aud_cntrl_st2;
6162 int pipe = to_intel_crtc(crtc)->pipe;
6163
6164 if (HAS_PCH_IBX(connector->dev)) {
6165 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
6166 aud_config = IBX_AUD_CFG(pipe);
6167 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
6168 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6169 } else {
6170 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6171 aud_config = CPT_AUD_CFG(pipe);
6172 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
6173 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6174 }
6175
6176 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6177
6178 i = I915_READ(aud_cntl_st);
6179 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
6180 if (!i) {
6181 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6182 /* operate blindly on all ports */
6183 eldv = IBX_ELD_VALIDB;
6184 eldv |= IBX_ELD_VALIDB << 4;
6185 eldv |= IBX_ELD_VALIDB << 8;
6186 } else {
6187 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
6188 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6189 }
6190
6191 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6192 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6193 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6194 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6195 } else
6196 I915_WRITE(aud_config, 0);
6197 5674
6198 if (intel_eld_uptodate(connector, 5675 drm_vblank_post_modeset(dev, pipe);
6199 aud_cntrl_st2, eldv,
6200 aud_cntl_st, IBX_ELD_ADDRESS,
6201 hdmiw_hdmiedid))
6202 return;
6203
6204 i = I915_READ(aud_cntrl_st2);
6205 i &= ~eldv;
6206 I915_WRITE(aud_cntrl_st2, i);
6207
6208 if (!eld[0])
6209 return;
6210
6211 i = I915_READ(aud_cntl_st);
6212 i &= ~IBX_ELD_ADDRESS;
6213 I915_WRITE(aud_cntl_st, i);
6214
6215 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
6216 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6217 for (i = 0; i < len; i++)
6218 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6219
6220 i = I915_READ(aud_cntrl_st2);
6221 i |= eldv;
6222 I915_WRITE(aud_cntrl_st2, i);
6223}
6224
6225void intel_write_eld(struct drm_encoder *encoder,
6226 struct drm_display_mode *mode)
6227{
6228 struct drm_crtc *crtc = encoder->crtc;
6229 struct drm_connector *connector;
6230 struct drm_device *dev = encoder->dev;
6231 struct drm_i915_private *dev_priv = dev->dev_private;
6232
6233 connector = drm_select_eld(encoder, mode);
6234 if (!connector)
6235 return;
6236
6237 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6238 connector->base.id,
6239 drm_get_connector_name(connector),
6240 connector->encoder->base.id,
6241 drm_get_encoder_name(connector->encoder));
6242 5676
6243 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 5677 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6244 5678
6245 if (dev_priv->display.write_eld) 5679 return ret;
6246 dev_priv->display.write_eld(connector, crtc);
6247} 5680}
6248 5681
6249/** Loads the palette/gamma unit for the CRTC with the prepared values */ 5682/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -6256,7 +5689,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
6256 int i; 5689 int i;
6257 5690
6258 /* The clocks have to be on to load the palette. */ 5691 /* The clocks have to be on to load the palette. */
6259 if (!crtc->enabled || !intel_crtc->active) 5692 if (!crtc->enabled)
6260 return; 5693 return;
6261 5694
6262 /* use legacy palette for Ironlake */ 5695 /* use legacy palette for Ironlake */
@@ -6399,7 +5832,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6399 if (!visible && !intel_crtc->cursor_visible) 5832 if (!visible && !intel_crtc->cursor_visible)
6400 return; 5833 return;
6401 5834
6402 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 5835 if (IS_IVYBRIDGE(dev)) {
6403 I915_WRITE(CURPOS_IVB(pipe), pos); 5836 I915_WRITE(CURPOS_IVB(pipe), pos);
6404 ivb_update_cursor(crtc, base); 5837 ivb_update_cursor(crtc, base);
6405 } else { 5838 } else {
@@ -6409,6 +5842,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6409 else 5842 else
6410 i9xx_update_cursor(crtc, base); 5843 i9xx_update_cursor(crtc, base);
6411 } 5844 }
5845
5846 if (visible)
5847 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6412} 5848}
6413 5849
6414static int intel_crtc_cursor_set(struct drm_crtc *crtc, 5850static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6423,6 +5859,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6423 uint32_t addr; 5859 uint32_t addr;
6424 int ret; 5860 int ret;
6425 5861
5862 DRM_DEBUG_KMS("\n");
5863
6426 /* if we want to turn off the cursor ignore width and height */ 5864 /* if we want to turn off the cursor ignore width and height */
6427 if (!handle) { 5865 if (!handle) {
6428 DRM_DEBUG_KMS("cursor off\n"); 5866 DRM_DEBUG_KMS("cursor off\n");
@@ -6584,7 +6022,7 @@ static struct drm_display_mode load_detect_mode = {
6584 6022
6585static struct drm_framebuffer * 6023static struct drm_framebuffer *
6586intel_framebuffer_create(struct drm_device *dev, 6024intel_framebuffer_create(struct drm_device *dev,
6587 struct drm_mode_fb_cmd2 *mode_cmd, 6025 struct drm_mode_fb_cmd *mode_cmd,
6588 struct drm_i915_gem_object *obj) 6026 struct drm_i915_gem_object *obj)
6589{ 6027{
6590 struct intel_framebuffer *intel_fb; 6028 struct intel_framebuffer *intel_fb;
@@ -6626,7 +6064,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
6626 int depth, int bpp) 6064 int depth, int bpp)
6627{ 6065{
6628 struct drm_i915_gem_object *obj; 6066 struct drm_i915_gem_object *obj;
6629 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 6067 struct drm_mode_fb_cmd mode_cmd;
6630 6068
6631 obj = i915_gem_alloc_object(dev, 6069 obj = i915_gem_alloc_object(dev,
6632 intel_framebuffer_size_for_mode(mode, bpp)); 6070 intel_framebuffer_size_for_mode(mode, bpp));
@@ -6635,9 +6073,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
6635 6073
6636 mode_cmd.width = mode->hdisplay; 6074 mode_cmd.width = mode->hdisplay;
6637 mode_cmd.height = mode->vdisplay; 6075 mode_cmd.height = mode->vdisplay;
6638 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 6076 mode_cmd.depth = depth;
6639 bpp); 6077 mode_cmd.bpp = bpp;
6640 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 6078 mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
6641 6079
6642 return intel_framebuffer_create(dev, &mode_cmd, obj); 6080 return intel_framebuffer_create(dev, &mode_cmd, obj);
6643} 6081}
@@ -6658,28 +6096,27 @@ mode_fits_in_fbdev(struct drm_device *dev,
6658 return NULL; 6096 return NULL;
6659 6097
6660 fb = &dev_priv->fbdev->ifb.base; 6098 fb = &dev_priv->fbdev->ifb.base;
6661 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 6099 if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
6662 fb->bits_per_pixel)) 6100 fb->bits_per_pixel))
6663 return NULL; 6101 return NULL;
6664 6102
6665 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 6103 if (obj->base.size < mode->vdisplay * fb->pitch)
6666 return NULL; 6104 return NULL;
6667 6105
6668 return fb; 6106 return fb;
6669} 6107}
6670 6108
6671bool intel_get_load_detect_pipe(struct drm_connector *connector, 6109bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
6110 struct drm_connector *connector,
6672 struct drm_display_mode *mode, 6111 struct drm_display_mode *mode,
6673 struct intel_load_detect_pipe *old) 6112 struct intel_load_detect_pipe *old)
6674{ 6113{
6675 struct intel_crtc *intel_crtc; 6114 struct intel_crtc *intel_crtc;
6676 struct intel_encoder *intel_encoder =
6677 intel_attached_encoder(connector);
6678 struct drm_crtc *possible_crtc; 6115 struct drm_crtc *possible_crtc;
6679 struct drm_encoder *encoder = &intel_encoder->base; 6116 struct drm_encoder *encoder = &intel_encoder->base;
6680 struct drm_crtc *crtc = NULL; 6117 struct drm_crtc *crtc = NULL;
6681 struct drm_device *dev = encoder->dev; 6118 struct drm_device *dev = encoder->dev;
6682 struct drm_framebuffer *fb; 6119 struct drm_framebuffer *old_fb;
6683 int i = -1; 6120 int i = -1;
6684 6121
6685 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6122 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -6700,12 +6137,21 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6700 if (encoder->crtc) { 6137 if (encoder->crtc) {
6701 crtc = encoder->crtc; 6138 crtc = encoder->crtc;
6702 6139
6703 old->dpms_mode = connector->dpms; 6140 intel_crtc = to_intel_crtc(crtc);
6141 old->dpms_mode = intel_crtc->dpms_mode;
6704 old->load_detect_temp = false; 6142 old->load_detect_temp = false;
6705 6143
6706 /* Make sure the crtc and connector are running */ 6144 /* Make sure the crtc and connector are running */
6707 if (connector->dpms != DRM_MODE_DPMS_ON) 6145 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6708 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); 6146 struct drm_encoder_helper_funcs *encoder_funcs;
6147 struct drm_crtc_helper_funcs *crtc_funcs;
6148
6149 crtc_funcs = crtc->helper_private;
6150 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6151
6152 encoder_funcs = encoder->helper_private;
6153 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
6154 }
6709 6155
6710 return true; 6156 return true;
6711 } 6157 }
@@ -6729,17 +6175,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6729 return false; 6175 return false;
6730 } 6176 }
6731 6177
6732 intel_encoder->new_crtc = to_intel_crtc(crtc); 6178 encoder->crtc = crtc;
6733 to_intel_connector(connector)->new_encoder = intel_encoder; 6179 connector->encoder = encoder;
6734 6180
6735 intel_crtc = to_intel_crtc(crtc); 6181 intel_crtc = to_intel_crtc(crtc);
6736 old->dpms_mode = connector->dpms; 6182 old->dpms_mode = intel_crtc->dpms_mode;
6737 old->load_detect_temp = true; 6183 old->load_detect_temp = true;
6738 old->release_fb = NULL; 6184 old->release_fb = NULL;
6739 6185
6740 if (!mode) 6186 if (!mode)
6741 mode = &load_detect_mode; 6187 mode = &load_detect_mode;
6742 6188
6189 old_fb = crtc->fb;
6190
6743 /* We need a framebuffer large enough to accommodate all accesses 6191 /* We need a framebuffer large enough to accommodate all accesses
6744 * that the plane may generate whilst we perform load detection. 6192 * that the plane may generate whilst we perform load detection.
6745 * We can not rely on the fbcon either being present (we get called 6193 * We can not rely on the fbcon either being present (we get called
@@ -6747,47 +6195,50 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6747 * not even exist) or that it is large enough to satisfy the 6195 * not even exist) or that it is large enough to satisfy the
6748 * requested mode. 6196 * requested mode.
6749 */ 6197 */
6750 fb = mode_fits_in_fbdev(dev, mode); 6198 crtc->fb = mode_fits_in_fbdev(dev, mode);
6751 if (fb == NULL) { 6199 if (crtc->fb == NULL) {
6752 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 6200 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6753 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 6201 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6754 old->release_fb = fb; 6202 old->release_fb = crtc->fb;
6755 } else 6203 } else
6756 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 6204 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6757 if (IS_ERR(fb)) { 6205 if (IS_ERR(crtc->fb)) {
6758 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 6206 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6207 crtc->fb = old_fb;
6759 return false; 6208 return false;
6760 } 6209 }
6761 6210
6762 if (!intel_set_mode(crtc, mode, 0, 0, fb)) { 6211 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6763 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 6212 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6764 if (old->release_fb) 6213 if (old->release_fb)
6765 old->release_fb->funcs->destroy(old->release_fb); 6214 old->release_fb->funcs->destroy(old->release_fb);
6215 crtc->fb = old_fb;
6766 return false; 6216 return false;
6767 } 6217 }
6768 6218
6769 /* let the connector get through one full cycle before testing */ 6219 /* let the connector get through one full cycle before testing */
6770 intel_wait_for_vblank(dev, intel_crtc->pipe); 6220 intel_wait_for_vblank(dev, intel_crtc->pipe);
6221
6771 return true; 6222 return true;
6772} 6223}
6773 6224
6774void intel_release_load_detect_pipe(struct drm_connector *connector, 6225void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
6226 struct drm_connector *connector,
6775 struct intel_load_detect_pipe *old) 6227 struct intel_load_detect_pipe *old)
6776{ 6228{
6777 struct intel_encoder *intel_encoder =
6778 intel_attached_encoder(connector);
6779 struct drm_encoder *encoder = &intel_encoder->base; 6229 struct drm_encoder *encoder = &intel_encoder->base;
6230 struct drm_device *dev = encoder->dev;
6231 struct drm_crtc *crtc = encoder->crtc;
6232 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
6233 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
6780 6234
6781 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6235 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6782 connector->base.id, drm_get_connector_name(connector), 6236 connector->base.id, drm_get_connector_name(connector),
6783 encoder->base.id, drm_get_encoder_name(encoder)); 6237 encoder->base.id, drm_get_encoder_name(encoder));
6784 6238
6785 if (old->load_detect_temp) { 6239 if (old->load_detect_temp) {
6786 struct drm_crtc *crtc = encoder->crtc; 6240 connector->encoder = NULL;
6787 6241 drm_helper_disable_unused_functions(dev);
6788 to_intel_connector(connector)->new_encoder = NULL;
6789 intel_encoder->new_crtc = NULL;
6790 intel_set_mode(crtc, NULL, 0, 0, NULL);
6791 6242
6792 if (old->release_fb) 6243 if (old->release_fb)
6793 old->release_fb->funcs->destroy(old->release_fb); 6244 old->release_fb->funcs->destroy(old->release_fb);
@@ -6796,8 +6247,10 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
6796 } 6247 }
6797 6248
6798 /* Switch crtc and encoder back off if necessary */ 6249 /* Switch crtc and encoder back off if necessary */
6799 if (old->dpms_mode != DRM_MODE_DPMS_ON) 6250 if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6800 connector->funcs->dpms(connector, old->dpms_mode); 6251 encoder_funcs->dpms(encoder, old->dpms_mode);
6252 crtc_funcs->dpms(crtc, old->dpms_mode);
6253 }
6801} 6254}
6802 6255
6803/* Returns the clock of the currently programmed mode of the given pipe. */ 6256/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -6893,12 +6346,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6893{ 6346{
6894 struct drm_i915_private *dev_priv = dev->dev_private; 6347 struct drm_i915_private *dev_priv = dev->dev_private;
6895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6348 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6896 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 6349 int pipe = intel_crtc->pipe;
6897 struct drm_display_mode *mode; 6350 struct drm_display_mode *mode;
6898 int htot = I915_READ(HTOTAL(cpu_transcoder)); 6351 int htot = I915_READ(HTOTAL(pipe));
6899 int hsync = I915_READ(HSYNC(cpu_transcoder)); 6352 int hsync = I915_READ(HSYNC(pipe));
6900 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 6353 int vtot = I915_READ(VTOTAL(pipe));
6901 int vsync = I915_READ(VSYNC(cpu_transcoder)); 6354 int vsync = I915_READ(VSYNC(pipe));
6902 6355
6903 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 6356 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6904 if (!mode) 6357 if (!mode)
@@ -6915,10 +6368,51 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6915 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 6368 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6916 6369
6917 drm_mode_set_name(mode); 6370 drm_mode_set_name(mode);
6371 drm_mode_set_crtcinfo(mode, 0);
6918 6372
6919 return mode; 6373 return mode;
6920} 6374}
6921 6375
6376#define GPU_IDLE_TIMEOUT 500 /* ms */
6377
6378/* When this timer fires, we've been idle for awhile */
6379static void intel_gpu_idle_timer(unsigned long arg)
6380{
6381 struct drm_device *dev = (struct drm_device *)arg;
6382 drm_i915_private_t *dev_priv = dev->dev_private;
6383
6384 if (!list_empty(&dev_priv->mm.active_list)) {
6385 /* Still processing requests, so just re-arm the timer. */
6386 mod_timer(&dev_priv->idle_timer, jiffies +
6387 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6388 return;
6389 }
6390
6391 dev_priv->busy = false;
6392 queue_work(dev_priv->wq, &dev_priv->idle_work);
6393}
6394
6395#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6396
6397static void intel_crtc_idle_timer(unsigned long arg)
6398{
6399 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
6400 struct drm_crtc *crtc = &intel_crtc->base;
6401 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
6402 struct intel_framebuffer *intel_fb;
6403
6404 intel_fb = to_intel_framebuffer(crtc->fb);
6405 if (intel_fb && intel_fb->obj->active) {
6406 /* The framebuffer is still being accessed by the GPU. */
6407 mod_timer(&intel_crtc->idle_timer, jiffies +
6408 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6409 return;
6410 }
6411
6412 intel_crtc->busy = false;
6413 queue_work(dev_priv->wq, &dev_priv->idle_work);
6414}
6415
6922static void intel_increase_pllclock(struct drm_crtc *crtc) 6416static void intel_increase_pllclock(struct drm_crtc *crtc)
6923{ 6417{
6924 struct drm_device *dev = crtc->dev; 6418 struct drm_device *dev = crtc->dev;
@@ -6938,7 +6432,9 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
6938 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 6432 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6939 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 6433 DRM_DEBUG_DRIVER("upclocking LVDS\n");
6940 6434
6941 assert_panel_unlocked(dev_priv, pipe); 6435 /* Unlock panel regs */
6436 I915_WRITE(PP_CONTROL,
6437 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
6942 6438
6943 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 6439 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6944 I915_WRITE(dpll_reg, dpll); 6440 I915_WRITE(dpll_reg, dpll);
@@ -6947,7 +6443,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
6947 dpll = I915_READ(dpll_reg); 6443 dpll = I915_READ(dpll_reg);
6948 if (dpll & DISPLAY_RATE_SELECT_FPA1) 6444 if (dpll & DISPLAY_RATE_SELECT_FPA1)
6949 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 6445 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6446
6447 /* ...and lock them again */
6448 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6950 } 6449 }
6450
6451 /* Schedule downclock */
6452 mod_timer(&intel_crtc->idle_timer, jiffies +
6453 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6951} 6454}
6952 6455
6953static void intel_decrease_pllclock(struct drm_crtc *crtc) 6456static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -6955,6 +6458,9 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
6955 struct drm_device *dev = crtc->dev; 6458 struct drm_device *dev = crtc->dev;
6956 drm_i915_private_t *dev_priv = dev->dev_private; 6459 drm_i915_private_t *dev_priv = dev->dev_private;
6957 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6461 int pipe = intel_crtc->pipe;
6462 int dpll_reg = DPLL(pipe);
6463 int dpll = I915_READ(dpll_reg);
6958 6464
6959 if (HAS_PCH_SPLIT(dev)) 6465 if (HAS_PCH_SPLIT(dev))
6960 return; 6466 return;
@@ -6967,65 +6473,104 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
6967 * the manual case. 6473 * the manual case.
6968 */ 6474 */
6969 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 6475 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
6970 int pipe = intel_crtc->pipe;
6971 int dpll_reg = DPLL(pipe);
6972 int dpll;
6973
6974 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 6476 DRM_DEBUG_DRIVER("downclocking LVDS\n");
6975 6477
6976 assert_panel_unlocked(dev_priv, pipe); 6478 /* Unlock panel regs */
6479 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
6480 PANEL_UNLOCK_REGS);
6977 6481
6978 dpll = I915_READ(dpll_reg);
6979 dpll |= DISPLAY_RATE_SELECT_FPA1; 6482 dpll |= DISPLAY_RATE_SELECT_FPA1;
6980 I915_WRITE(dpll_reg, dpll); 6483 I915_WRITE(dpll_reg, dpll);
6981 intel_wait_for_vblank(dev, pipe); 6484 intel_wait_for_vblank(dev, pipe);
6982 dpll = I915_READ(dpll_reg); 6485 dpll = I915_READ(dpll_reg);
6983 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 6486 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
6984 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 6487 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
6985 }
6986
6987}
6988 6488
6989void intel_mark_busy(struct drm_device *dev) 6489 /* ...and lock them again */
6990{ 6490 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6991 i915_update_gfx_val(dev->dev_private); 6491 }
6992}
6993 6492
6994void intel_mark_idle(struct drm_device *dev)
6995{
6996} 6493}
6997 6494
6998void intel_mark_fb_busy(struct drm_i915_gem_object *obj) 6495/**
6496 * intel_idle_update - adjust clocks for idleness
6497 * @work: work struct
6498 *
6499 * Either the GPU or display (or both) went idle. Check the busy status
6500 * here and adjust the CRTC and GPU clocks as necessary.
6501 */
6502static void intel_idle_update(struct work_struct *work)
6999{ 6503{
7000 struct drm_device *dev = obj->base.dev; 6504 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
6505 idle_work);
6506 struct drm_device *dev = dev_priv->dev;
7001 struct drm_crtc *crtc; 6507 struct drm_crtc *crtc;
6508 struct intel_crtc *intel_crtc;
7002 6509
7003 if (!i915_powersave) 6510 if (!i915_powersave)
7004 return; 6511 return;
7005 6512
6513 mutex_lock(&dev->struct_mutex);
6514
6515 i915_update_gfx_val(dev_priv);
6516
7006 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6517 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6518 /* Skip inactive CRTCs */
7007 if (!crtc->fb) 6519 if (!crtc->fb)
7008 continue; 6520 continue;
7009 6521
7010 if (to_intel_framebuffer(crtc->fb)->obj == obj) 6522 intel_crtc = to_intel_crtc(crtc);
7011 intel_increase_pllclock(crtc); 6523 if (!intel_crtc->busy)
6524 intel_decrease_pllclock(crtc);
7012 } 6525 }
6526
6527
6528 mutex_unlock(&dev->struct_mutex);
7013} 6529}
7014 6530
7015void intel_mark_fb_idle(struct drm_i915_gem_object *obj) 6531/**
6532 * intel_mark_busy - mark the GPU and possibly the display busy
6533 * @dev: drm device
6534 * @obj: object we're operating on
6535 *
6536 * Callers can use this function to indicate that the GPU is busy processing
6537 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
6538 * buffer), we'll also mark the display as busy, so we know to increase its
6539 * clock frequency.
6540 */
6541void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7016{ 6542{
7017 struct drm_device *dev = obj->base.dev; 6543 drm_i915_private_t *dev_priv = dev->dev_private;
7018 struct drm_crtc *crtc; 6544 struct drm_crtc *crtc = NULL;
6545 struct intel_framebuffer *intel_fb;
6546 struct intel_crtc *intel_crtc;
7019 6547
7020 if (!i915_powersave) 6548 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7021 return; 6549 return;
7022 6550
6551 if (!dev_priv->busy)
6552 dev_priv->busy = true;
6553 else
6554 mod_timer(&dev_priv->idle_timer, jiffies +
6555 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6556
7023 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6557 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7024 if (!crtc->fb) 6558 if (!crtc->fb)
7025 continue; 6559 continue;
7026 6560
7027 if (to_intel_framebuffer(crtc->fb)->obj == obj) 6561 intel_crtc = to_intel_crtc(crtc);
7028 intel_decrease_pllclock(crtc); 6562 intel_fb = to_intel_framebuffer(crtc->fb);
6563 if (intel_fb->obj == obj) {
6564 if (!intel_crtc->busy) {
6565 /* Non-busy -> busy, upclock */
6566 intel_increase_pllclock(crtc);
6567 intel_crtc->busy = true;
6568 } else {
6569 /* Busy -> busy, put off timer */
6570 mod_timer(&intel_crtc->idle_timer, jiffies +
6571 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6572 }
6573 }
7029 } 6574 }
7030} 6575}
7031 6576
@@ -7055,19 +6600,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
7055{ 6600{
7056 struct intel_unpin_work *work = 6601 struct intel_unpin_work *work =
7057 container_of(__work, struct intel_unpin_work, work); 6602 container_of(__work, struct intel_unpin_work, work);
7058 struct drm_device *dev = work->crtc->dev;
7059 6603
7060 mutex_lock(&dev->struct_mutex); 6604 mutex_lock(&work->dev->struct_mutex);
7061 intel_unpin_fb_obj(work->old_fb_obj); 6605 i915_gem_object_unpin(work->old_fb_obj);
7062 drm_gem_object_unreference(&work->pending_flip_obj->base); 6606 drm_gem_object_unreference(&work->pending_flip_obj->base);
7063 drm_gem_object_unreference(&work->old_fb_obj->base); 6607 drm_gem_object_unreference(&work->old_fb_obj->base);
7064 6608
7065 intel_update_fbc(dev); 6609 intel_update_fbc(work->dev);
7066 mutex_unlock(&dev->struct_mutex); 6610 mutex_unlock(&work->dev->struct_mutex);
7067
7068 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
7069 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
7070
7071 kfree(work); 6611 kfree(work);
7072} 6612}
7073 6613
@@ -7078,30 +6618,55 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
7078 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6618 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7079 struct intel_unpin_work *work; 6619 struct intel_unpin_work *work;
7080 struct drm_i915_gem_object *obj; 6620 struct drm_i915_gem_object *obj;
6621 struct drm_pending_vblank_event *e;
6622 struct timeval tnow, tvbl;
7081 unsigned long flags; 6623 unsigned long flags;
7082 6624
7083 /* Ignore early vblank irqs */ 6625 /* Ignore early vblank irqs */
7084 if (intel_crtc == NULL) 6626 if (intel_crtc == NULL)
7085 return; 6627 return;
7086 6628
6629 do_gettimeofday(&tnow);
6630
7087 spin_lock_irqsave(&dev->event_lock, flags); 6631 spin_lock_irqsave(&dev->event_lock, flags);
7088 work = intel_crtc->unpin_work; 6632 work = intel_crtc->unpin_work;
7089 6633 if (work == NULL || !work->pending) {
7090 /* Ensure we don't miss a work->pending update ... */
7091 smp_rmb();
7092
7093 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
7094 spin_unlock_irqrestore(&dev->event_lock, flags); 6634 spin_unlock_irqrestore(&dev->event_lock, flags);
7095 return; 6635 return;
7096 } 6636 }
7097 6637
7098 /* and that the unpin work is consistent wrt ->pending. */
7099 smp_rmb();
7100
7101 intel_crtc->unpin_work = NULL; 6638 intel_crtc->unpin_work = NULL;
7102 6639
7103 if (work->event) 6640 if (work->event) {
7104 drm_send_vblank_event(dev, intel_crtc->pipe, work->event); 6641 e = work->event;
6642 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
6643
6644 /* Called before vblank count and timestamps have
6645 * been updated for the vblank interval of flip
6646 * completion? Need to increment vblank count and
6647 * add one videorefresh duration to returned timestamp
6648 * to account for this. We assume this happened if we
6649 * get called over 0.9 frame durations after the last
6650 * timestamped vblank.
6651 *
6652 * This calculation can not be used with vrefresh rates
6653 * below 5Hz (10Hz to be on the safe side) without
6654 * promoting to 64 integers.
6655 */
6656 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
6657 9 * crtc->framedur_ns) {
6658 e->event.sequence++;
6659 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
6660 crtc->framedur_ns);
6661 }
6662
6663 e->event.tv_sec = tvbl.tv_sec;
6664 e->event.tv_usec = tvbl.tv_usec;
6665
6666 list_add_tail(&e->base.link,
6667 &e->base.file_priv->event_list);
6668 wake_up_interruptible(&e->base.file_priv->event_wait);
6669 }
7105 6670
7106 drm_vblank_put(dev, intel_crtc->pipe); 6671 drm_vblank_put(dev, intel_crtc->pipe);
7107 6672
@@ -7111,9 +6676,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
7111 6676
7112 atomic_clear_mask(1 << intel_crtc->plane, 6677 atomic_clear_mask(1 << intel_crtc->plane,
7113 &obj->pending_flip.counter); 6678 &obj->pending_flip.counter);
7114 wake_up(&dev_priv->pending_flip_queue); 6679 if (atomic_read(&obj->pending_flip) == 0)
6680 wake_up(&dev_priv->pending_flip_queue);
7115 6681
7116 queue_work(dev_priv->wq, &work->work); 6682 schedule_work(&work->work);
7117 6683
7118 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); 6684 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
7119} 6685}
@@ -7141,25 +6707,16 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
7141 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 6707 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7142 unsigned long flags; 6708 unsigned long flags;
7143 6709
7144 /* NB: An MMIO update of the plane base pointer will also
7145 * generate a page-flip completion irq, i.e. every modeset
7146 * is also accompanied by a spurious intel_prepare_page_flip().
7147 */
7148 spin_lock_irqsave(&dev->event_lock, flags); 6710 spin_lock_irqsave(&dev->event_lock, flags);
7149 if (intel_crtc->unpin_work) 6711 if (intel_crtc->unpin_work) {
7150 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 6712 if ((++intel_crtc->unpin_work->pending) > 1)
6713 DRM_ERROR("Prepared flip multiple times\n");
6714 } else {
6715 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6716 }
7151 spin_unlock_irqrestore(&dev->event_lock, flags); 6717 spin_unlock_irqrestore(&dev->event_lock, flags);
7152} 6718}
7153 6719
7154inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7155{
7156 /* Ensure that the work item is consistent when activating it ... */
7157 smp_wmb();
7158 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7159 /* and that it is marked active as soon as the irq could fire. */
7160 smp_wmb();
7161}
7162
7163static int intel_gen2_queue_flip(struct drm_device *dev, 6720static int intel_gen2_queue_flip(struct drm_device *dev,
7164 struct drm_crtc *crtc, 6721 struct drm_crtc *crtc,
7165 struct drm_framebuffer *fb, 6722 struct drm_framebuffer *fb,
@@ -7167,17 +6724,20 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7167{ 6724{
7168 struct drm_i915_private *dev_priv = dev->dev_private; 6725 struct drm_i915_private *dev_priv = dev->dev_private;
7169 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6726 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6727 unsigned long offset;
7170 u32 flip_mask; 6728 u32 flip_mask;
7171 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7172 int ret; 6729 int ret;
7173 6730
7174 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6731 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7175 if (ret) 6732 if (ret)
7176 goto err; 6733 goto out;
7177 6734
7178 ret = intel_ring_begin(ring, 6); 6735 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6736 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6737
6738 ret = BEGIN_LP_RING(6);
7179 if (ret) 6739 if (ret)
7180 goto err_unpin; 6740 goto out;
7181 6741
7182 /* Can't queue multiple flips, so wait for the previous 6742 /* Can't queue multiple flips, so wait for the previous
7183 * one to finish before executing the next. 6743 * one to finish before executing the next.
@@ -7186,21 +6746,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7186 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 6746 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7187 else 6747 else
7188 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 6748 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7189 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 6749 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7190 intel_ring_emit(ring, MI_NOOP); 6750 OUT_RING(MI_NOOP);
7191 intel_ring_emit(ring, MI_DISPLAY_FLIP | 6751 OUT_RING(MI_DISPLAY_FLIP |
7192 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6752 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7193 intel_ring_emit(ring, fb->pitches[0]); 6753 OUT_RING(fb->pitch);
7194 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 6754 OUT_RING(obj->gtt_offset + offset);
7195 intel_ring_emit(ring, 0); /* aux display base address, unused */ 6755 OUT_RING(MI_NOOP);
7196 6756 ADVANCE_LP_RING();
7197 intel_mark_page_flip_active(intel_crtc); 6757out:
7198 intel_ring_advance(ring);
7199 return 0;
7200
7201err_unpin:
7202 intel_unpin_fb_obj(obj);
7203err:
7204 return ret; 6758 return ret;
7205} 6759}
7206 6760
@@ -7211,37 +6765,35 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7211{ 6765{
7212 struct drm_i915_private *dev_priv = dev->dev_private; 6766 struct drm_i915_private *dev_priv = dev->dev_private;
7213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6768 unsigned long offset;
7214 u32 flip_mask; 6769 u32 flip_mask;
7215 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7216 int ret; 6770 int ret;
7217 6771
7218 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6772 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7219 if (ret) 6773 if (ret)
7220 goto err; 6774 goto out;
7221 6775
7222 ret = intel_ring_begin(ring, 6); 6776 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6777 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6778
6779 ret = BEGIN_LP_RING(6);
7223 if (ret) 6780 if (ret)
7224 goto err_unpin; 6781 goto out;
7225 6782
7226 if (intel_crtc->plane) 6783 if (intel_crtc->plane)
7227 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 6784 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7228 else 6785 else
7229 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 6786 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7230 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 6787 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7231 intel_ring_emit(ring, MI_NOOP); 6788 OUT_RING(MI_NOOP);
7232 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 6789 OUT_RING(MI_DISPLAY_FLIP_I915 |
7233 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6790 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7234 intel_ring_emit(ring, fb->pitches[0]); 6791 OUT_RING(fb->pitch);
7235 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 6792 OUT_RING(obj->gtt_offset + offset);
7236 intel_ring_emit(ring, MI_NOOP); 6793 OUT_RING(MI_NOOP);
7237 6794
7238 intel_mark_page_flip_active(intel_crtc); 6795 ADVANCE_LP_RING();
7239 intel_ring_advance(ring); 6796out:
7240 return 0;
7241
7242err_unpin:
7243 intel_unpin_fb_obj(obj);
7244err:
7245 return ret; 6797 return ret;
7246} 6798}
7247 6799
@@ -7253,27 +6805,24 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7253 struct drm_i915_private *dev_priv = dev->dev_private; 6805 struct drm_i915_private *dev_priv = dev->dev_private;
7254 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6806 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7255 uint32_t pf, pipesrc; 6807 uint32_t pf, pipesrc;
7256 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7257 int ret; 6808 int ret;
7258 6809
7259 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6810 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7260 if (ret) 6811 if (ret)
7261 goto err; 6812 goto out;
7262 6813
7263 ret = intel_ring_begin(ring, 4); 6814 ret = BEGIN_LP_RING(4);
7264 if (ret) 6815 if (ret)
7265 goto err_unpin; 6816 goto out;
7266 6817
7267 /* i965+ uses the linear or tiled offsets from the 6818 /* i965+ uses the linear or tiled offsets from the
7268 * Display Registers (which do not change across a page-flip) 6819 * Display Registers (which do not change across a page-flip)
7269 * so we need only reprogram the base address. 6820 * so we need only reprogram the base address.
7270 */ 6821 */
7271 intel_ring_emit(ring, MI_DISPLAY_FLIP | 6822 OUT_RING(MI_DISPLAY_FLIP |
7272 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6823 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7273 intel_ring_emit(ring, fb->pitches[0]); 6824 OUT_RING(fb->pitch);
7274 intel_ring_emit(ring, 6825 OUT_RING(obj->gtt_offset | obj->tiling_mode);
7275 (obj->gtt_offset + intel_crtc->dspaddr_offset) |
7276 obj->tiling_mode);
7277 6826
7278 /* XXX Enabling the panel-fitter across page-flip is so far 6827 /* XXX Enabling the panel-fitter across page-flip is so far
7279 * untested on non-native modes, so ignore it for now. 6828 * untested on non-native modes, so ignore it for now.
@@ -7281,15 +6830,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7281 */ 6830 */
7282 pf = 0; 6831 pf = 0;
7283 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 6832 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7284 intel_ring_emit(ring, pf | pipesrc); 6833 OUT_RING(pf | pipesrc);
7285 6834 ADVANCE_LP_RING();
7286 intel_mark_page_flip_active(intel_crtc); 6835out:
7287 intel_ring_advance(ring);
7288 return 0;
7289
7290err_unpin:
7291 intel_unpin_fb_obj(obj);
7292err:
7293 return ret; 6836 return ret;
7294} 6837}
7295 6838
@@ -7300,40 +6843,27 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7300{ 6843{
7301 struct drm_i915_private *dev_priv = dev->dev_private; 6844 struct drm_i915_private *dev_priv = dev->dev_private;
7302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7303 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7304 uint32_t pf, pipesrc; 6846 uint32_t pf, pipesrc;
7305 int ret; 6847 int ret;
7306 6848
7307 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6849 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7308 if (ret) 6850 if (ret)
7309 goto err; 6851 goto out;
7310 6852
7311 ret = intel_ring_begin(ring, 4); 6853 ret = BEGIN_LP_RING(4);
7312 if (ret) 6854 if (ret)
7313 goto err_unpin; 6855 goto out;
7314 6856
7315 intel_ring_emit(ring, MI_DISPLAY_FLIP | 6857 OUT_RING(MI_DISPLAY_FLIP |
7316 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6858 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7317 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 6859 OUT_RING(fb->pitch | obj->tiling_mode);
7318 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 6860 OUT_RING(obj->gtt_offset);
7319 6861
7320 /* Contrary to the suggestions in the documentation, 6862 pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7321 * "Enable Panel Fitter" does not seem to be required when page
7322 * flipping with a non-native mode, and worse causes a normal
7323 * modeset to fail.
7324 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7325 */
7326 pf = 0;
7327 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 6863 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7328 intel_ring_emit(ring, pf | pipesrc); 6864 OUT_RING(pf | pipesrc);
7329 6865 ADVANCE_LP_RING();
7330 intel_mark_page_flip_active(intel_crtc); 6866out:
7331 intel_ring_advance(ring);
7332 return 0;
7333
7334err_unpin:
7335 intel_unpin_fb_obj(obj);
7336err:
7337 return ret; 6867 return ret;
7338} 6868}
7339 6869
@@ -7351,45 +6881,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7351 struct drm_i915_private *dev_priv = dev->dev_private; 6881 struct drm_i915_private *dev_priv = dev->dev_private;
7352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6882 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7353 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 6883 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7354 uint32_t plane_bit = 0;
7355 int ret; 6884 int ret;
7356 6885
7357 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6886 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7358 if (ret) 6887 if (ret)
7359 goto err; 6888 goto out;
7360
7361 switch(intel_crtc->plane) {
7362 case PLANE_A:
7363 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
7364 break;
7365 case PLANE_B:
7366 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
7367 break;
7368 case PLANE_C:
7369 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
7370 break;
7371 default:
7372 WARN_ONCE(1, "unknown plane in flip command\n");
7373 ret = -ENODEV;
7374 goto err_unpin;
7375 }
7376 6889
7377 ret = intel_ring_begin(ring, 4); 6890 ret = intel_ring_begin(ring, 4);
7378 if (ret) 6891 if (ret)
7379 goto err_unpin; 6892 goto out;
7380 6893
7381 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 6894 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7382 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 6895 intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
7383 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 6896 intel_ring_emit(ring, (obj->gtt_offset));
7384 intel_ring_emit(ring, (MI_NOOP)); 6897 intel_ring_emit(ring, (MI_NOOP));
7385
7386 intel_mark_page_flip_active(intel_crtc);
7387 intel_ring_advance(ring); 6898 intel_ring_advance(ring);
7388 return 0; 6899out:
7389
7390err_unpin:
7391 intel_unpin_fb_obj(obj);
7392err:
7393 return ret; 6900 return ret;
7394} 6901}
7395 6902
@@ -7414,39 +6921,21 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7414 unsigned long flags; 6921 unsigned long flags;
7415 int ret; 6922 int ret;
7416 6923
7417 /* Can't change pixel format via MI display flips. */
7418 if (fb->pixel_format != crtc->fb->pixel_format)
7419 return -EINVAL;
7420
7421 /*
7422 * TILEOFF/LINOFF registers can't be changed via MI display flips.
7423 * Note that pitch changes could also affect these register.
7424 */
7425 if (INTEL_INFO(dev)->gen > 3 &&
7426 (fb->offsets[0] != crtc->fb->offsets[0] ||
7427 fb->pitches[0] != crtc->fb->pitches[0]))
7428 return -EINVAL;
7429
7430 work = kzalloc(sizeof *work, GFP_KERNEL); 6924 work = kzalloc(sizeof *work, GFP_KERNEL);
7431 if (work == NULL) 6925 if (work == NULL)
7432 return -ENOMEM; 6926 return -ENOMEM;
7433 6927
7434 work->event = event; 6928 work->event = event;
7435 work->crtc = crtc; 6929 work->dev = crtc->dev;
7436 intel_fb = to_intel_framebuffer(crtc->fb); 6930 intel_fb = to_intel_framebuffer(crtc->fb);
7437 work->old_fb_obj = intel_fb->obj; 6931 work->old_fb_obj = intel_fb->obj;
7438 INIT_WORK(&work->work, intel_unpin_work_fn); 6932 INIT_WORK(&work->work, intel_unpin_work_fn);
7439 6933
7440 ret = drm_vblank_get(dev, intel_crtc->pipe);
7441 if (ret)
7442 goto free_work;
7443
7444 /* We borrow the event spin lock for protecting unpin_work */ 6934 /* We borrow the event spin lock for protecting unpin_work */
7445 spin_lock_irqsave(&dev->event_lock, flags); 6935 spin_lock_irqsave(&dev->event_lock, flags);
7446 if (intel_crtc->unpin_work) { 6936 if (intel_crtc->unpin_work) {
7447 spin_unlock_irqrestore(&dev->event_lock, flags); 6937 spin_unlock_irqrestore(&dev->event_lock, flags);
7448 kfree(work); 6938 kfree(work);
7449 drm_vblank_put(dev, intel_crtc->pipe);
7450 6939
7451 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 6940 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7452 return -EBUSY; 6941 return -EBUSY;
@@ -7457,12 +6946,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7457 intel_fb = to_intel_framebuffer(fb); 6946 intel_fb = to_intel_framebuffer(fb);
7458 obj = intel_fb->obj; 6947 obj = intel_fb->obj;
7459 6948
7460 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 6949 mutex_lock(&dev->struct_mutex);
7461 flush_workqueue(dev_priv->wq);
7462
7463 ret = i915_mutex_lock_interruptible(dev);
7464 if (ret)
7465 goto cleanup;
7466 6950
7467 /* Reference the objects for the scheduled work. */ 6951 /* Reference the objects for the scheduled work. */
7468 drm_gem_object_reference(&work->old_fb_obj->base); 6952 drm_gem_object_reference(&work->old_fb_obj->base);
@@ -7470,6 +6954,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7470 6954
7471 crtc->fb = fb; 6955 crtc->fb = fb;
7472 6956
6957 ret = drm_vblank_get(dev, intel_crtc->pipe);
6958 if (ret)
6959 goto cleanup_objs;
6960
7473 work->pending_flip_obj = obj; 6961 work->pending_flip_obj = obj;
7474 6962
7475 work->enable_stall_check = true; 6963 work->enable_stall_check = true;
@@ -7478,14 +6966,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7478 * the flip occurs and the object is no longer visible. 6966 * the flip occurs and the object is no longer visible.
7479 */ 6967 */
7480 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 6968 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7481 atomic_inc(&intel_crtc->unpin_work_count);
7482 6969
7483 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 6970 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7484 if (ret) 6971 if (ret)
7485 goto cleanup_pending; 6972 goto cleanup_pending;
7486 6973
7487 intel_disable_fbc(dev); 6974 intel_disable_fbc(dev);
7488 intel_mark_fb_busy(obj);
7489 mutex_unlock(&dev->struct_mutex); 6975 mutex_unlock(&dev->struct_mutex);
7490 6976
7491 trace_i915_flip_request(intel_crtc->plane, obj); 6977 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7493,837 +6979,93 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7493 return 0; 6979 return 0;
7494 6980
7495cleanup_pending: 6981cleanup_pending:
7496 atomic_dec(&intel_crtc->unpin_work_count);
7497 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 6982 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6983cleanup_objs:
7498 drm_gem_object_unreference(&work->old_fb_obj->base); 6984 drm_gem_object_unreference(&work->old_fb_obj->base);
7499 drm_gem_object_unreference(&obj->base); 6985 drm_gem_object_unreference(&obj->base);
7500 mutex_unlock(&dev->struct_mutex); 6986 mutex_unlock(&dev->struct_mutex);
7501 6987
7502cleanup:
7503 spin_lock_irqsave(&dev->event_lock, flags); 6988 spin_lock_irqsave(&dev->event_lock, flags);
7504 intel_crtc->unpin_work = NULL; 6989 intel_crtc->unpin_work = NULL;
7505 spin_unlock_irqrestore(&dev->event_lock, flags); 6990 spin_unlock_irqrestore(&dev->event_lock, flags);
7506 6991
7507 drm_vblank_put(dev, intel_crtc->pipe);
7508free_work:
7509 kfree(work); 6992 kfree(work);
7510 6993
7511 return ret; 6994 return ret;
7512} 6995}
7513 6996
7514static struct drm_crtc_helper_funcs intel_helper_funcs = { 6997static void intel_sanitize_modesetting(struct drm_device *dev,
7515 .mode_set_base_atomic = intel_pipe_set_base_atomic, 6998 int pipe, int plane)
7516 .load_lut = intel_crtc_load_lut,
7517 .disable = intel_crtc_noop,
7518};
7519
7520bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
7521{
7522 struct intel_encoder *other_encoder;
7523 struct drm_crtc *crtc = &encoder->new_crtc->base;
7524
7525 if (WARN_ON(!crtc))
7526 return false;
7527
7528 list_for_each_entry(other_encoder,
7529 &crtc->dev->mode_config.encoder_list,
7530 base.head) {
7531
7532 if (&other_encoder->new_crtc->base != crtc ||
7533 encoder == other_encoder)
7534 continue;
7535 else
7536 return true;
7537 }
7538
7539 return false;
7540}
7541
7542static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7543 struct drm_crtc *crtc)
7544{
7545 struct drm_device *dev;
7546 struct drm_crtc *tmp;
7547 int crtc_mask = 1;
7548
7549 WARN(!crtc, "checking null crtc?\n");
7550
7551 dev = crtc->dev;
7552
7553 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
7554 if (tmp == crtc)
7555 break;
7556 crtc_mask <<= 1;
7557 }
7558
7559 if (encoder->possible_crtcs & crtc_mask)
7560 return true;
7561 return false;
7562}
7563
7564/**
7565 * intel_modeset_update_staged_output_state
7566 *
7567 * Updates the staged output configuration state, e.g. after we've read out the
7568 * current hw state.
7569 */
7570static void intel_modeset_update_staged_output_state(struct drm_device *dev)
7571{
7572 struct intel_encoder *encoder;
7573 struct intel_connector *connector;
7574
7575 list_for_each_entry(connector, &dev->mode_config.connector_list,
7576 base.head) {
7577 connector->new_encoder =
7578 to_intel_encoder(connector->base.encoder);
7579 }
7580
7581 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7582 base.head) {
7583 encoder->new_crtc =
7584 to_intel_crtc(encoder->base.crtc);
7585 }
7586}
7587
7588/**
7589 * intel_modeset_commit_output_state
7590 *
7591 * This function copies the stage display pipe configuration to the real one.
7592 */
7593static void intel_modeset_commit_output_state(struct drm_device *dev)
7594{ 6999{
7595 struct intel_encoder *encoder; 7000 struct drm_i915_private *dev_priv = dev->dev_private;
7596 struct intel_connector *connector; 7001 u32 reg, val;
7597
7598 list_for_each_entry(connector, &dev->mode_config.connector_list,
7599 base.head) {
7600 connector->base.encoder = &connector->new_encoder->base;
7601 }
7602
7603 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7604 base.head) {
7605 encoder->base.crtc = &encoder->new_crtc->base;
7606 }
7607}
7608
7609static struct drm_display_mode *
7610intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7611 struct drm_display_mode *mode)
7612{
7613 struct drm_device *dev = crtc->dev;
7614 struct drm_display_mode *adjusted_mode;
7615 struct drm_encoder_helper_funcs *encoder_funcs;
7616 struct intel_encoder *encoder;
7617
7618 adjusted_mode = drm_mode_duplicate(dev, mode);
7619 if (!adjusted_mode)
7620 return ERR_PTR(-ENOMEM);
7621
7622 /* Pass our mode to the connectors and the CRTC to give them a chance to
7623 * adjust it according to limitations or connector properties, and also
7624 * a chance to reject the mode entirely.
7625 */
7626 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7627 base.head) {
7628
7629 if (&encoder->new_crtc->base != crtc)
7630 continue;
7631 encoder_funcs = encoder->base.helper_private;
7632 if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
7633 adjusted_mode))) {
7634 DRM_DEBUG_KMS("Encoder fixup failed\n");
7635 goto fail;
7636 }
7637 }
7638
7639 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
7640 DRM_DEBUG_KMS("CRTC fixup failed\n");
7641 goto fail;
7642 }
7643 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
7644
7645 return adjusted_mode;
7646fail:
7647 drm_mode_destroy(dev, adjusted_mode);
7648 return ERR_PTR(-EINVAL);
7649}
7650
7651/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7652 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7653static void
7654intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7655 unsigned *prepare_pipes, unsigned *disable_pipes)
7656{
7657 struct intel_crtc *intel_crtc;
7658 struct drm_device *dev = crtc->dev;
7659 struct intel_encoder *encoder;
7660 struct intel_connector *connector;
7661 struct drm_crtc *tmp_crtc;
7662
7663 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
7664
7665 /* Check which crtcs have changed outputs connected to them, these need
7666 * to be part of the prepare_pipes mask. We don't (yet) support global
7667 * modeset across multiple crtcs, so modeset_pipes will only have one
7668 * bit set at most. */
7669 list_for_each_entry(connector, &dev->mode_config.connector_list,
7670 base.head) {
7671 if (connector->base.encoder == &connector->new_encoder->base)
7672 continue;
7673
7674 if (connector->base.encoder) {
7675 tmp_crtc = connector->base.encoder->crtc;
7676
7677 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7678 }
7679
7680 if (connector->new_encoder)
7681 *prepare_pipes |=
7682 1 << connector->new_encoder->new_crtc->pipe;
7683 }
7684
7685 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7686 base.head) {
7687 if (encoder->base.crtc == &encoder->new_crtc->base)
7688 continue;
7689
7690 if (encoder->base.crtc) {
7691 tmp_crtc = encoder->base.crtc;
7692
7693 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7694 }
7695
7696 if (encoder->new_crtc)
7697 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
7698 }
7699
7700 /* Check for any pipes that will be fully disabled ... */
7701 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7702 base.head) {
7703 bool used = false;
7704
7705 /* Don't try to disable disabled crtcs. */
7706 if (!intel_crtc->base.enabled)
7707 continue;
7708
7709 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7710 base.head) {
7711 if (encoder->new_crtc == intel_crtc)
7712 used = true;
7713 }
7714
7715 if (!used)
7716 *disable_pipes |= 1 << intel_crtc->pipe;
7717 }
7718
7719 7002
7720 /* set_mode is also used to update properties on life display pipes. */ 7003 if (HAS_PCH_SPLIT(dev))
7721 intel_crtc = to_intel_crtc(crtc); 7004 return;
7722 if (crtc->enabled)
7723 *prepare_pipes |= 1 << intel_crtc->pipe;
7724 7005
7725 /* We only support modeset on one single crtc, hence we need to do that 7006 /* Who knows what state these registers were left in by the BIOS or
7726 * only for the passed in crtc iff we change anything else than just 7007 * grub?
7727 * disable crtcs.
7728 * 7008 *
7729 * This is actually not true, to be fully compatible with the old crtc 7009 * If we leave the registers in a conflicting state (e.g. with the
7730 * helper we automatically disable _any_ output (i.e. doesn't need to be 7010 * display plane reading from the other pipe than the one we intend
7731 * connected to the crtc we're modesetting on) if it's disconnected. 7011 * to use) then when we attempt to teardown the active mode, we will
7732 * Which is a rather nutty api (since changed the output configuration 7012 * not disable the pipes and planes in the correct order -- leaving
7733 * without userspace's explicit request can lead to confusion), but 7013 * a plane reading from a disabled pipe and possibly leading to
7734 * alas. Hence we currently need to modeset on all pipes we prepare. */ 7014 * undefined behaviour.
7735 if (*prepare_pipes) 7015 */
7736 *modeset_pipes = *prepare_pipes;
7737
7738 /* ... and mask these out. */
7739 *modeset_pipes &= ~(*disable_pipes);
7740 *prepare_pipes &= ~(*disable_pipes);
7741}
7742
7743static bool intel_crtc_in_use(struct drm_crtc *crtc)
7744{
7745 struct drm_encoder *encoder;
7746 struct drm_device *dev = crtc->dev;
7747
7748 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
7749 if (encoder->crtc == crtc)
7750 return true;
7751
7752 return false;
7753}
7754
7755static void
7756intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7757{
7758 struct intel_encoder *intel_encoder;
7759 struct intel_crtc *intel_crtc;
7760 struct drm_connector *connector;
7761
7762 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
7763 base.head) {
7764 if (!intel_encoder->base.crtc)
7765 continue;
7766
7767 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
7768
7769 if (prepare_pipes & (1 << intel_crtc->pipe))
7770 intel_encoder->connectors_active = false;
7771 }
7772
7773 intel_modeset_commit_output_state(dev);
7774
7775 /* Update computed state. */
7776 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7777 base.head) {
7778 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
7779 }
7780
7781 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7782 if (!connector->encoder || !connector->encoder->crtc)
7783 continue;
7784
7785 intel_crtc = to_intel_crtc(connector->encoder->crtc);
7786
7787 if (prepare_pipes & (1 << intel_crtc->pipe)) {
7788 struct drm_property *dpms_property =
7789 dev->mode_config.dpms_property;
7790
7791 connector->dpms = DRM_MODE_DPMS_ON;
7792 drm_object_property_set_value(&connector->base,
7793 dpms_property,
7794 DRM_MODE_DPMS_ON);
7795
7796 intel_encoder = to_intel_encoder(connector->encoder);
7797 intel_encoder->connectors_active = true;
7798 }
7799 }
7800
7801}
7802
7803#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
7804 list_for_each_entry((intel_crtc), \
7805 &(dev)->mode_config.crtc_list, \
7806 base.head) \
7807 if (mask & (1 <<(intel_crtc)->pipe)) \
7808
7809void
7810intel_modeset_check_state(struct drm_device *dev)
7811{
7812 struct intel_crtc *crtc;
7813 struct intel_encoder *encoder;
7814 struct intel_connector *connector;
7815
7816 list_for_each_entry(connector, &dev->mode_config.connector_list,
7817 base.head) {
7818 /* This also checks the encoder/connector hw state with the
7819 * ->get_hw_state callbacks. */
7820 intel_connector_check_state(connector);
7821
7822 WARN(&connector->new_encoder->base != connector->base.encoder,
7823 "connector's staged encoder doesn't match current encoder\n");
7824 }
7825
7826 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7827 base.head) {
7828 bool enabled = false;
7829 bool active = false;
7830 enum pipe pipe, tracked_pipe;
7831
7832 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7833 encoder->base.base.id,
7834 drm_get_encoder_name(&encoder->base));
7835
7836 WARN(&encoder->new_crtc->base != encoder->base.crtc,
7837 "encoder's stage crtc doesn't match current crtc\n");
7838 WARN(encoder->connectors_active && !encoder->base.crtc,
7839 "encoder's active_connectors set, but no crtc\n");
7840
7841 list_for_each_entry(connector, &dev->mode_config.connector_list,
7842 base.head) {
7843 if (connector->base.encoder != &encoder->base)
7844 continue;
7845 enabled = true;
7846 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
7847 active = true;
7848 }
7849 WARN(!!encoder->base.crtc != enabled,
7850 "encoder's enabled state mismatch "
7851 "(expected %i, found %i)\n",
7852 !!encoder->base.crtc, enabled);
7853 WARN(active && !encoder->base.crtc,
7854 "active encoder with no crtc\n");
7855
7856 WARN(encoder->connectors_active != active,
7857 "encoder's computed active state doesn't match tracked active state "
7858 "(expected %i, found %i)\n", active, encoder->connectors_active);
7859
7860 active = encoder->get_hw_state(encoder, &pipe);
7861 WARN(active != encoder->connectors_active,
7862 "encoder's hw state doesn't match sw tracking "
7863 "(expected %i, found %i)\n",
7864 encoder->connectors_active, active);
7865
7866 if (!encoder->base.crtc)
7867 continue;
7868
7869 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
7870 WARN(active && pipe != tracked_pipe,
7871 "active encoder's pipe doesn't match"
7872 "(expected %i, found %i)\n",
7873 tracked_pipe, pipe);
7874
7875 }
7876
7877 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7878 base.head) {
7879 bool enabled = false;
7880 bool active = false;
7881 7016
7882 DRM_DEBUG_KMS("[CRTC:%d]\n", 7017 reg = DSPCNTR(plane);
7883 crtc->base.base.id); 7018 val = I915_READ(reg);
7884 7019
7885 WARN(crtc->active && !crtc->base.enabled, 7020 if ((val & DISPLAY_PLANE_ENABLE) == 0)
7886 "active crtc, but not enabled in sw tracking\n"); 7021 return;
7022 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7023 return;
7887 7024
7888 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7025 /* This display plane is active and attached to the other CPU pipe. */
7889 base.head) { 7026 pipe = !pipe;
7890 if (encoder->base.crtc != &crtc->base)
7891 continue;
7892 enabled = true;
7893 if (encoder->connectors_active)
7894 active = true;
7895 }
7896 WARN(active != crtc->active,
7897 "crtc's computed active state doesn't match tracked active state "
7898 "(expected %i, found %i)\n", active, crtc->active);
7899 WARN(enabled != crtc->base.enabled,
7900 "crtc's computed enabled state doesn't match tracked enabled state "
7901 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7902 7027
7903 assert_pipe(dev->dev_private, crtc->pipe, crtc->active); 7028 /* Disable the plane and wait for it to stop reading from the pipe. */
7904 } 7029 intel_disable_plane(dev_priv, plane, pipe);
7030 intel_disable_pipe(dev_priv, pipe);
7905} 7031}
7906 7032
7907bool intel_set_mode(struct drm_crtc *crtc, 7033static void intel_crtc_reset(struct drm_crtc *crtc)
7908 struct drm_display_mode *mode,
7909 int x, int y, struct drm_framebuffer *fb)
7910{ 7034{
7911 struct drm_device *dev = crtc->dev; 7035 struct drm_device *dev = crtc->dev;
7912 drm_i915_private_t *dev_priv = dev->dev_private; 7036 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7913 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
7914 struct intel_crtc *intel_crtc;
7915 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7916 bool ret = true;
7917
7918 intel_modeset_affected_pipes(crtc, &modeset_pipes,
7919 &prepare_pipes, &disable_pipes);
7920
7921 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7922 modeset_pipes, prepare_pipes, disable_pipes);
7923
7924 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7925 intel_crtc_disable(&intel_crtc->base);
7926
7927 saved_hwmode = crtc->hwmode;
7928 saved_mode = crtc->mode;
7929
7930 /* Hack: Because we don't (yet) support global modeset on multiple
7931 * crtcs, we don't keep track of the new mode for more than one crtc.
7932 * Hence simply check whether any bit is set in modeset_pipes in all the
7933 * pieces of code that are not yet converted to deal with mutliple crtcs
7934 * changing their mode at the same time. */
7935 adjusted_mode = NULL;
7936 if (modeset_pipes) {
7937 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7938 if (IS_ERR(adjusted_mode)) {
7939 return false;
7940 }
7941 }
7942
7943 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7944 if (intel_crtc->base.enabled)
7945 dev_priv->display.crtc_disable(&intel_crtc->base);
7946 }
7947
7948 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7949 * to set it here already despite that we pass it down the callchain.
7950 */
7951 if (modeset_pipes)
7952 crtc->mode = *mode;
7953
7954 /* Only after disabling all output pipelines that will be changed can we
7955 * update the the output configuration. */
7956 intel_modeset_update_state(dev, prepare_pipes);
7957
7958 if (dev_priv->display.modeset_global_resources)
7959 dev_priv->display.modeset_global_resources(dev);
7960 7037
7961 /* Set up the DPLL and any encoders state that needs to adjust or depend 7038 /* Reset flags back to the 'unknown' status so that they
7962 * on the DPLL. 7039 * will be correctly set on the initial modeset.
7963 */ 7040 */
7964 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 7041 intel_crtc->dpms_mode = -1;
7965 ret = !intel_crtc_mode_set(&intel_crtc->base,
7966 mode, adjusted_mode,
7967 x, y, fb);
7968 if (!ret)
7969 goto done;
7970 }
7971
7972 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7973 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7974 dev_priv->display.crtc_enable(&intel_crtc->base);
7975
7976 if (modeset_pipes) {
7977 /* Store real post-adjustment hardware mode. */
7978 crtc->hwmode = *adjusted_mode;
7979
7980 /* Calculate and store various constants which
7981 * are later needed by vblank and swap-completion
7982 * timestamping. They are derived from true hwmode.
7983 */
7984 drm_calc_timestamping_constants(crtc);
7985 }
7986
7987 /* FIXME: add subpixel order */
7988done:
7989 drm_mode_destroy(dev, adjusted_mode);
7990 if (!ret && crtc->enabled) {
7991 crtc->hwmode = saved_hwmode;
7992 crtc->mode = saved_mode;
7993 } else {
7994 intel_modeset_check_state(dev);
7995 }
7996
7997 return ret;
7998}
7999
8000#undef for_each_intel_crtc_masked
8001
8002static void intel_set_config_free(struct intel_set_config *config)
8003{
8004 if (!config)
8005 return;
8006 7042
8007 kfree(config->save_connector_encoders); 7043 /* We need to fix up any BIOS configuration that conflicts with
8008 kfree(config->save_encoder_crtcs); 7044 * our expectations.
8009 kfree(config);
8010}
8011
8012static int intel_set_config_save_state(struct drm_device *dev,
8013 struct intel_set_config *config)
8014{
8015 struct drm_encoder *encoder;
8016 struct drm_connector *connector;
8017 int count;
8018
8019 config->save_encoder_crtcs =
8020 kcalloc(dev->mode_config.num_encoder,
8021 sizeof(struct drm_crtc *), GFP_KERNEL);
8022 if (!config->save_encoder_crtcs)
8023 return -ENOMEM;
8024
8025 config->save_connector_encoders =
8026 kcalloc(dev->mode_config.num_connector,
8027 sizeof(struct drm_encoder *), GFP_KERNEL);
8028 if (!config->save_connector_encoders)
8029 return -ENOMEM;
8030
8031 /* Copy data. Note that driver private data is not affected.
8032 * Should anything bad happen only the expected state is
8033 * restored, not the drivers personal bookkeeping.
8034 */ 7045 */
8035 count = 0; 7046 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
8036 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
8037 config->save_encoder_crtcs[count++] = encoder->crtc;
8038 }
8039
8040 count = 0;
8041 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
8042 config->save_connector_encoders[count++] = connector->encoder;
8043 }
8044
8045 return 0;
8046}
8047
8048static void intel_set_config_restore_state(struct drm_device *dev,
8049 struct intel_set_config *config)
8050{
8051 struct intel_encoder *encoder;
8052 struct intel_connector *connector;
8053 int count;
8054
8055 count = 0;
8056 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8057 encoder->new_crtc =
8058 to_intel_crtc(config->save_encoder_crtcs[count++]);
8059 }
8060
8061 count = 0;
8062 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
8063 connector->new_encoder =
8064 to_intel_encoder(config->save_connector_encoders[count++]);
8065 }
8066}
8067
8068static void
8069intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8070 struct intel_set_config *config)
8071{
8072
8073 /* We should be able to check here if the fb has the same properties
8074 * and then just flip_or_move it */
8075 if (set->crtc->fb != set->fb) {
8076 /* If we have no fb then treat it as a full mode set */
8077 if (set->crtc->fb == NULL) {
8078 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
8079 config->mode_changed = true;
8080 } else if (set->fb == NULL) {
8081 config->mode_changed = true;
8082 } else if (set->fb->depth != set->crtc->fb->depth) {
8083 config->mode_changed = true;
8084 } else if (set->fb->bits_per_pixel !=
8085 set->crtc->fb->bits_per_pixel) {
8086 config->mode_changed = true;
8087 } else
8088 config->fb_changed = true;
8089 }
8090
8091 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
8092 config->fb_changed = true;
8093
8094 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
8095 DRM_DEBUG_KMS("modes are different, full mode set\n");
8096 drm_mode_debug_printmodeline(&set->crtc->mode);
8097 drm_mode_debug_printmodeline(set->mode);
8098 config->mode_changed = true;
8099 }
8100} 7047}
8101 7048
8102static int 7049static struct drm_crtc_helper_funcs intel_helper_funcs = {
8103intel_modeset_stage_output_state(struct drm_device *dev, 7050 .dpms = intel_crtc_dpms,
8104 struct drm_mode_set *set, 7051 .mode_fixup = intel_crtc_mode_fixup,
8105 struct intel_set_config *config) 7052 .mode_set = intel_crtc_mode_set,
8106{ 7053 .mode_set_base = intel_pipe_set_base,
8107 struct drm_crtc *new_crtc; 7054 .mode_set_base_atomic = intel_pipe_set_base_atomic,
8108 struct intel_connector *connector; 7055 .load_lut = intel_crtc_load_lut,
8109 struct intel_encoder *encoder; 7056 .disable = intel_crtc_disable,
8110 int count, ro; 7057};
8111
8112 /* The upper layers ensure that we either disabl a crtc or have a list
8113 * of connectors. For paranoia, double-check this. */
8114 WARN_ON(!set->fb && (set->num_connectors != 0));
8115 WARN_ON(set->fb && (set->num_connectors == 0));
8116
8117 count = 0;
8118 list_for_each_entry(connector, &dev->mode_config.connector_list,
8119 base.head) {
8120 /* Otherwise traverse passed in connector list and get encoders
8121 * for them. */
8122 for (ro = 0; ro < set->num_connectors; ro++) {
8123 if (set->connectors[ro] == &connector->base) {
8124 connector->new_encoder = connector->encoder;
8125 break;
8126 }
8127 }
8128
8129 /* If we disable the crtc, disable all its connectors. Also, if
8130 * the connector is on the changing crtc but not on the new
8131 * connector list, disable it. */
8132 if ((!set->fb || ro == set->num_connectors) &&
8133 connector->base.encoder &&
8134 connector->base.encoder->crtc == set->crtc) {
8135 connector->new_encoder = NULL;
8136
8137 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
8138 connector->base.base.id,
8139 drm_get_connector_name(&connector->base));
8140 }
8141
8142
8143 if (&connector->new_encoder->base != connector->base.encoder) {
8144 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
8145 config->mode_changed = true;
8146 }
8147 }
8148 /* connector->new_encoder is now updated for all connectors. */
8149
8150 /* Update crtc of enabled connectors. */
8151 count = 0;
8152 list_for_each_entry(connector, &dev->mode_config.connector_list,
8153 base.head) {
8154 if (!connector->new_encoder)
8155 continue;
8156
8157 new_crtc = connector->new_encoder->base.crtc;
8158
8159 for (ro = 0; ro < set->num_connectors; ro++) {
8160 if (set->connectors[ro] == &connector->base)
8161 new_crtc = set->crtc;
8162 }
8163
8164 /* Make sure the new CRTC will work with the encoder */
8165 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
8166 new_crtc)) {
8167 return -EINVAL;
8168 }
8169 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
8170
8171 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
8172 connector->base.base.id,
8173 drm_get_connector_name(&connector->base),
8174 new_crtc->base.id);
8175 }
8176
8177 /* Check for any encoders that needs to be disabled. */
8178 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8179 base.head) {
8180 list_for_each_entry(connector,
8181 &dev->mode_config.connector_list,
8182 base.head) {
8183 if (connector->new_encoder == encoder) {
8184 WARN_ON(!connector->new_encoder->new_crtc);
8185
8186 goto next_encoder;
8187 }
8188 }
8189 encoder->new_crtc = NULL;
8190next_encoder:
8191 /* Only now check for crtc changes so we don't miss encoders
8192 * that will be disabled. */
8193 if (&encoder->new_crtc->base != encoder->base.crtc) {
8194 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
8195 config->mode_changed = true;
8196 }
8197 }
8198 /* Now we've also updated encoder->new_crtc for all encoders. */
8199
8200 return 0;
8201}
8202
8203static int intel_crtc_set_config(struct drm_mode_set *set)
8204{
8205 struct drm_device *dev;
8206 struct drm_mode_set save_set;
8207 struct intel_set_config *config;
8208 int ret;
8209
8210 BUG_ON(!set);
8211 BUG_ON(!set->crtc);
8212 BUG_ON(!set->crtc->helper_private);
8213
8214 if (!set->mode)
8215 set->fb = NULL;
8216
8217 /* The fb helper likes to play gross jokes with ->mode_set_config.
8218 * Unfortunately the crtc helper doesn't do much at all for this case,
8219 * so we have to cope with this madness until the fb helper is fixed up. */
8220 if (set->fb && set->num_connectors == 0)
8221 return 0;
8222
8223 if (set->fb) {
8224 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
8225 set->crtc->base.id, set->fb->base.id,
8226 (int)set->num_connectors, set->x, set->y);
8227 } else {
8228 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
8229 }
8230
8231 dev = set->crtc->dev;
8232
8233 ret = -ENOMEM;
8234 config = kzalloc(sizeof(*config), GFP_KERNEL);
8235 if (!config)
8236 goto out_config;
8237
8238 ret = intel_set_config_save_state(dev, config);
8239 if (ret)
8240 goto out_config;
8241
8242 save_set.crtc = set->crtc;
8243 save_set.mode = &set->crtc->mode;
8244 save_set.x = set->crtc->x;
8245 save_set.y = set->crtc->y;
8246 save_set.fb = set->crtc->fb;
8247
8248 /* Compute whether we need a full modeset, only an fb base update or no
8249 * change at all. In the future we might also check whether only the
8250 * mode changed, e.g. for LVDS where we only change the panel fitter in
8251 * such cases. */
8252 intel_set_config_compute_mode_changes(set, config);
8253
8254 ret = intel_modeset_stage_output_state(dev, set, config);
8255 if (ret)
8256 goto fail;
8257
8258 if (config->mode_changed) {
8259 if (set->mode) {
8260 DRM_DEBUG_KMS("attempting to set mode from"
8261 " userspace\n");
8262 drm_mode_debug_printmodeline(set->mode);
8263 }
8264
8265 if (!intel_set_mode(set->crtc, set->mode,
8266 set->x, set->y, set->fb)) {
8267 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
8268 set->crtc->base.id);
8269 ret = -EINVAL;
8270 goto fail;
8271 }
8272 } else if (config->fb_changed) {
8273 ret = intel_pipe_set_base(set->crtc,
8274 set->x, set->y, set->fb);
8275 }
8276
8277 intel_set_config_free(config);
8278
8279 return 0;
8280
8281fail:
8282 intel_set_config_restore_state(dev, config);
8283
8284 /* Try to restore the config */
8285 if (config->mode_changed &&
8286 !intel_set_mode(save_set.crtc, save_set.mode,
8287 save_set.x, save_set.y, save_set.fb))
8288 DRM_ERROR("failed to restore config after modeset failure\n");
8289
8290out_config:
8291 intel_set_config_free(config);
8292 return ret;
8293}
8294 7058
8295static const struct drm_crtc_funcs intel_crtc_funcs = { 7059static const struct drm_crtc_funcs intel_crtc_funcs = {
7060 .reset = intel_crtc_reset,
8296 .cursor_set = intel_crtc_cursor_set, 7061 .cursor_set = intel_crtc_cursor_set,
8297 .cursor_move = intel_crtc_cursor_move, 7062 .cursor_move = intel_crtc_cursor_move,
8298 .gamma_set = intel_crtc_gamma_set, 7063 .gamma_set = intel_crtc_gamma_set,
8299 .set_config = intel_crtc_set_config, 7064 .set_config = drm_crtc_helper_set_config,
8300 .destroy = intel_crtc_destroy, 7065 .destroy = intel_crtc_destroy,
8301 .page_flip = intel_crtc_page_flip, 7066 .page_flip = intel_crtc_page_flip,
8302}; 7067};
8303 7068
8304static void intel_cpu_pll_init(struct drm_device *dev)
8305{
8306 if (IS_HASWELL(dev))
8307 intel_ddi_pll_init(dev);
8308}
8309
8310static void intel_pch_pll_init(struct drm_device *dev)
8311{
8312 drm_i915_private_t *dev_priv = dev->dev_private;
8313 int i;
8314
8315 if (dev_priv->num_pch_pll == 0) {
8316 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
8317 return;
8318 }
8319
8320 for (i = 0; i < dev_priv->num_pch_pll; i++) {
8321 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
8322 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
8323 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
8324 }
8325}
8326
8327static void intel_crtc_init(struct drm_device *dev, int pipe) 7069static void intel_crtc_init(struct drm_device *dev, int pipe)
8328{ 7070{
8329 drm_i915_private_t *dev_priv = dev->dev_private; 7071 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -8346,7 +7088,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8346 /* Swap pipes & planes for FBC on pre-965 */ 7088 /* Swap pipes & planes for FBC on pre-965 */
8347 intel_crtc->pipe = pipe; 7089 intel_crtc->pipe = pipe;
8348 intel_crtc->plane = pipe; 7090 intel_crtc->plane = pipe;
8349 intel_crtc->cpu_transcoder = pipe;
8350 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 7091 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8351 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 7092 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8352 intel_crtc->plane = !pipe; 7093 intel_crtc->plane = !pipe;
@@ -8357,20 +7098,38 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
8357 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 7098 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
8358 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 7099 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
8359 7100
7101 intel_crtc_reset(&intel_crtc->base);
7102 intel_crtc->active = true; /* force the pipe off on setup_init_config */
8360 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 7103 intel_crtc->bpp = 24; /* default for pre-Ironlake */
8361 7104
7105 if (HAS_PCH_SPLIT(dev)) {
7106 intel_helper_funcs.prepare = ironlake_crtc_prepare;
7107 intel_helper_funcs.commit = ironlake_crtc_commit;
7108 } else {
7109 intel_helper_funcs.prepare = i9xx_crtc_prepare;
7110 intel_helper_funcs.commit = i9xx_crtc_commit;
7111 }
7112
8362 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 7113 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
7114
7115 intel_crtc->busy = false;
7116
7117 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
7118 (unsigned long)intel_crtc);
8363} 7119}
8364 7120
8365int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 7121int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
8366 struct drm_file *file) 7122 struct drm_file *file)
8367{ 7123{
7124 drm_i915_private_t *dev_priv = dev->dev_private;
8368 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 7125 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8369 struct drm_mode_object *drmmode_obj; 7126 struct drm_mode_object *drmmode_obj;
8370 struct intel_crtc *crtc; 7127 struct intel_crtc *crtc;
8371 7128
8372 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 7129 if (!dev_priv) {
8373 return -ENODEV; 7130 DRM_ERROR("called with no initialization\n");
7131 return -EINVAL;
7132 }
8374 7133
8375 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 7134 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
8376 DRM_MODE_OBJECT_CRTC); 7135 DRM_MODE_OBJECT_CRTC);
@@ -8386,23 +7145,15 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
8386 return 0; 7145 return 0;
8387} 7146}
8388 7147
8389static int intel_encoder_clones(struct intel_encoder *encoder) 7148static int intel_encoder_clones(struct drm_device *dev, int type_mask)
8390{ 7149{
8391 struct drm_device *dev = encoder->base.dev; 7150 struct intel_encoder *encoder;
8392 struct intel_encoder *source_encoder;
8393 int index_mask = 0; 7151 int index_mask = 0;
8394 int entry = 0; 7152 int entry = 0;
8395 7153
8396 list_for_each_entry(source_encoder, 7154 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8397 &dev->mode_config.encoder_list, base.head) { 7155 if (type_mask & encoder->clone_mask)
8398
8399 if (encoder == source_encoder)
8400 index_mask |= (1 << entry);
8401
8402 /* Intel hw has only one MUX where enocoders could be cloned. */
8403 if (encoder->cloneable && source_encoder->cloneable)
8404 index_mask |= (1 << entry); 7156 index_mask |= (1 << entry);
8405
8406 entry++; 7157 entry++;
8407 } 7158 }
8408 7159
@@ -8431,97 +7182,65 @@ static void intel_setup_outputs(struct drm_device *dev)
8431 struct drm_i915_private *dev_priv = dev->dev_private; 7182 struct drm_i915_private *dev_priv = dev->dev_private;
8432 struct intel_encoder *encoder; 7183 struct intel_encoder *encoder;
8433 bool dpd_is_edp = false; 7184 bool dpd_is_edp = false;
8434 bool has_lvds; 7185 bool has_lvds = false;
8435 7186
8436 has_lvds = intel_lvds_init(dev); 7187 if (IS_MOBILE(dev) && !IS_I830(dev))
7188 has_lvds = intel_lvds_init(dev);
8437 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 7189 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
8438 /* disable the panel fitter on everything but LVDS */ 7190 /* disable the panel fitter on everything but LVDS */
8439 I915_WRITE(PFIT_CONTROL, 0); 7191 I915_WRITE(PFIT_CONTROL, 0);
8440 } 7192 }
8441 7193
8442 if (!(IS_HASWELL(dev) && 7194 if (HAS_PCH_SPLIT(dev)) {
8443 (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
8444 intel_crt_init(dev);
8445
8446 if (IS_HASWELL(dev)) {
8447 int found;
8448
8449 /* Haswell uses DDI functions to detect digital outputs */
8450 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
8451 /* DDI A only supports eDP */
8452 if (found)
8453 intel_ddi_init(dev, PORT_A);
8454
8455 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
8456 * register */
8457 found = I915_READ(SFUSE_STRAP);
8458
8459 if (found & SFUSE_STRAP_DDIB_DETECTED)
8460 intel_ddi_init(dev, PORT_B);
8461 if (found & SFUSE_STRAP_DDIC_DETECTED)
8462 intel_ddi_init(dev, PORT_C);
8463 if (found & SFUSE_STRAP_DDID_DETECTED)
8464 intel_ddi_init(dev, PORT_D);
8465 } else if (HAS_PCH_SPLIT(dev)) {
8466 int found;
8467 dpd_is_edp = intel_dpd_is_edp(dev); 7195 dpd_is_edp = intel_dpd_is_edp(dev);
8468 7196
8469 if (has_edp_a(dev)) 7197 if (has_edp_a(dev))
8470 intel_dp_init(dev, DP_A, PORT_A); 7198 intel_dp_init(dev, DP_A);
7199
7200 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7201 intel_dp_init(dev, PCH_DP_D);
7202 }
7203
7204 intel_crt_init(dev);
7205
7206 if (HAS_PCH_SPLIT(dev)) {
7207 int found;
8471 7208
8472 if (I915_READ(HDMIB) & PORT_DETECTED) { 7209 if (I915_READ(HDMIB) & PORT_DETECTED) {
8473 /* PCH SDVOB multiplex with HDMIB */ 7210 /* PCH SDVOB multiplex with HDMIB */
8474 found = intel_sdvo_init(dev, PCH_SDVOB, true); 7211 found = intel_sdvo_init(dev, PCH_SDVOB);
8475 if (!found) 7212 if (!found)
8476 intel_hdmi_init(dev, HDMIB, PORT_B); 7213 intel_hdmi_init(dev, HDMIB);
8477 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 7214 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
8478 intel_dp_init(dev, PCH_DP_B, PORT_B); 7215 intel_dp_init(dev, PCH_DP_B);
8479 } 7216 }
8480 7217
8481 if (I915_READ(HDMIC) & PORT_DETECTED) 7218 if (I915_READ(HDMIC) & PORT_DETECTED)
8482 intel_hdmi_init(dev, HDMIC, PORT_C); 7219 intel_hdmi_init(dev, HDMIC);
8483 7220
8484 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) 7221 if (I915_READ(HDMID) & PORT_DETECTED)
8485 intel_hdmi_init(dev, HDMID, PORT_D); 7222 intel_hdmi_init(dev, HDMID);
8486 7223
8487 if (I915_READ(PCH_DP_C) & DP_DETECTED) 7224 if (I915_READ(PCH_DP_C) & DP_DETECTED)
8488 intel_dp_init(dev, PCH_DP_C, PORT_C); 7225 intel_dp_init(dev, PCH_DP_C);
8489
8490 if (I915_READ(PCH_DP_D) & DP_DETECTED)
8491 intel_dp_init(dev, PCH_DP_D, PORT_D);
8492 } else if (IS_VALLEYVIEW(dev)) {
8493 int found;
8494
8495 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8496 if (I915_READ(DP_C) & DP_DETECTED)
8497 intel_dp_init(dev, DP_C, PORT_C);
8498
8499 if (I915_READ(SDVOB) & PORT_DETECTED) {
8500 /* SDVOB multiplex with HDMIB */
8501 found = intel_sdvo_init(dev, SDVOB, true);
8502 if (!found)
8503 intel_hdmi_init(dev, SDVOB, PORT_B);
8504 if (!found && (I915_READ(DP_B) & DP_DETECTED))
8505 intel_dp_init(dev, DP_B, PORT_B);
8506 }
8507 7226
8508 if (I915_READ(SDVOC) & PORT_DETECTED) 7227 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
8509 intel_hdmi_init(dev, SDVOC, PORT_C); 7228 intel_dp_init(dev, PCH_DP_D);
8510 7229
8511 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 7230 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
8512 bool found = false; 7231 bool found = false;
8513 7232
8514 if (I915_READ(SDVOB) & SDVO_DETECTED) { 7233 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8515 DRM_DEBUG_KMS("probing SDVOB\n"); 7234 DRM_DEBUG_KMS("probing SDVOB\n");
8516 found = intel_sdvo_init(dev, SDVOB, true); 7235 found = intel_sdvo_init(dev, SDVOB);
8517 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 7236 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8518 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 7237 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
8519 intel_hdmi_init(dev, SDVOB, PORT_B); 7238 intel_hdmi_init(dev, SDVOB);
8520 } 7239 }
8521 7240
8522 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 7241 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
8523 DRM_DEBUG_KMS("probing DP_B\n"); 7242 DRM_DEBUG_KMS("probing DP_B\n");
8524 intel_dp_init(dev, DP_B, PORT_B); 7243 intel_dp_init(dev, DP_B);
8525 } 7244 }
8526 } 7245 }
8527 7246
@@ -8529,25 +7248,25 @@ static void intel_setup_outputs(struct drm_device *dev)
8529 7248
8530 if (I915_READ(SDVOB) & SDVO_DETECTED) { 7249 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8531 DRM_DEBUG_KMS("probing SDVOC\n"); 7250 DRM_DEBUG_KMS("probing SDVOC\n");
8532 found = intel_sdvo_init(dev, SDVOC, false); 7251 found = intel_sdvo_init(dev, SDVOC);
8533 } 7252 }
8534 7253
8535 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 7254 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
8536 7255
8537 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 7256 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8538 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 7257 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
8539 intel_hdmi_init(dev, SDVOC, PORT_C); 7258 intel_hdmi_init(dev, SDVOC);
8540 } 7259 }
8541 if (SUPPORTS_INTEGRATED_DP(dev)) { 7260 if (SUPPORTS_INTEGRATED_DP(dev)) {
8542 DRM_DEBUG_KMS("probing DP_C\n"); 7261 DRM_DEBUG_KMS("probing DP_C\n");
8543 intel_dp_init(dev, DP_C, PORT_C); 7262 intel_dp_init(dev, DP_C);
8544 } 7263 }
8545 } 7264 }
8546 7265
8547 if (SUPPORTS_INTEGRATED_DP(dev) && 7266 if (SUPPORTS_INTEGRATED_DP(dev) &&
8548 (I915_READ(DP_D) & DP_DETECTED)) { 7267 (I915_READ(DP_D) & DP_DETECTED)) {
8549 DRM_DEBUG_KMS("probing DP_D\n"); 7268 DRM_DEBUG_KMS("probing DP_D\n");
8550 intel_dp_init(dev, DP_D, PORT_D); 7269 intel_dp_init(dev, DP_D);
8551 } 7270 }
8552 } else if (IS_GEN2(dev)) 7271 } else if (IS_GEN2(dev))
8553 intel_dvo_init(dev); 7272 intel_dvo_init(dev);
@@ -8558,12 +7277,11 @@ static void intel_setup_outputs(struct drm_device *dev)
8558 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 7277 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8559 encoder->base.possible_crtcs = encoder->crtc_mask; 7278 encoder->base.possible_crtcs = encoder->crtc_mask;
8560 encoder->base.possible_clones = 7279 encoder->base.possible_clones =
8561 intel_encoder_clones(encoder); 7280 intel_encoder_clones(dev, encoder->clone_mask);
8562 } 7281 }
8563 7282
8564 intel_init_pch_refclk(dev); 7283 /* disable all the possible outputs/crtcs before entering KMS mode */
8565 7284 drm_helper_disable_unused_functions(dev);
8566 drm_helper_move_panel_connectors_to_head(dev);
8567} 7285}
8568 7286
8569static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 7287static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -8593,79 +7311,32 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
8593 7311
8594int intel_framebuffer_init(struct drm_device *dev, 7312int intel_framebuffer_init(struct drm_device *dev,
8595 struct intel_framebuffer *intel_fb, 7313 struct intel_framebuffer *intel_fb,
8596 struct drm_mode_fb_cmd2 *mode_cmd, 7314 struct drm_mode_fb_cmd *mode_cmd,
8597 struct drm_i915_gem_object *obj) 7315 struct drm_i915_gem_object *obj)
8598{ 7316{
8599 int ret; 7317 int ret;
8600 7318
8601 if (obj->tiling_mode == I915_TILING_Y) { 7319 if (obj->tiling_mode == I915_TILING_Y)
8602 DRM_DEBUG("hardware does not support tiling Y\n");
8603 return -EINVAL;
8604 }
8605
8606 if (mode_cmd->pitches[0] & 63) {
8607 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
8608 mode_cmd->pitches[0]);
8609 return -EINVAL; 7320 return -EINVAL;
8610 }
8611 7321
8612 /* FIXME <= Gen4 stride limits are bit unclear */ 7322 if (mode_cmd->pitch & 63)
8613 if (mode_cmd->pitches[0] > 32768) {
8614 DRM_DEBUG("pitch (%d) must be at less than 32768\n",
8615 mode_cmd->pitches[0]);
8616 return -EINVAL; 7323 return -EINVAL;
8617 }
8618 7324
8619 if (obj->tiling_mode != I915_TILING_NONE && 7325 switch (mode_cmd->bpp) {
8620 mode_cmd->pitches[0] != obj->stride) { 7326 case 8:
8621 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 7327 case 16:
8622 mode_cmd->pitches[0], obj->stride); 7328 /* Only pre-ILK can handle 5:5:5 */
8623 return -EINVAL; 7329 if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
8624 }
8625
8626 /* Reject formats not supported by any plane early. */
8627 switch (mode_cmd->pixel_format) {
8628 case DRM_FORMAT_C8:
8629 case DRM_FORMAT_RGB565:
8630 case DRM_FORMAT_XRGB8888:
8631 case DRM_FORMAT_ARGB8888:
8632 break;
8633 case DRM_FORMAT_XRGB1555:
8634 case DRM_FORMAT_ARGB1555:
8635 if (INTEL_INFO(dev)->gen > 3) {
8636 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8637 return -EINVAL;
8638 }
8639 break;
8640 case DRM_FORMAT_XBGR8888:
8641 case DRM_FORMAT_ABGR8888:
8642 case DRM_FORMAT_XRGB2101010:
8643 case DRM_FORMAT_ARGB2101010:
8644 case DRM_FORMAT_XBGR2101010:
8645 case DRM_FORMAT_ABGR2101010:
8646 if (INTEL_INFO(dev)->gen < 4) {
8647 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8648 return -EINVAL; 7330 return -EINVAL;
8649 }
8650 break; 7331 break;
8651 case DRM_FORMAT_YUYV: 7332
8652 case DRM_FORMAT_UYVY: 7333 case 24:
8653 case DRM_FORMAT_YVYU: 7334 case 32:
8654 case DRM_FORMAT_VYUY:
8655 if (INTEL_INFO(dev)->gen < 5) {
8656 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8657 return -EINVAL;
8658 }
8659 break; 7335 break;
8660 default: 7336 default:
8661 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
8662 return -EINVAL; 7337 return -EINVAL;
8663 } 7338 }
8664 7339
8665 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
8666 if (mode_cmd->offsets[0] != 0)
8667 return -EINVAL;
8668
8669 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 7340 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8670 if (ret) { 7341 if (ret) {
8671 DRM_ERROR("framebuffer init failed %d\n", ret); 7342 DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -8680,12 +7351,11 @@ int intel_framebuffer_init(struct drm_device *dev,
8680static struct drm_framebuffer * 7351static struct drm_framebuffer *
8681intel_user_framebuffer_create(struct drm_device *dev, 7352intel_user_framebuffer_create(struct drm_device *dev,
8682 struct drm_file *filp, 7353 struct drm_file *filp,
8683 struct drm_mode_fb_cmd2 *mode_cmd) 7354 struct drm_mode_fb_cmd *mode_cmd)
8684{ 7355{
8685 struct drm_i915_gem_object *obj; 7356 struct drm_i915_gem_object *obj;
8686 7357
8687 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 7358 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
8688 mode_cmd->handles[0]));
8689 if (&obj->base == NULL) 7359 if (&obj->base == NULL)
8690 return ERR_PTR(-ENOENT); 7360 return ERR_PTR(-ENOENT);
8691 7361
@@ -8697,37 +7367,840 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
8697 .output_poll_changed = intel_fb_output_poll_changed, 7367 .output_poll_changed = intel_fb_output_poll_changed,
8698}; 7368};
8699 7369
7370static struct drm_i915_gem_object *
7371intel_alloc_context_page(struct drm_device *dev)
7372{
7373 struct drm_i915_gem_object *ctx;
7374 int ret;
7375
7376 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
7377
7378 ctx = i915_gem_alloc_object(dev, 4096);
7379 if (!ctx) {
7380 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
7381 return NULL;
7382 }
7383
7384 ret = i915_gem_object_pin(ctx, 4096, true);
7385 if (ret) {
7386 DRM_ERROR("failed to pin power context: %d\n", ret);
7387 goto err_unref;
7388 }
7389
7390 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
7391 if (ret) {
7392 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
7393 goto err_unpin;
7394 }
7395
7396 return ctx;
7397
7398err_unpin:
7399 i915_gem_object_unpin(ctx);
7400err_unref:
7401 drm_gem_object_unreference(&ctx->base);
7402 mutex_unlock(&dev->struct_mutex);
7403 return NULL;
7404}
7405
7406bool ironlake_set_drps(struct drm_device *dev, u8 val)
7407{
7408 struct drm_i915_private *dev_priv = dev->dev_private;
7409 u16 rgvswctl;
7410
7411 rgvswctl = I915_READ16(MEMSWCTL);
7412 if (rgvswctl & MEMCTL_CMD_STS) {
7413 DRM_DEBUG("gpu busy, RCS change rejected\n");
7414 return false; /* still busy with another command */
7415 }
7416
7417 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
7418 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
7419 I915_WRITE16(MEMSWCTL, rgvswctl);
7420 POSTING_READ16(MEMSWCTL);
7421
7422 rgvswctl |= MEMCTL_CMD_STS;
7423 I915_WRITE16(MEMSWCTL, rgvswctl);
7424
7425 return true;
7426}
7427
7428void ironlake_enable_drps(struct drm_device *dev)
7429{
7430 struct drm_i915_private *dev_priv = dev->dev_private;
7431 u32 rgvmodectl = I915_READ(MEMMODECTL);
7432 u8 fmax, fmin, fstart, vstart;
7433
7434 /* Enable temp reporting */
7435 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
7436 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
7437
7438 /* 100ms RC evaluation intervals */
7439 I915_WRITE(RCUPEI, 100000);
7440 I915_WRITE(RCDNEI, 100000);
7441
7442 /* Set max/min thresholds to 90ms and 80ms respectively */
7443 I915_WRITE(RCBMAXAVG, 90000);
7444 I915_WRITE(RCBMINAVG, 80000);
7445
7446 I915_WRITE(MEMIHYST, 1);
7447
7448 /* Set up min, max, and cur for interrupt handling */
7449 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
7450 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
7451 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
7452 MEMMODE_FSTART_SHIFT;
7453
7454 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
7455 PXVFREQ_PX_SHIFT;
7456
7457 dev_priv->fmax = fmax; /* IPS callback will increase this */
7458 dev_priv->fstart = fstart;
7459
7460 dev_priv->max_delay = fstart;
7461 dev_priv->min_delay = fmin;
7462 dev_priv->cur_delay = fstart;
7463
7464 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
7465 fmax, fmin, fstart);
7466
7467 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
7468
7469 /*
7470 * Interrupts will be enabled in ironlake_irq_postinstall
7471 */
7472
7473 I915_WRITE(VIDSTART, vstart);
7474 POSTING_READ(VIDSTART);
7475
7476 rgvmodectl |= MEMMODE_SWMODE_EN;
7477 I915_WRITE(MEMMODECTL, rgvmodectl);
7478
7479 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
7480 DRM_ERROR("stuck trying to change perf mode\n");
7481 msleep(1);
7482
7483 ironlake_set_drps(dev, fstart);
7484
7485 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
7486 I915_READ(0x112e0);
7487 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
7488 dev_priv->last_count2 = I915_READ(0x112f4);
7489 getrawmonotonic(&dev_priv->last_time2);
7490}
7491
7492void ironlake_disable_drps(struct drm_device *dev)
7493{
7494 struct drm_i915_private *dev_priv = dev->dev_private;
7495 u16 rgvswctl = I915_READ16(MEMSWCTL);
7496
7497 /* Ack interrupts, disable EFC interrupt */
7498 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
7499 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
7500 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
7501 I915_WRITE(DEIIR, DE_PCU_EVENT);
7502 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
7503
7504 /* Go back to the starting frequency */
7505 ironlake_set_drps(dev, dev_priv->fstart);
7506 msleep(1);
7507 rgvswctl |= MEMCTL_CMD_STS;
7508 I915_WRITE(MEMSWCTL, rgvswctl);
7509 msleep(1);
7510
7511}
7512
7513void gen6_set_rps(struct drm_device *dev, u8 val)
7514{
7515 struct drm_i915_private *dev_priv = dev->dev_private;
7516 u32 swreq;
7517
7518 swreq = (val & 0x3ff) << 25;
7519 I915_WRITE(GEN6_RPNSWREQ, swreq);
7520}
7521
7522void gen6_disable_rps(struct drm_device *dev)
7523{
7524 struct drm_i915_private *dev_priv = dev->dev_private;
7525
7526 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
7527 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
7528 I915_WRITE(GEN6_PMIER, 0);
7529
7530 spin_lock_irq(&dev_priv->rps_lock);
7531 dev_priv->pm_iir = 0;
7532 spin_unlock_irq(&dev_priv->rps_lock);
7533
7534 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
7535}
7536
7537static unsigned long intel_pxfreq(u32 vidfreq)
7538{
7539 unsigned long freq;
7540 int div = (vidfreq & 0x3f0000) >> 16;
7541 int post = (vidfreq & 0x3000) >> 12;
7542 int pre = (vidfreq & 0x7);
7543
7544 if (!pre)
7545 return 0;
7546
7547 freq = ((div * 133333) / ((1<<post) * pre));
7548
7549 return freq;
7550}
7551
7552void intel_init_emon(struct drm_device *dev)
7553{
7554 struct drm_i915_private *dev_priv = dev->dev_private;
7555 u32 lcfuse;
7556 u8 pxw[16];
7557 int i;
7558
7559 /* Disable to program */
7560 I915_WRITE(ECR, 0);
7561 POSTING_READ(ECR);
7562
7563 /* Program energy weights for various events */
7564 I915_WRITE(SDEW, 0x15040d00);
7565 I915_WRITE(CSIEW0, 0x007f0000);
7566 I915_WRITE(CSIEW1, 0x1e220004);
7567 I915_WRITE(CSIEW2, 0x04000004);
7568
7569 for (i = 0; i < 5; i++)
7570 I915_WRITE(PEW + (i * 4), 0);
7571 for (i = 0; i < 3; i++)
7572 I915_WRITE(DEW + (i * 4), 0);
7573
7574 /* Program P-state weights to account for frequency power adjustment */
7575 for (i = 0; i < 16; i++) {
7576 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
7577 unsigned long freq = intel_pxfreq(pxvidfreq);
7578 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
7579 PXVFREQ_PX_SHIFT;
7580 unsigned long val;
7581
7582 val = vid * vid;
7583 val *= (freq / 1000);
7584 val *= 255;
7585 val /= (127*127*900);
7586 if (val > 0xff)
7587 DRM_ERROR("bad pxval: %ld\n", val);
7588 pxw[i] = val;
7589 }
7590 /* Render standby states get 0 weight */
7591 pxw[14] = 0;
7592 pxw[15] = 0;
7593
7594 for (i = 0; i < 4; i++) {
7595 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
7596 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
7597 I915_WRITE(PXW + (i * 4), val);
7598 }
7599
7600 /* Adjust magic regs to magic values (more experimental results) */
7601 I915_WRITE(OGW0, 0);
7602 I915_WRITE(OGW1, 0);
7603 I915_WRITE(EG0, 0x00007f00);
7604 I915_WRITE(EG1, 0x0000000e);
7605 I915_WRITE(EG2, 0x000e0000);
7606 I915_WRITE(EG3, 0x68000300);
7607 I915_WRITE(EG4, 0x42000000);
7608 I915_WRITE(EG5, 0x00140031);
7609 I915_WRITE(EG6, 0);
7610 I915_WRITE(EG7, 0);
7611
7612 for (i = 0; i < 8; i++)
7613 I915_WRITE(PXWL + (i * 4), 0);
7614
7615 /* Enable PMON + select events */
7616 I915_WRITE(ECR, 0x80000019);
7617
7618 lcfuse = I915_READ(LCFUSE02);
7619
7620 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
7621}
7622
/*
 * gen6_enable_rps - enable GEN6 RPS (dynamic GPU frequency scaling) and,
 * when requested via the i915_enable_rc6 module parameter, the RC6/RC6p
 * sleep states.
 *
 * Programs the RC wake-rate limits and thresholds, the RP up/down
 * evaluation windows, loads the minimum-frequency table through the
 * PCODE mailbox, queries overclocking support, records the min/max/cur
 * frequencies in dev_priv, and finally unmasks the PM interrupts that
 * drive frequency-change requests.
 *
 * Holds dev->struct_mutex and a forcewake reference for the whole
 * register sequence so the GT stays awake while it is programmed.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	/* Frequency capabilities and current P-state as reported by HW. */
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Maximum-idle count before RC entry, programmed on every ring. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* RC6/RC6p are opt-in via the module parameter; RC6pp is never
	 * enabled here. */
	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* PCODE mailbox handshake: wait for the READY bit to clear (idle),
	 * write the data + command, then wait for READY to clear again. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Frequency limits from the capability register sampled above:
	 * min in bits 23:16, max in bits 7:0, current in bits 15:8 of the
	 * GT performance status. */
	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		/* NOTE(review): the *50 assumes 50MHz units while the
		 * comment below says 100MHz -- verify against Bspec. */
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* No PM interrupt should be pending before we unmask them. */
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7746
/*
 * gen6_update_ring_freq - load the GPU-to-ring frequency table into the PCU.
 *
 * For every GPU frequency between max_delay and min_delay (inclusive),
 * writes a PCODE-mailbox entry pairing that GPU frequency with an IA
 * reference frequency.  The IA reference decreases linearly from the
 * CPU's maximum frequency as the GPU frequency drops, with a fixed floor
 * once the GPU frequency falls below min_freq.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;	/* GPU-freq floor below which IA ref is clamped */
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;	/* slope of the linear IA ramp below */

	/* Maximum cpufreq frequency of CPU 0, in kHz. */
	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		/* The mailbox takes the IA reference in 100MHz units. */
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		/* On timeout, skip this entry but keep loading the rest. */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7799
/*
 * ironlake_init_clock_gating - clock gating / chicken-bit setup for
 * Ironlake (GEN5): PCH display clock gating required for FBC and CxSR,
 * 3D clock gating disables, self-refresh enable bits, and the mobile-only
 * FBC workaround bits.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Zero the LP3..LP1 watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* FBC workaround bits only applied on the mobile part. */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	/* NOTE(review): the << 16 presumably fills the write-enable mask
	 * half of a masked register -- verify against the register spec. */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
7868
/*
 * gen6_init_clock_gating - clock gating / chicken-bit setup for GEN6
 * (Sandy Bridge): ELPIN select, the RCPB/RCC unit clock gating disable
 * workaround, the self-refresh/FBC chicken bits, and trickle-feed
 * disable on every display plane.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Zero the LP3..LP1 watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	/* Set the trickle-feed-disable bit on each plane, flushing
	 * each plane afterwards. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7926
7927static void ivybridge_init_clock_gating(struct drm_device *dev)
7928{
7929 struct drm_i915_private *dev_priv = dev->dev_private;
7930 int pipe;
7931 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
7932
7933 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
7934
7935 I915_WRITE(WM3_LP_ILK, 0);
7936 I915_WRITE(WM2_LP_ILK, 0);
7937 I915_WRITE(WM1_LP_ILK, 0);
7938
7939 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
7940
7941 for_each_pipe(pipe) {
7942 I915_WRITE(DSPCNTR(pipe),
7943 I915_READ(DSPCNTR(pipe)) |
7944 DISPPLANE_TRICKLE_FEED_DISABLE);
7945 intel_flush_display_plane(dev_priv, pipe);
7946 }
7947}
7948
7949static void g4x_init_clock_gating(struct drm_device *dev)
7950{
7951 struct drm_i915_private *dev_priv = dev->dev_private;
7952 uint32_t dspclk_gate;
7953
7954 I915_WRITE(RENCLK_GATE_D1, 0);
7955 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7956 GS_UNIT_CLOCK_GATE_DISABLE |
7957 CL_UNIT_CLOCK_GATE_DISABLE);
7958 I915_WRITE(RAMCLK_GATE_D, 0);
7959 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7960 OVRUNIT_CLOCK_GATE_DISABLE |
7961 OVCUNIT_CLOCK_GATE_DISABLE;
7962 if (IS_GM45(dev))
7963 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7964 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7965}
7966
/*
 * crestline_init_clock_gating - clock gating setup for Crestline (965GM):
 * keep only the RCC unit's render clock ungated, and clear the remaining
 * render, display and RAM clock gating registers plus DEUC.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the 16-bit write. */
	I915_WRITE16(DEUC, 0);
}
7977
/*
 * broadwater_init_clock_gating - clock gating setup for Broadwater (965G):
 * keep the RCZ, RCC, RCPB, ISC and FBC units' render clocks ungated and
 * clear the second render clock gating register.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7989
7990static void gen3_init_clock_gating(struct drm_device *dev)
7991{
7992 struct drm_i915_private *dev_priv = dev->dev_private;
7993 u32 dstate = I915_READ(D_STATE);
7994
7995 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7996 DSTATE_DOT_CLOCK_GATING;
7997 I915_WRITE(D_STATE, dstate);
7998}
7999
/*
 * i85x_init_clock_gating - clock gating setup for 85x: keep the SV unit's
 * render clock ungated.
 */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
8006
/*
 * i830_init_clock_gating - clock gating setup for 830: keep the overlay
 * request unit's display clock ungated.
 */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
8013
/*
 * ibx_init_clock_gating - PCH-side clock gating setup for Ibex Peak.
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
8025
/*
 * cpt_init_clock_gating - PCH-side clock gating setup for Cougar Point:
 * panel-power-sequencer clock gating disable, the eDP PPS fix, and the
 * FDI auto-train stall workaround on every transcoder.
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
8043
/*
 * ironlake_teardown_rc6 - release the RC6 context pages.
 *
 * Unpins and drops the GEM reference on the render and power context
 * objects (if allocated) and clears the dev_priv pointers.  Safe to call
 * when either or both contexts were never allocated.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
8060
/*
 * ironlake_disable_rc6 - turn off Ironlake RC6 if it was active.
 *
 * A non-zero PWRCTXA indicates RC6 was enabled: force the GPU out of RC6
 * via RSTDBYCTL, wait for the RSX status to report ON, clear the power
 * context address register, restore RSTDBYCTL, and finally free the
 * context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
8080
8081static int ironlake_setup_rc6(struct drm_device *dev)
8082{
8083 struct drm_i915_private *dev_priv = dev->dev_private;
8084
8085 if (dev_priv->renderctx == NULL)
8086 dev_priv->renderctx = intel_alloc_context_page(dev);
8087 if (!dev_priv->renderctx)
8088 return -ENOMEM;
8089
8090 if (dev_priv->pwrctx == NULL)
8091 dev_priv->pwrctx = intel_alloc_context_page(dev);
8092 if (!dev_priv->pwrctx) {
8093 ironlake_teardown_rc6(dev);
8094 return -ENOMEM;
8095 }
8096
8097 return 0;
8098}
8099
8100void ironlake_enable_rc6(struct drm_device *dev)
8101{
8102 struct drm_i915_private *dev_priv = dev->dev_private;
8103 int ret;
8104
8105 /* rc6 disabled by default due to repeated reports of hanging during
8106 * boot and resume.
8107 */
8108 if (!i915_enable_rc6)
8109 return;
8110
8111 mutex_lock(&dev->struct_mutex);
8112 ret = ironlake_setup_rc6(dev);
8113 if (ret) {
8114 mutex_unlock(&dev->struct_mutex);
8115 return;
8116 }
8117
8118 /*
8119 * GPU can automatically power down the render unit if given a page
8120 * to save state.
8121 */
8122 ret = BEGIN_LP_RING(6);
8123 if (ret) {
8124 ironlake_teardown_rc6(dev);
8125 mutex_unlock(&dev->struct_mutex);
8126 return;
8127 }
8128
8129 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8130 OUT_RING(MI_SET_CONTEXT);
8131 OUT_RING(dev_priv->renderctx->gtt_offset |
8132 MI_MM_SPACE_GTT |
8133 MI_SAVE_EXT_STATE_EN |
8134 MI_RESTORE_EXT_STATE_EN |
8135 MI_RESTORE_INHIBIT);
8136 OUT_RING(MI_SUSPEND_FLUSH);
8137 OUT_RING(MI_NOOP);
8138 OUT_RING(MI_FLUSH);
8139 ADVANCE_LP_RING();
8140
8141 /*
8142 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8143 * does an implicit flush, combined with MI_FLUSH above, it should be
8144 * safe to assume that renderctx is valid
8145 */
8146 ret = intel_wait_ring_idle(LP_RING(dev_priv));
8147 if (ret) {
8148 DRM_ERROR("failed to enable ironlake power power savings\n");
8149 ironlake_teardown_rc6(dev);
8150 mutex_unlock(&dev->struct_mutex);
8151 return;
8152 }
8153
8154 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8155 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8156 mutex_unlock(&dev->struct_mutex);
8157}
8158
/*
 * intel_init_clock_gating - invoke the platform clock gating hooks.
 *
 * The GPU/display hook is mandatory and always called; the PCH hook is
 * optional (NULL on platforms without one) and called only when set.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
8168
8700/* Set up chip specific display functions */ 8169/* Set up chip specific display functions */
8701static void intel_init_display(struct drm_device *dev) 8170static void intel_init_display(struct drm_device *dev)
8702{ 8171{
8703 struct drm_i915_private *dev_priv = dev->dev_private; 8172 struct drm_i915_private *dev_priv = dev->dev_private;
8704 8173
8705 /* We always want a DPMS function */ 8174 /* We always want a DPMS function */
8706 if (IS_HASWELL(dev)) { 8175 if (HAS_PCH_SPLIT(dev)) {
8707 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8176 dev_priv->display.dpms = ironlake_crtc_dpms;
8708 dev_priv->display.crtc_enable = haswell_crtc_enable;
8709 dev_priv->display.crtc_disable = haswell_crtc_disable;
8710 dev_priv->display.off = haswell_crtc_off;
8711 dev_priv->display.update_plane = ironlake_update_plane;
8712 } else if (HAS_PCH_SPLIT(dev)) {
8713 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 8177 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8714 dev_priv->display.crtc_enable = ironlake_crtc_enable;
8715 dev_priv->display.crtc_disable = ironlake_crtc_disable;
8716 dev_priv->display.off = ironlake_crtc_off;
8717 dev_priv->display.update_plane = ironlake_update_plane; 8178 dev_priv->display.update_plane = ironlake_update_plane;
8718 } else { 8179 } else {
8180 dev_priv->display.dpms = i9xx_crtc_dpms;
8719 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 8181 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8720 dev_priv->display.crtc_enable = i9xx_crtc_enable;
8721 dev_priv->display.crtc_disable = i9xx_crtc_disable;
8722 dev_priv->display.off = i9xx_crtc_off;
8723 dev_priv->display.update_plane = i9xx_update_plane; 8182 dev_priv->display.update_plane = i9xx_update_plane;
8724 } 8183 }
8725 8184
8185 if (I915_HAS_FBC(dev)) {
8186 if (HAS_PCH_SPLIT(dev)) {
8187 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8188 dev_priv->display.enable_fbc = ironlake_enable_fbc;
8189 dev_priv->display.disable_fbc = ironlake_disable_fbc;
8190 } else if (IS_GM45(dev)) {
8191 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8192 dev_priv->display.enable_fbc = g4x_enable_fbc;
8193 dev_priv->display.disable_fbc = g4x_disable_fbc;
8194 } else if (IS_CRESTLINE(dev)) {
8195 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8196 dev_priv->display.enable_fbc = i8xx_enable_fbc;
8197 dev_priv->display.disable_fbc = i8xx_disable_fbc;
8198 }
8199 /* 855GM needs testing */
8200 }
8201
8726 /* Returns the core display clock speed */ 8202 /* Returns the core display clock speed */
8727 if (IS_VALLEYVIEW(dev)) 8203 if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
8728 dev_priv->display.get_display_clock_speed =
8729 valleyview_get_display_clock_speed;
8730 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8731 dev_priv->display.get_display_clock_speed = 8204 dev_priv->display.get_display_clock_speed =
8732 i945_get_display_clock_speed; 8205 i945_get_display_clock_speed;
8733 else if (IS_I915G(dev)) 8206 else if (IS_I915G(dev))
@@ -8749,26 +8222,91 @@ static void intel_init_display(struct drm_device *dev)
8749 dev_priv->display.get_display_clock_speed = 8222 dev_priv->display.get_display_clock_speed =
8750 i830_get_display_clock_speed; 8223 i830_get_display_clock_speed;
8751 8224
8225 /* For FIFO watermark updates */
8752 if (HAS_PCH_SPLIT(dev)) { 8226 if (HAS_PCH_SPLIT(dev)) {
8227 if (HAS_PCH_IBX(dev))
8228 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
8229 else if (HAS_PCH_CPT(dev))
8230 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
8231
8753 if (IS_GEN5(dev)) { 8232 if (IS_GEN5(dev)) {
8233 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
8234 dev_priv->display.update_wm = ironlake_update_wm;
8235 else {
8236 DRM_DEBUG_KMS("Failed to get proper latency. "
8237 "Disable CxSR\n");
8238 dev_priv->display.update_wm = NULL;
8239 }
8754 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 8240 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
8755 dev_priv->display.write_eld = ironlake_write_eld; 8241 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
8756 } else if (IS_GEN6(dev)) { 8242 } else if (IS_GEN6(dev)) {
8243 if (SNB_READ_WM0_LATENCY()) {
8244 dev_priv->display.update_wm = sandybridge_update_wm;
8245 } else {
8246 DRM_DEBUG_KMS("Failed to read display plane latency. "
8247 "Disable CxSR\n");
8248 dev_priv->display.update_wm = NULL;
8249 }
8757 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 8250 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
8758 dev_priv->display.write_eld = ironlake_write_eld; 8251 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
8759 } else if (IS_IVYBRIDGE(dev)) { 8252 } else if (IS_IVYBRIDGE(dev)) {
8760 /* FIXME: detect B0+ stepping and use auto training */ 8253 /* FIXME: detect B0+ stepping and use auto training */
8761 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 8254 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8762 dev_priv->display.write_eld = ironlake_write_eld; 8255 if (SNB_READ_WM0_LATENCY()) {
8763 dev_priv->display.modeset_global_resources = 8256 dev_priv->display.update_wm = sandybridge_update_wm;
8764 ivb_modeset_global_resources; 8257 } else {
8765 } else if (IS_HASWELL(dev)) { 8258 DRM_DEBUG_KMS("Failed to read display plane latency. "
8766 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8259 "Disable CxSR\n");
8767 dev_priv->display.write_eld = haswell_write_eld; 8260 dev_priv->display.update_wm = NULL;
8261 }
8262 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
8263
8768 } else 8264 } else
8769 dev_priv->display.update_wm = NULL; 8265 dev_priv->display.update_wm = NULL;
8266 } else if (IS_PINEVIEW(dev)) {
8267 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
8268 dev_priv->is_ddr3,
8269 dev_priv->fsb_freq,
8270 dev_priv->mem_freq)) {
8271 DRM_INFO("failed to find known CxSR latency "
8272 "(found ddr%s fsb freq %d, mem freq %d), "
8273 "disabling CxSR\n",
8274 (dev_priv->is_ddr3 == 1) ? "3": "2",
8275 dev_priv->fsb_freq, dev_priv->mem_freq);
8276 /* Disable CxSR and never update its watermark again */
8277 pineview_disable_cxsr(dev);
8278 dev_priv->display.update_wm = NULL;
8279 } else
8280 dev_priv->display.update_wm = pineview_update_wm;
8281 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8770 } else if (IS_G4X(dev)) { 8282 } else if (IS_G4X(dev)) {
8771 dev_priv->display.write_eld = g4x_write_eld; 8283 dev_priv->display.update_wm = g4x_update_wm;
8284 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
8285 } else if (IS_GEN4(dev)) {
8286 dev_priv->display.update_wm = i965_update_wm;
8287 if (IS_CRESTLINE(dev))
8288 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
8289 else if (IS_BROADWATER(dev))
8290 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
8291 } else if (IS_GEN3(dev)) {
8292 dev_priv->display.update_wm = i9xx_update_wm;
8293 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
8294 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8295 } else if (IS_I865G(dev)) {
8296 dev_priv->display.update_wm = i830_update_wm;
8297 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8298 dev_priv->display.get_fifo_size = i830_get_fifo_size;
8299 } else if (IS_I85X(dev)) {
8300 dev_priv->display.update_wm = i9xx_update_wm;
8301 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
8302 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8303 } else {
8304 dev_priv->display.update_wm = i830_update_wm;
8305 dev_priv->display.init_clock_gating = i830_init_clock_gating;
8306 if (IS_845G(dev))
8307 dev_priv->display.get_fifo_size = i845_get_fifo_size;
8308 else
8309 dev_priv->display.get_fifo_size = i830_get_fifo_size;
8772 } 8310 }
8773 8311
8774 /* Default just returns -ENODEV to indicate unsupported */ 8312 /* Default just returns -ENODEV to indicate unsupported */
@@ -8802,12 +8340,12 @@ static void intel_init_display(struct drm_device *dev)
8802 * resume, or other times. This quirk makes sure that's the case for 8340 * resume, or other times. This quirk makes sure that's the case for
8803 * affected systems. 8341 * affected systems.
8804 */ 8342 */
8805static void quirk_pipea_force(struct drm_device *dev) 8343static void quirk_pipea_force (struct drm_device *dev)
8806{ 8344{
8807 struct drm_i915_private *dev_priv = dev->dev_private; 8345 struct drm_i915_private *dev_priv = dev->dev_private;
8808 8346
8809 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 8347 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
8810 DRM_INFO("applying pipe a force quirk\n"); 8348 DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
8811} 8349}
8812 8350
8813/* 8351/*
@@ -8817,18 +8355,6 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
8817{ 8355{
8818 struct drm_i915_private *dev_priv = dev->dev_private; 8356 struct drm_i915_private *dev_priv = dev->dev_private;
8819 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 8357 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
8820 DRM_INFO("applying lvds SSC disable quirk\n");
8821}
8822
8823/*
8824 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
8825 * brightness value
8826 */
8827static void quirk_invert_brightness(struct drm_device *dev)
8828{
8829 struct drm_i915_private *dev_priv = dev->dev_private;
8830 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
8831 DRM_INFO("applying inverted panel brightness quirk\n");
8832} 8358}
8833 8359
8834struct intel_quirk { 8360struct intel_quirk {
@@ -8838,56 +8364,33 @@ struct intel_quirk {
8838 void (*hook)(struct drm_device *dev); 8364 void (*hook)(struct drm_device *dev);
8839}; 8365};
8840 8366
8841/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 8367struct intel_quirk intel_quirks[] = {
8842struct intel_dmi_quirk { 8368 /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
8843 void (*hook)(struct drm_device *dev); 8369 { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
8844 const struct dmi_system_id (*dmi_id_list)[];
8845};
8846
8847static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
8848{
8849 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
8850 return 1;
8851}
8852
8853static const struct intel_dmi_quirk intel_dmi_quirks[] = {
8854 {
8855 .dmi_id_list = &(const struct dmi_system_id[]) {
8856 {
8857 .callback = intel_dmi_reverse_brightness,
8858 .ident = "NCR Corporation",
8859 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
8860 DMI_MATCH(DMI_PRODUCT_NAME, ""),
8861 },
8862 },
8863 { } /* terminating entry */
8864 },
8865 .hook = quirk_invert_brightness,
8866 },
8867};
8868
8869static struct intel_quirk intel_quirks[] = {
8870 /* HP Mini needs pipe A force quirk (LP: #322104) */ 8370 /* HP Mini needs pipe A force quirk (LP: #322104) */
8871 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 8371 { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
8872 8372
8373 /* Thinkpad R31 needs pipe A force quirk */
8374 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
8873 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 8375 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8874 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 8376 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
8875 8377
8378 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
8379 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
8380 /* ThinkPad X40 needs pipe A force quirk */
8381
8876 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 8382 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8877 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 8383 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
8878 8384
8879 /* 830/845 need to leave pipe A & dpll A up */ 8385 /* 855 & before need to leave pipe A & dpll A up */
8386 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8880 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 8387 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8881 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8882 8388
8883 /* Lenovo U160 cannot use SSC on LVDS */ 8389 /* Lenovo U160 cannot use SSC on LVDS */
8884 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 8390 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
8885 8391
8886 /* Sony Vaio Y cannot use SSC on LVDS */ 8392 /* Sony Vaio Y cannot use SSC on LVDS */
8887 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 8393 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
8888
8889 /* Acer Aspire 5734Z must invert backlight brightness */
8890 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
8891}; 8394};
8892 8395
8893static void intel_init_quirks(struct drm_device *dev) 8396static void intel_init_quirks(struct drm_device *dev)
@@ -8905,10 +8408,6 @@ static void intel_init_quirks(struct drm_device *dev)
8905 q->subsystem_device == PCI_ANY_ID)) 8408 q->subsystem_device == PCI_ANY_ID))
8906 q->hook(dev); 8409 q->hook(dev);
8907 } 8410 }
8908 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
8909 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
8910 intel_dmi_quirks[i].hook(dev);
8911 }
8912} 8411}
8913 8412
8914/* Disable the VGA plane that we never use */ 8413/* Disable the VGA plane that we never use */
@@ -8924,7 +8423,7 @@ static void i915_disable_vga(struct drm_device *dev)
8924 vga_reg = VGACNTRL; 8423 vga_reg = VGACNTRL;
8925 8424
8926 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 8425 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
8927 outb(SR01, VGA_SR_INDEX); 8426 outb(1, VGA_SR_INDEX);
8928 sr1 = inb(VGA_SR_DATA); 8427 sr1 = inb(VGA_SR_DATA);
8929 outb(sr1 | 1<<5, VGA_SR_DATA); 8428 outb(sr1 | 1<<5, VGA_SR_DATA);
8930 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 8429 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -8934,41 +8433,20 @@ static void i915_disable_vga(struct drm_device *dev)
8934 POSTING_READ(vga_reg); 8433 POSTING_READ(vga_reg);
8935} 8434}
8936 8435
8937void intel_modeset_init_hw(struct drm_device *dev)
8938{
8939 /* We attempt to init the necessary power wells early in the initialization
8940 * time, so the subsystems that expect power to be enabled can work.
8941 */
8942 intel_init_power_wells(dev);
8943
8944 intel_prepare_ddi(dev);
8945
8946 intel_init_clock_gating(dev);
8947
8948 mutex_lock(&dev->struct_mutex);
8949 intel_enable_gt_powersave(dev);
8950 mutex_unlock(&dev->struct_mutex);
8951}
8952
8953void intel_modeset_init(struct drm_device *dev) 8436void intel_modeset_init(struct drm_device *dev)
8954{ 8437{
8955 struct drm_i915_private *dev_priv = dev->dev_private; 8438 struct drm_i915_private *dev_priv = dev->dev_private;
8956 int i, ret; 8439 int i;
8957 8440
8958 drm_mode_config_init(dev); 8441 drm_mode_config_init(dev);
8959 8442
8960 dev->mode_config.min_width = 0; 8443 dev->mode_config.min_width = 0;
8961 dev->mode_config.min_height = 0; 8444 dev->mode_config.min_height = 0;
8962 8445
8963 dev->mode_config.preferred_depth = 24; 8446 dev->mode_config.funcs = (void *)&intel_mode_funcs;
8964 dev->mode_config.prefer_shadow = 1;
8965
8966 dev->mode_config.funcs = &intel_mode_funcs;
8967 8447
8968 intel_init_quirks(dev); 8448 intel_init_quirks(dev);
8969 8449
8970 intel_init_pm(dev);
8971
8972 intel_init_display(dev); 8450 intel_init_display(dev);
8973 8451
8974 if (IS_GEN2(dev)) { 8452 if (IS_GEN2(dev)) {
@@ -8981,348 +8459,42 @@ void intel_modeset_init(struct drm_device *dev)
8981 dev->mode_config.max_width = 8192; 8459 dev->mode_config.max_width = 8192;
8982 dev->mode_config.max_height = 8192; 8460 dev->mode_config.max_height = 8192;
8983 } 8461 }
8984 dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; 8462 dev->mode_config.fb_base = dev->agp->base;
8985 8463
8986 DRM_DEBUG_KMS("%d display pipe%s available.\n", 8464 DRM_DEBUG_KMS("%d display pipe%s available.\n",
8987 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 8465 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
8988 8466
8989 for (i = 0; i < dev_priv->num_pipe; i++) { 8467 for (i = 0; i < dev_priv->num_pipe; i++) {
8990 intel_crtc_init(dev, i); 8468 intel_crtc_init(dev, i);
8991 ret = intel_plane_init(dev, i);
8992 if (ret)
8993 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
8994 } 8469 }
8995 8470
8996 intel_cpu_pll_init(dev);
8997 intel_pch_pll_init(dev);
8998
8999 /* Just disable it once at startup */ 8471 /* Just disable it once at startup */
9000 i915_disable_vga(dev); 8472 i915_disable_vga(dev);
9001 intel_setup_outputs(dev); 8473 intel_setup_outputs(dev);
9002}
9003
9004static void
9005intel_connector_break_all_links(struct intel_connector *connector)
9006{
9007 connector->base.dpms = DRM_MODE_DPMS_OFF;
9008 connector->base.encoder = NULL;
9009 connector->encoder->connectors_active = false;
9010 connector->encoder->base.crtc = NULL;
9011}
9012
9013static void intel_enable_pipe_a(struct drm_device *dev)
9014{
9015 struct intel_connector *connector;
9016 struct drm_connector *crt = NULL;
9017 struct intel_load_detect_pipe load_detect_temp;
9018
9019 /* We can't just switch on the pipe A, we need to set things up with a
9020 * proper mode and output configuration. As a gross hack, enable pipe A
9021 * by enabling the load detect pipe once. */
9022 list_for_each_entry(connector,
9023 &dev->mode_config.connector_list,
9024 base.head) {
9025 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
9026 crt = &connector->base;
9027 break;
9028 }
9029 }
9030
9031 if (!crt)
9032 return;
9033
9034 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
9035 intel_release_load_detect_pipe(crt, &load_detect_temp);
9036
9037
9038}
9039
9040static bool
9041intel_check_plane_mapping(struct intel_crtc *crtc)
9042{
9043 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
9044 u32 reg, val;
9045
9046 if (dev_priv->num_pipe == 1)
9047 return true;
9048
9049 reg = DSPCNTR(!crtc->plane);
9050 val = I915_READ(reg);
9051
9052 if ((val & DISPLAY_PLANE_ENABLE) &&
9053 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
9054 return false;
9055
9056 return true;
9057}
9058 8474
9059static void intel_sanitize_crtc(struct intel_crtc *crtc) 8475 intel_init_clock_gating(dev);
9060{
9061 struct drm_device *dev = crtc->base.dev;
9062 struct drm_i915_private *dev_priv = dev->dev_private;
9063 u32 reg;
9064
9065 /* Clear any frame start delays used for debugging left by the BIOS */
9066 reg = PIPECONF(crtc->cpu_transcoder);
9067 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
9068
9069 /* We need to sanitize the plane -> pipe mapping first because this will
9070 * disable the crtc (and hence change the state) if it is wrong. Note
9071 * that gen4+ has a fixed plane -> pipe mapping. */
9072 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
9073 struct intel_connector *connector;
9074 bool plane;
9075
9076 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
9077 crtc->base.base.id);
9078
9079 /* Pipe has the wrong plane attached and the plane is active.
9080 * Temporarily change the plane mapping and disable everything
9081 * ... */
9082 plane = crtc->plane;
9083 crtc->plane = !plane;
9084 dev_priv->display.crtc_disable(&crtc->base);
9085 crtc->plane = plane;
9086
9087 /* ... and break all links. */
9088 list_for_each_entry(connector, &dev->mode_config.connector_list,
9089 base.head) {
9090 if (connector->encoder->base.crtc != &crtc->base)
9091 continue;
9092
9093 intel_connector_break_all_links(connector);
9094 }
9095
9096 WARN_ON(crtc->active);
9097 crtc->base.enabled = false;
9098 }
9099
9100 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
9101 crtc->pipe == PIPE_A && !crtc->active) {
9102 /* BIOS forgot to enable pipe A, this mostly happens after
9103 * resume. Force-enable the pipe to fix this, the update_dpms
9104 * call below we restore the pipe to the right state, but leave
9105 * the required bits on. */
9106 intel_enable_pipe_a(dev);
9107 }
9108
9109 /* Adjust the state of the output pipe according to whether we
9110 * have active connectors/encoders. */
9111 intel_crtc_update_dpms(&crtc->base);
9112
9113 if (crtc->active != crtc->base.enabled) {
9114 struct intel_encoder *encoder;
9115
9116 /* This can happen either due to bugs in the get_hw_state
9117 * functions or because the pipe is force-enabled due to the
9118 * pipe A quirk. */
9119 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
9120 crtc->base.base.id,
9121 crtc->base.enabled ? "enabled" : "disabled",
9122 crtc->active ? "enabled" : "disabled");
9123
9124 crtc->base.enabled = crtc->active;
9125
9126 /* Because we only establish the connector -> encoder ->
9127 * crtc links if something is active, this means the
9128 * crtc is now deactivated. Break the links. connector
9129 * -> encoder links are only establish when things are
9130 * actually up, hence no need to break them. */
9131 WARN_ON(crtc->active);
9132
9133 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
9134 WARN_ON(encoder->connectors_active);
9135 encoder->base.crtc = NULL;
9136 }
9137 }
9138}
9139
9140static void intel_sanitize_encoder(struct intel_encoder *encoder)
9141{
9142 struct intel_connector *connector;
9143 struct drm_device *dev = encoder->base.dev;
9144
9145 /* We need to check both for a crtc link (meaning that the
9146 * encoder is active and trying to read from a pipe) and the
9147 * pipe itself being active. */
9148 bool has_active_crtc = encoder->base.crtc &&
9149 to_intel_crtc(encoder->base.crtc)->active;
9150
9151 if (encoder->connectors_active && !has_active_crtc) {
9152 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
9153 encoder->base.base.id,
9154 drm_get_encoder_name(&encoder->base));
9155
9156 /* Connector is active, but has no active pipe. This is
9157 * fallout from our resume register restoring. Disable
9158 * the encoder manually again. */
9159 if (encoder->base.crtc) {
9160 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
9161 encoder->base.base.id,
9162 drm_get_encoder_name(&encoder->base));
9163 encoder->disable(encoder);
9164 }
9165
9166 /* Inconsistent output/port/pipe state happens presumably due to
9167 * a bug in one of the get_hw_state functions. Or someplace else
9168 * in our code, like the register restore mess on resume. Clamp
9169 * things to off as a safer default. */
9170 list_for_each_entry(connector,
9171 &dev->mode_config.connector_list,
9172 base.head) {
9173 if (connector->encoder != encoder)
9174 continue;
9175
9176 intel_connector_break_all_links(connector);
9177 }
9178 }
9179 /* Enabled encoders without active connectors will be fixed in
9180 * the crtc fixup. */
9181}
9182
9183static void i915_redisable_vga(struct drm_device *dev)
9184{
9185 struct drm_i915_private *dev_priv = dev->dev_private;
9186 u32 vga_reg;
9187
9188 if (HAS_PCH_SPLIT(dev))
9189 vga_reg = CPU_VGACNTRL;
9190 else
9191 vga_reg = VGACNTRL;
9192
9193 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9194 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9195 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9196 POSTING_READ(vga_reg);
9197 }
9198}
9199
9200/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9201 * and i915 state tracking structures. */
9202void intel_modeset_setup_hw_state(struct drm_device *dev,
9203 bool force_restore)
9204{
9205 struct drm_i915_private *dev_priv = dev->dev_private;
9206 enum pipe pipe;
9207 u32 tmp;
9208 struct intel_crtc *crtc;
9209 struct intel_encoder *encoder;
9210 struct intel_connector *connector;
9211
9212 if (IS_HASWELL(dev)) {
9213 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9214
9215 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9216 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9217 case TRANS_DDI_EDP_INPUT_A_ON:
9218 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9219 pipe = PIPE_A;
9220 break;
9221 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9222 pipe = PIPE_B;
9223 break;
9224 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9225 pipe = PIPE_C;
9226 break;
9227 }
9228
9229 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9230 crtc->cpu_transcoder = TRANSCODER_EDP;
9231
9232 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9233 pipe_name(pipe));
9234 }
9235 }
9236
9237 for_each_pipe(pipe) {
9238 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9239
9240 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
9241 if (tmp & PIPECONF_ENABLE)
9242 crtc->active = true;
9243 else
9244 crtc->active = false;
9245
9246 crtc->base.enabled = crtc->active;
9247
9248 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
9249 crtc->base.base.id,
9250 crtc->active ? "enabled" : "disabled");
9251 }
9252
9253 if (IS_HASWELL(dev))
9254 intel_ddi_setup_hw_pll_state(dev);
9255
9256 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9257 base.head) {
9258 pipe = 0;
9259
9260 if (encoder->get_hw_state(encoder, &pipe)) {
9261 encoder->base.crtc =
9262 dev_priv->pipe_to_crtc_mapping[pipe];
9263 } else {
9264 encoder->base.crtc = NULL;
9265 }
9266
9267 encoder->connectors_active = false;
9268 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
9269 encoder->base.base.id,
9270 drm_get_encoder_name(&encoder->base),
9271 encoder->base.crtc ? "enabled" : "disabled",
9272 pipe);
9273 }
9274
9275 list_for_each_entry(connector, &dev->mode_config.connector_list,
9276 base.head) {
9277 if (connector->get_hw_state(connector)) {
9278 connector->base.dpms = DRM_MODE_DPMS_ON;
9279 connector->encoder->connectors_active = true;
9280 connector->base.encoder = &connector->encoder->base;
9281 } else {
9282 connector->base.dpms = DRM_MODE_DPMS_OFF;
9283 connector->base.encoder = NULL;
9284 }
9285 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
9286 connector->base.base.id,
9287 drm_get_connector_name(&connector->base),
9288 connector->base.encoder ? "enabled" : "disabled");
9289 }
9290
9291 /* HW state is read out, now we need to sanitize this mess. */
9292 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9293 base.head) {
9294 intel_sanitize_encoder(encoder);
9295 }
9296 8476
9297 for_each_pipe(pipe) { 8477 if (IS_IRONLAKE_M(dev)) {
9298 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 8478 ironlake_enable_drps(dev);
9299 intel_sanitize_crtc(crtc); 8479 intel_init_emon(dev);
9300 } 8480 }
9301 8481
9302 if (force_restore) { 8482 if (IS_GEN6(dev) || IS_GEN7(dev)) {
9303 for_each_pipe(pipe) { 8483 gen6_enable_rps(dev_priv);
9304 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 8484 gen6_update_ring_freq(dev_priv);
9305 intel_set_mode(&crtc->base, &crtc->base.mode,
9306 crtc->base.x, crtc->base.y, crtc->base.fb);
9307 }
9308
9309 i915_redisable_vga(dev);
9310 } else {
9311 intel_modeset_update_staged_output_state(dev);
9312 } 8485 }
9313 8486
9314 intel_modeset_check_state(dev); 8487 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
9315 8488 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
9316 drm_mode_config_reset(dev); 8489 (unsigned long)dev);
9317} 8490}
9318 8491
9319void intel_modeset_gem_init(struct drm_device *dev) 8492void intel_modeset_gem_init(struct drm_device *dev)
9320{ 8493{
9321 intel_modeset_init_hw(dev); 8494 if (IS_IRONLAKE_M(dev))
8495 ironlake_enable_rc6(dev);
9322 8496
9323 intel_setup_overlay(dev); 8497 intel_setup_overlay(dev);
9324
9325 intel_modeset_setup_hw_state(dev, false);
9326} 8498}
9327 8499
9328void intel_modeset_cleanup(struct drm_device *dev) 8500void intel_modeset_cleanup(struct drm_device *dev)
@@ -9348,12 +8520,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
9348 8520
9349 intel_disable_fbc(dev); 8521 intel_disable_fbc(dev);
9350 8522
9351 intel_disable_gt_powersave(dev); 8523 if (IS_IRONLAKE_M(dev))
9352 8524 ironlake_disable_drps(dev);
9353 ironlake_teardown_rc6(dev); 8525 if (IS_GEN6(dev) || IS_GEN7(dev))
8526 gen6_disable_rps(dev);
9354 8527
9355 if (IS_VALLEYVIEW(dev)) 8528 if (IS_IRONLAKE_M(dev))
9356 vlv_init_dpio(dev); 8529 ironlake_disable_rc6(dev);
9357 8530
9358 mutex_unlock(&dev->struct_mutex); 8531 mutex_unlock(&dev->struct_mutex);
9359 8532
@@ -9361,11 +8534,18 @@ void intel_modeset_cleanup(struct drm_device *dev)
9361 * enqueue unpin/hotplug work. */ 8534 * enqueue unpin/hotplug work. */
9362 drm_irq_uninstall(dev); 8535 drm_irq_uninstall(dev);
9363 cancel_work_sync(&dev_priv->hotplug_work); 8536 cancel_work_sync(&dev_priv->hotplug_work);
9364 cancel_work_sync(&dev_priv->rps.work);
9365 8537
9366 /* flush any delayed tasks or pending work */ 8538 /* flush any delayed tasks or pending work */
9367 flush_scheduled_work(); 8539 flush_scheduled_work();
9368 8540
8541 /* Shut off idle work before the crtcs get freed. */
8542 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8543 intel_crtc = to_intel_crtc(crtc);
8544 del_timer_sync(&intel_crtc->idle_timer);
8545 }
8546 del_timer_sync(&dev_priv->idle_timer);
8547 cancel_work_sync(&dev_priv->idle_work);
8548
9369 drm_mode_config_cleanup(dev); 8549 drm_mode_config_cleanup(dev);
9370} 8550}
9371 8551
@@ -9411,7 +8591,7 @@ struct intel_display_error_state {
9411 u32 position; 8591 u32 position;
9412 u32 base; 8592 u32 base;
9413 u32 size; 8593 u32 size;
9414 } cursor[I915_MAX_PIPES]; 8594 } cursor[2];
9415 8595
9416 struct intel_pipe_error_state { 8596 struct intel_pipe_error_state {
9417 u32 conf; 8597 u32 conf;
@@ -9423,7 +8603,7 @@ struct intel_display_error_state {
9423 u32 vtotal; 8603 u32 vtotal;
9424 u32 vblank; 8604 u32 vblank;
9425 u32 vsync; 8605 u32 vsync;
9426 } pipe[I915_MAX_PIPES]; 8606 } pipe[2];
9427 8607
9428 struct intel_plane_error_state { 8608 struct intel_plane_error_state {
9429 u32 control; 8609 u32 control;
@@ -9433,24 +8613,21 @@ struct intel_display_error_state {
9433 u32 addr; 8613 u32 addr;
9434 u32 surface; 8614 u32 surface;
9435 u32 tile_offset; 8615 u32 tile_offset;
9436 } plane[I915_MAX_PIPES]; 8616 } plane[2];
9437}; 8617};
9438 8618
9439struct intel_display_error_state * 8619struct intel_display_error_state *
9440intel_display_capture_error_state(struct drm_device *dev) 8620intel_display_capture_error_state(struct drm_device *dev)
9441{ 8621{
9442 drm_i915_private_t *dev_priv = dev->dev_private; 8622 drm_i915_private_t *dev_priv = dev->dev_private;
9443 struct intel_display_error_state *error; 8623 struct intel_display_error_state *error;
9444 enum transcoder cpu_transcoder;
9445 int i; 8624 int i;
9446 8625
9447 error = kmalloc(sizeof(*error), GFP_ATOMIC); 8626 error = kmalloc(sizeof(*error), GFP_ATOMIC);
9448 if (error == NULL) 8627 if (error == NULL)
9449 return NULL; 8628 return NULL;
9450 8629
9451 for_each_pipe(i) { 8630 for (i = 0; i < 2; i++) {
9452 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
9453
9454 error->cursor[i].control = I915_READ(CURCNTR(i)); 8631 error->cursor[i].control = I915_READ(CURCNTR(i));
9455 error->cursor[i].position = I915_READ(CURPOS(i)); 8632 error->cursor[i].position = I915_READ(CURPOS(i));
9456 error->cursor[i].base = I915_READ(CURBASE(i)); 8633 error->cursor[i].base = I915_READ(CURBASE(i));
@@ -9458,21 +8635,21 @@ intel_display_capture_error_state(struct drm_device *dev)
9458 error->plane[i].control = I915_READ(DSPCNTR(i)); 8635 error->plane[i].control = I915_READ(DSPCNTR(i));
9459 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 8636 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9460 error->plane[i].size = I915_READ(DSPSIZE(i)); 8637 error->plane[i].size = I915_READ(DSPSIZE(i));
9461 error->plane[i].pos = I915_READ(DSPPOS(i)); 8638 error->plane[i].pos= I915_READ(DSPPOS(i));
9462 error->plane[i].addr = I915_READ(DSPADDR(i)); 8639 error->plane[i].addr = I915_READ(DSPADDR(i));
9463 if (INTEL_INFO(dev)->gen >= 4) { 8640 if (INTEL_INFO(dev)->gen >= 4) {
9464 error->plane[i].surface = I915_READ(DSPSURF(i)); 8641 error->plane[i].surface = I915_READ(DSPSURF(i));
9465 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 8642 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9466 } 8643 }
9467 8644
9468 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 8645 error->pipe[i].conf = I915_READ(PIPECONF(i));
9469 error->pipe[i].source = I915_READ(PIPESRC(i)); 8646 error->pipe[i].source = I915_READ(PIPESRC(i));
9470 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 8647 error->pipe[i].htotal = I915_READ(HTOTAL(i));
9471 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 8648 error->pipe[i].hblank = I915_READ(HBLANK(i));
9472 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 8649 error->pipe[i].hsync = I915_READ(HSYNC(i));
9473 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 8650 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9474 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 8651 error->pipe[i].vblank = I915_READ(VBLANK(i));
9475 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 8652 error->pipe[i].vsync = I915_READ(VSYNC(i));
9476 } 8653 }
9477 8654
9478 return error; 8655 return error;
@@ -9483,11 +8660,9 @@ intel_display_print_error_state(struct seq_file *m,
9483 struct drm_device *dev, 8660 struct drm_device *dev,
9484 struct intel_display_error_state *error) 8661 struct intel_display_error_state *error)
9485{ 8662{
9486 drm_i915_private_t *dev_priv = dev->dev_private;
9487 int i; 8663 int i;
9488 8664
9489 seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); 8665 for (i = 0; i < 2; i++) {
9490 for_each_pipe(i) {
9491 seq_printf(m, "Pipe [%d]:\n", i); 8666 seq_printf(m, "Pipe [%d]:\n", i);
9492 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); 8667 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
9493 seq_printf(m, " SRC: %08x\n", error->pipe[i].source); 8668 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a..7c64db48c1c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -27,17 +27,40 @@
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/export.h> 30#include "drmP.h"
31#include <drm/drmP.h> 31#include "drm.h"
32#include <drm/drm_crtc.h> 32#include "drm_crtc.h"
33#include <drm/drm_crtc_helper.h> 33#include "drm_crtc_helper.h"
34#include <drm/drm_edid.h>
35#include "intel_drv.h" 34#include "intel_drv.h"
36#include <drm/i915_drm.h> 35#include "i915_drm.h"
37#include "i915_drv.h" 36#include "i915_drv.h"
37#include "drm_dp_helper.h"
38 38
39
40#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 41#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40 42
43#define DP_LINK_CONFIGURATION_SIZE 9
44
45struct intel_dp {
46 struct intel_encoder base;
47 uint32_t output_reg;
48 uint32_t DP;
49 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
50 bool has_audio;
51 int force_audio;
52 uint32_t color_range;
53 int dpms_mode;
54 uint8_t link_bw;
55 uint8_t lane_count;
56 uint8_t dpcd[8];
57 struct i2c_adapter adapter;
58 struct i2c_algo_dp_aux_data algo;
59 bool is_pch_edp;
60 uint8_t train_set[4];
61 uint8_t link_status[DP_LINK_STATUS_SIZE];
62};
63
41/** 64/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 65 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct 66 * @intel_dp: DP struct
@@ -47,9 +70,7 @@
47 */ 70 */
48static bool is_edp(struct intel_dp *intel_dp) 71static bool is_edp(struct intel_dp *intel_dp)
49{ 72{
50 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 73 return intel_dp->base.type == INTEL_OUTPUT_EDP;
51
52 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
53} 74}
54 75
55/** 76/**
@@ -65,27 +86,15 @@ static bool is_pch_edp(struct intel_dp *intel_dp)
65 return intel_dp->is_pch_edp; 86 return intel_dp->is_pch_edp;
66} 87}
67 88
68/** 89static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
69 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
70 * @intel_dp: DP struct
71 *
72 * Returns true if the given DP struct corresponds to a CPU eDP port.
73 */
74static bool is_cpu_edp(struct intel_dp *intel_dp)
75{ 90{
76 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 91 return container_of(encoder, struct intel_dp, base.base);
77}
78
79static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
80{
81 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
82
83 return intel_dig_port->base.base.dev;
84} 92}
85 93
86static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 94static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
87{ 95{
88 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 96 return container_of(intel_attached_encoder(connector),
97 struct intel_dp, base);
89} 98}
90 99
91/** 100/**
@@ -107,29 +116,38 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
107 return is_pch_edp(intel_dp); 116 return is_pch_edp(intel_dp);
108} 117}
109 118
119static void intel_dp_start_link_train(struct intel_dp *intel_dp);
120static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
110static void intel_dp_link_down(struct intel_dp *intel_dp); 121static void intel_dp_link_down(struct intel_dp *intel_dp);
111 122
112void 123void
113intel_edp_link_config(struct intel_encoder *intel_encoder, 124intel_edp_link_config (struct intel_encoder *intel_encoder,
114 int *lane_num, int *link_bw) 125 int *lane_num, int *link_bw)
115{ 126{
116 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 127 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
117 128
118 *lane_num = intel_dp->lane_count; 129 *lane_num = intel_dp->lane_count;
119 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 130 if (intel_dp->link_bw == DP_LINK_BW_1_62)
131 *link_bw = 162000;
132 else if (intel_dp->link_bw == DP_LINK_BW_2_7)
133 *link_bw = 270000;
120} 134}
121 135
122int 136static int
123intel_edp_target_clock(struct intel_encoder *intel_encoder, 137intel_dp_max_lane_count(struct intel_dp *intel_dp)
124 struct drm_display_mode *mode)
125{ 138{
126 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 139 int max_lane_count = 4;
127 struct intel_connector *intel_connector = intel_dp->attached_connector;
128 140
129 if (intel_connector->panel.fixed_mode) 141 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
130 return intel_connector->panel.fixed_mode->clock; 142 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
131 else 143 switch (max_lane_count) {
132 return mode->clock; 144 case 1: case 2: case 4:
145 break;
146 default:
147 max_lane_count = 4;
148 }
149 }
150 return max_lane_count;
133} 151}
134 152
135static int 153static int
@@ -157,27 +175,18 @@ intel_dp_link_clock(uint8_t link_bw)
157 return 162000; 175 return 162000;
158} 176}
159 177
160/* 178/* I think this is a fiction */
161 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec.
163 *
164 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165 *
166 * 270000 * 1 * 8 / 10 == 216000
167 *
168 * The actual data capacity of that configuration is 2.16Gbit/s, so the
169 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
170 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171 * 119000. At 18bpp that's 2142000 kilobits per second.
172 *
173 * Thus the strange-looking division by 10 in intel_dp_link_required, to
174 * get the result in decakilobits instead of kilobits.
175 */
176
177static int 179static int
178intel_dp_link_required(int pixel_clock, int bpp) 180intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
179{ 181{
180 return (pixel_clock * bpp + 9) / 10; 182 struct drm_crtc *crtc = intel_dp->base.base.crtc;
183 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
184 int bpp = 24;
185
186 if (intel_crtc)
187 bpp = intel_crtc->bpp;
188
189 return (pixel_clock * bpp + 7) / 8;
181} 190}
182 191
183static int 192static int
@@ -186,58 +195,34 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
186 return (max_link_clock * max_lanes * 8) / 10; 195 return (max_link_clock * max_lanes * 8) / 10;
187} 196}
188 197
189static bool
190intel_dp_adjust_dithering(struct intel_dp *intel_dp,
191 struct drm_display_mode *mode,
192 bool adjust_mode)
193{
194 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
195 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
196 int max_rate, mode_rate;
197
198 mode_rate = intel_dp_link_required(mode->clock, 24);
199 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
200
201 if (mode_rate > max_rate) {
202 mode_rate = intel_dp_link_required(mode->clock, 18);
203 if (mode_rate > max_rate)
204 return false;
205
206 if (adjust_mode)
207 mode->private_flags
208 |= INTEL_MODE_DP_FORCE_6BPC;
209
210 return true;
211 }
212
213 return true;
214}
215
216static int 198static int
217intel_dp_mode_valid(struct drm_connector *connector, 199intel_dp_mode_valid(struct drm_connector *connector,
218 struct drm_display_mode *mode) 200 struct drm_display_mode *mode)
219{ 201{
220 struct intel_dp *intel_dp = intel_attached_dp(connector); 202 struct intel_dp *intel_dp = intel_attached_dp(connector);
221 struct intel_connector *intel_connector = to_intel_connector(connector); 203 struct drm_device *dev = connector->dev;
222 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 204 struct drm_i915_private *dev_priv = dev->dev_private;
205 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
206 int max_lanes = intel_dp_max_lane_count(intel_dp);
223 207
224 if (is_edp(intel_dp) && fixed_mode) { 208 if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
225 if (mode->hdisplay > fixed_mode->hdisplay) 209 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
226 return MODE_PANEL; 210 return MODE_PANEL;
227 211
228 if (mode->vdisplay > fixed_mode->vdisplay) 212 if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
229 return MODE_PANEL; 213 return MODE_PANEL;
230 } 214 }
231 215
232 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 216 /* only refuse the mode on non eDP since we have seen some weird eDP panels
217 which are outside spec tolerances but somehow work by magic */
218 if (!is_edp(intel_dp) &&
219 (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
220 > intel_dp_max_data_rate(max_link_clock, max_lanes)))
233 return MODE_CLOCK_HIGH; 221 return MODE_CLOCK_HIGH;
234 222
235 if (mode->clock < 10000) 223 if (mode->clock < 10000)
236 return MODE_CLOCK_LOW; 224 return MODE_CLOCK_LOW;
237 225
238 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
239 return MODE_H_ILLEGAL;
240
241 return MODE_OK; 226 return MODE_OK;
242} 227}
243 228
@@ -271,10 +256,6 @@ intel_hrawclk(struct drm_device *dev)
271 struct drm_i915_private *dev_priv = dev->dev_private; 256 struct drm_i915_private *dev_priv = dev->dev_private;
272 uint32_t clkcfg; 257 uint32_t clkcfg;
273 258
274 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
275 if (IS_VALLEYVIEW(dev))
276 return 200;
277
278 clkcfg = I915_READ(CLKCFG); 259 clkcfg = I915_READ(CLKCFG);
279 switch (clkcfg & CLKCFG_FSB_MASK) { 260 switch (clkcfg & CLKCFG_FSB_MASK) {
280 case CLKCFG_FSB_400: 261 case CLKCFG_FSB_400:
@@ -298,46 +279,13 @@ intel_hrawclk(struct drm_device *dev)
298 } 279 }
299} 280}
300 281
301static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
302{
303 struct drm_device *dev = intel_dp_to_dev(intel_dp);
304 struct drm_i915_private *dev_priv = dev->dev_private;
305
306 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
307}
308
309static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
310{
311 struct drm_device *dev = intel_dp_to_dev(intel_dp);
312 struct drm_i915_private *dev_priv = dev->dev_private;
313
314 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
315}
316
317static void
318intel_dp_check_edp(struct intel_dp *intel_dp)
319{
320 struct drm_device *dev = intel_dp_to_dev(intel_dp);
321 struct drm_i915_private *dev_priv = dev->dev_private;
322
323 if (!is_edp(intel_dp))
324 return;
325 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
326 WARN(1, "eDP powered off while attempting aux channel communication.\n");
327 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
328 I915_READ(PCH_PP_STATUS),
329 I915_READ(PCH_PP_CONTROL));
330 }
331}
332
333static int 282static int
334intel_dp_aux_ch(struct intel_dp *intel_dp, 283intel_dp_aux_ch(struct intel_dp *intel_dp,
335 uint8_t *send, int send_bytes, 284 uint8_t *send, int send_bytes,
336 uint8_t *recv, int recv_size) 285 uint8_t *recv, int recv_size)
337{ 286{
338 uint32_t output_reg = intel_dp->output_reg; 287 uint32_t output_reg = intel_dp->output_reg;
339 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 288 struct drm_device *dev = intel_dp->base.base.dev;
340 struct drm_device *dev = intel_dig_port->base.base.dev;
341 struct drm_i915_private *dev_priv = dev->dev_private; 289 struct drm_i915_private *dev_priv = dev->dev_private;
342 uint32_t ch_ctl = output_reg + 0x10; 290 uint32_t ch_ctl = output_reg + 0x10;
343 uint32_t ch_data = ch_ctl + 4; 291 uint32_t ch_data = ch_ctl + 4;
@@ -347,30 +295,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
347 uint32_t aux_clock_divider; 295 uint32_t aux_clock_divider;
348 int try, precharge; 296 int try, precharge;
349 297
350 if (IS_HASWELL(dev)) {
351 switch (intel_dig_port->port) {
352 case PORT_A:
353 ch_ctl = DPA_AUX_CH_CTL;
354 ch_data = DPA_AUX_CH_DATA1;
355 break;
356 case PORT_B:
357 ch_ctl = PCH_DPB_AUX_CH_CTL;
358 ch_data = PCH_DPB_AUX_CH_DATA1;
359 break;
360 case PORT_C:
361 ch_ctl = PCH_DPC_AUX_CH_CTL;
362 ch_data = PCH_DPC_AUX_CH_DATA1;
363 break;
364 case PORT_D:
365 ch_ctl = PCH_DPD_AUX_CH_CTL;
366 ch_data = PCH_DPD_AUX_CH_DATA1;
367 break;
368 default:
369 BUG();
370 }
371 }
372
373 intel_dp_check_edp(intel_dp);
374 /* The clock divider is based off the hrawclk, 298 /* The clock divider is based off the hrawclk,
375 * and would like to run at 2MHz. So, take the 299 * and would like to run at 2MHz. So, take the
376 * hrawclk value and divide by 2 and use that 300 * hrawclk value and divide by 2 and use that
@@ -378,17 +302,13 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
378 * Note that PCH attached eDP panels should use a 125MHz input 302 * Note that PCH attached eDP panels should use a 125MHz input
379 * clock divider. 303 * clock divider.
380 */ 304 */
381 if (is_cpu_edp(intel_dp)) { 305 if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
382 if (IS_HASWELL(dev)) 306 if (IS_GEN6(dev))
383 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 307 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
384 else if (IS_VALLEYVIEW(dev))
385 aux_clock_divider = 100;
386 else if (IS_GEN6(dev) || IS_GEN7(dev))
387 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
388 else 308 else
389 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 309 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
390 } else if (HAS_PCH_SPLIT(dev)) 310 } else if (HAS_PCH_SPLIT(dev))
391 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 311 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
392 else 312 else
393 aux_clock_divider = intel_hrawclk(dev) / 2; 313 aux_clock_divider = intel_hrawclk(dev) / 2;
394 314
@@ -417,7 +337,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
417 for (i = 0; i < send_bytes; i += 4) 337 for (i = 0; i < send_bytes; i += 4)
418 I915_WRITE(ch_data + i, 338 I915_WRITE(ch_data + i,
419 pack_aux(send + i, send_bytes - i)); 339 pack_aux(send + i, send_bytes - i));
420 340
421 /* Send the command and wait for it to complete */ 341 /* Send the command and wait for it to complete */
422 I915_WRITE(ch_ctl, 342 I915_WRITE(ch_ctl,
423 DP_AUX_CH_CTL_SEND_BUSY | 343 DP_AUX_CH_CTL_SEND_BUSY |
@@ -434,17 +354,13 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
434 break; 354 break;
435 udelay(100); 355 udelay(100);
436 } 356 }
437 357
438 /* Clear done status and any errors */ 358 /* Clear done status and any errors */
439 I915_WRITE(ch_ctl, 359 I915_WRITE(ch_ctl,
440 status | 360 status |
441 DP_AUX_CH_CTL_DONE | 361 DP_AUX_CH_CTL_DONE |
442 DP_AUX_CH_CTL_TIME_OUT_ERROR | 362 DP_AUX_CH_CTL_TIME_OUT_ERROR |
443 DP_AUX_CH_CTL_RECEIVE_ERROR); 363 DP_AUX_CH_CTL_RECEIVE_ERROR);
444
445 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
446 DP_AUX_CH_CTL_RECEIVE_ERROR))
447 continue;
448 if (status & DP_AUX_CH_CTL_DONE) 364 if (status & DP_AUX_CH_CTL_DONE)
449 break; 365 break;
450 } 366 }
@@ -474,7 +390,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
474 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 390 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
475 if (recv_bytes > recv_size) 391 if (recv_bytes > recv_size)
476 recv_bytes = recv_size; 392 recv_bytes = recv_size;
477 393
478 for (i = 0; i < recv_bytes; i += 4) 394 for (i = 0; i < recv_bytes; i += 4)
479 unpack_aux(I915_READ(ch_data + i), 395 unpack_aux(I915_READ(ch_data + i),
480 recv + i, recv_bytes - i); 396 recv + i, recv_bytes - i);
@@ -492,7 +408,6 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
492 int msg_bytes; 408 int msg_bytes;
493 uint8_t ack; 409 uint8_t ack;
494 410
495 intel_dp_check_edp(intel_dp);
496 if (send_bytes > 16) 411 if (send_bytes > 16)
497 return -1; 412 return -1;
498 msg[0] = AUX_NATIVE_WRITE << 4; 413 msg[0] = AUX_NATIVE_WRITE << 4;
@@ -535,7 +450,6 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
535 uint8_t ack; 450 uint8_t ack;
536 int ret; 451 int ret;
537 452
538 intel_dp_check_edp(intel_dp);
539 msg[0] = AUX_NATIVE_READ << 4; 453 msg[0] = AUX_NATIVE_READ << 4;
540 msg[1] = address >> 8; 454 msg[1] = address >> 8;
541 msg[2] = address & 0xff; 455 msg[2] = address & 0xff;
@@ -579,7 +493,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
579 int reply_bytes; 493 int reply_bytes;
580 int ret; 494 int ret;
581 495
582 intel_dp_check_edp(intel_dp);
583 /* Set up the command byte */ 496 /* Set up the command byte */
584 if (mode & MODE_I2C_READ) 497 if (mode & MODE_I2C_READ)
585 msg[0] = AUX_I2C_READ << 4; 498 msg[0] = AUX_I2C_READ << 4;
@@ -664,81 +577,76 @@ static int
664intel_dp_i2c_init(struct intel_dp *intel_dp, 577intel_dp_i2c_init(struct intel_dp *intel_dp,
665 struct intel_connector *intel_connector, const char *name) 578 struct intel_connector *intel_connector, const char *name)
666{ 579{
667 int ret;
668
669 DRM_DEBUG_KMS("i2c_init %s\n", name); 580 DRM_DEBUG_KMS("i2c_init %s\n", name);
670 intel_dp->algo.running = false; 581 intel_dp->algo.running = false;
671 intel_dp->algo.address = 0; 582 intel_dp->algo.address = 0;
672 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 583 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
673 584
674 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 585 memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
675 intel_dp->adapter.owner = THIS_MODULE; 586 intel_dp->adapter.owner = THIS_MODULE;
676 intel_dp->adapter.class = I2C_CLASS_DDC; 587 intel_dp->adapter.class = I2C_CLASS_DDC;
677 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 588 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
678 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 589 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
679 intel_dp->adapter.algo_data = &intel_dp->algo; 590 intel_dp->adapter.algo_data = &intel_dp->algo;
680 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 591 intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
681 592
682 ironlake_edp_panel_vdd_on(intel_dp); 593 return i2c_dp_aux_add_bus(&intel_dp->adapter);
683 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
684 ironlake_edp_panel_vdd_off(intel_dp, false);
685 return ret;
686} 594}
687 595
688bool 596static bool
689intel_dp_mode_fixup(struct drm_encoder *encoder, 597intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
690 const struct drm_display_mode *mode,
691 struct drm_display_mode *adjusted_mode) 598 struct drm_display_mode *adjusted_mode)
692{ 599{
693 struct drm_device *dev = encoder->dev; 600 struct drm_device *dev = encoder->dev;
601 struct drm_i915_private *dev_priv = dev->dev_private;
694 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 602 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
695 struct intel_connector *intel_connector = intel_dp->attached_connector;
696 int lane_count, clock; 603 int lane_count, clock;
697 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 604 int max_lane_count = intel_dp_max_lane_count(intel_dp);
698 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 605 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
699 int bpp, mode_rate;
700 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 606 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
701 607
702 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 608 if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
703 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 609 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
704 adjusted_mode); 610 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
705 intel_pch_panel_fitting(dev,
706 intel_connector->panel.fitting_mode,
707 mode, adjusted_mode); 611 mode, adjusted_mode);
612 /*
613 * the mode->clock is used to calculate the Data&Link M/N
614 * of the pipe. For the eDP the fixed clock should be used.
615 */
616 mode->clock = dev_priv->panel_fixed_mode->clock;
708 } 617 }
709 618
710 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 619 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
711 return false; 620 for (clock = 0; clock <= max_clock; clock++) {
712
713 DRM_DEBUG_KMS("DP link computation with max lane count %i "
714 "max bw %02x pixel clock %iKHz\n",
715 max_lane_count, bws[max_clock], adjusted_mode->clock);
716
717 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
718 return false;
719
720 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
721 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
722
723 for (clock = 0; clock <= max_clock; clock++) {
724 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
725 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 621 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
726 622
727 if (mode_rate <= link_avail) { 623 if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
624 <= link_avail) {
728 intel_dp->link_bw = bws[clock]; 625 intel_dp->link_bw = bws[clock];
729 intel_dp->lane_count = lane_count; 626 intel_dp->lane_count = lane_count;
730 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 627 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
731 DRM_DEBUG_KMS("DP link bw %02x lane " 628 DRM_DEBUG_KMS("Display port link bw %02x lane "
732 "count %d clock %d bpp %d\n", 629 "count %d clock %d\n",
733 intel_dp->link_bw, intel_dp->lane_count, 630 intel_dp->link_bw, intel_dp->lane_count,
734 adjusted_mode->clock, bpp); 631 adjusted_mode->clock);
735 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
736 mode_rate, link_avail);
737 return true; 632 return true;
738 } 633 }
739 } 634 }
740 } 635 }
741 636
637 if (is_edp(intel_dp)) {
638 /* okay we failed just pick the highest */
639 intel_dp->lane_count = max_lane_count;
640 intel_dp->link_bw = bws[max_clock];
641 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
642 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
643 "count %d clock %d\n",
644 intel_dp->link_bw, intel_dp->lane_count,
645 adjusted_mode->clock);
646
647 return true;
648 }
649
742 return false; 650 return false;
743} 651}
744 652
@@ -780,26 +688,30 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
780 struct drm_display_mode *adjusted_mode) 688 struct drm_display_mode *adjusted_mode)
781{ 689{
782 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
783 struct intel_encoder *intel_encoder; 691 struct drm_mode_config *mode_config = &dev->mode_config;
784 struct intel_dp *intel_dp; 692 struct drm_encoder *encoder;
785 struct drm_i915_private *dev_priv = dev->dev_private; 693 struct drm_i915_private *dev_priv = dev->dev_private;
786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 694 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
787 int lane_count = 4; 695 int lane_count = 4;
788 struct intel_dp_m_n m_n; 696 struct intel_dp_m_n m_n;
789 int pipe = intel_crtc->pipe; 697 int pipe = intel_crtc->pipe;
790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
791 698
792 /* 699 /*
793 * Find the lane count in the intel_encoder private 700 * Find the lane count in the intel_encoder private
794 */ 701 */
795 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 702 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
796 intel_dp = enc_to_intel_dp(&intel_encoder->base); 703 struct intel_dp *intel_dp;
797 704
798 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 705 if (encoder->crtc != crtc)
799 intel_encoder->type == INTEL_OUTPUT_EDP) 706 continue;
800 { 707
708 intel_dp = enc_to_intel_dp(encoder);
709 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
801 lane_count = intel_dp->lane_count; 710 lane_count = intel_dp->lane_count;
802 break; 711 break;
712 } else if (is_edp(intel_dp)) {
713 lane_count = dev_priv->edp.lanes;
714 break;
803 } 715 }
804 } 716 }
805 717
@@ -811,80 +723,44 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
811 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 723 intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
812 mode->clock, adjusted_mode->clock, &m_n); 724 mode->clock, adjusted_mode->clock, &m_n);
813 725
814 if (IS_HASWELL(dev)) { 726 if (HAS_PCH_SPLIT(dev)) {
815 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 727 I915_WRITE(TRANSDATA_M1(pipe),
816 TU_SIZE(m_n.tu) | m_n.gmch_m); 728 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
817 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); 729 m_n.gmch_m);
818 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
819 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
820 } else if (HAS_PCH_SPLIT(dev)) {
821 I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
822 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 730 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
823 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 731 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
824 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 732 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
825 } else if (IS_VALLEYVIEW(dev)) {
826 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
827 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
828 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
829 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
830 } else { 733 } else {
831 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 734 I915_WRITE(PIPE_GMCH_DATA_M(pipe),
832 TU_SIZE(m_n.tu) | m_n.gmch_m); 735 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
736 m_n.gmch_m);
833 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 737 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
834 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 738 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
835 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 739 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
836 } 740 }
837} 741}
838 742
839void intel_dp_init_link_config(struct intel_dp *intel_dp)
840{
841 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
842 intel_dp->link_configuration[0] = intel_dp->link_bw;
843 intel_dp->link_configuration[1] = intel_dp->lane_count;
844 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
845 /*
846 * Check for DPCD version > 1.1 and enhanced framing support
847 */
848 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
849 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
850 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
851 }
852}
853
854static void 743static void
855intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 744intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
856 struct drm_display_mode *adjusted_mode) 745 struct drm_display_mode *adjusted_mode)
857{ 746{
858 struct drm_device *dev = encoder->dev; 747 struct drm_device *dev = encoder->dev;
859 struct drm_i915_private *dev_priv = dev->dev_private;
860 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 748 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
861 struct drm_crtc *crtc = encoder->crtc; 749 struct drm_crtc *crtc = intel_dp->base.base.crtc;
862 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 750 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
863 751
864 /* 752 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
865 * There are four kinds of DP registers: 753 intel_dp->DP |= intel_dp->color_range;
866 *
867 * IBX PCH
868 * SNB CPU
869 * IVB CPU
870 * CPT PCH
871 *
872 * IBX PCH and CPU are the same for almost everything,
873 * except that the CPU DP PLL is configured in this
874 * register
875 *
876 * CPT PCH is quite different, having many bits moved
877 * to the TRANS_DP_CTL register instead. That
878 * configuration happens (oddly) in ironlake_pch_enable
879 */
880 754
881 /* Preserve the BIOS-computed detected bit. This is 755 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
882 * supposed to be read-only. 756 intel_dp->DP |= DP_SYNC_HS_HIGH;
883 */ 757 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
884 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 758 intel_dp->DP |= DP_SYNC_VS_HIGH;
885 759
886 /* Handle DP bits in common between all three register formats */ 760 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
887 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 761 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
762 else
763 intel_dp->DP |= DP_LINK_TRAIN_OFF;
888 764
889 switch (intel_dp->lane_count) { 765 switch (intel_dp->lane_count) {
890 case 1: 766 case 1:
@@ -897,292 +773,135 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
897 intel_dp->DP |= DP_PORT_WIDTH_4; 773 intel_dp->DP |= DP_PORT_WIDTH_4;
898 break; 774 break;
899 } 775 }
900 if (intel_dp->has_audio) { 776 if (intel_dp->has_audio)
901 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
902 pipe_name(intel_crtc->pipe));
903 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 777 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
904 intel_write_eld(encoder, adjusted_mode);
905 }
906
907 intel_dp_init_link_config(intel_dp);
908
909 /* Split out the IBX/CPU vs CPT settings */
910 778
911 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 779 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
912 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 780 intel_dp->link_configuration[0] = intel_dp->link_bw;
913 intel_dp->DP |= DP_SYNC_HS_HIGH; 781 intel_dp->link_configuration[1] = intel_dp->lane_count;
914 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 782 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
915 intel_dp->DP |= DP_SYNC_VS_HIGH;
916 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
917 783
918 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 784 /*
919 intel_dp->DP |= DP_ENHANCED_FRAMING; 785 * Check for DPCD version > 1.1 and enhanced framing support
786 */
787 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
788 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
789 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
790 intel_dp->DP |= DP_ENHANCED_FRAMING;
791 }
920 792
921 intel_dp->DP |= intel_crtc->pipe << 29; 793 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
794 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
795 intel_dp->DP |= DP_PIPEB_SELECT;
922 796
797 if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
923 /* don't miss out required setting for eDP */ 798 /* don't miss out required setting for eDP */
799 intel_dp->DP |= DP_PLL_ENABLE;
924 if (adjusted_mode->clock < 200000) 800 if (adjusted_mode->clock < 200000)
925 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 801 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
926 else 802 else
927 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 803 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
928 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
929 intel_dp->DP |= intel_dp->color_range;
930
931 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
932 intel_dp->DP |= DP_SYNC_HS_HIGH;
933 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
934 intel_dp->DP |= DP_SYNC_VS_HIGH;
935 intel_dp->DP |= DP_LINK_TRAIN_OFF;
936
937 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
938 intel_dp->DP |= DP_ENHANCED_FRAMING;
939
940 if (intel_crtc->pipe == 1)
941 intel_dp->DP |= DP_PIPEB_SELECT;
942
943 if (is_cpu_edp(intel_dp)) {
944 /* don't miss out required setting for eDP */
945 if (adjusted_mode->clock < 200000)
946 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
947 else
948 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
949 }
950 } else {
951 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
952 }
953}
954
955#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
956#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
957
958#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
959#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
960
961#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
962#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
963
964static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
965 u32 mask,
966 u32 value)
967{
968 struct drm_device *dev = intel_dp_to_dev(intel_dp);
969 struct drm_i915_private *dev_priv = dev->dev_private;
970
971 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
972 mask, value,
973 I915_READ(PCH_PP_STATUS),
974 I915_READ(PCH_PP_CONTROL));
975
976 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
977 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
978 I915_READ(PCH_PP_STATUS),
979 I915_READ(PCH_PP_CONTROL));
980 } 804 }
981} 805}
982 806
983static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 807static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
984{
985 DRM_DEBUG_KMS("Wait for panel power on\n");
986 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
987}
988
989static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
990{
991 DRM_DEBUG_KMS("Wait for panel power off time\n");
992 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
993}
994
995static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
996{ 808{
997 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 809 struct drm_device *dev = intel_dp->base.base.dev;
998 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
999}
1000
1001
1002/* Read the current pp_control value, unlocking the register if it
1003 * is locked
1004 */
1005
1006static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
1007{
1008 u32 control = I915_READ(PCH_PP_CONTROL);
1009
1010 control &= ~PANEL_UNLOCK_MASK;
1011 control |= PANEL_UNLOCK_REGS;
1012 return control;
1013}
1014
1015void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1016{
1017 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1018 struct drm_i915_private *dev_priv = dev->dev_private; 810 struct drm_i915_private *dev_priv = dev->dev_private;
1019 u32 pp; 811 u32 pp;
1020 812
1021 if (!is_edp(intel_dp)) 813 /*
1022 return; 814 * If the panel wasn't on, make sure there's not a currently
1023 DRM_DEBUG_KMS("Turn eDP VDD on\n"); 815 * active PP sequence before enabling AUX VDD.
1024 816 */
1025 WARN(intel_dp->want_panel_vdd, 817 if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
1026 "eDP VDD already requested on\n"); 818 msleep(dev_priv->panel_t3);
1027
1028 intel_dp->want_panel_vdd = true;
1029
1030 if (ironlake_edp_have_panel_vdd(intel_dp)) {
1031 DRM_DEBUG_KMS("eDP VDD already on\n");
1032 return;
1033 }
1034
1035 if (!ironlake_edp_have_panel_power(intel_dp))
1036 ironlake_wait_panel_power_cycle(intel_dp);
1037 819
1038 pp = ironlake_get_pp_control(dev_priv); 820 pp = I915_READ(PCH_PP_CONTROL);
1039 pp |= EDP_FORCE_VDD; 821 pp |= EDP_FORCE_VDD;
1040 I915_WRITE(PCH_PP_CONTROL, pp); 822 I915_WRITE(PCH_PP_CONTROL, pp);
1041 POSTING_READ(PCH_PP_CONTROL); 823 POSTING_READ(PCH_PP_CONTROL);
1042 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1043 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1044
1045 /*
1046 * If the panel wasn't on, delay before accessing aux channel
1047 */
1048 if (!ironlake_edp_have_panel_power(intel_dp)) {
1049 DRM_DEBUG_KMS("eDP was not running\n");
1050 msleep(intel_dp->panel_power_up_delay);
1051 }
1052} 824}
1053 825
1054static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 826static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
1055{ 827{
1056 struct drm_device *dev = intel_dp_to_dev(intel_dp); 828 struct drm_device *dev = intel_dp->base.base.dev;
1057 struct drm_i915_private *dev_priv = dev->dev_private; 829 struct drm_i915_private *dev_priv = dev->dev_private;
1058 u32 pp; 830 u32 pp;
1059 831
1060 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 832 pp = I915_READ(PCH_PP_CONTROL);
1061 pp = ironlake_get_pp_control(dev_priv); 833 pp &= ~EDP_FORCE_VDD;
1062 pp &= ~EDP_FORCE_VDD; 834 I915_WRITE(PCH_PP_CONTROL, pp);
1063 I915_WRITE(PCH_PP_CONTROL, pp); 835 POSTING_READ(PCH_PP_CONTROL);
1064 POSTING_READ(PCH_PP_CONTROL);
1065
1066 /* Make sure sequencer is idle before allowing subsequent activity */
1067 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1068 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1069
1070 msleep(intel_dp->panel_power_down_delay);
1071 }
1072}
1073
1074static void ironlake_panel_vdd_work(struct work_struct *__work)
1075{
1076 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1077 struct intel_dp, panel_vdd_work);
1078 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1079
1080 mutex_lock(&dev->mode_config.mutex);
1081 ironlake_panel_vdd_off_sync(intel_dp);
1082 mutex_unlock(&dev->mode_config.mutex);
1083}
1084
1085void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1086{
1087 if (!is_edp(intel_dp))
1088 return;
1089
1090 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1091 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1092
1093 intel_dp->want_panel_vdd = false;
1094 836
1095 if (sync) { 837 /* Make sure sequencer is idle before allowing subsequent activity */
1096 ironlake_panel_vdd_off_sync(intel_dp); 838 msleep(dev_priv->panel_t12);
1097 } else {
1098 /*
1099 * Queue the timer to fire a long
1100 * time from now (relative to the power down delay)
1101 * to keep the panel power up across a sequence of operations
1102 */
1103 schedule_delayed_work(&intel_dp->panel_vdd_work,
1104 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1105 }
1106} 839}
1107 840
1108void ironlake_edp_panel_on(struct intel_dp *intel_dp) 841/* Returns true if the panel was already on when called */
842static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
1109{ 843{
1110 struct drm_device *dev = intel_dp_to_dev(intel_dp); 844 struct drm_device *dev = intel_dp->base.base.dev;
1111 struct drm_i915_private *dev_priv = dev->dev_private; 845 struct drm_i915_private *dev_priv = dev->dev_private;
1112 u32 pp; 846 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
1113 847
1114 if (!is_edp(intel_dp)) 848 if (I915_READ(PCH_PP_STATUS) & PP_ON)
1115 return; 849 return true;
1116
1117 DRM_DEBUG_KMS("Turn eDP power on\n");
1118 850
1119 if (ironlake_edp_have_panel_power(intel_dp)) { 851 pp = I915_READ(PCH_PP_CONTROL);
1120 DRM_DEBUG_KMS("eDP power already on\n");
1121 return;
1122 }
1123 852
1124 ironlake_wait_panel_power_cycle(intel_dp); 853 /* ILK workaround: disable reset around power sequence */
854 pp &= ~PANEL_POWER_RESET;
855 I915_WRITE(PCH_PP_CONTROL, pp);
856 POSTING_READ(PCH_PP_CONTROL);
1125 857
1126 pp = ironlake_get_pp_control(dev_priv); 858 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
1127 if (IS_GEN5(dev)) { 859 I915_WRITE(PCH_PP_CONTROL, pp);
1128 /* ILK workaround: disable reset around power sequence */ 860 POSTING_READ(PCH_PP_CONTROL);
1129 pp &= ~PANEL_POWER_RESET;
1130 I915_WRITE(PCH_PP_CONTROL, pp);
1131 POSTING_READ(PCH_PP_CONTROL);
1132 }
1133 861
1134 pp |= POWER_TARGET_ON; 862 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
1135 if (!IS_GEN5(dev)) 863 5000))
1136 pp |= PANEL_POWER_RESET; 864 DRM_ERROR("panel on wait timed out: 0x%08x\n",
865 I915_READ(PCH_PP_STATUS));
1137 866
867 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1138 I915_WRITE(PCH_PP_CONTROL, pp); 868 I915_WRITE(PCH_PP_CONTROL, pp);
1139 POSTING_READ(PCH_PP_CONTROL); 869 POSTING_READ(PCH_PP_CONTROL);
1140 870
1141 ironlake_wait_panel_on(intel_dp); 871 return false;
1142
1143 if (IS_GEN5(dev)) {
1144 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1145 I915_WRITE(PCH_PP_CONTROL, pp);
1146 POSTING_READ(PCH_PP_CONTROL);
1147 }
1148} 872}
1149 873
1150void ironlake_edp_panel_off(struct intel_dp *intel_dp) 874static void ironlake_edp_panel_off (struct drm_device *dev)
1151{ 875{
1152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1153 struct drm_i915_private *dev_priv = dev->dev_private; 876 struct drm_i915_private *dev_priv = dev->dev_private;
1154 u32 pp; 877 u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
878 PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
1155 879
1156 if (!is_edp(intel_dp)) 880 pp = I915_READ(PCH_PP_CONTROL);
1157 return;
1158 881
1159 DRM_DEBUG_KMS("Turn eDP power off\n"); 882 /* ILK workaround: disable reset around power sequence */
1160 883 pp &= ~PANEL_POWER_RESET;
1161 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 884 I915_WRITE(PCH_PP_CONTROL, pp);
885 POSTING_READ(PCH_PP_CONTROL);
1162 886
1163 pp = ironlake_get_pp_control(dev_priv); 887 pp &= ~POWER_TARGET_ON;
1164 /* We need to switch off panel power _and_ force vdd, for otherwise some
1165 * panels get very unhappy and cease to work. */
1166 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1167 I915_WRITE(PCH_PP_CONTROL, pp); 888 I915_WRITE(PCH_PP_CONTROL, pp);
1168 POSTING_READ(PCH_PP_CONTROL); 889 POSTING_READ(PCH_PP_CONTROL);
1169 890
1170 intel_dp->want_panel_vdd = false; 891 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
892 DRM_ERROR("panel off wait timed out: 0x%08x\n",
893 I915_READ(PCH_PP_STATUS));
1171 894
1172 ironlake_wait_panel_off(intel_dp); 895 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
896 I915_WRITE(PCH_PP_CONTROL, pp);
897 POSTING_READ(PCH_PP_CONTROL);
1173} 898}
1174 899
1175void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 900static void ironlake_edp_backlight_on (struct drm_device *dev)
1176{ 901{
1177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1178 struct drm_device *dev = intel_dig_port->base.base.dev;
1179 struct drm_i915_private *dev_priv = dev->dev_private; 902 struct drm_i915_private *dev_priv = dev->dev_private;
1180 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
1181 u32 pp; 903 u32 pp;
1182 904
1183 if (!is_edp(intel_dp))
1184 return;
1185
1186 DRM_DEBUG_KMS("\n"); 905 DRM_DEBUG_KMS("\n");
1187 /* 906 /*
1188 * If we enable the backlight right away following a panel power 907 * If we enable the backlight right away following a panel power
@@ -1190,79 +909,44 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1190 * link. So delay a bit to make sure the image is solid before 909 * link. So delay a bit to make sure the image is solid before
1191 * allowing it to appear. 910 * allowing it to appear.
1192 */ 911 */
1193 msleep(intel_dp->backlight_on_delay); 912 msleep(300);
1194 pp = ironlake_get_pp_control(dev_priv); 913 pp = I915_READ(PCH_PP_CONTROL);
1195 pp |= EDP_BLC_ENABLE; 914 pp |= EDP_BLC_ENABLE;
1196 I915_WRITE(PCH_PP_CONTROL, pp); 915 I915_WRITE(PCH_PP_CONTROL, pp);
1197 POSTING_READ(PCH_PP_CONTROL);
1198
1199 intel_panel_enable_backlight(dev, pipe);
1200} 916}
1201 917
1202void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 918static void ironlake_edp_backlight_off (struct drm_device *dev)
1203{ 919{
1204 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1205 struct drm_i915_private *dev_priv = dev->dev_private; 920 struct drm_i915_private *dev_priv = dev->dev_private;
1206 u32 pp; 921 u32 pp;
1207 922
1208 if (!is_edp(intel_dp))
1209 return;
1210
1211 intel_panel_disable_backlight(dev);
1212
1213 DRM_DEBUG_KMS("\n"); 923 DRM_DEBUG_KMS("\n");
1214 pp = ironlake_get_pp_control(dev_priv); 924 pp = I915_READ(PCH_PP_CONTROL);
1215 pp &= ~EDP_BLC_ENABLE; 925 pp &= ~EDP_BLC_ENABLE;
1216 I915_WRITE(PCH_PP_CONTROL, pp); 926 I915_WRITE(PCH_PP_CONTROL, pp);
1217 POSTING_READ(PCH_PP_CONTROL);
1218 msleep(intel_dp->backlight_off_delay);
1219} 927}
1220 928
1221static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 929static void ironlake_edp_pll_on(struct drm_encoder *encoder)
1222{ 930{
1223 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 931 struct drm_device *dev = encoder->dev;
1224 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1225 struct drm_device *dev = crtc->dev;
1226 struct drm_i915_private *dev_priv = dev->dev_private; 932 struct drm_i915_private *dev_priv = dev->dev_private;
1227 u32 dpa_ctl; 933 u32 dpa_ctl;
1228 934
1229 assert_pipe_disabled(dev_priv,
1230 to_intel_crtc(crtc)->pipe);
1231
1232 DRM_DEBUG_KMS("\n"); 935 DRM_DEBUG_KMS("\n");
1233 dpa_ctl = I915_READ(DP_A); 936 dpa_ctl = I915_READ(DP_A);
1234 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); 937 dpa_ctl |= DP_PLL_ENABLE;
1235 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 938 I915_WRITE(DP_A, dpa_ctl);
1236
1237 /* We don't adjust intel_dp->DP while tearing down the link, to
1238 * facilitate link retraining (e.g. after hotplug). Hence clear all
1239 * enable bits here to ensure that we don't enable too much. */
1240 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1241 intel_dp->DP |= DP_PLL_ENABLE;
1242 I915_WRITE(DP_A, intel_dp->DP);
1243 POSTING_READ(DP_A); 939 POSTING_READ(DP_A);
1244 udelay(200); 940 udelay(200);
1245} 941}
1246 942
1247static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 943static void ironlake_edp_pll_off(struct drm_encoder *encoder)
1248{ 944{
1249 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 945 struct drm_device *dev = encoder->dev;
1250 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1251 struct drm_device *dev = crtc->dev;
1252 struct drm_i915_private *dev_priv = dev->dev_private; 946 struct drm_i915_private *dev_priv = dev->dev_private;
1253 u32 dpa_ctl; 947 u32 dpa_ctl;
1254 948
1255 assert_pipe_disabled(dev_priv,
1256 to_intel_crtc(crtc)->pipe);
1257
1258 dpa_ctl = I915_READ(DP_A); 949 dpa_ctl = I915_READ(DP_A);
1259 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1260 "dp pll off, should be on\n");
1261 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1262
1263 /* We can't rely on the value tracked for the DP register in
1264 * intel_dp->DP because link_down must not change that (otherwise link
1265 * re-training will fail. */
1266 dpa_ctl &= ~DP_PLL_ENABLE; 950 dpa_ctl &= ~DP_PLL_ENABLE;
1267 I915_WRITE(DP_A, dpa_ctl); 951 I915_WRITE(DP_A, dpa_ctl);
1268 POSTING_READ(DP_A); 952 POSTING_READ(DP_A);
@@ -1270,7 +954,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1270} 954}
1271 955
1272/* If the sink supports it, try to set the power state appropriately */ 956/* If the sink supports it, try to set the power state appropriately */
1273void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 957static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1274{ 958{
1275 int ret, i; 959 int ret, i;
1276 960
@@ -1299,106 +983,81 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1299 } 983 }
1300} 984}
1301 985
1302static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 986static void intel_dp_prepare(struct drm_encoder *encoder)
1303 enum pipe *pipe)
1304{ 987{
1305 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 988 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1306 struct drm_device *dev = encoder->base.dev; 989 struct drm_device *dev = encoder->dev;
1307 struct drm_i915_private *dev_priv = dev->dev_private;
1308 u32 tmp = I915_READ(intel_dp->output_reg);
1309
1310 if (!(tmp & DP_PORT_EN))
1311 return false;
1312
1313 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
1314 *pipe = PORT_TO_PIPE_CPT(tmp);
1315 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
1316 *pipe = PORT_TO_PIPE(tmp);
1317 } else {
1318 u32 trans_sel;
1319 u32 trans_dp;
1320 int i;
1321
1322 switch (intel_dp->output_reg) {
1323 case PCH_DP_B:
1324 trans_sel = TRANS_DP_PORT_SEL_B;
1325 break;
1326 case PCH_DP_C:
1327 trans_sel = TRANS_DP_PORT_SEL_C;
1328 break;
1329 case PCH_DP_D:
1330 trans_sel = TRANS_DP_PORT_SEL_D;
1331 break;
1332 default:
1333 return true;
1334 }
1335 990
1336 for_each_pipe(i) { 991 /* Wake up the sink first */
1337 trans_dp = I915_READ(TRANS_DP_CTL(i)); 992 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1338 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1339 *pipe = i;
1340 return true;
1341 }
1342 }
1343 993
1344 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 994 if (is_edp(intel_dp)) {
1345 intel_dp->output_reg); 995 ironlake_edp_backlight_off(dev);
996 ironlake_edp_panel_off(dev);
997 if (!is_pch_edp(intel_dp))
998 ironlake_edp_pll_on(encoder);
999 else
1000 ironlake_edp_pll_off(encoder);
1346 } 1001 }
1347 1002 intel_dp_link_down(intel_dp);
1348 return true;
1349} 1003}
1350 1004
1351static void intel_disable_dp(struct intel_encoder *encoder) 1005static void intel_dp_commit(struct drm_encoder *encoder)
1352{ 1006{
1353 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1007 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1354 1008 struct drm_device *dev = encoder->dev;
1355 /* Make sure the panel is off before trying to change the mode. But also
1356 * ensure that we have vdd while we switch off the panel. */
1357 ironlake_edp_panel_vdd_on(intel_dp);
1358 ironlake_edp_backlight_off(intel_dp);
1359 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1360 ironlake_edp_panel_off(intel_dp);
1361 1009
1362 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1010 if (is_edp(intel_dp))
1363 if (!is_cpu_edp(intel_dp)) 1011 ironlake_edp_panel_vdd_on(intel_dp);
1364 intel_dp_link_down(intel_dp);
1365}
1366 1012
1367static void intel_post_disable_dp(struct intel_encoder *encoder) 1013 intel_dp_start_link_train(intel_dp);
1368{
1369 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1370 1014
1371 if (is_cpu_edp(intel_dp)) { 1015 if (is_edp(intel_dp)) {
1372 intel_dp_link_down(intel_dp); 1016 ironlake_edp_panel_on(intel_dp);
1373 ironlake_edp_pll_off(intel_dp); 1017 ironlake_edp_panel_vdd_off(intel_dp);
1374 } 1018 }
1375}
1376 1019
1377static void intel_enable_dp(struct intel_encoder *encoder) 1020 intel_dp_complete_link_train(intel_dp);
1378{
1379 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1380 struct drm_device *dev = encoder->base.dev;
1381 struct drm_i915_private *dev_priv = dev->dev_private;
1382 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1383 1021
1384 if (WARN_ON(dp_reg & DP_PORT_EN)) 1022 if (is_edp(intel_dp))
1385 return; 1023 ironlake_edp_backlight_on(dev);
1386 1024
1387 ironlake_edp_panel_vdd_on(intel_dp); 1025 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1388 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1389 intel_dp_start_link_train(intel_dp);
1390 ironlake_edp_panel_on(intel_dp);
1391 ironlake_edp_panel_vdd_off(intel_dp, true);
1392 intel_dp_complete_link_train(intel_dp);
1393 ironlake_edp_backlight_on(intel_dp);
1394} 1026}
1395 1027
1396static void intel_pre_enable_dp(struct intel_encoder *encoder) 1028static void
1029intel_dp_dpms(struct drm_encoder *encoder, int mode)
1397{ 1030{
1398 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1031 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1032 struct drm_device *dev = encoder->dev;
1033 struct drm_i915_private *dev_priv = dev->dev_private;
1034 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1399 1035
1400 if (is_cpu_edp(intel_dp)) 1036 if (mode != DRM_MODE_DPMS_ON) {
1401 ironlake_edp_pll_on(intel_dp); 1037 if (is_edp(intel_dp))
1038 ironlake_edp_backlight_off(dev);
1039 intel_dp_sink_dpms(intel_dp, mode);
1040 intel_dp_link_down(intel_dp);
1041 if (is_edp(intel_dp))
1042 ironlake_edp_panel_off(dev);
1043 if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
1044 ironlake_edp_pll_off(encoder);
1045 } else {
1046 if (is_edp(intel_dp))
1047 ironlake_edp_panel_vdd_on(intel_dp);
1048 intel_dp_sink_dpms(intel_dp, mode);
1049 if (!(dp_reg & DP_PORT_EN)) {
1050 intel_dp_start_link_train(intel_dp);
1051 if (is_edp(intel_dp)) {
1052 ironlake_edp_panel_on(intel_dp);
1053 ironlake_edp_panel_vdd_off(intel_dp);
1054 }
1055 intel_dp_complete_link_train(intel_dp);
1056 }
1057 if (is_edp(intel_dp))
1058 ironlake_edp_backlight_on(dev);
1059 }
1060 intel_dp->dpms_mode = mode;
1402} 1061}
1403 1062
1404/* 1063/*
@@ -1431,14 +1090,48 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1431 * link status information 1090 * link status information
1432 */ 1091 */
1433static bool 1092static bool
1434intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1093intel_dp_get_link_status(struct intel_dp *intel_dp)
1435{ 1094{
1436 return intel_dp_aux_native_read_retry(intel_dp, 1095 return intel_dp_aux_native_read_retry(intel_dp,
1437 DP_LANE0_1_STATUS, 1096 DP_LANE0_1_STATUS,
1438 link_status, 1097 intel_dp->link_status,
1439 DP_LINK_STATUS_SIZE); 1098 DP_LINK_STATUS_SIZE);
1440} 1099}
1441 1100
1101static uint8_t
1102intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1103 int r)
1104{
1105 return link_status[r - DP_LANE0_1_STATUS];
1106}
1107
1108static uint8_t
1109intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
1110 int lane)
1111{
1112 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1113 int s = ((lane & 1) ?
1114 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1115 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1116 uint8_t l = intel_dp_link_status(link_status, i);
1117
1118 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1119}
1120
1121static uint8_t
1122intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
1123 int lane)
1124{
1125 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1126 int s = ((lane & 1) ?
1127 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1128 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1129 uint8_t l = intel_dp_link_status(link_status, i);
1130
1131 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1132}
1133
1134
1442#if 0 1135#if 0
1443static char *voltage_names[] = { 1136static char *voltage_names[] = {
1444 "0.4V", "0.6V", "0.8V", "1.2V" 1137 "0.4V", "0.6V", "0.8V", "1.2V"
@@ -1455,74 +1148,34 @@ static char *link_train_names[] = {
1455 * These are source-specific values; current Intel hardware supports 1148 * These are source-specific values; current Intel hardware supports
1456 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1149 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1457 */ 1150 */
1151#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
1458 1152
1459static uint8_t 1153static uint8_t
1460intel_dp_voltage_max(struct intel_dp *intel_dp) 1154intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1461{ 1155{
1462 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1156 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1463 1157 case DP_TRAIN_VOLTAGE_SWING_400:
1464 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1158 return DP_TRAIN_PRE_EMPHASIS_6;
1465 return DP_TRAIN_VOLTAGE_SWING_800; 1159 case DP_TRAIN_VOLTAGE_SWING_600:
1466 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1160 return DP_TRAIN_PRE_EMPHASIS_6;
1467 return DP_TRAIN_VOLTAGE_SWING_1200; 1161 case DP_TRAIN_VOLTAGE_SWING_800:
1468 else 1162 return DP_TRAIN_PRE_EMPHASIS_3_5;
1469 return DP_TRAIN_VOLTAGE_SWING_800; 1163 case DP_TRAIN_VOLTAGE_SWING_1200:
1470} 1164 default:
1471 1165 return DP_TRAIN_PRE_EMPHASIS_0;
1472static uint8_t
1473intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1474{
1475 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1476
1477 if (IS_HASWELL(dev)) {
1478 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1479 case DP_TRAIN_VOLTAGE_SWING_400:
1480 return DP_TRAIN_PRE_EMPHASIS_9_5;
1481 case DP_TRAIN_VOLTAGE_SWING_600:
1482 return DP_TRAIN_PRE_EMPHASIS_6;
1483 case DP_TRAIN_VOLTAGE_SWING_800:
1484 return DP_TRAIN_PRE_EMPHASIS_3_5;
1485 case DP_TRAIN_VOLTAGE_SWING_1200:
1486 default:
1487 return DP_TRAIN_PRE_EMPHASIS_0;
1488 }
1489 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1490 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1491 case DP_TRAIN_VOLTAGE_SWING_400:
1492 return DP_TRAIN_PRE_EMPHASIS_6;
1493 case DP_TRAIN_VOLTAGE_SWING_600:
1494 case DP_TRAIN_VOLTAGE_SWING_800:
1495 return DP_TRAIN_PRE_EMPHASIS_3_5;
1496 default:
1497 return DP_TRAIN_PRE_EMPHASIS_0;
1498 }
1499 } else {
1500 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1501 case DP_TRAIN_VOLTAGE_SWING_400:
1502 return DP_TRAIN_PRE_EMPHASIS_6;
1503 case DP_TRAIN_VOLTAGE_SWING_600:
1504 return DP_TRAIN_PRE_EMPHASIS_6;
1505 case DP_TRAIN_VOLTAGE_SWING_800:
1506 return DP_TRAIN_PRE_EMPHASIS_3_5;
1507 case DP_TRAIN_VOLTAGE_SWING_1200:
1508 default:
1509 return DP_TRAIN_PRE_EMPHASIS_0;
1510 }
1511 } 1166 }
1512} 1167}
1513 1168
1514static void 1169static void
1515intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1170intel_get_adjust_train(struct intel_dp *intel_dp)
1516{ 1171{
1517 uint8_t v = 0; 1172 uint8_t v = 0;
1518 uint8_t p = 0; 1173 uint8_t p = 0;
1519 int lane; 1174 int lane;
1520 uint8_t voltage_max;
1521 uint8_t preemph_max;
1522 1175
1523 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1176 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1524 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 1177 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
1525 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 1178 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
1526 1179
1527 if (this_v > v) 1180 if (this_v > v)
1528 v = this_v; 1181 v = this_v;
@@ -1530,20 +1183,18 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
1530 p = this_p; 1183 p = this_p;
1531 } 1184 }
1532 1185
1533 voltage_max = intel_dp_voltage_max(intel_dp); 1186 if (v >= I830_DP_VOLTAGE_MAX)
1534 if (v >= voltage_max) 1187 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
1535 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1536 1188
1537 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1189 if (p >= intel_dp_pre_emphasis_max(v))
1538 if (p >= preemph_max) 1190 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1539 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1540 1191
1541 for (lane = 0; lane < 4; lane++) 1192 for (lane = 0; lane < 4; lane++)
1542 intel_dp->train_set[lane] = v | p; 1193 intel_dp->train_set[lane] = v | p;
1543} 1194}
1544 1195
1545static uint32_t 1196static uint32_t
1546intel_dp_signal_levels(uint8_t train_set) 1197intel_dp_signal_levels(uint8_t train_set, int lane_count)
1547{ 1198{
1548 uint32_t signal_levels = 0; 1199 uint32_t signal_levels = 0;
1549 1200
@@ -1608,69 +1259,53 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
1608 } 1259 }
1609} 1260}
1610 1261
1611/* Gen7's DP voltage swing and pre-emphasis control */ 1262static uint8_t
1612static uint32_t 1263intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1613intel_gen7_edp_signal_levels(uint8_t train_set) 1264 int lane)
1614{ 1265{
1615 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1266 int i = DP_LANE0_1_STATUS + (lane >> 1);
1616 DP_TRAIN_PRE_EMPHASIS_MASK); 1267 int s = (lane & 1) * 4;
1617 switch (signal_levels) { 1268 uint8_t l = intel_dp_link_status(link_status, i);
1618 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1619 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1620 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1621 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1622 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1623 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1624 1269
1625 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1270 return (l >> s) & 0xf;
1626 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1271}
1627 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1628 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1629 1272
1630 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1273/* Check for clock recovery is done on all channels */
1631 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1274static bool
1632 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1275intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1633 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1276{
1277 int lane;
1278 uint8_t lane_status;
1634 1279
1635 default: 1280 for (lane = 0; lane < lane_count; lane++) {
1636 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1281 lane_status = intel_get_lane_status(link_status, lane);
1637 "0x%x\n", signal_levels); 1282 if ((lane_status & DP_LANE_CR_DONE) == 0)
1638 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1283 return false;
1639 } 1284 }
1285 return true;
1640} 1286}
1641 1287
1642/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1288/* Check to see if channel eq is done on all channels */
1643static uint32_t 1289#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
1644intel_dp_signal_levels_hsw(uint8_t train_set) 1290 DP_LANE_CHANNEL_EQ_DONE|\
1291 DP_LANE_SYMBOL_LOCKED)
1292static bool
1293intel_channel_eq_ok(struct intel_dp *intel_dp)
1645{ 1294{
1646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1295 uint8_t lane_align;
1647 DP_TRAIN_PRE_EMPHASIS_MASK); 1296 uint8_t lane_status;
1648 switch (signal_levels) { 1297 int lane;
1649 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1650 return DDI_BUF_EMP_400MV_0DB_HSW;
1651 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1652 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1653 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1654 return DDI_BUF_EMP_400MV_6DB_HSW;
1655 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1656 return DDI_BUF_EMP_400MV_9_5DB_HSW;
1657
1658 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1659 return DDI_BUF_EMP_600MV_0DB_HSW;
1660 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1661 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1662 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1663 return DDI_BUF_EMP_600MV_6DB_HSW;
1664 1298
1665 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1299 lane_align = intel_dp_link_status(intel_dp->link_status,
1666 return DDI_BUF_EMP_800MV_0DB_HSW; 1300 DP_LANE_ALIGN_STATUS_UPDATED);
1667 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1301 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1668 return DDI_BUF_EMP_800MV_3_5DB_HSW; 1302 return false;
1669 default: 1303 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1670 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1304 lane_status = intel_get_lane_status(intel_dp->link_status, lane);
1671 "0x%x\n", signal_levels); 1305 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1672 return DDI_BUF_EMP_400MV_0DB_HSW; 1306 return false;
1673 } 1307 }
1308 return true;
1674} 1309}
1675 1310
1676static bool 1311static bool
@@ -1678,86 +1313,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1678 uint32_t dp_reg_value, 1313 uint32_t dp_reg_value,
1679 uint8_t dp_train_pat) 1314 uint8_t dp_train_pat)
1680{ 1315{
1681 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1316 struct drm_device *dev = intel_dp->base.base.dev;
1682 struct drm_device *dev = intel_dig_port->base.base.dev;
1683 struct drm_i915_private *dev_priv = dev->dev_private; 1317 struct drm_i915_private *dev_priv = dev->dev_private;
1684 enum port port = intel_dig_port->port;
1685 int ret; 1318 int ret;
1686 uint32_t temp;
1687
1688 if (IS_HASWELL(dev)) {
1689 temp = I915_READ(DP_TP_CTL(port));
1690
1691 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1692 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
1693 else
1694 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
1695
1696 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1697 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1698 case DP_TRAINING_PATTERN_DISABLE:
1699 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1700 I915_WRITE(DP_TP_CTL(port), temp);
1701
1702 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1703 DP_TP_STATUS_IDLE_DONE), 1))
1704 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1705
1706 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1707 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1708
1709 break;
1710 case DP_TRAINING_PATTERN_1:
1711 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
1712 break;
1713 case DP_TRAINING_PATTERN_2:
1714 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
1715 break;
1716 case DP_TRAINING_PATTERN_3:
1717 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
1718 break;
1719 }
1720 I915_WRITE(DP_TP_CTL(port), temp);
1721
1722 } else if (HAS_PCH_CPT(dev) &&
1723 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1724 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1725
1726 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1727 case DP_TRAINING_PATTERN_DISABLE:
1728 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1729 break;
1730 case DP_TRAINING_PATTERN_1:
1731 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1732 break;
1733 case DP_TRAINING_PATTERN_2:
1734 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1735 break;
1736 case DP_TRAINING_PATTERN_3:
1737 DRM_ERROR("DP training pattern 3 not supported\n");
1738 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1739 break;
1740 }
1741
1742 } else {
1743 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1744
1745 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1746 case DP_TRAINING_PATTERN_DISABLE:
1747 dp_reg_value |= DP_LINK_TRAIN_OFF;
1748 break;
1749 case DP_TRAINING_PATTERN_1:
1750 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1751 break;
1752 case DP_TRAINING_PATTERN_2:
1753 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1754 break;
1755 case DP_TRAINING_PATTERN_3:
1756 DRM_ERROR("DP training pattern 3 not supported\n");
1757 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1758 break;
1759 }
1760 }
1761 1319
1762 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1320 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1763 POSTING_READ(intel_dp->output_reg); 1321 POSTING_READ(intel_dp->output_reg);
@@ -1766,33 +1324,39 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1766 DP_TRAINING_PATTERN_SET, 1324 DP_TRAINING_PATTERN_SET,
1767 dp_train_pat); 1325 dp_train_pat);
1768 1326
1769 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 1327 ret = intel_dp_aux_native_write(intel_dp,
1770 DP_TRAINING_PATTERN_DISABLE) { 1328 DP_TRAINING_LANE0_SET,
1771 ret = intel_dp_aux_native_write(intel_dp, 1329 intel_dp->train_set, 4);
1772 DP_TRAINING_LANE0_SET, 1330 if (ret != 4)
1773 intel_dp->train_set, 1331 return false;
1774 intel_dp->lane_count);
1775 if (ret != intel_dp->lane_count)
1776 return false;
1777 }
1778 1332
1779 return true; 1333 return true;
1780} 1334}
1781 1335
1782/* Enable corresponding port and start training pattern 1 */ 1336/* Enable corresponding port and start training pattern 1 */
1783void 1337static void
1784intel_dp_start_link_train(struct intel_dp *intel_dp) 1338intel_dp_start_link_train(struct intel_dp *intel_dp)
1785{ 1339{
1786 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; 1340 struct drm_device *dev = intel_dp->base.base.dev;
1787 struct drm_device *dev = encoder->dev; 1341 struct drm_i915_private *dev_priv = dev->dev_private;
1342 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1788 int i; 1343 int i;
1789 uint8_t voltage; 1344 uint8_t voltage;
1790 bool clock_recovery = false; 1345 bool clock_recovery = false;
1791 int voltage_tries, loop_tries; 1346 int tries;
1347 u32 reg;
1792 uint32_t DP = intel_dp->DP; 1348 uint32_t DP = intel_dp->DP;
1793 1349
1794 if (IS_HASWELL(dev)) 1350 /*
1795 intel_ddi_prepare_link_retrain(encoder); 1351 * On CPT we have to enable the port in training pattern 1, which
1352 * will happen below in intel_dp_set_link_train. Otherwise, enable
1353 * the port and wait for it to become active.
1354 */
1355 if (!HAS_PCH_CPT(dev)) {
1356 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1357 POSTING_READ(intel_dp->output_reg);
1358 intel_wait_for_vblank(dev, intel_crtc->pipe);
1359 }
1796 1360
1797 /* Write the link configuration data */ 1361 /* Write the link configuration data */
1798 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1362 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1800,48 +1364,41 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1800 DP_LINK_CONFIGURATION_SIZE); 1364 DP_LINK_CONFIGURATION_SIZE);
1801 1365
1802 DP |= DP_PORT_EN; 1366 DP |= DP_PORT_EN;
1803 1367 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1368 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1369 else
1370 DP &= ~DP_LINK_TRAIN_MASK;
1804 memset(intel_dp->train_set, 0, 4); 1371 memset(intel_dp->train_set, 0, 4);
1805 voltage = 0xff; 1372 voltage = 0xff;
1806 voltage_tries = 0; 1373 tries = 0;
1807 loop_tries = 0;
1808 clock_recovery = false; 1374 clock_recovery = false;
1809 for (;;) { 1375 for (;;) {
1810 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1376 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1811 uint8_t link_status[DP_LINK_STATUS_SIZE];
1812 uint32_t signal_levels; 1377 uint32_t signal_levels;
1813 1378 if (IS_GEN6(dev) && is_edp(intel_dp)) {
1814 if (IS_HASWELL(dev)) {
1815 signal_levels = intel_dp_signal_levels_hsw(
1816 intel_dp->train_set[0]);
1817 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1818 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1819 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1820 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1821 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1822 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1379 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1823 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1380 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1824 } else { 1381 } else {
1825 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1382 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1826 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1383 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1827 } 1384 }
1828 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
1829 signal_levels);
1830 1385
1831 /* Set training pattern 1 */ 1386 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1832 if (!intel_dp_set_link_train(intel_dp, DP, 1387 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1388 else
1389 reg = DP | DP_LINK_TRAIN_PAT_1;
1390
1391 if (!intel_dp_set_link_train(intel_dp, reg,
1833 DP_TRAINING_PATTERN_1 | 1392 DP_TRAINING_PATTERN_1 |
1834 DP_LINK_SCRAMBLING_DISABLE)) 1393 DP_LINK_SCRAMBLING_DISABLE))
1835 break; 1394 break;
1395 /* Set training pattern 1 */
1836 1396
1837 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 1397 udelay(100);
1838 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1398 if (!intel_dp_get_link_status(intel_dp))
1839 DRM_ERROR("failed to get link status\n");
1840 break; 1399 break;
1841 }
1842 1400
1843 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1401 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1844 DRM_DEBUG_KMS("clock recovery OK\n");
1845 clock_recovery = true; 1402 clock_recovery = true;
1846 break; 1403 break;
1847 } 1404 }
@@ -1850,41 +1407,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1850 for (i = 0; i < intel_dp->lane_count; i++) 1407 for (i = 0; i < intel_dp->lane_count; i++)
1851 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1408 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1852 break; 1409 break;
1853 if (i == intel_dp->lane_count && voltage_tries == 5) { 1410 if (i == intel_dp->lane_count)
1854 ++loop_tries; 1411 break;
1855 if (loop_tries == 5) {
1856 DRM_DEBUG_KMS("too many full retries, give up\n");
1857 break;
1858 }
1859 memset(intel_dp->train_set, 0, 4);
1860 voltage_tries = 0;
1861 continue;
1862 }
1863 1412
1864 /* Check to see if we've tried the same voltage 5 times */ 1413 /* Check to see if we've tried the same voltage 5 times */
1865 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1414 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1866 ++voltage_tries; 1415 ++tries;
1867 if (voltage_tries == 5) { 1416 if (tries == 5)
1868 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1869 break; 1417 break;
1870 }
1871 } else 1418 } else
1872 voltage_tries = 0; 1419 tries = 0;
1873 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1420 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1874 1421
1875 /* Compute new intel_dp->train_set as requested by target */ 1422 /* Compute new intel_dp->train_set as requested by target */
1876 intel_get_adjust_train(intel_dp, link_status); 1423 intel_get_adjust_train(intel_dp);
1877 } 1424 }
1878 1425
1879 intel_dp->DP = DP; 1426 intel_dp->DP = DP;
1880} 1427}
1881 1428
1882void 1429static void
1883intel_dp_complete_link_train(struct intel_dp *intel_dp) 1430intel_dp_complete_link_train(struct intel_dp *intel_dp)
1884{ 1431{
1885 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1432 struct drm_device *dev = intel_dp->base.base.dev;
1433 struct drm_i915_private *dev_priv = dev->dev_private;
1886 bool channel_eq = false; 1434 bool channel_eq = false;
1887 int tries, cr_tries; 1435 int tries, cr_tries;
1436 u32 reg;
1888 uint32_t DP = intel_dp->DP; 1437 uint32_t DP = intel_dp->DP;
1889 1438
1890 /* channel equalization */ 1439 /* channel equalization */
@@ -1894,7 +1443,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1894 for (;;) { 1443 for (;;) {
1895 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1444 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1896 uint32_t signal_levels; 1445 uint32_t signal_levels;
1897 uint8_t link_status[DP_LINK_STATUS_SIZE];
1898 1446
1899 if (cr_tries > 5) { 1447 if (cr_tries > 5) {
1900 DRM_ERROR("failed to train DP, aborting\n"); 1448 DRM_ERROR("failed to train DP, aborting\n");
@@ -1902,38 +1450,37 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1902 break; 1450 break;
1903 } 1451 }
1904 1452
1905 if (IS_HASWELL(dev)) { 1453 if (IS_GEN6(dev) && is_edp(intel_dp)) {
1906 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1907 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1908 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1909 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1910 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1911 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1912 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1454 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1913 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1455 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1914 } else { 1456 } else {
1915 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1457 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1916 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1458 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1917 } 1459 }
1918 1460
1461 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1462 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1463 else
1464 reg = DP | DP_LINK_TRAIN_PAT_2;
1465
1919 /* channel eq pattern */ 1466 /* channel eq pattern */
1920 if (!intel_dp_set_link_train(intel_dp, DP, 1467 if (!intel_dp_set_link_train(intel_dp, reg,
1921 DP_TRAINING_PATTERN_2 | 1468 DP_TRAINING_PATTERN_2 |
1922 DP_LINK_SCRAMBLING_DISABLE)) 1469 DP_LINK_SCRAMBLING_DISABLE))
1923 break; 1470 break;
1924 1471
1925 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1472 udelay(400);
1926 if (!intel_dp_get_link_status(intel_dp, link_status)) 1473 if (!intel_dp_get_link_status(intel_dp))
1927 break; 1474 break;
1928 1475
1929 /* Make sure clock is still ok */ 1476 /* Make sure clock is still ok */
1930 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1477 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1931 intel_dp_start_link_train(intel_dp); 1478 intel_dp_start_link_train(intel_dp);
1932 cr_tries++; 1479 cr_tries++;
1933 continue; 1480 continue;
1934 } 1481 }
1935 1482
1936 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1483 if (intel_channel_eq_ok(intel_dp)) {
1937 channel_eq = true; 1484 channel_eq = true;
1938 break; 1485 break;
1939 } 1486 }
@@ -1948,48 +1495,41 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1948 } 1495 }
1949 1496
1950 /* Compute new intel_dp->train_set as requested by target */ 1497 /* Compute new intel_dp->train_set as requested by target */
1951 intel_get_adjust_train(intel_dp, link_status); 1498 intel_get_adjust_train(intel_dp);
1952 ++tries; 1499 ++tries;
1953 } 1500 }
1954 1501
1955 if (channel_eq) 1502 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1956 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1503 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1504 else
1505 reg = DP | DP_LINK_TRAIN_OFF;
1957 1506
1958 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1507 I915_WRITE(intel_dp->output_reg, reg);
1508 POSTING_READ(intel_dp->output_reg);
1509 intel_dp_aux_native_write_1(intel_dp,
1510 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1959} 1511}
1960 1512
1961static void 1513static void
1962intel_dp_link_down(struct intel_dp *intel_dp) 1514intel_dp_link_down(struct intel_dp *intel_dp)
1963{ 1515{
1964 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1516 struct drm_device *dev = intel_dp->base.base.dev;
1965 struct drm_device *dev = intel_dig_port->base.base.dev;
1966 struct drm_i915_private *dev_priv = dev->dev_private; 1517 struct drm_i915_private *dev_priv = dev->dev_private;
1967 uint32_t DP = intel_dp->DP; 1518 uint32_t DP = intel_dp->DP;
1968 1519
1969 /* 1520 if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
1970 * DDI code has a strict mode set sequence and we should try to respect
1971 * it, otherwise we might hang the machine in many different ways. So we
1972 * really should be disabling the port only on a complete crtc_disable
1973 * sequence. This function is just called under two conditions on DDI
1974 * code:
1975 * - Link train failed while doing crtc_enable, and on this case we
1976 * really should respect the mode set sequence and wait for a
1977 * crtc_disable.
1978 * - Someone turned the monitor off and intel_dp_check_link_status
1979 * called us. We don't need to disable the whole port on this case, so
1980 * when someone turns the monitor on again,
1981 * intel_ddi_prepare_link_retrain will take care of redoing the link
1982 * train.
1983 */
1984 if (IS_HASWELL(dev))
1985 return;
1986
1987 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1988 return; 1521 return;
1989 1522
1990 DRM_DEBUG_KMS("\n"); 1523 DRM_DEBUG_KMS("\n");
1991 1524
1992 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1525 if (is_edp(intel_dp)) {
1526 DP &= ~DP_PLL_ENABLE;
1527 I915_WRITE(intel_dp->output_reg, DP);
1528 POSTING_READ(intel_dp->output_reg);
1529 udelay(100);
1530 }
1531
1532 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
1993 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1533 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1994 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1534 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1995 } else { 1535 } else {
@@ -2000,9 +1540,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2000 1540
2001 msleep(17); 1541 msleep(17);
2002 1542
2003 if (HAS_PCH_IBX(dev) && 1543 if (is_edp(intel_dp))
1544 DP |= DP_LINK_TRAIN_OFF;
1545
1546 if (!HAS_PCH_CPT(dev) &&
2004 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1547 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
2005 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 1548 struct drm_crtc *crtc = intel_dp->base.base.crtc;
2006 1549
2007 /* Hardware workaround: leaving our transcoder select 1550 /* Hardware workaround: leaving our transcoder select
2008 * set to transcoder B while it's off will prevent the 1551 * set to transcoder B while it's off will prevent the
@@ -2033,77 +1576,20 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2033 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 1576 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
2034 } 1577 }
2035 1578
2036 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
2037 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1579 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2038 POSTING_READ(intel_dp->output_reg); 1580 POSTING_READ(intel_dp->output_reg);
2039 msleep(intel_dp->panel_power_down_delay);
2040} 1581}
2041 1582
2042static bool 1583static bool
2043intel_dp_get_dpcd(struct intel_dp *intel_dp) 1584intel_dp_get_dpcd(struct intel_dp *intel_dp)
2044{ 1585{
2045 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 1586 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2046 sizeof(intel_dp->dpcd)) == 0) 1587 sizeof (intel_dp->dpcd)) &&
2047 return false; /* aux transfer failed */ 1588 (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
2048 1589 return true;
2049 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 1590 }
2050 return false; /* DPCD not present */
2051
2052 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2053 DP_DWN_STRM_PORT_PRESENT))
2054 return true; /* native DP sink */
2055
2056 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2057 return true; /* no per-port downstream info */
2058
2059 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2060 intel_dp->downstream_ports,
2061 DP_MAX_DOWNSTREAM_PORTS) == 0)
2062 return false; /* downstream port status fetch failed */
2063
2064 return true;
2065}
2066
2067static void
2068intel_dp_probe_oui(struct intel_dp *intel_dp)
2069{
2070 u8 buf[3];
2071
2072 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2073 return;
2074
2075 ironlake_edp_panel_vdd_on(intel_dp);
2076
2077 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2078 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2079 buf[0], buf[1], buf[2]);
2080
2081 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2082 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2083 buf[0], buf[1], buf[2]);
2084
2085 ironlake_edp_panel_vdd_off(intel_dp, false);
2086}
2087
2088static bool
2089intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2090{
2091 int ret;
2092
2093 ret = intel_dp_aux_native_read_retry(intel_dp,
2094 DP_DEVICE_SERVICE_IRQ_VECTOR,
2095 sink_irq_vector, 1);
2096 if (!ret)
2097 return false;
2098
2099 return true;
2100}
2101 1591
2102static void 1592 return false;
2103intel_dp_handle_test_request(struct intel_dp *intel_dp)
2104{
2105 /* NAK by default */
2106 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
2107} 1593}
2108 1594
2109/* 1595/*
@@ -2115,21 +1601,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
2115 * 4. Check link status on receipt of hot-plug interrupt 1601 * 4. Check link status on receipt of hot-plug interrupt
2116 */ 1602 */
2117 1603
2118void 1604static void
2119intel_dp_check_link_status(struct intel_dp *intel_dp) 1605intel_dp_check_link_status(struct intel_dp *intel_dp)
2120{ 1606{
2121 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1607 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
2122 u8 sink_irq_vector;
2123 u8 link_status[DP_LINK_STATUS_SIZE];
2124
2125 if (!intel_encoder->connectors_active)
2126 return; 1608 return;
2127 1609
2128 if (WARN_ON(!intel_encoder->base.crtc)) 1610 if (!intel_dp->base.base.crtc)
2129 return; 1611 return;
2130 1612
2131 /* Try to read receiver status if the link appears to be up */ 1613 /* Try to read receiver status if the link appears to be up */
2132 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1614 if (!intel_dp_get_link_status(intel_dp)) {
2133 intel_dp_link_down(intel_dp); 1615 intel_dp_link_down(intel_dp);
2134 return; 1616 return;
2135 } 1617 }
@@ -2140,77 +1622,30 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2140 return; 1622 return;
2141 } 1623 }
2142 1624
2143 /* Try to read the source of the interrupt */ 1625 if (!intel_channel_eq_ok(intel_dp)) {
2144 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2145 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2146 /* Clear interrupt source */
2147 intel_dp_aux_native_write_1(intel_dp,
2148 DP_DEVICE_SERVICE_IRQ_VECTOR,
2149 sink_irq_vector);
2150
2151 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2152 intel_dp_handle_test_request(intel_dp);
2153 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2154 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2155 }
2156
2157 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2158 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 1626 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2159 drm_get_encoder_name(&intel_encoder->base)); 1627 drm_get_encoder_name(&intel_dp->base.base));
2160 intel_dp_start_link_train(intel_dp); 1628 intel_dp_start_link_train(intel_dp);
2161 intel_dp_complete_link_train(intel_dp); 1629 intel_dp_complete_link_train(intel_dp);
2162 } 1630 }
2163} 1631}
2164 1632
2165/* XXX this is probably wrong for multiple downstream ports */
2166static enum drm_connector_status 1633static enum drm_connector_status
2167intel_dp_detect_dpcd(struct intel_dp *intel_dp) 1634intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2168{ 1635{
2169 uint8_t *dpcd = intel_dp->dpcd; 1636 if (intel_dp_get_dpcd(intel_dp))
2170 bool hpd;
2171 uint8_t type;
2172
2173 if (!intel_dp_get_dpcd(intel_dp))
2174 return connector_status_disconnected;
2175
2176 /* if there's no downstream port, we're done */
2177 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2178 return connector_status_connected; 1637 return connector_status_connected;
2179
2180 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2181 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2182 if (hpd) {
2183 uint8_t reg;
2184 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2185 &reg, 1))
2186 return connector_status_unknown;
2187 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2188 : connector_status_disconnected;
2189 }
2190
2191 /* If no HPD, poke DDC gently */
2192 if (drm_probe_ddc(&intel_dp->adapter))
2193 return connector_status_connected;
2194
2195 /* Well we tried, say unknown for unreliable port types */
2196 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2197 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2198 return connector_status_unknown;
2199
2200 /* Anything else is out of spec, warn and ignore */
2201 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2202 return connector_status_disconnected; 1638 return connector_status_disconnected;
2203} 1639}
2204 1640
2205static enum drm_connector_status 1641static enum drm_connector_status
2206ironlake_dp_detect(struct intel_dp *intel_dp) 1642ironlake_dp_detect(struct intel_dp *intel_dp)
2207{ 1643{
2208 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2209 enum drm_connector_status status; 1644 enum drm_connector_status status;
2210 1645
2211 /* Can't disconnect eDP, but you can close the lid... */ 1646 /* Can't disconnect eDP, but you can close the lid... */
2212 if (is_edp(intel_dp)) { 1647 if (is_edp(intel_dp)) {
2213 status = intel_panel_detect(dev); 1648 status = intel_panel_detect(intel_dp->base.base.dev);
2214 if (status == connector_status_unknown) 1649 if (status == connector_status_unknown)
2215 status = connector_status_connected; 1650 status = connector_status_connected;
2216 return status; 1651 return status;
@@ -2222,25 +1657,27 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
2222static enum drm_connector_status 1657static enum drm_connector_status
2223g4x_dp_detect(struct intel_dp *intel_dp) 1658g4x_dp_detect(struct intel_dp *intel_dp)
2224{ 1659{
2225 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1660 struct drm_device *dev = intel_dp->base.base.dev;
2226 struct drm_i915_private *dev_priv = dev->dev_private; 1661 struct drm_i915_private *dev_priv = dev->dev_private;
2227 uint32_t bit; 1662 uint32_t temp, bit;
2228 1663
2229 switch (intel_dp->output_reg) { 1664 switch (intel_dp->output_reg) {
2230 case DP_B: 1665 case DP_B:
2231 bit = DPB_HOTPLUG_LIVE_STATUS; 1666 bit = DPB_HOTPLUG_INT_STATUS;
2232 break; 1667 break;
2233 case DP_C: 1668 case DP_C:
2234 bit = DPC_HOTPLUG_LIVE_STATUS; 1669 bit = DPC_HOTPLUG_INT_STATUS;
2235 break; 1670 break;
2236 case DP_D: 1671 case DP_D:
2237 bit = DPD_HOTPLUG_LIVE_STATUS; 1672 bit = DPD_HOTPLUG_INT_STATUS;
2238 break; 1673 break;
2239 default: 1674 default:
2240 return connector_status_unknown; 1675 return connector_status_unknown;
2241 } 1676 }
2242 1677
2243 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 1678 temp = I915_READ(PORT_HOTPLUG_STAT);
1679
1680 if ((temp & bit) == 0)
2244 return connector_status_disconnected; 1681 return connector_status_disconnected;
2245 1682
2246 return intel_dp_detect_dpcd(intel_dp); 1683 return intel_dp_detect_dpcd(intel_dp);
@@ -2249,45 +1686,25 @@ g4x_dp_detect(struct intel_dp *intel_dp)
2249static struct edid * 1686static struct edid *
2250intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 1687intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2251{ 1688{
2252 struct intel_connector *intel_connector = to_intel_connector(connector); 1689 struct intel_dp *intel_dp = intel_attached_dp(connector);
2253 1690 struct edid *edid;
2254 /* use cached edid if we have one */
2255 if (intel_connector->edid) {
2256 struct edid *edid;
2257 int size;
2258
2259 /* invalid edid */
2260 if (IS_ERR(intel_connector->edid))
2261 return NULL;
2262
2263 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2264 edid = kmalloc(size, GFP_KERNEL);
2265 if (!edid)
2266 return NULL;
2267
2268 memcpy(edid, intel_connector->edid, size);
2269 return edid;
2270 }
2271 1691
2272 return drm_get_edid(connector, adapter); 1692 ironlake_edp_panel_vdd_on(intel_dp);
1693 edid = drm_get_edid(connector, adapter);
1694 ironlake_edp_panel_vdd_off(intel_dp);
1695 return edid;
2273} 1696}
2274 1697
2275static int 1698static int
2276intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 1699intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2277{ 1700{
2278 struct intel_connector *intel_connector = to_intel_connector(connector); 1701 struct intel_dp *intel_dp = intel_attached_dp(connector);
2279 1702 int ret;
2280 /* use cached edid if we have one */
2281 if (intel_connector->edid) {
2282 /* invalid edid */
2283 if (IS_ERR(intel_connector->edid))
2284 return 0;
2285
2286 return intel_connector_update_modes(connector,
2287 intel_connector->edid);
2288 }
2289 1703
2290 return intel_ddc_get_modes(connector, adapter); 1704 ironlake_edp_panel_vdd_on(intel_dp);
1705 ret = intel_ddc_get_modes(connector, adapter);
1706 ironlake_edp_panel_vdd_off(intel_dp);
1707 return ret;
2291} 1708}
2292 1709
2293 1710
@@ -2301,12 +1718,9 @@ static enum drm_connector_status
2301intel_dp_detect(struct drm_connector *connector, bool force) 1718intel_dp_detect(struct drm_connector *connector, bool force)
2302{ 1719{
2303 struct intel_dp *intel_dp = intel_attached_dp(connector); 1720 struct intel_dp *intel_dp = intel_attached_dp(connector);
2304 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1721 struct drm_device *dev = intel_dp->base.base.dev;
2305 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2306 struct drm_device *dev = connector->dev;
2307 enum drm_connector_status status; 1722 enum drm_connector_status status;
2308 struct edid *edid = NULL; 1723 struct edid *edid = NULL;
2309 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2310 1724
2311 intel_dp->has_audio = false; 1725 intel_dp->has_audio = false;
2312 1726
@@ -2315,50 +1729,60 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2315 else 1729 else
2316 status = g4x_dp_detect(intel_dp); 1730 status = g4x_dp_detect(intel_dp);
2317 1731
2318 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 1732 DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
2319 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); 1733 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
2320 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 1734 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
1735 intel_dp->dpcd[6], intel_dp->dpcd[7]);
2321 1736
2322 if (status != connector_status_connected) 1737 if (status != connector_status_connected)
2323 return status; 1738 return status;
2324 1739
2325 intel_dp_probe_oui(intel_dp); 1740 if (intel_dp->force_audio) {
2326 1741 intel_dp->has_audio = intel_dp->force_audio > 0;
2327 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2328 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2329 } else { 1742 } else {
2330 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 1743 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2331 if (edid) { 1744 if (edid) {
2332 intel_dp->has_audio = drm_detect_monitor_audio(edid); 1745 intel_dp->has_audio = drm_detect_monitor_audio(edid);
1746 connector->display_info.raw_edid = NULL;
2333 kfree(edid); 1747 kfree(edid);
2334 } 1748 }
2335 } 1749 }
2336 1750
2337 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2338 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2339 return connector_status_connected; 1751 return connector_status_connected;
2340} 1752}
2341 1753
2342static int intel_dp_get_modes(struct drm_connector *connector) 1754static int intel_dp_get_modes(struct drm_connector *connector)
2343{ 1755{
2344 struct intel_dp *intel_dp = intel_attached_dp(connector); 1756 struct intel_dp *intel_dp = intel_attached_dp(connector);
2345 struct intel_connector *intel_connector = to_intel_connector(connector); 1757 struct drm_device *dev = intel_dp->base.base.dev;
2346 struct drm_device *dev = connector->dev; 1758 struct drm_i915_private *dev_priv = dev->dev_private;
2347 int ret; 1759 int ret;
2348 1760
2349 /* We should parse the EDID data and find out if it has an audio sink 1761 /* We should parse the EDID data and find out if it has an audio sink
2350 */ 1762 */
2351 1763
2352 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 1764 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2353 if (ret) 1765 if (ret) {
1766 if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
1767 struct drm_display_mode *newmode;
1768 list_for_each_entry(newmode, &connector->probed_modes,
1769 head) {
1770 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1771 dev_priv->panel_fixed_mode =
1772 drm_mode_duplicate(dev, newmode);
1773 break;
1774 }
1775 }
1776 }
1777
2354 return ret; 1778 return ret;
1779 }
2355 1780
2356 /* if eDP has no EDID, fall back to fixed mode */ 1781 /* if eDP has no EDID, try to use fixed panel mode from VBT */
2357 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1782 if (is_edp(intel_dp)) {
2358 struct drm_display_mode *mode; 1783 if (dev_priv->panel_fixed_mode != NULL) {
2359 mode = drm_mode_duplicate(dev, 1784 struct drm_display_mode *mode;
2360 intel_connector->panel.fixed_mode); 1785 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
2361 if (mode) {
2362 drm_mode_probed_add(connector, mode); 1786 drm_mode_probed_add(connector, mode);
2363 return 1; 1787 return 1;
2364 } 1788 }
@@ -2376,6 +1800,8 @@ intel_dp_detect_audio(struct drm_connector *connector)
2376 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 1800 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2377 if (edid) { 1801 if (edid) {
2378 has_audio = drm_detect_monitor_audio(edid); 1802 has_audio = drm_detect_monitor_audio(edid);
1803
1804 connector->display_info.raw_edid = NULL;
2379 kfree(edid); 1805 kfree(edid);
2380 } 1806 }
2381 1807
@@ -2388,12 +1814,10 @@ intel_dp_set_property(struct drm_connector *connector,
2388 uint64_t val) 1814 uint64_t val)
2389{ 1815{
2390 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1816 struct drm_i915_private *dev_priv = connector->dev->dev_private;
2391 struct intel_connector *intel_connector = to_intel_connector(connector); 1817 struct intel_dp *intel_dp = intel_attached_dp(connector);
2392 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2393 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2394 int ret; 1818 int ret;
2395 1819
2396 ret = drm_object_property_set_value(&connector->base, property, val); 1820 ret = drm_connector_property_set_value(connector, property, val);
2397 if (ret) 1821 if (ret)
2398 return ret; 1822 return ret;
2399 1823
@@ -2406,10 +1830,10 @@ intel_dp_set_property(struct drm_connector *connector,
2406 1830
2407 intel_dp->force_audio = i; 1831 intel_dp->force_audio = i;
2408 1832
2409 if (i == HDMI_AUDIO_AUTO) 1833 if (i == 0)
2410 has_audio = intel_dp_detect_audio(connector); 1834 has_audio = intel_dp_detect_audio(connector);
2411 else 1835 else
2412 has_audio = (i == HDMI_AUDIO_ON); 1836 has_audio = i > 0;
2413 1837
2414 if (has_audio == intel_dp->has_audio) 1838 if (has_audio == intel_dp->has_audio)
2415 return 0; 1839 return 0;
@@ -2426,76 +1850,51 @@ intel_dp_set_property(struct drm_connector *connector,
2426 goto done; 1850 goto done;
2427 } 1851 }
2428 1852
2429 if (is_edp(intel_dp) &&
2430 property == connector->dev->mode_config.scaling_mode_property) {
2431 if (val == DRM_MODE_SCALE_NONE) {
2432 DRM_DEBUG_KMS("no scaling not supported\n");
2433 return -EINVAL;
2434 }
2435
2436 if (intel_connector->panel.fitting_mode == val) {
2437 /* the eDP scaling property is not changed */
2438 return 0;
2439 }
2440 intel_connector->panel.fitting_mode = val;
2441
2442 goto done;
2443 }
2444
2445 return -EINVAL; 1853 return -EINVAL;
2446 1854
2447done: 1855done:
2448 if (intel_encoder->base.crtc) { 1856 if (intel_dp->base.base.crtc) {
2449 struct drm_crtc *crtc = intel_encoder->base.crtc; 1857 struct drm_crtc *crtc = intel_dp->base.base.crtc;
2450 intel_set_mode(crtc, &crtc->mode, 1858 drm_crtc_helper_set_mode(crtc, &crtc->mode,
2451 crtc->x, crtc->y, crtc->fb); 1859 crtc->x, crtc->y,
1860 crtc->fb);
2452 } 1861 }
2453 1862
2454 return 0; 1863 return 0;
2455} 1864}
2456 1865
2457static void 1866static void
2458intel_dp_destroy(struct drm_connector *connector) 1867intel_dp_destroy (struct drm_connector *connector)
2459{ 1868{
2460 struct drm_device *dev = connector->dev; 1869 struct drm_device *dev = connector->dev;
2461 struct intel_dp *intel_dp = intel_attached_dp(connector);
2462 struct intel_connector *intel_connector = to_intel_connector(connector);
2463
2464 if (!IS_ERR_OR_NULL(intel_connector->edid))
2465 kfree(intel_connector->edid);
2466 1870
2467 if (is_edp(intel_dp)) { 1871 if (intel_dpd_is_edp(dev))
2468 intel_panel_destroy_backlight(dev); 1872 intel_panel_destroy_backlight(dev);
2469 intel_panel_fini(&intel_connector->panel);
2470 }
2471 1873
2472 drm_sysfs_connector_remove(connector); 1874 drm_sysfs_connector_remove(connector);
2473 drm_connector_cleanup(connector); 1875 drm_connector_cleanup(connector);
2474 kfree(connector); 1876 kfree(connector);
2475} 1877}
2476 1878
2477void intel_dp_encoder_destroy(struct drm_encoder *encoder) 1879static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2478{ 1880{
2479 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 1881 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2480 struct intel_dp *intel_dp = &intel_dig_port->dp;
2481 1882
2482 i2c_del_adapter(&intel_dp->adapter); 1883 i2c_del_adapter(&intel_dp->adapter);
2483 drm_encoder_cleanup(encoder); 1884 drm_encoder_cleanup(encoder);
2484 if (is_edp(intel_dp)) { 1885 kfree(intel_dp);
2485 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2486 ironlake_panel_vdd_off_sync(intel_dp);
2487 }
2488 kfree(intel_dig_port);
2489} 1886}
2490 1887
2491static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1888static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1889 .dpms = intel_dp_dpms,
2492 .mode_fixup = intel_dp_mode_fixup, 1890 .mode_fixup = intel_dp_mode_fixup,
1891 .prepare = intel_dp_prepare,
2493 .mode_set = intel_dp_mode_set, 1892 .mode_set = intel_dp_mode_set,
2494 .disable = intel_encoder_noop, 1893 .commit = intel_dp_commit,
2495}; 1894};
2496 1895
2497static const struct drm_connector_funcs intel_dp_connector_funcs = { 1896static const struct drm_connector_funcs intel_dp_connector_funcs = {
2498 .dpms = intel_connector_dpms, 1897 .dpms = drm_helper_connector_dpms,
2499 .detect = intel_dp_detect, 1898 .detect = intel_dp_detect,
2500 .fill_modes = drm_helper_probe_single_connector_modes, 1899 .fill_modes = drm_helper_probe_single_connector_modes,
2501 .set_property = intel_dp_set_property, 1900 .set_property = intel_dp_set_property,
@@ -2515,24 +1914,27 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
2515static void 1914static void
2516intel_dp_hot_plug(struct intel_encoder *intel_encoder) 1915intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2517{ 1916{
2518 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 1917 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
2519 1918
2520 intel_dp_check_link_status(intel_dp); 1919 intel_dp_check_link_status(intel_dp);
2521} 1920}
2522 1921
2523/* Return which DP Port should be selected for Transcoder DP control */ 1922/* Return which DP Port should be selected for Transcoder DP control */
2524int 1923int
2525intel_trans_dp_port_sel(struct drm_crtc *crtc) 1924intel_trans_dp_port_sel (struct drm_crtc *crtc)
2526{ 1925{
2527 struct drm_device *dev = crtc->dev; 1926 struct drm_device *dev = crtc->dev;
2528 struct intel_encoder *intel_encoder; 1927 struct drm_mode_config *mode_config = &dev->mode_config;
2529 struct intel_dp *intel_dp; 1928 struct drm_encoder *encoder;
2530 1929
2531 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 1930 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
2532 intel_dp = enc_to_intel_dp(&intel_encoder->base); 1931 struct intel_dp *intel_dp;
1932
1933 if (encoder->crtc != crtc)
1934 continue;
2533 1935
2534 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 1936 intel_dp = enc_to_intel_dp(encoder);
2535 intel_encoder->type == INTEL_OUTPUT_EDP) 1937 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
2536 return intel_dp->output_reg; 1938 return intel_dp->output_reg;
2537 } 1939 }
2538 1940
@@ -2562,229 +1964,118 @@ bool intel_dpd_is_edp(struct drm_device *dev)
2562static void 1964static void
2563intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 1965intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2564{ 1966{
2565 struct intel_connector *intel_connector = to_intel_connector(connector);
2566
2567 intel_attach_force_audio_property(connector); 1967 intel_attach_force_audio_property(connector);
2568 intel_attach_broadcast_rgb_property(connector); 1968 intel_attach_broadcast_rgb_property(connector);
2569
2570 if (is_edp(intel_dp)) {
2571 drm_mode_create_scaling_mode_property(connector->dev);
2572 drm_object_attach_property(
2573 &connector->base,
2574 connector->dev->mode_config.scaling_mode_property,
2575 DRM_MODE_SCALE_ASPECT);
2576 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
2577 }
2578}
2579
2580static void
2581intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2582 struct intel_dp *intel_dp)
2583{
2584 struct drm_i915_private *dev_priv = dev->dev_private;
2585 struct edp_power_seq cur, vbt, spec, final;
2586 u32 pp_on, pp_off, pp_div, pp;
2587
2588 /* Workaround: Need to write PP_CONTROL with the unlock key as
2589 * the very first thing. */
2590 pp = ironlake_get_pp_control(dev_priv);
2591 I915_WRITE(PCH_PP_CONTROL, pp);
2592
2593 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2594 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2595 pp_div = I915_READ(PCH_PP_DIVISOR);
2596
2597 /* Pull timing values out of registers */
2598 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2599 PANEL_POWER_UP_DELAY_SHIFT;
2600
2601 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2602 PANEL_LIGHT_ON_DELAY_SHIFT;
2603
2604 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2605 PANEL_LIGHT_OFF_DELAY_SHIFT;
2606
2607 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2608 PANEL_POWER_DOWN_DELAY_SHIFT;
2609
2610 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2611 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2612
2613 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2614 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2615
2616 vbt = dev_priv->edp.pps;
2617
2618 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
2619 * our hw here, which are all in 100usec. */
2620 spec.t1_t3 = 210 * 10;
2621 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
2622 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
2623 spec.t10 = 500 * 10;
2624 /* This one is special and actually in units of 100ms, but zero
2625 * based in the hw (so we need to add 100 ms). But the sw vbt
2626 * table multiplies it with 1000 to make it in units of 100usec,
2627 * too. */
2628 spec.t11_t12 = (510 + 100) * 10;
2629
2630 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2631 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2632
2633 /* Use the max of the register settings and vbt. If both are
2634 * unset, fall back to the spec limits. */
2635#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
2636 spec.field : \
2637 max(cur.field, vbt.field))
2638 assign_final(t1_t3);
2639 assign_final(t8);
2640 assign_final(t9);
2641 assign_final(t10);
2642 assign_final(t11_t12);
2643#undef assign_final
2644
2645#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
2646 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2647 intel_dp->backlight_on_delay = get_delay(t8);
2648 intel_dp->backlight_off_delay = get_delay(t9);
2649 intel_dp->panel_power_down_delay = get_delay(t10);
2650 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2651#undef get_delay
2652
2653 /* And finally store the new values in the power sequencer. */
2654 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2655 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2656 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2657 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2658 /* Compute the divisor for the pp clock, simply match the Bspec
2659 * formula. */
2660 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
2661 << PP_REFERENCE_DIVIDER_SHIFT;
2662 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
2663 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2664
2665 /* Haswell doesn't have any port selection bits for the panel
2666 * power sequencer any more. */
2667 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2668 if (is_cpu_edp(intel_dp))
2669 pp_on |= PANEL_POWER_PORT_DP_A;
2670 else
2671 pp_on |= PANEL_POWER_PORT_DP_D;
2672 }
2673
2674 I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
2675 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
2676 I915_WRITE(PCH_PP_DIVISOR, pp_div);
2677
2678
2679 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2680 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2681 intel_dp->panel_power_cycle_delay);
2682
2683 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2684 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2685
2686 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2687 I915_READ(PCH_PP_ON_DELAYS),
2688 I915_READ(PCH_PP_OFF_DELAYS),
2689 I915_READ(PCH_PP_DIVISOR));
2690} 1969}
2691 1970
2692void 1971void
2693intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1972intel_dp_init(struct drm_device *dev, int output_reg)
2694 struct intel_connector *intel_connector)
2695{ 1973{
2696 struct drm_connector *connector = &intel_connector->base;
2697 struct intel_dp *intel_dp = &intel_dig_port->dp;
2698 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2699 struct drm_device *dev = intel_encoder->base.dev;
2700 struct drm_i915_private *dev_priv = dev->dev_private; 1974 struct drm_i915_private *dev_priv = dev->dev_private;
2701 struct drm_display_mode *fixed_mode = NULL; 1975 struct drm_connector *connector;
2702 enum port port = intel_dig_port->port; 1976 struct intel_dp *intel_dp;
1977 struct intel_encoder *intel_encoder;
1978 struct intel_connector *intel_connector;
2703 const char *name = NULL; 1979 const char *name = NULL;
2704 int type; 1980 int type;
2705 1981
2706 /* Preserve the current hw state. */ 1982 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
2707 intel_dp->DP = I915_READ(intel_dp->output_reg); 1983 if (!intel_dp)
2708 intel_dp->attached_connector = intel_connector; 1984 return;
1985
1986 intel_dp->output_reg = output_reg;
1987 intel_dp->dpms_mode = -1;
2709 1988
2710 if (HAS_PCH_SPLIT(dev) && port == PORT_D) 1989 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1990 if (!intel_connector) {
1991 kfree(intel_dp);
1992 return;
1993 }
1994 intel_encoder = &intel_dp->base;
1995
1996 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
2711 if (intel_dpd_is_edp(dev)) 1997 if (intel_dpd_is_edp(dev))
2712 intel_dp->is_pch_edp = true; 1998 intel_dp->is_pch_edp = true;
2713 1999
2714 /* 2000 if (output_reg == DP_A || is_pch_edp(intel_dp)) {
2715 * FIXME : We need to initialize built-in panels before external panels.
2716 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
2717 */
2718 if (IS_VALLEYVIEW(dev) && port == PORT_C) {
2719 type = DRM_MODE_CONNECTOR_eDP;
2720 intel_encoder->type = INTEL_OUTPUT_EDP;
2721 } else if (port == PORT_A || is_pch_edp(intel_dp)) {
2722 type = DRM_MODE_CONNECTOR_eDP; 2001 type = DRM_MODE_CONNECTOR_eDP;
2723 intel_encoder->type = INTEL_OUTPUT_EDP; 2002 intel_encoder->type = INTEL_OUTPUT_EDP;
2724 } else { 2003 } else {
2725 /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
2726 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
2727 * rewrite it.
2728 */
2729 type = DRM_MODE_CONNECTOR_DisplayPort; 2004 type = DRM_MODE_CONNECTOR_DisplayPort;
2005 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2730 } 2006 }
2731 2007
2008 connector = &intel_connector->base;
2732 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2009 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
2733 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2010 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
2734 2011
2735 connector->polled = DRM_CONNECTOR_POLL_HPD; 2012 connector->polled = DRM_CONNECTOR_POLL_HPD;
2013
2014 if (output_reg == DP_B || output_reg == PCH_DP_B)
2015 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
2016 else if (output_reg == DP_C || output_reg == PCH_DP_C)
2017 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
2018 else if (output_reg == DP_D || output_reg == PCH_DP_D)
2019 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
2020
2021 if (is_edp(intel_dp))
2022 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
2023
2024 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
2736 connector->interlace_allowed = true; 2025 connector->interlace_allowed = true;
2737 connector->doublescan_allowed = 0; 2026 connector->doublescan_allowed = 0;
2738 2027
2739 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2028 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2740 ironlake_panel_vdd_work); 2029 DRM_MODE_ENCODER_TMDS);
2030 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2741 2031
2742 intel_connector_attach_encoder(intel_connector, intel_encoder); 2032 intel_connector_attach_encoder(intel_connector, intel_encoder);
2743 drm_sysfs_connector_add(connector); 2033 drm_sysfs_connector_add(connector);
2744 2034
2745 if (IS_HASWELL(dev))
2746 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2747 else
2748 intel_connector->get_hw_state = intel_connector_get_hw_state;
2749
2750
2751 /* Set up the DDC bus. */ 2035 /* Set up the DDC bus. */
2752 switch (port) { 2036 switch (output_reg) {
2753 case PORT_A: 2037 case DP_A:
2754 name = "DPDDC-A"; 2038 name = "DPDDC-A";
2755 break; 2039 break;
2756 case PORT_B: 2040 case DP_B:
2757 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2041 case PCH_DP_B:
2758 name = "DPDDC-B"; 2042 dev_priv->hotplug_supported_mask |=
2759 break; 2043 HDMIB_HOTPLUG_INT_STATUS;
2760 case PORT_C: 2044 name = "DPDDC-B";
2761 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2045 break;
2762 name = "DPDDC-C"; 2046 case DP_C:
2763 break; 2047 case PCH_DP_C:
2764 case PORT_D: 2048 dev_priv->hotplug_supported_mask |=
2765 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2049 HDMIC_HOTPLUG_INT_STATUS;
2766 name = "DPDDC-D"; 2050 name = "DPDDC-C";
2767 break; 2051 break;
2768 default: 2052 case DP_D:
2769 WARN(1, "Invalid port %c\n", port_name(port)); 2053 case PCH_DP_D:
2770 break; 2054 dev_priv->hotplug_supported_mask |=
2055 HDMID_HOTPLUG_INT_STATUS;
2056 name = "DPDDC-D";
2057 break;
2771 } 2058 }
2772 2059
2773 if (is_edp(intel_dp))
2774 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2775
2776 intel_dp_i2c_init(intel_dp, intel_connector, name); 2060 intel_dp_i2c_init(intel_dp, intel_connector, name);
2777 2061
2778 /* Cache DPCD and EDID for edp. */ 2062 /* Cache some DPCD data in the eDP case */
2779 if (is_edp(intel_dp)) { 2063 if (is_edp(intel_dp)) {
2780 bool ret; 2064 bool ret;
2781 struct drm_display_mode *scan; 2065 u32 pp_on, pp_div;
2782 struct edid *edid; 2066
2067 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2068 pp_div = I915_READ(PCH_PP_DIVISOR);
2069
2070 /* Get T3 & T12 values (note: VESA not bspec terminology) */
2071 dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
2072 dev_priv->panel_t3 /= 10; /* t3 in 100us units */
2073 dev_priv->panel_t12 = pp_div & 0xf;
2074 dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
2783 2075
2784 ironlake_edp_panel_vdd_on(intel_dp); 2076 ironlake_edp_panel_vdd_on(intel_dp);
2785 ret = intel_dp_get_dpcd(intel_dp); 2077 ret = intel_dp_get_dpcd(intel_dp);
2786 ironlake_edp_panel_vdd_off(intel_dp, false); 2078 ironlake_edp_panel_vdd_off(intel_dp);
2787
2788 if (ret) { 2079 if (ret) {
2789 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2080 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2790 dev_priv->no_aux_handshake = 2081 dev_priv->no_aux_handshake =
@@ -2793,47 +2084,26 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2793 } else { 2084 } else {
2794 /* if this fails, presume the device is a ghost */ 2085 /* if this fails, presume the device is a ghost */
2795 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2086 DRM_INFO("failed to retrieve link info, disabling eDP\n");
2796 intel_dp_encoder_destroy(&intel_encoder->base); 2087 intel_dp_encoder_destroy(&intel_dp->base.base);
2797 intel_dp_destroy(connector); 2088 intel_dp_destroy(&intel_connector->base);
2798 return; 2089 return;
2799 } 2090 }
2091 }
2800 2092
2801 ironlake_edp_panel_vdd_on(intel_dp); 2093 intel_encoder->hot_plug = intel_dp_hot_plug;
2802 edid = drm_get_edid(connector, &intel_dp->adapter);
2803 if (edid) {
2804 if (drm_add_edid_modes(connector, edid)) {
2805 drm_mode_connector_update_edid_property(connector, edid);
2806 drm_edid_to_eld(connector, edid);
2807 } else {
2808 kfree(edid);
2809 edid = ERR_PTR(-EINVAL);
2810 }
2811 } else {
2812 edid = ERR_PTR(-ENOENT);
2813 }
2814 intel_connector->edid = edid;
2815 2094
2816 /* prefer fixed mode from EDID if available */ 2095 if (is_edp(intel_dp)) {
2817 list_for_each_entry(scan, &connector->probed_modes, head) { 2096 /* initialize panel mode from VBT if available for eDP */
2818 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 2097 if (dev_priv->lfp_lvds_vbt_mode) {
2819 fixed_mode = drm_mode_duplicate(dev, scan); 2098 dev_priv->panel_fixed_mode =
2820 break; 2099 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2100 if (dev_priv->panel_fixed_mode) {
2101 dev_priv->panel_fixed_mode->type |=
2102 DRM_MODE_TYPE_PREFERRED;
2821 } 2103 }
2822 } 2104 }
2823 2105 dev_priv->int_edp_connector = connector;
2824 /* fallback to VBT if available for eDP */ 2106 intel_panel_setup_backlight(dev);
2825 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
2826 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2827 if (fixed_mode)
2828 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
2829 }
2830
2831 ironlake_edp_panel_vdd_off(intel_dp, false);
2832 }
2833
2834 if (is_edp(intel_dp)) {
2835 intel_panel_init(&intel_connector->panel, fixed_mode);
2836 intel_panel_setup_backlight(connector);
2837 } 2107 }
2838 2108
2839 intel_dp_add_properties(intel_dp, connector); 2109 intel_dp_add_properties(intel_dp, connector);
@@ -2847,45 +2117,3 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2847 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2117 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2848 } 2118 }
2849} 2119}
2850
2851void
2852intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2853{
2854 struct intel_digital_port *intel_dig_port;
2855 struct intel_encoder *intel_encoder;
2856 struct drm_encoder *encoder;
2857 struct intel_connector *intel_connector;
2858
2859 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
2860 if (!intel_dig_port)
2861 return;
2862
2863 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2864 if (!intel_connector) {
2865 kfree(intel_dig_port);
2866 return;
2867 }
2868
2869 intel_encoder = &intel_dig_port->base;
2870 encoder = &intel_encoder->base;
2871
2872 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2873 DRM_MODE_ENCODER_TMDS);
2874 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2875
2876 intel_encoder->enable = intel_enable_dp;
2877 intel_encoder->pre_enable = intel_pre_enable_dp;
2878 intel_encoder->disable = intel_disable_dp;
2879 intel_encoder->post_disable = intel_post_disable_dp;
2880 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2881
2882 intel_dig_port->port = port;
2883 intel_dig_port->dp.output_reg = output_reg;
2884
2885 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2886 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2887 intel_encoder->cloneable = false;
2888 intel_encoder->hot_plug = intel_dp_hot_plug;
2889
2890 intel_dp_init_connector(intel_dig_port, intel_connector);
2891}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0..2480cfa7c0c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,39 +26,20 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <drm/i915_drm.h>
30#include "i915_drv.h" 29#include "i915_drv.h"
31#include <drm/drm_crtc.h> 30#include "drm_crtc.h"
32#include <drm/drm_crtc_helper.h> 31#include "drm_crtc_helper.h"
33#include <drm/drm_fb_helper.h> 32#include "drm_fb_helper.h"
34#include <drm/drm_dp_helper.h>
35 33
36#define _wait_for(COND, MS, W) ({ \ 34#define _wait_for(COND, MS, W) ({ \
37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 35 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
38 int ret__ = 0; \ 36 int ret__ = 0; \
39 while (!(COND)) { \ 37 while (! (COND)) { \
40 if (time_after(jiffies, timeout__)) { \ 38 if (time_after(jiffies, timeout__)) { \
41 ret__ = -ETIMEDOUT; \ 39 ret__ = -ETIMEDOUT; \
42 break; \ 40 break; \
43 } \ 41 } \
44 if (W && drm_can_sleep()) { \ 42 if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
45 msleep(W); \
46 } else { \
47 cpu_relax(); \
48 } \
49 } \
50 ret__; \
51})
52
53#define wait_for_atomic_us(COND, US) ({ \
54 unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
55 int ret__ = 0; \
56 while (!(COND)) { \
57 if (time_after(jiffies, timeout__)) { \
58 ret__ = -ETIMEDOUT; \
59 break; \
60 } \
61 cpu_relax(); \
62 } \ 43 } \
63 ret__; \ 44 ret__; \
64}) 45})
@@ -66,6 +47,13 @@
66#define wait_for(COND, MS) _wait_for(COND, MS, 1) 47#define wait_for(COND, MS) _wait_for(COND, MS, 1)
67#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) 48#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
68 49
50#define MSLEEP(x) do { \
51 if (in_dbg_master()) \
52 mdelay(x); \
53 else \
54 msleep(x); \
55} while(0)
56
69#define KHz(x) (1000*x) 57#define KHz(x) (1000*x)
70#define MHz(x) KHz(1000*x) 58#define MHz(x) KHz(1000*x)
71 59
@@ -94,7 +82,25 @@
94#define INTEL_OUTPUT_HDMI 6 82#define INTEL_OUTPUT_HDMI 6
95#define INTEL_OUTPUT_DISPLAYPORT 7 83#define INTEL_OUTPUT_DISPLAYPORT 7
96#define INTEL_OUTPUT_EDP 8 84#define INTEL_OUTPUT_EDP 8
97#define INTEL_OUTPUT_UNKNOWN 9 85
86/* Intel Pipe Clone Bit */
87#define INTEL_HDMIB_CLONE_BIT 1
88#define INTEL_HDMIC_CLONE_BIT 2
89#define INTEL_HDMID_CLONE_BIT 3
90#define INTEL_HDMIE_CLONE_BIT 4
91#define INTEL_HDMIF_CLONE_BIT 5
92#define INTEL_SDVO_NON_TV_CLONE_BIT 6
93#define INTEL_SDVO_TV_CLONE_BIT 7
94#define INTEL_SDVO_LVDS_CLONE_BIT 8
95#define INTEL_ANALOG_CLONE_BIT 9
96#define INTEL_TV_CLONE_BIT 10
97#define INTEL_DP_B_CLONE_BIT 11
98#define INTEL_DP_C_CLONE_BIT 12
99#define INTEL_DP_D_CLONE_BIT 13
100#define INTEL_LVDS_CLONE_BIT 14
101#define INTEL_DVO_TMDS_CLONE_BIT 15
102#define INTEL_DVO_LVDS_CLONE_BIT 16
103#define INTEL_EDP_CLONE_BIT 17
98 104
99#define INTEL_DVO_CHIP_NONE 0 105#define INTEL_DVO_CHIP_NONE 0
100#define INTEL_DVO_CHIP_LVDS 1 106#define INTEL_DVO_CHIP_LVDS 1
@@ -104,11 +110,6 @@
104/* drm_display_mode->private_flags */ 110/* drm_display_mode->private_flags */
105#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
106#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
107#define INTEL_MODE_DP_FORCE_6BPC (0x10)
108/* This flag must be set by the encoder's mode_fixup if it changes the crtc
109 * timings in the mode to prevent the crtc fixup from overwriting them.
110 * Currently only lvds needs that. */
111#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
112 113
113static inline void 114static inline void
114intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, 115intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -138,151 +139,50 @@ struct intel_fbdev {
138 139
139struct intel_encoder { 140struct intel_encoder {
140 struct drm_encoder base; 141 struct drm_encoder base;
141 /*
142 * The new crtc this encoder will be driven from. Only differs from
143 * base->crtc while a modeset is in progress.
144 */
145 struct intel_crtc *new_crtc;
146
147 int type; 142 int type;
148 bool needs_tv_clock; 143 bool needs_tv_clock;
149 /*
150 * Intel hw has only one MUX where encoders could be clone, hence a
151 * simple flag is enough to compute the possible_clones mask.
152 */
153 bool cloneable;
154 bool connectors_active;
155 void (*hot_plug)(struct intel_encoder *); 144 void (*hot_plug)(struct intel_encoder *);
156 void (*pre_enable)(struct intel_encoder *);
157 void (*enable)(struct intel_encoder *);
158 void (*disable)(struct intel_encoder *);
159 void (*post_disable)(struct intel_encoder *);
160 /* Read out the current hw state of this connector, returning true if
161 * the encoder is active. If the encoder is enabled it also set the pipe
162 * it is connected to in the pipe parameter. */
163 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
164 int crtc_mask; 145 int crtc_mask;
165}; 146 int clone_mask;
166
167struct intel_panel {
168 struct drm_display_mode *fixed_mode;
169 int fitting_mode;
170}; 147};
171 148
172struct intel_connector { 149struct intel_connector {
173 struct drm_connector base; 150 struct drm_connector base;
174 /*
175 * The fixed encoder this connector is connected to.
176 */
177 struct intel_encoder *encoder; 151 struct intel_encoder *encoder;
178
179 /*
180 * The new encoder this connector will be driven. Only differs from
181 * encoder while a modeset is in progress.
182 */
183 struct intel_encoder *new_encoder;
184
185 /* Reads out the current hw, returning true if the connector is enabled
186 * and active (i.e. dpms ON state). */
187 bool (*get_hw_state)(struct intel_connector *);
188
189 /* Panel info for eDP and LVDS */
190 struct intel_panel panel;
191
192 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
193 struct edid *edid;
194}; 152};
195 153
196struct intel_crtc { 154struct intel_crtc {
197 struct drm_crtc base; 155 struct drm_crtc base;
198 enum pipe pipe; 156 enum pipe pipe;
199 enum plane plane; 157 enum plane plane;
200 enum transcoder cpu_transcoder;
201 u8 lut_r[256], lut_g[256], lut_b[256]; 158 u8 lut_r[256], lut_g[256], lut_b[256];
202 /* 159 int dpms_mode;
203 * Whether the crtc and the connected output pipeline is active. Implies 160 bool active; /* is the crtc on? independent of the dpms mode */
204 * that crtc->enabled is set, i.e. the current mode configuration has 161 bool busy; /* is scanout buffer being updated frequently? */
205 * some outputs connected to this crtc. 162 struct timer_list idle_timer;
206 */
207 bool active;
208 bool primary_disabled; /* is the crtc obscured by a plane? */
209 bool lowfreq_avail; 163 bool lowfreq_avail;
210 struct intel_overlay *overlay; 164 struct intel_overlay *overlay;
211 struct intel_unpin_work *unpin_work; 165 struct intel_unpin_work *unpin_work;
212 int fdi_lanes; 166 int fdi_lanes;
213 167
214 atomic_t unpin_work_count;
215
216 /* Display surface base address adjustement for pageflips. Note that on
217 * gen4+ this only adjusts up to a tile, offsets within a tile are
218 * handled in the hw itself (with the TILEOFF register). */
219 unsigned long dspaddr_offset;
220
221 struct drm_i915_gem_object *cursor_bo; 168 struct drm_i915_gem_object *cursor_bo;
222 uint32_t cursor_addr; 169 uint32_t cursor_addr;
223 int16_t cursor_x, cursor_y; 170 int16_t cursor_x, cursor_y;
224 int16_t cursor_width, cursor_height; 171 int16_t cursor_width, cursor_height;
225 bool cursor_visible; 172 bool cursor_visible;
226 unsigned int bpp; 173 unsigned int bpp;
227
228 /* We can share PLLs across outputs if the timings match */
229 struct intel_pch_pll *pch_pll;
230 uint32_t ddi_pll_sel;
231};
232
233struct intel_plane {
234 struct drm_plane base;
235 enum pipe pipe;
236 struct drm_i915_gem_object *obj;
237 bool can_scale;
238 int max_downscale;
239 u32 lut_r[1024], lut_g[1024], lut_b[1024];
240 void (*update_plane)(struct drm_plane *plane,
241 struct drm_framebuffer *fb,
242 struct drm_i915_gem_object *obj,
243 int crtc_x, int crtc_y,
244 unsigned int crtc_w, unsigned int crtc_h,
245 uint32_t x, uint32_t y,
246 uint32_t src_w, uint32_t src_h);
247 void (*disable_plane)(struct drm_plane *plane);
248 int (*update_colorkey)(struct drm_plane *plane,
249 struct drm_intel_sprite_colorkey *key);
250 void (*get_colorkey)(struct drm_plane *plane,
251 struct drm_intel_sprite_colorkey *key);
252};
253
254struct intel_watermark_params {
255 unsigned long fifo_size;
256 unsigned long max_wm;
257 unsigned long default_wm;
258 unsigned long guard_size;
259 unsigned long cacheline_size;
260};
261
262struct cxsr_latency {
263 int is_desktop;
264 int is_ddr3;
265 unsigned long fsb_freq;
266 unsigned long mem_freq;
267 unsigned long display_sr;
268 unsigned long display_hpll_disable;
269 unsigned long cursor_sr;
270 unsigned long cursor_hpll_disable;
271}; 174};
272 175
273#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
274#define to_intel_connector(x) container_of(x, struct intel_connector, base) 177#define to_intel_connector(x) container_of(x, struct intel_connector, base)
275#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 178#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
276#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 179#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
277#define to_intel_plane(x) container_of(x, struct intel_plane, base)
278 180
279#define DIP_HEADER_SIZE 5 181#define DIP_HEADER_SIZE 5
280 182
281#define DIP_TYPE_AVI 0x82 183#define DIP_TYPE_AVI 0x82
282#define DIP_VERSION_AVI 0x2 184#define DIP_VERSION_AVI 0x2
283#define DIP_LEN_AVI 13 185#define DIP_LEN_AVI 13
284#define DIP_AVI_PR_1 0
285#define DIP_AVI_PR_2 1
286 186
287#define DIP_TYPE_SPD 0x83 187#define DIP_TYPE_SPD 0x83
288#define DIP_VERSION_SPD 0x1 188#define DIP_VERSION_SPD 0x1
@@ -316,71 +216,23 @@ struct dip_infoframe {
316 uint8_t ITC_EC_Q_SC; 216 uint8_t ITC_EC_Q_SC;
317 /* PB4 - VIC 6:0 */ 217 /* PB4 - VIC 6:0 */
318 uint8_t VIC; 218 uint8_t VIC;
319 /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */ 219 /* PB5 - PR 3:0 */
320 uint8_t YQ_CN_PR; 220 uint8_t PR;
321 /* PB6 to PB13 */ 221 /* PB6 to PB13 */
322 uint16_t top_bar_end; 222 uint16_t top_bar_end;
323 uint16_t bottom_bar_start; 223 uint16_t bottom_bar_start;
324 uint16_t left_bar_end; 224 uint16_t left_bar_end;
325 uint16_t right_bar_start; 225 uint16_t right_bar_start;
326 } __attribute__ ((packed)) avi; 226 } avi;
327 struct { 227 struct {
328 uint8_t vn[8]; 228 uint8_t vn[8];
329 uint8_t pd[16]; 229 uint8_t pd[16];
330 uint8_t sdi; 230 uint8_t sdi;
331 } __attribute__ ((packed)) spd; 231 } spd;
332 uint8_t payload[27]; 232 uint8_t payload[27];
333 } __attribute__ ((packed)) body; 233 } __attribute__ ((packed)) body;
334} __attribute__((packed)); 234} __attribute__((packed));
335 235
336struct intel_hdmi {
337 u32 sdvox_reg;
338 int ddc_bus;
339 uint32_t color_range;
340 bool has_hdmi_sink;
341 bool has_audio;
342 enum hdmi_force_audio force_audio;
343 void (*write_infoframe)(struct drm_encoder *encoder,
344 struct dip_infoframe *frame);
345 void (*set_infoframes)(struct drm_encoder *encoder,
346 struct drm_display_mode *adjusted_mode);
347};
348
349#define DP_MAX_DOWNSTREAM_PORTS 0x10
350#define DP_LINK_CONFIGURATION_SIZE 9
351
352struct intel_dp {
353 uint32_t output_reg;
354 uint32_t DP;
355 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
356 bool has_audio;
357 enum hdmi_force_audio force_audio;
358 uint32_t color_range;
359 uint8_t link_bw;
360 uint8_t lane_count;
361 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
362 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
363 struct i2c_adapter adapter;
364 struct i2c_algo_dp_aux_data algo;
365 bool is_pch_edp;
366 uint8_t train_set[4];
367 int panel_power_up_delay;
368 int panel_power_down_delay;
369 int panel_power_cycle_delay;
370 int backlight_on_delay;
371 int backlight_off_delay;
372 struct delayed_work panel_vdd_work;
373 bool want_panel_vdd;
374 struct intel_connector *attached_connector;
375};
376
377struct intel_digital_port {
378 struct intel_encoder base;
379 enum port port;
380 struct intel_dp dp;
381 struct intel_hdmi hdmi;
382};
383
384static inline struct drm_crtc * 236static inline struct drm_crtc *
385intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 237intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
386{ 238{
@@ -397,14 +249,11 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
397 249
398struct intel_unpin_work { 250struct intel_unpin_work {
399 struct work_struct work; 251 struct work_struct work;
400 struct drm_crtc *crtc; 252 struct drm_device *dev;
401 struct drm_i915_gem_object *old_fb_obj; 253 struct drm_i915_gem_object *old_fb_obj;
402 struct drm_i915_gem_object *pending_flip_obj; 254 struct drm_i915_gem_object *pending_flip_obj;
403 struct drm_pending_vblank_event *event; 255 struct drm_pending_vblank_event *event;
404 atomic_t pending; 256 int pending;
405#define INTEL_FLIP_INACTIVE 0
406#define INTEL_FLIP_PENDING 1
407#define INTEL_FLIP_COMPLETE 2
408 bool enable_stall_check; 257 bool enable_stall_check;
409}; 258};
410 259
@@ -415,137 +264,55 @@ struct intel_fbc_work {
415 int interval; 264 int interval;
416}; 265};
417 266
418int intel_pch_rawclk(struct drm_device *dev);
419
420int intel_connector_update_modes(struct drm_connector *connector,
421 struct edid *edid);
422int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 267int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
268extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
423 269
424extern void intel_attach_force_audio_property(struct drm_connector *connector); 270extern void intel_attach_force_audio_property(struct drm_connector *connector);
425extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 271extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
426 272
427extern void intel_crt_init(struct drm_device *dev); 273extern void intel_crt_init(struct drm_device *dev);
428extern void intel_hdmi_init(struct drm_device *dev, 274extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
429 int sdvox_reg, enum port port); 275void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
430extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 276extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
431 struct intel_connector *intel_connector);
432extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
433extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
434 const struct drm_display_mode *mode,
435 struct drm_display_mode *adjusted_mode);
436extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
437extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
438 bool is_sdvob);
439extern void intel_dvo_init(struct drm_device *dev); 277extern void intel_dvo_init(struct drm_device *dev);
440extern void intel_tv_init(struct drm_device *dev); 278extern void intel_tv_init(struct drm_device *dev);
441extern void intel_mark_busy(struct drm_device *dev); 279extern void intel_mark_busy(struct drm_device *dev,
442extern void intel_mark_idle(struct drm_device *dev); 280 struct drm_i915_gem_object *obj);
443extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
444extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
445extern bool intel_lvds_init(struct drm_device *dev); 281extern bool intel_lvds_init(struct drm_device *dev);
446extern void intel_dp_init(struct drm_device *dev, int output_reg, 282extern void intel_dp_init(struct drm_device *dev, int dp_reg);
447 enum port port);
448extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
449 struct intel_connector *intel_connector);
450void 283void
451intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 284intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
452 struct drm_display_mode *adjusted_mode); 285 struct drm_display_mode *adjusted_mode);
453extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
454extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
455extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
456extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
457extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
458extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
459extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
460 const struct drm_display_mode *mode,
461 struct drm_display_mode *adjusted_mode);
462extern bool intel_dpd_is_edp(struct drm_device *dev); 286extern bool intel_dpd_is_edp(struct drm_device *dev);
463extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 287extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
464extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
465extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
466extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
467extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
468extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
469extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
470extern int intel_edp_target_clock(struct intel_encoder *,
471 struct drm_display_mode *mode);
472extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); 288extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
473extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
474extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
475 enum plane plane);
476 289
477/* intel_panel.c */ 290/* intel_panel.c */
478extern int intel_panel_init(struct intel_panel *panel,
479 struct drm_display_mode *fixed_mode);
480extern void intel_panel_fini(struct intel_panel *panel);
481
482extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 291extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
483 struct drm_display_mode *adjusted_mode); 292 struct drm_display_mode *adjusted_mode);
484extern void intel_pch_panel_fitting(struct drm_device *dev, 293extern void intel_pch_panel_fitting(struct drm_device *dev,
485 int fitting_mode, 294 int fitting_mode,
486 const struct drm_display_mode *mode, 295 struct drm_display_mode *mode,
487 struct drm_display_mode *adjusted_mode); 296 struct drm_display_mode *adjusted_mode);
488extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 297extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
298extern u32 intel_panel_get_backlight(struct drm_device *dev);
489extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
490extern int intel_panel_setup_backlight(struct drm_connector *connector); 300extern int intel_panel_setup_backlight(struct drm_device *dev);
491extern void intel_panel_enable_backlight(struct drm_device *dev, 301extern void intel_panel_enable_backlight(struct drm_device *dev);
492 enum pipe pipe);
493extern void intel_panel_disable_backlight(struct drm_device *dev); 302extern void intel_panel_disable_backlight(struct drm_device *dev);
494extern void intel_panel_destroy_backlight(struct drm_device *dev); 303extern void intel_panel_destroy_backlight(struct drm_device *dev);
495extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 304extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
496 305
497struct intel_set_config {
498 struct drm_encoder **save_connector_encoders;
499 struct drm_crtc **save_encoder_crtcs;
500
501 bool fb_changed;
502 bool mode_changed;
503};
504
505extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
506 int x, int y, struct drm_framebuffer *old_fb);
507extern void intel_modeset_disable(struct drm_device *dev);
508extern void intel_crtc_load_lut(struct drm_crtc *crtc); 306extern void intel_crtc_load_lut(struct drm_crtc *crtc);
509extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 307extern void intel_encoder_prepare (struct drm_encoder *encoder);
510extern void intel_encoder_noop(struct drm_encoder *encoder); 308extern void intel_encoder_commit (struct drm_encoder *encoder);
511extern void intel_encoder_destroy(struct drm_encoder *encoder); 309extern void intel_encoder_destroy(struct drm_encoder *encoder);
512extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
513extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
514extern void intel_connector_dpms(struct drm_connector *, int mode);
515extern bool intel_connector_get_hw_state(struct intel_connector *connector);
516extern void intel_modeset_check_state(struct drm_device *dev);
517
518 310
519static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 311static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
520{ 312{
521 return to_intel_connector(connector)->encoder; 313 return to_intel_connector(connector)->encoder;
522} 314}
523 315
524static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
525{
526 struct intel_digital_port *intel_dig_port =
527 container_of(encoder, struct intel_digital_port, base.base);
528 return &intel_dig_port->dp;
529}
530
531static inline struct intel_digital_port *
532enc_to_dig_port(struct drm_encoder *encoder)
533{
534 return container_of(encoder, struct intel_digital_port, base.base);
535}
536
537static inline struct intel_digital_port *
538dp_to_dig_port(struct intel_dp *intel_dp)
539{
540 return container_of(intel_dp, struct intel_digital_port, dp);
541}
542
543static inline struct intel_digital_port *
544hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
545{
546 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
547}
548
549extern void intel_connector_attach_encoder(struct intel_connector *connector, 316extern void intel_connector_attach_encoder(struct intel_connector *connector,
550 struct intel_encoder *encoder); 317 struct intel_encoder *encoder);
551extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 318extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -554,22 +321,20 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
554 struct drm_crtc *crtc); 321 struct drm_crtc *crtc);
555int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 322int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
556 struct drm_file *file_priv); 323 struct drm_file *file_priv);
557extern enum transcoder
558intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
559 enum pipe pipe);
560extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 324extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
561extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 325extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
562extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
563 326
564struct intel_load_detect_pipe { 327struct intel_load_detect_pipe {
565 struct drm_framebuffer *release_fb; 328 struct drm_framebuffer *release_fb;
566 bool load_detect_temp; 329 bool load_detect_temp;
567 int dpms_mode; 330 int dpms_mode;
568}; 331};
569extern bool intel_get_load_detect_pipe(struct drm_connector *connector, 332extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
333 struct drm_connector *connector,
570 struct drm_display_mode *mode, 334 struct drm_display_mode *mode,
571 struct intel_load_detect_pipe *old); 335 struct intel_load_detect_pipe *old);
572extern void intel_release_load_detect_pipe(struct drm_connector *connector, 336extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
337 struct drm_connector *connector,
573 struct intel_load_detect_pipe *old); 338 struct intel_load_detect_pipe *old);
574 339
575extern void intelfb_restore(void); 340extern void intelfb_restore(void);
@@ -578,19 +343,24 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
578extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 343extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
579 u16 *blue, int regno); 344 u16 *blue, int regno);
580extern void intel_enable_clock_gating(struct drm_device *dev); 345extern void intel_enable_clock_gating(struct drm_device *dev);
346extern void ironlake_enable_drps(struct drm_device *dev);
347extern void ironlake_disable_drps(struct drm_device *dev);
348extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
349extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
350extern void gen6_disable_rps(struct drm_device *dev);
351extern void intel_init_emon(struct drm_device *dev);
581 352
582extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 353extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
583 struct drm_i915_gem_object *obj, 354 struct drm_i915_gem_object *obj,
584 struct intel_ring_buffer *pipelined); 355 struct intel_ring_buffer *pipelined);
585extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
586 356
587extern int intel_framebuffer_init(struct drm_device *dev, 357extern int intel_framebuffer_init(struct drm_device *dev,
588 struct intel_framebuffer *ifb, 358 struct intel_framebuffer *ifb,
589 struct drm_mode_fb_cmd2 *mode_cmd, 359 struct drm_mode_fb_cmd *mode_cmd,
590 struct drm_i915_gem_object *obj); 360 struct drm_i915_gem_object *obj);
591extern int intel_fbdev_init(struct drm_device *dev); 361extern int intel_fbdev_init(struct drm_device *dev);
592extern void intel_fbdev_fini(struct drm_device *dev); 362extern void intel_fbdev_fini(struct drm_device *dev);
593extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 363
594extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 364extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
595extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 365extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
596extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 366extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
@@ -606,70 +376,5 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
606extern void intel_fb_output_poll_changed(struct drm_device *dev); 376extern void intel_fb_output_poll_changed(struct drm_device *dev);
607extern void intel_fb_restore_mode(struct drm_device *dev); 377extern void intel_fb_restore_mode(struct drm_device *dev);
608 378
609extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
610 bool state);
611#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
612#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
613
614extern void intel_init_clock_gating(struct drm_device *dev); 379extern void intel_init_clock_gating(struct drm_device *dev);
615extern void intel_write_eld(struct drm_encoder *encoder,
616 struct drm_display_mode *mode);
617extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
618extern void intel_prepare_ddi(struct drm_device *dev);
619extern void hsw_fdi_link_train(struct drm_crtc *crtc);
620extern void intel_ddi_init(struct drm_device *dev, enum port port);
621
622/* For use by IVB LP watermark workaround in intel_sprite.c */
623extern void intel_update_watermarks(struct drm_device *dev);
624extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
625 uint32_t sprite_width,
626 int pixel_size);
627extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
628 struct drm_display_mode *mode);
629
630extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
631 unsigned int bpp,
632 unsigned int pitch);
633
634extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
635 struct drm_file *file_priv);
636extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
637 struct drm_file *file_priv);
638
639extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
640
641/* Power-related functions, located in intel_pm.c */
642extern void intel_init_pm(struct drm_device *dev);
643/* FBC */
644extern bool intel_fbc_enabled(struct drm_device *dev);
645extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
646extern void intel_update_fbc(struct drm_device *dev);
647/* IPS */
648extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
649extern void intel_gpu_ips_teardown(void);
650
651extern void intel_init_power_wells(struct drm_device *dev);
652extern void intel_enable_gt_powersave(struct drm_device *dev);
653extern void intel_disable_gt_powersave(struct drm_device *dev);
654extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
655extern void ironlake_teardown_rc6(struct drm_device *dev);
656
657extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
658 enum pipe *pipe);
659extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
660extern void intel_ddi_pll_init(struct drm_device *dev);
661extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
662extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
663 enum transcoder cpu_transcoder);
664extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
665extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
666extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
667extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
668extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
669extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
670extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
671extern bool
672intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
673extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
674
675#endif /* __INTEL_DRV_H__ */ 380#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5..6eda1b51c63 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -26,17 +26,17 @@
26 */ 26 */
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <drm/drmP.h> 29#include "drmP.h"
30#include <drm/drm_crtc.h> 30#include "drm.h"
31#include "drm_crtc.h"
31#include "intel_drv.h" 32#include "intel_drv.h"
32#include <drm/i915_drm.h> 33#include "i915_drm.h"
33#include "i915_drv.h" 34#include "i915_drv.h"
34#include "dvo.h" 35#include "dvo.h"
35 36
36#define SIL164_ADDR 0x38 37#define SIL164_ADDR 0x38
37#define CH7xxx_ADDR 0x76 38#define CH7xxx_ADDR 0x76
38#define TFP410_ADDR 0x38 39#define TFP410_ADDR 0x38
39#define NS2501_ADDR 0x38
40 40
41static const struct intel_dvo_device intel_dvo_devices[] = { 41static const struct intel_dvo_device intel_dvo_devices[] = {
42 { 42 {
@@ -74,14 +74,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
74 .slave_addr = 0x75, 74 .slave_addr = 0x75,
75 .gpio = GMBUS_PORT_DPB, 75 .gpio = GMBUS_PORT_DPB,
76 .dev_ops = &ch7017_ops, 76 .dev_ops = &ch7017_ops,
77 }, 77 }
78 {
79 .type = INTEL_DVO_CHIP_TMDS,
80 .name = "ns2501",
81 .dvo_reg = DVOC,
82 .slave_addr = NS2501_ADDR,
83 .dev_ops = &ns2501_ops,
84 }
85}; 78};
86 79
87struct intel_dvo { 80struct intel_dvo {
@@ -104,91 +97,22 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
104 struct intel_dvo, base); 97 struct intel_dvo, base);
105} 98}
106 99
107static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 100static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
108{
109 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
110
111 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
112}
113
114static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
115 enum pipe *pipe)
116{
117 struct drm_device *dev = encoder->base.dev;
118 struct drm_i915_private *dev_priv = dev->dev_private;
119 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
120 u32 tmp;
121
122 tmp = I915_READ(intel_dvo->dev.dvo_reg);
123
124 if (!(tmp & DVO_ENABLE))
125 return false;
126
127 *pipe = PORT_TO_PIPE(tmp);
128
129 return true;
130}
131
132static void intel_disable_dvo(struct intel_encoder *encoder)
133{
134 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
135 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
136 u32 dvo_reg = intel_dvo->dev.dvo_reg;
137 u32 temp = I915_READ(dvo_reg);
138
139 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
140 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
141 I915_READ(dvo_reg);
142}
143
144static void intel_enable_dvo(struct intel_encoder *encoder)
145{ 101{
146 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 102 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
147 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); 103 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
148 u32 dvo_reg = intel_dvo->dev.dvo_reg; 104 u32 dvo_reg = intel_dvo->dev.dvo_reg;
149 u32 temp = I915_READ(dvo_reg); 105 u32 temp = I915_READ(dvo_reg);
150 106
151 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
152 I915_READ(dvo_reg);
153 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
154}
155
156static void intel_dvo_dpms(struct drm_connector *connector, int mode)
157{
158 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
159 struct drm_crtc *crtc;
160
161 /* dvo supports only 2 dpms states. */
162 if (mode != DRM_MODE_DPMS_ON)
163 mode = DRM_MODE_DPMS_OFF;
164
165 if (mode == connector->dpms)
166 return;
167
168 connector->dpms = mode;
169
170 /* Only need to change hw state when actually enabled */
171 crtc = intel_dvo->base.base.crtc;
172 if (!crtc) {
173 intel_dvo->base.connectors_active = false;
174 return;
175 }
176
177 if (mode == DRM_MODE_DPMS_ON) { 107 if (mode == DRM_MODE_DPMS_ON) {
178 intel_dvo->base.connectors_active = true; 108 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
179 109 I915_READ(dvo_reg);
180 intel_crtc_update_dpms(crtc); 110 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
181
182 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
183 } else { 111 } else {
184 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 112 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
185 113 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
186 intel_dvo->base.connectors_active = false; 114 I915_READ(dvo_reg);
187
188 intel_crtc_update_dpms(crtc);
189 } 115 }
190
191 intel_modeset_check_state(connector->dev);
192} 116}
193 117
194static int intel_dvo_mode_valid(struct drm_connector *connector, 118static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -212,7 +136,7 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
212} 136}
213 137
214static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, 138static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
215 const struct drm_display_mode *mode, 139 struct drm_display_mode *mode,
216 struct drm_display_mode *adjusted_mode) 140 struct drm_display_mode *adjusted_mode)
217{ 141{
218 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 142 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
@@ -233,6 +157,7 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
233 C(vsync_end); 157 C(vsync_end);
234 C(vtotal); 158 C(vtotal);
235 C(clock); 159 C(clock);
160 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
236#undef C 161#undef C
237 } 162 }
238 163
@@ -319,7 +244,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
319 * that's not the case. 244 * that's not the case.
320 */ 245 */
321 intel_ddc_get_modes(connector, 246 intel_ddc_get_modes(connector,
322 intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC)); 247 &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
323 if (!list_empty(&connector->probed_modes)) 248 if (!list_empty(&connector->probed_modes))
324 return 1; 249 return 1;
325 250
@@ -343,13 +268,15 @@ static void intel_dvo_destroy(struct drm_connector *connector)
343} 268}
344 269
345static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 270static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
271 .dpms = intel_dvo_dpms,
346 .mode_fixup = intel_dvo_mode_fixup, 272 .mode_fixup = intel_dvo_mode_fixup,
273 .prepare = intel_encoder_prepare,
347 .mode_set = intel_dvo_mode_set, 274 .mode_set = intel_dvo_mode_set,
348 .disable = intel_encoder_noop, 275 .commit = intel_encoder_commit,
349}; 276};
350 277
351static const struct drm_connector_funcs intel_dvo_connector_funcs = { 278static const struct drm_connector_funcs intel_dvo_connector_funcs = {
352 .dpms = intel_dvo_dpms, 279 .dpms = drm_helper_connector_dpms,
353 .detect = intel_dvo_detect, 280 .detect = intel_dvo_detect,
354 .destroy = intel_dvo_destroy, 281 .destroy = intel_dvo_destroy,
355 .fill_modes = drm_helper_probe_single_connector_modes, 282 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -438,11 +365,6 @@ void intel_dvo_init(struct drm_device *dev)
438 drm_encoder_init(dev, &intel_encoder->base, 365 drm_encoder_init(dev, &intel_encoder->base,
439 &intel_dvo_enc_funcs, encoder_type); 366 &intel_dvo_enc_funcs, encoder_type);
440 367
441 intel_encoder->disable = intel_disable_dvo;
442 intel_encoder->enable = intel_enable_dvo;
443 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
444 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
445
446 /* Now, try to find a controller */ 368 /* Now, try to find a controller */
447 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 369 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
448 struct drm_connector *connector = &intel_connector->base; 370 struct drm_connector *connector = &intel_connector->base;
@@ -454,7 +376,7 @@ void intel_dvo_init(struct drm_device *dev)
454 * special cases, but otherwise default to what's defined 376 * special cases, but otherwise default to what's defined
455 * in the spec. 377 * in the spec.
456 */ 378 */
457 if (intel_gmbus_is_port_valid(dvo->gpio)) 379 if (dvo->gpio != 0)
458 gpio = dvo->gpio; 380 gpio = dvo->gpio;
459 else if (dvo->type == INTEL_DVO_CHIP_LVDS) 381 else if (dvo->type == INTEL_DVO_CHIP_LVDS)
460 gpio = GMBUS_PORT_SSC; 382 gpio = GMBUS_PORT_SSC;
@@ -465,7 +387,7 @@ void intel_dvo_init(struct drm_device *dev)
465 * It appears that everything is on GPIOE except for panels 387 * It appears that everything is on GPIOE except for panels
466 * on i830 laptops, which are on GPIOB (DVOA). 388 * on i830 laptops, which are on GPIOB (DVOA).
467 */ 389 */
468 i2c = intel_gmbus_get_adapter(dev_priv, gpio); 390 i2c = &dev_priv->gmbus[gpio].adapter;
469 391
470 intel_dvo->dev = *dvo; 392 intel_dvo->dev = *dvo;
471 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) 393 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
@@ -475,14 +397,17 @@ void intel_dvo_init(struct drm_device *dev)
475 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 397 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
476 switch (dvo->type) { 398 switch (dvo->type) {
477 case INTEL_DVO_CHIP_TMDS: 399 case INTEL_DVO_CHIP_TMDS:
478 intel_encoder->cloneable = true; 400 intel_encoder->clone_mask =
401 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
402 (1 << INTEL_ANALOG_CLONE_BIT);
479 drm_connector_init(dev, connector, 403 drm_connector_init(dev, connector,
480 &intel_dvo_connector_funcs, 404 &intel_dvo_connector_funcs,
481 DRM_MODE_CONNECTOR_DVII); 405 DRM_MODE_CONNECTOR_DVII);
482 encoder_type = DRM_MODE_ENCODER_TMDS; 406 encoder_type = DRM_MODE_ENCODER_TMDS;
483 break; 407 break;
484 case INTEL_DVO_CHIP_LVDS: 408 case INTEL_DVO_CHIP_LVDS:
485 intel_encoder->cloneable = false; 409 intel_encoder->clone_mask =
410 (1 << INTEL_DVO_LVDS_CLONE_BIT);
486 drm_connector_init(dev, connector, 411 drm_connector_init(dev, connector,
487 &intel_dvo_connector_funcs, 412 &intel_dvo_connector_funcs,
488 DRM_MODE_CONNECTOR_LVDS); 413 DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4e..ec49bae7338 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -36,11 +36,12 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/vga_switcheroo.h> 37#include <linux/vga_switcheroo.h>
38 38
39#include <drm/drmP.h> 39#include "drmP.h"
40#include <drm/drm_crtc.h> 40#include "drm.h"
41#include <drm/drm_fb_helper.h> 41#include "drm_crtc.h"
42#include "drm_fb_helper.h"
42#include "intel_drv.h" 43#include "intel_drv.h"
43#include <drm/i915_drm.h> 44#include "i915_drm.h"
44#include "i915_drv.h" 45#include "i915_drv.h"
45 46
46static struct fb_ops intelfb_ops = { 47static struct fb_ops intelfb_ops = {
@@ -64,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
64 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct drm_i915_private *dev_priv = dev->dev_private;
65 struct fb_info *info; 66 struct fb_info *info;
66 struct drm_framebuffer *fb; 67 struct drm_framebuffer *fb;
67 struct drm_mode_fb_cmd2 mode_cmd = {}; 68 struct drm_mode_fb_cmd mode_cmd;
68 struct drm_i915_gem_object *obj; 69 struct drm_i915_gem_object *obj;
69 struct device *device = &dev->pdev->dev; 70 struct device *device = &dev->pdev->dev;
70 int size, ret; 71 int size, ret;
@@ -76,12 +77,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
76 mode_cmd.width = sizes->surface_width; 77 mode_cmd.width = sizes->surface_width;
77 mode_cmd.height = sizes->surface_height; 78 mode_cmd.height = sizes->surface_height;
78 79
79 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / 80 mode_cmd.bpp = sizes->surface_bpp;
80 8), 64); 81 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
81 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 82 mode_cmd.depth = sizes->surface_depth;
82 sizes->surface_depth);
83 83
84 size = mode_cmd.pitches[0] * mode_cmd.height; 84 size = mode_cmd.pitch * mode_cmd.height;
85 size = ALIGN(size, PAGE_SIZE); 85 size = ALIGN(size, PAGE_SIZE);
86 obj = i915_gem_alloc_object(dev, size); 86 obj = i915_gem_alloc_object(dev, size);
87 if (!obj) { 87 if (!obj) {
@@ -93,7 +93,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
93 mutex_lock(&dev->struct_mutex); 93 mutex_lock(&dev->struct_mutex);
94 94
95 /* Flush everything out, we'll be doing GTT only from now on */ 95 /* Flush everything out, we'll be doing GTT only from now on */
96 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 96 ret = intel_pin_and_fence_fb_obj(dev, obj, false);
97 if (ret) { 97 if (ret) {
98 DRM_ERROR("failed to pin fb: %d\n", ret); 98 DRM_ERROR("failed to pin fb: %d\n", ret);
99 goto out_unref; 99 goto out_unref;
@@ -139,9 +139,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
139 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 139 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
140 info->fix.smem_len = size; 140 info->fix.smem_len = size;
141 141
142 info->screen_base = 142 info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
143 ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
144 size);
145 if (!info->screen_base) { 143 if (!info->screen_base) {
146 ret = -ENOSPC; 144 ret = -ENOSPC;
147 goto out_unpin; 145 goto out_unpin;
@@ -150,10 +148,14 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
150 148
151// memset(info->screen_base, 0, size); 149// memset(info->screen_base, 0, size);
152 150
153 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 151 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
154 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 152 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
155 153
156 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 154 info->pixmap.size = 64*1024;
155 info->pixmap.buf_align = 8;
156 info->pixmap.access_align = 32;
157 info->pixmap.flags = FB_PIXMAP_SYSTEM;
158 info->pixmap.scan_align = 1;
157 159
158 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 160 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
159 fb->width, fb->height, 161 fb->width, fb->height,
@@ -255,16 +257,6 @@ void intel_fbdev_fini(struct drm_device *dev)
255 kfree(dev_priv->fbdev); 257 kfree(dev_priv->fbdev);
256 dev_priv->fbdev = NULL; 258 dev_priv->fbdev = NULL;
257} 259}
258
259void intel_fbdev_set_suspend(struct drm_device *dev, int state)
260{
261 drm_i915_private_t *dev_priv = dev->dev_private;
262 if (!dev_priv->fbdev)
263 return;
264
265 fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
266}
267
268MODULE_LICENSE("GPL and additional rights"); 260MODULE_LICENSE("GPL and additional rights");
269 261
270void intel_fb_output_poll_changed(struct drm_device *dev) 262void intel_fb_output_poll_changed(struct drm_device *dev)
@@ -277,18 +269,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
277{ 269{
278 int ret; 270 int ret;
279 drm_i915_private_t *dev_priv = dev->dev_private; 271 drm_i915_private_t *dev_priv = dev->dev_private;
280 struct drm_mode_config *config = &dev->mode_config;
281 struct drm_plane *plane;
282
283 mutex_lock(&dev->mode_config.mutex);
284 272
285 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 273 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
286 if (ret) 274 if (ret)
287 DRM_DEBUG("failed to restore crtc mode\n"); 275 DRM_DEBUG("failed to restore crtc mode\n");
288
289 /* Be sure to shut off any planes that may be active */
290 list_for_each_entry(plane, &config->plane_list, head)
291 plane->funcs->disable_plane(plane);
292
293 mutex_unlock(&dev->mode_config.mutex);
294} 276}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d9..226ba830f38 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -29,41 +29,35 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <drm/drmP.h> 32#include "drmP.h"
33#include <drm/drm_crtc.h> 33#include "drm.h"
34#include <drm/drm_edid.h> 34#include "drm_crtc.h"
35#include "drm_edid.h"
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include <drm/i915_drm.h> 37#include "i915_drm.h"
37#include "i915_drv.h" 38#include "i915_drv.h"
38 39
39static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) 40struct intel_hdmi {
40{ 41 struct intel_encoder base;
41 return hdmi_to_dig_port(intel_hdmi)->base.base.dev; 42 u32 sdvox_reg;
42} 43 int ddc_bus;
43 44 uint32_t color_range;
44static void 45 bool has_hdmi_sink;
45assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) 46 bool has_audio;
46{ 47 int force_audio;
47 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); 48 void (*write_infoframe)(struct drm_encoder *encoder,
48 struct drm_i915_private *dev_priv = dev->dev_private; 49 struct dip_infoframe *frame);
49 uint32_t enabled_bits; 50};
50
51 enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
52
53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
54 "HDMI port enabled, expecting disabled\n");
55}
56 51
57struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 52static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
58{ 53{
59 struct intel_digital_port *intel_dig_port = 54 return container_of(encoder, struct intel_hdmi, base.base);
60 container_of(encoder, struct intel_digital_port, base.base);
61 return &intel_dig_port->hdmi;
62} 55}
63 56
64static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) 57static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
65{ 58{
66 return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); 59 return container_of(intel_attached_encoder(connector),
60 struct intel_hdmi, base);
67} 61}
68 62
69void intel_dip_infoframe_csum(struct dip_infoframe *frame) 63void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -75,261 +69,128 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
75 frame->checksum = 0; 69 frame->checksum = 0;
76 frame->ecc = 0; 70 frame->ecc = 0;
77 71
78 for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) 72 /* Header isn't part of the checksum */
73 for (i = 5; i < frame->len; i++)
79 sum += data[i]; 74 sum += data[i];
80 75
81 frame->checksum = 0x100 - sum; 76 frame->checksum = 0x100 - sum;
82} 77}
83 78
84static u32 g4x_infoframe_index(struct dip_infoframe *frame) 79static u32 intel_infoframe_index(struct dip_infoframe *frame)
85{ 80{
86 switch (frame->type) { 81 u32 flags = 0;
87 case DIP_TYPE_AVI:
88 return VIDEO_DIP_SELECT_AVI;
89 case DIP_TYPE_SPD:
90 return VIDEO_DIP_SELECT_SPD;
91 default:
92 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
93 return 0;
94 }
95}
96 82
97static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
98{
99 switch (frame->type) { 83 switch (frame->type) {
100 case DIP_TYPE_AVI: 84 case DIP_TYPE_AVI:
101 return VIDEO_DIP_ENABLE_AVI; 85 flags |= VIDEO_DIP_SELECT_AVI;
86 break;
102 case DIP_TYPE_SPD: 87 case DIP_TYPE_SPD:
103 return VIDEO_DIP_ENABLE_SPD; 88 flags |= VIDEO_DIP_SELECT_SPD;
89 break;
104 default: 90 default:
105 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 91 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
106 return 0; 92 break;
107 } 93 }
108}
109 94
110static u32 hsw_infoframe_enable(struct dip_infoframe *frame) 95 return flags;
111{
112 switch (frame->type) {
113 case DIP_TYPE_AVI:
114 return VIDEO_DIP_ENABLE_AVI_HSW;
115 case DIP_TYPE_SPD:
116 return VIDEO_DIP_ENABLE_SPD_HSW;
117 default:
118 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
119 return 0;
120 }
121} 96}
122 97
123static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe) 98static u32 intel_infoframe_flags(struct dip_infoframe *frame)
124{ 99{
100 u32 flags = 0;
101
125 switch (frame->type) { 102 switch (frame->type) {
126 case DIP_TYPE_AVI: 103 case DIP_TYPE_AVI:
127 return HSW_TVIDEO_DIP_AVI_DATA(pipe); 104 flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
105 break;
128 case DIP_TYPE_SPD: 106 case DIP_TYPE_SPD:
129 return HSW_TVIDEO_DIP_SPD_DATA(pipe); 107 flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
108 break;
130 default: 109 default:
131 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 110 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
132 return 0; 111 break;
133 }
134}
135
136static void g4x_write_infoframe(struct drm_encoder *encoder,
137 struct dip_infoframe *frame)
138{
139 uint32_t *data = (uint32_t *)frame;
140 struct drm_device *dev = encoder->dev;
141 struct drm_i915_private *dev_priv = dev->dev_private;
142 u32 val = I915_READ(VIDEO_DIP_CTL);
143 unsigned i, len = DIP_HEADER_SIZE + frame->len;
144
145 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
146
147 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
148 val |= g4x_infoframe_index(frame);
149
150 val &= ~g4x_infoframe_enable(frame);
151
152 I915_WRITE(VIDEO_DIP_CTL, val);
153
154 mmiowb();
155 for (i = 0; i < len; i += 4) {
156 I915_WRITE(VIDEO_DIP_DATA, *data);
157 data++;
158 } 112 }
159 /* Write every possible data byte to force correct ECC calculation. */
160 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
161 I915_WRITE(VIDEO_DIP_DATA, 0);
162 mmiowb();
163 113
164 val |= g4x_infoframe_enable(frame); 114 return flags;
165 val &= ~VIDEO_DIP_FREQ_MASK;
166 val |= VIDEO_DIP_FREQ_VSYNC;
167
168 I915_WRITE(VIDEO_DIP_CTL, val);
169 POSTING_READ(VIDEO_DIP_CTL);
170} 115}
171 116
172static void ibx_write_infoframe(struct drm_encoder *encoder, 117static void i9xx_write_infoframe(struct drm_encoder *encoder,
173 struct dip_infoframe *frame) 118 struct dip_infoframe *frame)
174{ 119{
175 uint32_t *data = (uint32_t *)frame; 120 uint32_t *data = (uint32_t *)frame;
176 struct drm_device *dev = encoder->dev; 121 struct drm_device *dev = encoder->dev;
177 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
178 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 123 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
179 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 124 u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
180 unsigned i, len = DIP_HEADER_SIZE + frame->len; 125 unsigned i, len = DIP_HEADER_SIZE + frame->len;
181 u32 val = I915_READ(reg);
182
183 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
184
185 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
186 val |= g4x_infoframe_index(frame);
187 126
188 val &= ~g4x_infoframe_enable(frame);
189 127
190 I915_WRITE(reg, val); 128 /* XXX first guess at handling video port, is this corrent? */
191 129 if (intel_hdmi->sdvox_reg == SDVOB)
192 mmiowb(); 130 port = VIDEO_DIP_PORT_B;
193 for (i = 0; i < len; i += 4) { 131 else if (intel_hdmi->sdvox_reg == SDVOC)
194 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 132 port = VIDEO_DIP_PORT_C;
195 data++; 133 else
196 } 134 return;
197 /* Write every possible data byte to force correct ECC calculation. */
198 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
199 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
200 mmiowb();
201
202 val |= g4x_infoframe_enable(frame);
203 val &= ~VIDEO_DIP_FREQ_MASK;
204 val |= VIDEO_DIP_FREQ_VSYNC;
205
206 I915_WRITE(reg, val);
207 POSTING_READ(reg);
208}
209
210static void cpt_write_infoframe(struct drm_encoder *encoder,
211 struct dip_infoframe *frame)
212{
213 uint32_t *data = (uint32_t *)frame;
214 struct drm_device *dev = encoder->dev;
215 struct drm_i915_private *dev_priv = dev->dev_private;
216 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
217 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
218 unsigned i, len = DIP_HEADER_SIZE + frame->len;
219 u32 val = I915_READ(reg);
220
221 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
222 135
223 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 136 flags = intel_infoframe_index(frame);
224 val |= g4x_infoframe_index(frame);
225 137
226 /* The DIP control register spec says that we need to update the AVI 138 val &= ~VIDEO_DIP_SELECT_MASK;
227 * infoframe without clearing its enable bit */
228 if (frame->type != DIP_TYPE_AVI)
229 val &= ~g4x_infoframe_enable(frame);
230 139
231 I915_WRITE(reg, val); 140 I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
232 141
233 mmiowb();
234 for (i = 0; i < len; i += 4) { 142 for (i = 0; i < len; i += 4) {
235 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 143 I915_WRITE(VIDEO_DIP_DATA, *data);
236 data++; 144 data++;
237 } 145 }
238 /* Write every possible data byte to force correct ECC calculation. */
239 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
240 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
241 mmiowb();
242 146
243 val |= g4x_infoframe_enable(frame); 147 flags |= intel_infoframe_flags(frame);
244 val &= ~VIDEO_DIP_FREQ_MASK;
245 val |= VIDEO_DIP_FREQ_VSYNC;
246 148
247 I915_WRITE(reg, val); 149 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
248 POSTING_READ(reg);
249} 150}
250 151
251static void vlv_write_infoframe(struct drm_encoder *encoder, 152static void ironlake_write_infoframe(struct drm_encoder *encoder,
252 struct dip_infoframe *frame) 153 struct dip_infoframe *frame)
253{ 154{
254 uint32_t *data = (uint32_t *)frame; 155 uint32_t *data = (uint32_t *)frame;
255 struct drm_device *dev = encoder->dev; 156 struct drm_device *dev = encoder->dev;
256 struct drm_i915_private *dev_priv = dev->dev_private; 157 struct drm_i915_private *dev_priv = dev->dev_private;
257 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 158 struct drm_crtc *crtc = encoder->crtc;
258 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
160 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
259 unsigned i, len = DIP_HEADER_SIZE + frame->len; 161 unsigned i, len = DIP_HEADER_SIZE + frame->len;
260 u32 val = I915_READ(reg); 162 u32 flags, val = I915_READ(reg);
261 163
262 WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); 164 intel_wait_for_vblank(dev, intel_crtc->pipe);
263 165
264 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 166 flags = intel_infoframe_index(frame);
265 val |= g4x_infoframe_index(frame);
266 167
267 val &= ~g4x_infoframe_enable(frame); 168 val &= ~VIDEO_DIP_SELECT_MASK;
268 169
269 I915_WRITE(reg, val); 170 I915_WRITE(reg, val | flags);
270 171
271 mmiowb();
272 for (i = 0; i < len; i += 4) { 172 for (i = 0; i < len; i += 4) {
273 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 173 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
274 data++; 174 data++;
275 } 175 }
276 /* Write every possible data byte to force correct ECC calculation. */
277 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
278 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
279 mmiowb();
280 176
281 val |= g4x_infoframe_enable(frame); 177 flags |= intel_infoframe_flags(frame);
282 val &= ~VIDEO_DIP_FREQ_MASK;
283 val |= VIDEO_DIP_FREQ_VSYNC;
284 178
285 I915_WRITE(reg, val); 179 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
286 POSTING_READ(reg);
287} 180}
288
289static void hsw_write_infoframe(struct drm_encoder *encoder,
290 struct dip_infoframe *frame)
291{
292 uint32_t *data = (uint32_t *)frame;
293 struct drm_device *dev = encoder->dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
296 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
297 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
298 unsigned int i, len = DIP_HEADER_SIZE + frame->len;
299 u32 val = I915_READ(ctl_reg);
300
301 if (data_reg == 0)
302 return;
303
304 val &= ~hsw_infoframe_enable(frame);
305 I915_WRITE(ctl_reg, val);
306
307 mmiowb();
308 for (i = 0; i < len; i += 4) {
309 I915_WRITE(data_reg + i, *data);
310 data++;
311 }
312 /* Write every possible data byte to force correct ECC calculation. */
313 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
314 I915_WRITE(data_reg + i, 0);
315 mmiowb();
316
317 val |= hsw_infoframe_enable(frame);
318 I915_WRITE(ctl_reg, val);
319 POSTING_READ(ctl_reg);
320}
321
322static void intel_set_infoframe(struct drm_encoder *encoder, 181static void intel_set_infoframe(struct drm_encoder *encoder,
323 struct dip_infoframe *frame) 182 struct dip_infoframe *frame)
324{ 183{
325 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 184 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
326 185
186 if (!intel_hdmi->has_hdmi_sink)
187 return;
188
327 intel_dip_infoframe_csum(frame); 189 intel_dip_infoframe_csum(frame);
328 intel_hdmi->write_infoframe(encoder, frame); 190 intel_hdmi->write_infoframe(encoder, frame);
329} 191}
330 192
331static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 193static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
332 struct drm_display_mode *adjusted_mode)
333{ 194{
334 struct dip_infoframe avi_if = { 195 struct dip_infoframe avi_if = {
335 .type = DIP_TYPE_AVI, 196 .type = DIP_TYPE_AVI,
@@ -337,11 +198,6 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
337 .len = DIP_LEN_AVI, 198 .len = DIP_LEN_AVI,
338 }; 199 };
339 200
340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
342
343 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
344
345 intel_set_infoframe(encoder, &avi_if); 201 intel_set_infoframe(encoder, &avi_if);
346} 202}
347 203
@@ -360,236 +216,18 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
360 intel_set_infoframe(encoder, &spd_if); 216 intel_set_infoframe(encoder, &spd_if);
361} 217}
362 218
363static void g4x_set_infoframes(struct drm_encoder *encoder,
364 struct drm_display_mode *adjusted_mode)
365{
366 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
367 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
368 u32 reg = VIDEO_DIP_CTL;
369 u32 val = I915_READ(reg);
370 u32 port;
371
372 assert_hdmi_port_disabled(intel_hdmi);
373
374 /* If the registers were not initialized yet, they might be zeroes,
375 * which means we're selecting the AVI DIP and we're setting its
376 * frequency to once. This seems to really confuse the HW and make
377 * things stop working (the register spec says the AVI always needs to
378 * be sent every VSync). So here we avoid writing to the register more
379 * than we need and also explicitly select the AVI DIP and explicitly
380 * set its frequency to every VSync. Avoiding to write it twice seems to
381 * be enough to solve the problem, but being defensive shouldn't hurt us
382 * either. */
383 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
384
385 if (!intel_hdmi->has_hdmi_sink) {
386 if (!(val & VIDEO_DIP_ENABLE))
387 return;
388 val &= ~VIDEO_DIP_ENABLE;
389 I915_WRITE(reg, val);
390 POSTING_READ(reg);
391 return;
392 }
393
394 switch (intel_hdmi->sdvox_reg) {
395 case SDVOB:
396 port = VIDEO_DIP_PORT_B;
397 break;
398 case SDVOC:
399 port = VIDEO_DIP_PORT_C;
400 break;
401 default:
402 BUG();
403 return;
404 }
405
406 if (port != (val & VIDEO_DIP_PORT_MASK)) {
407 if (val & VIDEO_DIP_ENABLE) {
408 val &= ~VIDEO_DIP_ENABLE;
409 I915_WRITE(reg, val);
410 POSTING_READ(reg);
411 }
412 val &= ~VIDEO_DIP_PORT_MASK;
413 val |= port;
414 }
415
416 val |= VIDEO_DIP_ENABLE;
417 val &= ~VIDEO_DIP_ENABLE_VENDOR;
418
419 I915_WRITE(reg, val);
420 POSTING_READ(reg);
421
422 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
423 intel_hdmi_set_spd_infoframe(encoder);
424}
425
426static void ibx_set_infoframes(struct drm_encoder *encoder,
427 struct drm_display_mode *adjusted_mode)
428{
429 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
430 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
431 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
432 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
433 u32 val = I915_READ(reg);
434 u32 port;
435
436 assert_hdmi_port_disabled(intel_hdmi);
437
438 /* See the big comment in g4x_set_infoframes() */
439 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
440
441 if (!intel_hdmi->has_hdmi_sink) {
442 if (!(val & VIDEO_DIP_ENABLE))
443 return;
444 val &= ~VIDEO_DIP_ENABLE;
445 I915_WRITE(reg, val);
446 POSTING_READ(reg);
447 return;
448 }
449
450 switch (intel_hdmi->sdvox_reg) {
451 case HDMIB:
452 port = VIDEO_DIP_PORT_B;
453 break;
454 case HDMIC:
455 port = VIDEO_DIP_PORT_C;
456 break;
457 case HDMID:
458 port = VIDEO_DIP_PORT_D;
459 break;
460 default:
461 BUG();
462 return;
463 }
464
465 if (port != (val & VIDEO_DIP_PORT_MASK)) {
466 if (val & VIDEO_DIP_ENABLE) {
467 val &= ~VIDEO_DIP_ENABLE;
468 I915_WRITE(reg, val);
469 POSTING_READ(reg);
470 }
471 val &= ~VIDEO_DIP_PORT_MASK;
472 val |= port;
473 }
474
475 val |= VIDEO_DIP_ENABLE;
476 val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
477 VIDEO_DIP_ENABLE_GCP);
478
479 I915_WRITE(reg, val);
480 POSTING_READ(reg);
481
482 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
483 intel_hdmi_set_spd_infoframe(encoder);
484}
485
486static void cpt_set_infoframes(struct drm_encoder *encoder,
487 struct drm_display_mode *adjusted_mode)
488{
489 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
490 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
491 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
492 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
493 u32 val = I915_READ(reg);
494
495 assert_hdmi_port_disabled(intel_hdmi);
496
497 /* See the big comment in g4x_set_infoframes() */
498 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
499
500 if (!intel_hdmi->has_hdmi_sink) {
501 if (!(val & VIDEO_DIP_ENABLE))
502 return;
503 val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
504 I915_WRITE(reg, val);
505 POSTING_READ(reg);
506 return;
507 }
508
509 /* Set both together, unset both together: see the spec. */
510 val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
511 val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
512 VIDEO_DIP_ENABLE_GCP);
513
514 I915_WRITE(reg, val);
515 POSTING_READ(reg);
516
517 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
518 intel_hdmi_set_spd_infoframe(encoder);
519}
520
521static void vlv_set_infoframes(struct drm_encoder *encoder,
522 struct drm_display_mode *adjusted_mode)
523{
524 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
525 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
526 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
527 u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
528 u32 val = I915_READ(reg);
529
530 assert_hdmi_port_disabled(intel_hdmi);
531
532 /* See the big comment in g4x_set_infoframes() */
533 val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
534
535 if (!intel_hdmi->has_hdmi_sink) {
536 if (!(val & VIDEO_DIP_ENABLE))
537 return;
538 val &= ~VIDEO_DIP_ENABLE;
539 I915_WRITE(reg, val);
540 POSTING_READ(reg);
541 return;
542 }
543
544 val |= VIDEO_DIP_ENABLE;
545 val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
546 VIDEO_DIP_ENABLE_GCP);
547
548 I915_WRITE(reg, val);
549 POSTING_READ(reg);
550
551 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
552 intel_hdmi_set_spd_infoframe(encoder);
553}
554
555static void hsw_set_infoframes(struct drm_encoder *encoder,
556 struct drm_display_mode *adjusted_mode)
557{
558 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
559 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
560 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
561 u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
562 u32 val = I915_READ(reg);
563
564 assert_hdmi_port_disabled(intel_hdmi);
565
566 if (!intel_hdmi->has_hdmi_sink) {
567 I915_WRITE(reg, 0);
568 POSTING_READ(reg);
569 return;
570 }
571
572 val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
573 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
574
575 I915_WRITE(reg, val);
576 POSTING_READ(reg);
577
578 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
579 intel_hdmi_set_spd_infoframe(encoder);
580}
581
582static void intel_hdmi_mode_set(struct drm_encoder *encoder, 219static void intel_hdmi_mode_set(struct drm_encoder *encoder,
583 struct drm_display_mode *mode, 220 struct drm_display_mode *mode,
584 struct drm_display_mode *adjusted_mode) 221 struct drm_display_mode *adjusted_mode)
585{ 222{
586 struct drm_device *dev = encoder->dev; 223 struct drm_device *dev = encoder->dev;
587 struct drm_i915_private *dev_priv = dev->dev_private; 224 struct drm_i915_private *dev_priv = dev->dev_private;
588 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 225 struct drm_crtc *crtc = encoder->crtc;
226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
589 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 227 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
590 u32 sdvox; 228 u32 sdvox;
591 229
592 sdvox = SDVO_ENCODING_HDMI; 230 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
593 if (!HAS_PCH_SPLIT(dev)) 231 if (!HAS_PCH_SPLIT(dev))
594 sdvox |= intel_hdmi->color_range; 232 sdvox |= intel_hdmi->color_range;
595 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 233 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -607,69 +245,33 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
607 sdvox |= HDMI_MODE_SELECT; 245 sdvox |= HDMI_MODE_SELECT;
608 246
609 if (intel_hdmi->has_audio) { 247 if (intel_hdmi->has_audio) {
610 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
611 pipe_name(intel_crtc->pipe));
612 sdvox |= SDVO_AUDIO_ENABLE; 248 sdvox |= SDVO_AUDIO_ENABLE;
613 sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; 249 sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
614 intel_write_eld(encoder, adjusted_mode);
615 } 250 }
616 251
617 if (HAS_PCH_CPT(dev)) 252 if (intel_crtc->pipe == 1) {
618 sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 253 if (HAS_PCH_CPT(dev))
619 else if (intel_crtc->pipe == PIPE_B) 254 sdvox |= PORT_TRANS_B_SEL_CPT;
620 sdvox |= SDVO_PIPE_B_SELECT; 255 else
256 sdvox |= SDVO_PIPE_B_SELECT;
257 }
621 258
622 I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 259 I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
623 POSTING_READ(intel_hdmi->sdvox_reg); 260 POSTING_READ(intel_hdmi->sdvox_reg);
624 261
625 intel_hdmi->set_infoframes(encoder, adjusted_mode); 262 intel_hdmi_set_avi_infoframe(encoder);
626} 263 intel_hdmi_set_spd_infoframe(encoder);
627
628static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
629 enum pipe *pipe)
630{
631 struct drm_device *dev = encoder->base.dev;
632 struct drm_i915_private *dev_priv = dev->dev_private;
633 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
634 u32 tmp;
635
636 tmp = I915_READ(intel_hdmi->sdvox_reg);
637
638 if (!(tmp & SDVO_ENABLE))
639 return false;
640
641 if (HAS_PCH_CPT(dev))
642 *pipe = PORT_TO_PIPE_CPT(tmp);
643 else
644 *pipe = PORT_TO_PIPE(tmp);
645
646 return true;
647} 264}
648 265
649static void intel_enable_hdmi(struct intel_encoder *encoder) 266static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
650{ 267{
651 struct drm_device *dev = encoder->base.dev; 268 struct drm_device *dev = encoder->dev;
652 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_private *dev_priv = dev->dev_private;
653 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 270 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
654 u32 temp; 271 u32 temp;
655 u32 enable_bits = SDVO_ENABLE;
656
657 if (intel_hdmi->has_audio)
658 enable_bits |= SDVO_AUDIO_ENABLE;
659 272
660 temp = I915_READ(intel_hdmi->sdvox_reg); 273 temp = I915_READ(intel_hdmi->sdvox_reg);
661 274
662 /* HW workaround for IBX, we need to move the port to transcoder A
663 * before disabling it. */
664 if (HAS_PCH_IBX(dev)) {
665 struct drm_crtc *crtc = encoder->base.crtc;
666 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
667
668 /* Restore the transcoder select bit. */
669 if (pipe == PIPE_B)
670 enable_bits |= SDVO_PIPE_B_SELECT;
671 }
672
673 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 275 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
674 * we do this anyway which shows more stable in testing. 276 * we do this anyway which shows more stable in testing.
675 */ 277 */
@@ -678,64 +280,12 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
678 POSTING_READ(intel_hdmi->sdvox_reg); 280 POSTING_READ(intel_hdmi->sdvox_reg);
679 } 281 }
680 282
681 temp |= enable_bits; 283 if (mode != DRM_MODE_DPMS_ON) {
682 284 temp &= ~SDVO_ENABLE;
683 I915_WRITE(intel_hdmi->sdvox_reg, temp); 285 } else {
684 POSTING_READ(intel_hdmi->sdvox_reg); 286 temp |= SDVO_ENABLE;
685
686 /* HW workaround, need to write this twice for issue that may result
687 * in first write getting masked.
688 */
689 if (HAS_PCH_SPLIT(dev)) {
690 I915_WRITE(intel_hdmi->sdvox_reg, temp);
691 POSTING_READ(intel_hdmi->sdvox_reg);
692 }
693}
694
695static void intel_disable_hdmi(struct intel_encoder *encoder)
696{
697 struct drm_device *dev = encoder->base.dev;
698 struct drm_i915_private *dev_priv = dev->dev_private;
699 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
700 u32 temp;
701 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
702
703 temp = I915_READ(intel_hdmi->sdvox_reg);
704
705 /* HW workaround for IBX, we need to move the port to transcoder A
706 * before disabling it. */
707 if (HAS_PCH_IBX(dev)) {
708 struct drm_crtc *crtc = encoder->base.crtc;
709 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
710
711 if (temp & SDVO_PIPE_B_SELECT) {
712 temp &= ~SDVO_PIPE_B_SELECT;
713 I915_WRITE(intel_hdmi->sdvox_reg, temp);
714 POSTING_READ(intel_hdmi->sdvox_reg);
715
716 /* Again we need to write this twice. */
717 I915_WRITE(intel_hdmi->sdvox_reg, temp);
718 POSTING_READ(intel_hdmi->sdvox_reg);
719
720 /* Transcoder selection bits only update
721 * effectively on vblank. */
722 if (crtc)
723 intel_wait_for_vblank(dev, pipe);
724 else
725 msleep(50);
726 }
727 }
728
729 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
730 * we do this anyway which shows more stable in testing.
731 */
732 if (HAS_PCH_SPLIT(dev)) {
733 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
734 POSTING_READ(intel_hdmi->sdvox_reg);
735 } 287 }
736 288
737 temp &= ~enable_bits;
738
739 I915_WRITE(intel_hdmi->sdvox_reg, temp); 289 I915_WRITE(intel_hdmi->sdvox_reg, temp);
740 POSTING_READ(intel_hdmi->sdvox_reg); 290 POSTING_READ(intel_hdmi->sdvox_reg);
741 291
@@ -762,70 +312,39 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
762 return MODE_OK; 312 return MODE_OK;
763} 313}
764 314
765bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, 315static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
766 const struct drm_display_mode *mode, 316 struct drm_display_mode *mode,
767 struct drm_display_mode *adjusted_mode) 317 struct drm_display_mode *adjusted_mode)
768{ 318{
769 return true; 319 return true;
770} 320}
771 321
772static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
773{
774 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
775 struct drm_i915_private *dev_priv = dev->dev_private;
776 uint32_t bit;
777
778 switch (intel_hdmi->sdvox_reg) {
779 case SDVOB:
780 bit = HDMIB_HOTPLUG_LIVE_STATUS;
781 break;
782 case SDVOC:
783 bit = HDMIC_HOTPLUG_LIVE_STATUS;
784 break;
785 default:
786 bit = 0;
787 break;
788 }
789
790 return I915_READ(PORT_HOTPLUG_STAT) & bit;
791}
792
793static enum drm_connector_status 322static enum drm_connector_status
794intel_hdmi_detect(struct drm_connector *connector, bool force) 323intel_hdmi_detect(struct drm_connector *connector, bool force)
795{ 324{
796 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 325 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
797 struct intel_digital_port *intel_dig_port =
798 hdmi_to_dig_port(intel_hdmi);
799 struct intel_encoder *intel_encoder = &intel_dig_port->base;
800 struct drm_i915_private *dev_priv = connector->dev->dev_private; 326 struct drm_i915_private *dev_priv = connector->dev->dev_private;
801 struct edid *edid; 327 struct edid *edid;
802 enum drm_connector_status status = connector_status_disconnected; 328 enum drm_connector_status status = connector_status_disconnected;
803 329
804 if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
805 return status;
806
807 intel_hdmi->has_hdmi_sink = false; 330 intel_hdmi->has_hdmi_sink = false;
808 intel_hdmi->has_audio = false; 331 intel_hdmi->has_audio = false;
809 edid = drm_get_edid(connector, 332 edid = drm_get_edid(connector,
810 intel_gmbus_get_adapter(dev_priv, 333 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
811 intel_hdmi->ddc_bus));
812 334
813 if (edid) { 335 if (edid) {
814 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 336 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
815 status = connector_status_connected; 337 status = connector_status_connected;
816 if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) 338 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
817 intel_hdmi->has_hdmi_sink =
818 drm_detect_hdmi_monitor(edid);
819 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 339 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
820 } 340 }
341 connector->display_info.raw_edid = NULL;
821 kfree(edid); 342 kfree(edid);
822 } 343 }
823 344
824 if (status == connector_status_connected) { 345 if (status == connector_status_connected) {
825 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) 346 if (intel_hdmi->force_audio)
826 intel_hdmi->has_audio = 347 intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
827 (intel_hdmi->force_audio == HDMI_AUDIO_ON);
828 intel_encoder->type = INTEL_OUTPUT_HDMI;
829 } 348 }
830 349
831 return status; 350 return status;
@@ -841,8 +360,7 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
841 */ 360 */
842 361
843 return intel_ddc_get_modes(connector, 362 return intel_ddc_get_modes(connector,
844 intel_gmbus_get_adapter(dev_priv, 363 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
845 intel_hdmi->ddc_bus));
846} 364}
847 365
848static bool 366static bool
@@ -854,11 +372,12 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
854 bool has_audio = false; 372 bool has_audio = false;
855 373
856 edid = drm_get_edid(connector, 374 edid = drm_get_edid(connector,
857 intel_gmbus_get_adapter(dev_priv, 375 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
858 intel_hdmi->ddc_bus));
859 if (edid) { 376 if (edid) {
860 if (edid->input & DRM_EDID_INPUT_DIGITAL) 377 if (edid->input & DRM_EDID_INPUT_DIGITAL)
861 has_audio = drm_detect_monitor_audio(edid); 378 has_audio = drm_detect_monitor_audio(edid);
379
380 connector->display_info.raw_edid = NULL;
862 kfree(edid); 381 kfree(edid);
863 } 382 }
864 383
@@ -867,21 +386,19 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
867 386
868static int 387static int
869intel_hdmi_set_property(struct drm_connector *connector, 388intel_hdmi_set_property(struct drm_connector *connector,
870 struct drm_property *property, 389 struct drm_property *property,
871 uint64_t val) 390 uint64_t val)
872{ 391{
873 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 392 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
874 struct intel_digital_port *intel_dig_port =
875 hdmi_to_dig_port(intel_hdmi);
876 struct drm_i915_private *dev_priv = connector->dev->dev_private; 393 struct drm_i915_private *dev_priv = connector->dev->dev_private;
877 int ret; 394 int ret;
878 395
879 ret = drm_object_property_set_value(&connector->base, property, val); 396 ret = drm_connector_property_set_value(connector, property, val);
880 if (ret) 397 if (ret)
881 return ret; 398 return ret;
882 399
883 if (property == dev_priv->force_audio_property) { 400 if (property == dev_priv->force_audio_property) {
884 enum hdmi_force_audio i = val; 401 int i = val;
885 bool has_audio; 402 bool has_audio;
886 403
887 if (i == intel_hdmi->force_audio) 404 if (i == intel_hdmi->force_audio)
@@ -889,13 +406,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
889 406
890 intel_hdmi->force_audio = i; 407 intel_hdmi->force_audio = i;
891 408
892 if (i == HDMI_AUDIO_AUTO) 409 if (i == 0)
893 has_audio = intel_hdmi_detect_audio(connector); 410 has_audio = intel_hdmi_detect_audio(connector);
894 else 411 else
895 has_audio = (i == HDMI_AUDIO_ON); 412 has_audio = i > 0;
896 413
897 if (i == HDMI_AUDIO_OFF_DVI) 414 if (has_audio == intel_hdmi->has_audio)
898 intel_hdmi->has_hdmi_sink = 0; 415 return 0;
899 416
900 intel_hdmi->has_audio = has_audio; 417 intel_hdmi->has_audio = has_audio;
901 goto done; 418 goto done;
@@ -912,10 +429,11 @@ intel_hdmi_set_property(struct drm_connector *connector,
912 return -EINVAL; 429 return -EINVAL;
913 430
914done: 431done:
915 if (intel_dig_port->base.base.crtc) { 432 if (intel_hdmi->base.base.crtc) {
916 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 433 struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
917 intel_set_mode(crtc, &crtc->mode, 434 drm_crtc_helper_set_mode(crtc, &crtc->mode,
918 crtc->x, crtc->y, crtc->fb); 435 crtc->x, crtc->y,
436 crtc->fb);
919 } 437 }
920 438
921 return 0; 439 return 0;
@@ -929,13 +447,15 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
929} 447}
930 448
931static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 449static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
450 .dpms = intel_hdmi_dpms,
932 .mode_fixup = intel_hdmi_mode_fixup, 451 .mode_fixup = intel_hdmi_mode_fixup,
452 .prepare = intel_encoder_prepare,
933 .mode_set = intel_hdmi_mode_set, 453 .mode_set = intel_hdmi_mode_set,
934 .disable = intel_encoder_noop, 454 .commit = intel_encoder_commit,
935}; 455};
936 456
937static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 457static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
938 .dpms = intel_connector_dpms, 458 .dpms = drm_helper_connector_dpms,
939 .detect = intel_hdmi_detect, 459 .detect = intel_hdmi_detect,
940 .fill_modes = drm_helper_probe_single_connector_modes, 460 .fill_modes = drm_helper_probe_single_connector_modes,
941 .set_property = intel_hdmi_set_property, 461 .set_property = intel_hdmi_set_property,
@@ -959,64 +479,71 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
959 intel_attach_broadcast_rgb_property(connector); 479 intel_attach_broadcast_rgb_property(connector);
960} 480}
961 481
962void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 482void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
963 struct intel_connector *intel_connector)
964{ 483{
965 struct drm_connector *connector = &intel_connector->base;
966 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
967 struct intel_encoder *intel_encoder = &intel_dig_port->base;
968 struct drm_device *dev = intel_encoder->base.dev;
969 struct drm_i915_private *dev_priv = dev->dev_private; 484 struct drm_i915_private *dev_priv = dev->dev_private;
970 enum port port = intel_dig_port->port; 485 struct drm_connector *connector;
486 struct intel_encoder *intel_encoder;
487 struct intel_connector *intel_connector;
488 struct intel_hdmi *intel_hdmi;
489
490 intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
491 if (!intel_hdmi)
492 return;
493
494 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
495 if (!intel_connector) {
496 kfree(intel_hdmi);
497 return;
498 }
499
500 intel_encoder = &intel_hdmi->base;
501 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
502 DRM_MODE_ENCODER_TMDS);
971 503
504 connector = &intel_connector->base;
972 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 505 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
973 DRM_MODE_CONNECTOR_HDMIA); 506 DRM_MODE_CONNECTOR_HDMIA);
974 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 507 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
975 508
509 intel_encoder->type = INTEL_OUTPUT_HDMI;
510
976 connector->polled = DRM_CONNECTOR_POLL_HPD; 511 connector->polled = DRM_CONNECTOR_POLL_HPD;
977 connector->interlace_allowed = 1; 512 connector->interlace_allowed = 0;
978 connector->doublescan_allowed = 0; 513 connector->doublescan_allowed = 0;
514 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
979 515
980 switch (port) { 516 /* Set up the DDC bus. */
981 case PORT_B: 517 if (sdvox_reg == SDVOB) {
518 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
982 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 519 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
983 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 520 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
984 break; 521 } else if (sdvox_reg == SDVOC) {
985 case PORT_C: 522 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
986 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 523 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
987 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 524 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
988 break; 525 } else if (sdvox_reg == HDMIB) {
989 case PORT_D: 526 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
527 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
528 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
529 } else if (sdvox_reg == HDMIC) {
530 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
531 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
532 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
533 } else if (sdvox_reg == HDMID) {
534 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
990 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 535 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
991 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 536 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
992 break;
993 case PORT_A:
994 /* Internal port only for eDP. */
995 default:
996 BUG();
997 } 537 }
998 538
999 if (!HAS_PCH_SPLIT(dev)) { 539 intel_hdmi->sdvox_reg = sdvox_reg;
1000 intel_hdmi->write_infoframe = g4x_write_infoframe;
1001 intel_hdmi->set_infoframes = g4x_set_infoframes;
1002 } else if (IS_VALLEYVIEW(dev)) {
1003 intel_hdmi->write_infoframe = vlv_write_infoframe;
1004 intel_hdmi->set_infoframes = vlv_set_infoframes;
1005 } else if (IS_HASWELL(dev)) {
1006 intel_hdmi->write_infoframe = hsw_write_infoframe;
1007 intel_hdmi->set_infoframes = hsw_set_infoframes;
1008 } else if (HAS_PCH_IBX(dev)) {
1009 intel_hdmi->write_infoframe = ibx_write_infoframe;
1010 intel_hdmi->set_infoframes = ibx_set_infoframes;
1011 } else {
1012 intel_hdmi->write_infoframe = cpt_write_infoframe;
1013 intel_hdmi->set_infoframes = cpt_set_infoframes;
1014 }
1015 540
1016 if (IS_HASWELL(dev)) 541 if (!HAS_PCH_SPLIT(dev))
1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 542 intel_hdmi->write_infoframe = i9xx_write_infoframe;
1018 else 543 else
1019 intel_connector->get_hw_state = intel_connector_get_hw_state; 544 intel_hdmi->write_infoframe = ironlake_write_infoframe;
545
546 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1020 547
1021 intel_hdmi_add_properties(intel_hdmi, connector); 548 intel_hdmi_add_properties(intel_hdmi, connector);
1022 549
@@ -1032,42 +559,3 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1032 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 559 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
1033 } 560 }
1034} 561}
1035
1036void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1037{
1038 struct intel_digital_port *intel_dig_port;
1039 struct intel_encoder *intel_encoder;
1040 struct drm_encoder *encoder;
1041 struct intel_connector *intel_connector;
1042
1043 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1044 if (!intel_dig_port)
1045 return;
1046
1047 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1048 if (!intel_connector) {
1049 kfree(intel_dig_port);
1050 return;
1051 }
1052
1053 intel_encoder = &intel_dig_port->base;
1054 encoder = &intel_encoder->base;
1055
1056 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1057 DRM_MODE_ENCODER_TMDS);
1058 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1059
1060 intel_encoder->enable = intel_enable_hdmi;
1061 intel_encoder->disable = intel_disable_hdmi;
1062 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1063
1064 intel_encoder->type = INTEL_OUTPUT_HDMI;
1065 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1066 intel_encoder->cloneable = false;
1067
1068 intel_dig_port->port = port;
1069 intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
1070 intel_dig_port->dp.output_reg = 0;
1071
1072 intel_hdmi_init_connector(intel_dig_port, intel_connector);
1073}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b81..d98cee60b60 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -28,29 +28,15 @@
28 */ 28 */
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
31#include <linux/export.h> 31#include "drmP.h"
32#include <drm/drmP.h> 32#include "drm.h"
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <drm/i915_drm.h> 34#include "i915_drm.h"
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37struct gmbus_port {
38 const char *name;
39 int reg;
40};
41
42static const struct gmbus_port gmbus_ports[] = {
43 { "ssc", GPIOB },
44 { "vga", GPIOA },
45 { "panel", GPIOC },
46 { "dpc", GPIOD },
47 { "dpb", GPIOE },
48 { "dpd", GPIOF },
49};
50
51/* Intel GPIO access functions */ 37/* Intel GPIO access functions */
52 38
53#define I2C_RISEFALL_TIME 10 39#define I2C_RISEFALL_TIME 20
54 40
55static inline struct intel_gmbus * 41static inline struct intel_gmbus *
56to_intel_gmbus(struct i2c_adapter *i2c) 42to_intel_gmbus(struct i2c_adapter *i2c)
@@ -58,11 +44,21 @@ to_intel_gmbus(struct i2c_adapter *i2c)
58 return container_of(i2c, struct intel_gmbus, adapter); 44 return container_of(i2c, struct intel_gmbus, adapter);
59} 45}
60 46
47struct intel_gpio {
48 struct i2c_adapter adapter;
49 struct i2c_algo_bit_data algo;
50 struct drm_i915_private *dev_priv;
51 u32 reg;
52};
53
61void 54void
62intel_i2c_reset(struct drm_device *dev) 55intel_i2c_reset(struct drm_device *dev)
63{ 56{
64 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 58 if (HAS_PCH_SPLIT(dev))
59 I915_WRITE(PCH_GMBUS0, 0);
60 else
61 I915_WRITE(GMBUS0, 0);
66} 62}
67 63
68static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 64static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -81,15 +77,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
81 I915_WRITE(DSPCLK_GATE_D, val); 77 I915_WRITE(DSPCLK_GATE_D, val);
82} 78}
83 79
84static u32 get_reserved(struct intel_gmbus *bus) 80static u32 get_reserved(struct intel_gpio *gpio)
85{ 81{
86 struct drm_i915_private *dev_priv = bus->dev_priv; 82 struct drm_i915_private *dev_priv = gpio->dev_priv;
87 struct drm_device *dev = dev_priv->dev; 83 struct drm_device *dev = dev_priv->dev;
88 u32 reserved = 0; 84 u32 reserved = 0;
89 85
90 /* On most chips, these bits must be preserved in software. */ 86 /* On most chips, these bits must be preserved in software. */
91 if (!IS_I830(dev) && !IS_845G(dev)) 87 if (!IS_I830(dev) && !IS_845G(dev))
92 reserved = I915_READ_NOTRACE(bus->gpio_reg) & 88 reserved = I915_READ_NOTRACE(gpio->reg) &
93 (GPIO_DATA_PULLUP_DISABLE | 89 (GPIO_DATA_PULLUP_DISABLE |
94 GPIO_CLOCK_PULLUP_DISABLE); 90 GPIO_CLOCK_PULLUP_DISABLE);
95 91
@@ -98,29 +94,29 @@ static u32 get_reserved(struct intel_gmbus *bus)
98 94
99static int get_clock(void *data) 95static int get_clock(void *data)
100{ 96{
101 struct intel_gmbus *bus = data; 97 struct intel_gpio *gpio = data;
102 struct drm_i915_private *dev_priv = bus->dev_priv; 98 struct drm_i915_private *dev_priv = gpio->dev_priv;
103 u32 reserved = get_reserved(bus); 99 u32 reserved = get_reserved(gpio);
104 I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); 100 I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
105 I915_WRITE_NOTRACE(bus->gpio_reg, reserved); 101 I915_WRITE_NOTRACE(gpio->reg, reserved);
106 return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; 102 return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
107} 103}
108 104
109static int get_data(void *data) 105static int get_data(void *data)
110{ 106{
111 struct intel_gmbus *bus = data; 107 struct intel_gpio *gpio = data;
112 struct drm_i915_private *dev_priv = bus->dev_priv; 108 struct drm_i915_private *dev_priv = gpio->dev_priv;
113 u32 reserved = get_reserved(bus); 109 u32 reserved = get_reserved(gpio);
114 I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); 110 I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
115 I915_WRITE_NOTRACE(bus->gpio_reg, reserved); 111 I915_WRITE_NOTRACE(gpio->reg, reserved);
116 return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; 112 return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
117} 113}
118 114
119static void set_clock(void *data, int state_high) 115static void set_clock(void *data, int state_high)
120{ 116{
121 struct intel_gmbus *bus = data; 117 struct intel_gpio *gpio = data;
122 struct drm_i915_private *dev_priv = bus->dev_priv; 118 struct drm_i915_private *dev_priv = gpio->dev_priv;
123 u32 reserved = get_reserved(bus); 119 u32 reserved = get_reserved(gpio);
124 u32 clock_bits; 120 u32 clock_bits;
125 121
126 if (state_high) 122 if (state_high)
@@ -129,15 +125,15 @@ static void set_clock(void *data, int state_high)
129 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 125 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
130 GPIO_CLOCK_VAL_MASK; 126 GPIO_CLOCK_VAL_MASK;
131 127
132 I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits); 128 I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
133 POSTING_READ(bus->gpio_reg); 129 POSTING_READ(gpio->reg);
134} 130}
135 131
136static void set_data(void *data, int state_high) 132static void set_data(void *data, int state_high)
137{ 133{
138 struct intel_gmbus *bus = data; 134 struct intel_gpio *gpio = data;
139 struct drm_i915_private *dev_priv = bus->dev_priv; 135 struct drm_i915_private *dev_priv = gpio->dev_priv;
140 u32 reserved = get_reserved(bus); 136 u32 reserved = get_reserved(gpio);
141 u32 data_bits; 137 u32 data_bits;
142 138
143 if (state_high) 139 if (state_high)
@@ -146,177 +142,83 @@ static void set_data(void *data, int state_high)
146 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 142 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
147 GPIO_DATA_VAL_MASK; 143 GPIO_DATA_VAL_MASK;
148 144
149 I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits); 145 I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
150 POSTING_READ(bus->gpio_reg); 146 POSTING_READ(gpio->reg);
151}
152
153static int
154intel_gpio_pre_xfer(struct i2c_adapter *adapter)
155{
156 struct intel_gmbus *bus = container_of(adapter,
157 struct intel_gmbus,
158 adapter);
159 struct drm_i915_private *dev_priv = bus->dev_priv;
160
161 intel_i2c_reset(dev_priv->dev);
162 intel_i2c_quirk_set(dev_priv, true);
163 set_data(bus, 1);
164 set_clock(bus, 1);
165 udelay(I2C_RISEFALL_TIME);
166 return 0;
167} 147}
168 148
169static void 149static struct i2c_adapter *
170intel_gpio_post_xfer(struct i2c_adapter *adapter) 150intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
171{ 151{
172 struct intel_gmbus *bus = container_of(adapter, 152 static const int map_pin_to_reg[] = {
173 struct intel_gmbus, 153 0,
174 adapter); 154 GPIOB,
175 struct drm_i915_private *dev_priv = bus->dev_priv; 155 GPIOA,
176 156 GPIOC,
177 set_data(bus, 1); 157 GPIOD,
178 set_clock(bus, 1); 158 GPIOE,
179 intel_i2c_quirk_set(dev_priv, false); 159 0,
180} 160 GPIOF,
181 161 };
182static void 162 struct intel_gpio *gpio;
183intel_gpio_setup(struct intel_gmbus *bus, u32 pin) 163
184{ 164 if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
185 struct drm_i915_private *dev_priv = bus->dev_priv; 165 return NULL;
186 struct i2c_algo_bit_data *algo; 166
187 167 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
188 algo = &bus->bit_algo; 168 if (gpio == NULL)
189 169 return NULL;
190 /* -1 to map pin pair to gmbus index */ 170
191 bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg; 171 gpio->reg = map_pin_to_reg[pin];
192 172 if (HAS_PCH_SPLIT(dev_priv->dev))
193 bus->adapter.algo_data = algo; 173 gpio->reg += PCH_GPIOA - GPIOA;
194 algo->setsda = set_data; 174 gpio->dev_priv = dev_priv;
195 algo->setscl = set_clock; 175
196 algo->getsda = get_data; 176 snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
197 algo->getscl = get_clock; 177 "i915 GPIO%c", "?BACDE?F"[pin]);
198 algo->pre_xfer = intel_gpio_pre_xfer; 178 gpio->adapter.owner = THIS_MODULE;
199 algo->post_xfer = intel_gpio_post_xfer; 179 gpio->adapter.algo_data = &gpio->algo;
200 algo->udelay = I2C_RISEFALL_TIME; 180 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
201 algo->timeout = usecs_to_jiffies(2200); 181 gpio->algo.setsda = set_data;
202 algo->data = bus; 182 gpio->algo.setscl = set_clock;
183 gpio->algo.getsda = get_data;
184 gpio->algo.getscl = get_clock;
185 gpio->algo.udelay = I2C_RISEFALL_TIME;
186 gpio->algo.timeout = usecs_to_jiffies(2200);
187 gpio->algo.data = gpio;
188
189 if (i2c_bit_add_bus(&gpio->adapter))
190 goto out_free;
191
192 return &gpio->adapter;
193
194out_free:
195 kfree(gpio);
196 return NULL;
203} 197}
204 198
205static int 199static int
206gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 200intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
207 u32 gmbus1_index) 201 struct i2c_adapter *adapter,
202 struct i2c_msg *msgs,
203 int num)
208{ 204{
209 int reg_offset = dev_priv->gpio_mmio_base; 205 struct intel_gpio *gpio = container_of(adapter,
210 u16 len = msg->len; 206 struct intel_gpio,
211 u8 *buf = msg->buf; 207 adapter);
212
213 I915_WRITE(GMBUS1 + reg_offset,
214 gmbus1_index |
215 GMBUS_CYCLE_WAIT |
216 (len << GMBUS_BYTE_COUNT_SHIFT) |
217 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
218 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
219 while (len) {
220 int ret;
221 u32 val, loop = 0;
222 u32 gmbus2;
223
224 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
225 (GMBUS_SATOER | GMBUS_HW_RDY),
226 50);
227 if (ret)
228 return -ETIMEDOUT;
229 if (gmbus2 & GMBUS_SATOER)
230 return -ENXIO;
231
232 val = I915_READ(GMBUS3 + reg_offset);
233 do {
234 *buf++ = val & 0xff;
235 val >>= 8;
236 } while (--len && ++loop < 4);
237 }
238
239 return 0;
240}
241
242static int
243gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
244{
245 int reg_offset = dev_priv->gpio_mmio_base;
246 u16 len = msg->len;
247 u8 *buf = msg->buf;
248 u32 val, loop;
249
250 val = loop = 0;
251 while (len && loop < 4) {
252 val |= *buf++ << (8 * loop++);
253 len -= 1;
254 }
255
256 I915_WRITE(GMBUS3 + reg_offset, val);
257 I915_WRITE(GMBUS1 + reg_offset,
258 GMBUS_CYCLE_WAIT |
259 (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
260 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
261 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
262 while (len) {
263 int ret;
264 u32 gmbus2;
265
266 val = loop = 0;
267 do {
268 val |= *buf++ << (8 * loop);
269 } while (--len && ++loop < 4);
270
271 I915_WRITE(GMBUS3 + reg_offset, val);
272
273 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
274 (GMBUS_SATOER | GMBUS_HW_RDY),
275 50);
276 if (ret)
277 return -ETIMEDOUT;
278 if (gmbus2 & GMBUS_SATOER)
279 return -ENXIO;
280 }
281 return 0;
282}
283
284/*
285 * The gmbus controller can combine a 1 or 2 byte write with a read that
286 * immediately follows it by using an "INDEX" cycle.
287 */
288static bool
289gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
290{
291 return (i + 1 < num &&
292 !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
293 (msgs[i + 1].flags & I2C_M_RD));
294}
295
296static int
297gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
298{
299 int reg_offset = dev_priv->gpio_mmio_base;
300 u32 gmbus1_index = 0;
301 u32 gmbus5 = 0;
302 int ret; 208 int ret;
303 209
304 if (msgs[0].len == 2) 210 intel_i2c_reset(dev_priv->dev);
305 gmbus5 = GMBUS_2BYTE_INDEX_EN |
306 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
307 if (msgs[0].len == 1)
308 gmbus1_index = GMBUS_CYCLE_INDEX |
309 (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
310 211
311 /* GMBUS5 holds 16-bit index */ 212 intel_i2c_quirk_set(dev_priv, true);
312 if (gmbus5) 213 set_data(gpio, 1);
313 I915_WRITE(GMBUS5 + reg_offset, gmbus5); 214 set_clock(gpio, 1);
215 udelay(I2C_RISEFALL_TIME);
314 216
315 ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); 217 ret = adapter->algo->master_xfer(adapter, msgs, num);
316 218
317 /* Clear GMBUS5 after each index transfer */ 219 set_data(gpio, 1);
318 if (gmbus5) 220 set_clock(gpio, 1);
319 I915_WRITE(GMBUS5 + reg_offset, 0); 221 intel_i2c_quirk_set(dev_priv, false);
320 222
321 return ret; 223 return ret;
322} 224}
@@ -329,121 +231,120 @@ gmbus_xfer(struct i2c_adapter *adapter,
329 struct intel_gmbus *bus = container_of(adapter, 231 struct intel_gmbus *bus = container_of(adapter,
330 struct intel_gmbus, 232 struct intel_gmbus,
331 adapter); 233 adapter);
332 struct drm_i915_private *dev_priv = bus->dev_priv; 234 struct drm_i915_private *dev_priv = adapter->algo_data;
333 int i, reg_offset; 235 int i, reg_offset;
334 int ret = 0;
335 236
336 mutex_lock(&dev_priv->gmbus_mutex); 237 if (bus->force_bit)
238 return intel_i2c_quirk_xfer(dev_priv,
239 bus->force_bit, msgs, num);
337 240
338 if (bus->force_bit) { 241 reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
339 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
340 goto out;
341 }
342
343 reg_offset = dev_priv->gpio_mmio_base;
344 242
345 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 243 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
346 244
347 for (i = 0; i < num; i++) { 245 for (i = 0; i < num; i++) {
348 u32 gmbus2; 246 u16 len = msgs[i].len;
349 247 u8 *buf = msgs[i].buf;
350 if (gmbus_is_index_read(msgs, i, num)) { 248
351 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); 249 if (msgs[i].flags & I2C_M_RD) {
352 i += 1; /* set i to the index of the read xfer */ 250 I915_WRITE(GMBUS1 + reg_offset,
353 } else if (msgs[i].flags & I2C_M_RD) { 251 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
354 ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); 252 (len << GMBUS_BYTE_COUNT_SHIFT) |
253 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
254 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
255 POSTING_READ(GMBUS2+reg_offset);
256 do {
257 u32 val, loop = 0;
258
259 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
260 goto timeout;
261 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
262 goto clear_err;
263
264 val = I915_READ(GMBUS3 + reg_offset);
265 do {
266 *buf++ = val & 0xff;
267 val >>= 8;
268 } while (--len && ++loop < 4);
269 } while (len);
355 } else { 270 } else {
356 ret = gmbus_xfer_write(dev_priv, &msgs[i]); 271 u32 val, loop;
272
273 val = loop = 0;
274 do {
275 val |= *buf++ << (8 * loop);
276 } while (--len && ++loop < 4);
277
278 I915_WRITE(GMBUS3 + reg_offset, val);
279 I915_WRITE(GMBUS1 + reg_offset,
280 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
281 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
282 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
283 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
284 POSTING_READ(GMBUS2+reg_offset);
285
286 while (len) {
287 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
288 goto timeout;
289 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
290 goto clear_err;
291
292 val = loop = 0;
293 do {
294 val |= *buf++ << (8 * loop);
295 } while (--len && ++loop < 4);
296
297 I915_WRITE(GMBUS3 + reg_offset, val);
298 POSTING_READ(GMBUS2+reg_offset);
299 }
357 } 300 }
358 301
359 if (ret == -ETIMEDOUT) 302 if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
360 goto timeout; 303 goto timeout;
361 if (ret == -ENXIO) 304 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
362 goto clear_err;
363
364 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
365 (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
366 50);
367 if (ret)
368 goto timeout;
369 if (gmbus2 & GMBUS_SATOER)
370 goto clear_err; 305 goto clear_err;
371 } 306 }
372 307
373 /* Generate a STOP condition on the bus. Note that gmbus can't generata 308 goto done;
374 * a STOP on the very first cycle. To simplify the code we
375 * unconditionally generate the STOP condition with an additional gmbus
376 * cycle. */
377 I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
378
379 /* Mark the GMBUS interface as disabled after waiting for idle.
380 * We will re-enable it at the start of the next xfer,
381 * till then let it sleep.
382 */
383 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
384 10)) {
385 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
386 adapter->name);
387 ret = -ETIMEDOUT;
388 }
389 I915_WRITE(GMBUS0 + reg_offset, 0);
390 ret = ret ?: i;
391 goto out;
392 309
393clear_err: 310clear_err:
394 /*
395 * Wait for bus to IDLE before clearing NAK.
396 * If we clear the NAK while bus is still active, then it will stay
397 * active and the next transaction may fail.
398 *
399 * If no ACK is received during the address phase of a transaction, the
400 * adapter must report -ENXIO. It is not clear what to return if no ACK
401 * is received at other times. But we have to be careful to not return
402 * spurious -ENXIO because that will prevent i2c and drm edid functions
403 * from retrying. So return -ENXIO only when gmbus properly quiescents -
404 * timing out seems to happen when there _is_ a ddc chip present, but
405 * it's slow responding and only answers on the 2nd retry.
406 */
407 ret = -ENXIO;
408 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
409 10)) {
410 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
411 adapter->name);
412 ret = -ETIMEDOUT;
413 }
414
415 /* Toggle the Software Clear Interrupt bit. This has the effect 311 /* Toggle the Software Clear Interrupt bit. This has the effect
416 * of resetting the GMBUS controller and so clearing the 312 * of resetting the GMBUS controller and so clearing the
417 * BUS_ERROR raised by the slave's NAK. 313 * BUS_ERROR raised by the slave's NAK.
418 */ 314 */
419 I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); 315 I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
420 I915_WRITE(GMBUS1 + reg_offset, 0); 316 I915_WRITE(GMBUS1 + reg_offset, 0);
421 I915_WRITE(GMBUS0 + reg_offset, 0);
422 317
423 DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", 318done:
424 adapter->name, msgs[i].addr, 319 /* Mark the GMBUS interface as disabled. We will re-enable it at the
425 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); 320 * start of the next xfer, till then let it sleep.
426 321 */
427 goto out; 322 I915_WRITE(GMBUS0 + reg_offset, 0);
323 return i;
428 324
429timeout: 325timeout:
430 DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", 326 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
431 bus->adapter.name, bus->reg0 & 0xff); 327 bus->reg0 & 0xff, bus->adapter.name);
432 I915_WRITE(GMBUS0 + reg_offset, 0); 328 I915_WRITE(GMBUS0 + reg_offset, 0);
433 329
434 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 330 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
435 bus->force_bit = 1; 331 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
436 ret = i2c_bit_algo.master_xfer(adapter, msgs, num); 332 if (!bus->force_bit)
333 return -ENOMEM;
437 334
438out: 335 return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
439 mutex_unlock(&dev_priv->gmbus_mutex);
440 return ret;
441} 336}
442 337
443static u32 gmbus_func(struct i2c_adapter *adapter) 338static u32 gmbus_func(struct i2c_adapter *adapter)
444{ 339{
445 return i2c_bit_algo.functionality(adapter) & 340 struct intel_gmbus *bus = container_of(adapter,
446 (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 341 struct intel_gmbus,
342 adapter);
343
344 if (bus->force_bit)
345 bus->force_bit->algo->functionality(bus->force_bit);
346
347 return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
447 /* I2C_FUNC_10BIT_ADDR | */ 348 /* I2C_FUNC_10BIT_ADDR | */
448 I2C_FUNC_SMBUS_READ_BLOCK_DATA | 349 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
449 I2C_FUNC_SMBUS_BLOCK_PROC_CALL); 350 I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
@@ -460,44 +361,47 @@ static const struct i2c_algorithm gmbus_algorithm = {
460 */ 361 */
461int intel_setup_gmbus(struct drm_device *dev) 362int intel_setup_gmbus(struct drm_device *dev)
462{ 363{
364 static const char *names[GMBUS_NUM_PORTS] = {
365 "disabled",
366 "ssc",
367 "vga",
368 "panel",
369 "dpc",
370 "dpb",
371 "reserved",
372 "dpd",
373 };
463 struct drm_i915_private *dev_priv = dev->dev_private; 374 struct drm_i915_private *dev_priv = dev->dev_private;
464 int ret, i; 375 int ret, i;
465 376
466 if (HAS_PCH_SPLIT(dev)) 377 dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
467 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; 378 GFP_KERNEL);
468 else 379 if (dev_priv->gmbus == NULL)
469 dev_priv->gpio_mmio_base = 0; 380 return -ENOMEM;
470
471 mutex_init(&dev_priv->gmbus_mutex);
472 381
473 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 382 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
474 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 383 struct intel_gmbus *bus = &dev_priv->gmbus[i];
475 u32 port = i + 1; /* +1 to map gmbus index to pin pair */
476 384
477 bus->adapter.owner = THIS_MODULE; 385 bus->adapter.owner = THIS_MODULE;
478 bus->adapter.class = I2C_CLASS_DDC; 386 bus->adapter.class = I2C_CLASS_DDC;
479 snprintf(bus->adapter.name, 387 snprintf(bus->adapter.name,
480 sizeof(bus->adapter.name), 388 sizeof(bus->adapter.name),
481 "i915 gmbus %s", 389 "i915 gmbus %s",
482 gmbus_ports[i].name); 390 names[i]);
483 391
484 bus->adapter.dev.parent = &dev->pdev->dev; 392 bus->adapter.dev.parent = &dev->pdev->dev;
485 bus->dev_priv = dev_priv; 393 bus->adapter.algo_data = dev_priv;
486 394
487 bus->adapter.algo = &gmbus_algorithm; 395 bus->adapter.algo = &gmbus_algorithm;
488
489 /* By default use a conservative clock rate */
490 bus->reg0 = port | GMBUS_RATE_100KHZ;
491
492 /* gmbus seems to be broken on i830 */
493 if (IS_I830(dev))
494 bus->force_bit = 1;
495
496 intel_gpio_setup(bus, port);
497
498 ret = i2c_add_adapter(&bus->adapter); 396 ret = i2c_add_adapter(&bus->adapter);
499 if (ret) 397 if (ret)
500 goto err; 398 goto err;
399
400 /* By default use a conservative clock rate */
401 bus->reg0 = i | GMBUS_RATE_100KHZ;
402
403 /* XXX force bit banging until GMBUS is fully debugged */
404 bus->force_bit = intel_gpio_create(dev_priv, i);
501 } 405 }
502 406
503 intel_i2c_reset(dev_priv->dev); 407 intel_i2c_reset(dev_priv->dev);
@@ -509,33 +413,41 @@ err:
509 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 413 struct intel_gmbus *bus = &dev_priv->gmbus[i];
510 i2c_del_adapter(&bus->adapter); 414 i2c_del_adapter(&bus->adapter);
511 } 415 }
416 kfree(dev_priv->gmbus);
417 dev_priv->gmbus = NULL;
512 return ret; 418 return ret;
513} 419}
514 420
515struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
516 unsigned port)
517{
518 WARN_ON(!intel_gmbus_is_port_valid(port));
519 /* -1 to map pin pair to gmbus index */
520 return (intel_gmbus_is_port_valid(port)) ?
521 &dev_priv->gmbus[port - 1].adapter : NULL;
522}
523
524void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) 421void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
525{ 422{
526 struct intel_gmbus *bus = to_intel_gmbus(adapter); 423 struct intel_gmbus *bus = to_intel_gmbus(adapter);
527 424
528 bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; 425 /* speed:
426 * 0x0 = 100 KHz
427 * 0x1 = 50 KHz
428 * 0x2 = 400 KHz
429 * 0x3 = 1000 Khz
430 */
431 bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
529} 432}
530 433
531void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) 434void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
532{ 435{
533 struct intel_gmbus *bus = to_intel_gmbus(adapter); 436 struct intel_gmbus *bus = to_intel_gmbus(adapter);
534 437
535 bus->force_bit += force_bit ? 1 : -1; 438 if (force_bit) {
536 DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", 439 if (bus->force_bit == NULL) {
537 force_bit ? "en" : "dis", adapter->name, 440 struct drm_i915_private *dev_priv = adapter->algo_data;
538 bus->force_bit); 441 bus->force_bit = intel_gpio_create(dev_priv,
442 bus->reg0 & 0xff);
443 }
444 } else {
445 if (bus->force_bit) {
446 i2c_del_adapter(bus->force_bit);
447 kfree(bus->force_bit);
448 bus->force_bit = NULL;
449 }
450 }
539} 451}
540 452
541void intel_teardown_gmbus(struct drm_device *dev) 453void intel_teardown_gmbus(struct drm_device *dev)
@@ -543,8 +455,18 @@ void intel_teardown_gmbus(struct drm_device *dev)
543 struct drm_i915_private *dev_priv = dev->dev_private; 455 struct drm_i915_private *dev_priv = dev->dev_private;
544 int i; 456 int i;
545 457
458 if (dev_priv->gmbus == NULL)
459 return;
460
546 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 461 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
547 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 462 struct intel_gmbus *bus = &dev_priv->gmbus[i];
463 if (bus->force_bit) {
464 i2c_del_adapter(bus->force_bit);
465 kfree(bus->force_bit);
466 }
548 i2c_del_adapter(&bus->adapter); 467 i2c_del_adapter(&bus->adapter);
549 } 468 }
469
470 kfree(dev_priv->gmbus);
471 dev_priv->gmbus = NULL;
550} 472}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74258a..31da77f5c05 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -31,75 +31,46 @@
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <drm/drmP.h> 34#include "drmP.h"
35#include <drm/drm_crtc.h> 35#include "drm.h"
36#include <drm/drm_edid.h> 36#include "drm_crtc.h"
37#include "drm_edid.h"
37#include "intel_drv.h" 38#include "intel_drv.h"
38#include <drm/i915_drm.h> 39#include "i915_drm.h"
39#include "i915_drv.h" 40#include "i915_drv.h"
40#include <linux/acpi.h> 41#include <linux/acpi.h>
41 42
42/* Private structure for the integrated LVDS support */ 43/* Private structure for the integrated LVDS support */
43struct intel_lvds_connector { 44struct intel_lvds {
44 struct intel_connector base;
45
46 struct notifier_block lid_notifier;
47};
48
49struct intel_lvds_encoder {
50 struct intel_encoder base; 45 struct intel_encoder base;
51 46
47 struct edid *edid;
48
49 int fitting_mode;
52 u32 pfit_control; 50 u32 pfit_control;
53 u32 pfit_pgm_ratios; 51 u32 pfit_pgm_ratios;
54 bool pfit_dirty; 52 bool pfit_dirty;
55 53
56 struct intel_lvds_connector *attached_connector; 54 struct drm_display_mode *fixed_mode;
57}; 55};
58 56
59static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) 57static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
60{
61 return container_of(encoder, struct intel_lvds_encoder, base.base);
62}
63
64static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
65{ 58{
66 return container_of(connector, struct intel_lvds_connector, base.base); 59 return container_of(encoder, struct intel_lvds, base.base);
67} 60}
68 61
69static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, 62static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
70 enum pipe *pipe)
71{ 63{
72 struct drm_device *dev = encoder->base.dev; 64 return container_of(intel_attached_encoder(connector),
73 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct intel_lvds, base);
74 u32 lvds_reg, tmp;
75
76 if (HAS_PCH_SPLIT(dev)) {
77 lvds_reg = PCH_LVDS;
78 } else {
79 lvds_reg = LVDS;
80 }
81
82 tmp = I915_READ(lvds_reg);
83
84 if (!(tmp & LVDS_PORT_EN))
85 return false;
86
87 if (HAS_PCH_CPT(dev))
88 *pipe = PORT_TO_PIPE_CPT(tmp);
89 else
90 *pipe = PORT_TO_PIPE(tmp);
91
92 return true;
93} 66}
94 67
95/** 68/**
96 * Sets the power state for the panel. 69 * Sets the power state for the panel.
97 */ 70 */
98static void intel_enable_lvds(struct intel_encoder *encoder) 71static void intel_lvds_enable(struct intel_lvds *intel_lvds)
99{ 72{
100 struct drm_device *dev = encoder->base.dev; 73 struct drm_device *dev = intel_lvds->base.base.dev;
101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
103 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
104 u32 ctl_reg, lvds_reg, stat_reg; 75 u32 ctl_reg, lvds_reg, stat_reg;
105 76
@@ -115,7 +86,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
115 86
116 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 87 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
117 88
118 if (lvds_encoder->pfit_dirty) { 89 if (intel_lvds->pfit_dirty) {
119 /* 90 /*
120 * Enable automatic panel scaling so that non-native modes 91 * Enable automatic panel scaling so that non-native modes
121 * fill the screen. The panel fitter should only be 92 * fill the screen. The panel fitter should only be
@@ -123,12 +94,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
123 * register description and PRM. 94 * register description and PRM.
124 */ 95 */
125 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 96 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
126 lvds_encoder->pfit_control, 97 intel_lvds->pfit_control,
127 lvds_encoder->pfit_pgm_ratios); 98 intel_lvds->pfit_pgm_ratios);
128 99
129 I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); 100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
130 I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); 101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
131 lvds_encoder->pfit_dirty = false; 102 intel_lvds->pfit_dirty = false;
132 } 103 }
133 104
134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 105 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -136,13 +107,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
136 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 107 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
137 DRM_ERROR("timed out waiting for panel to power on\n"); 108 DRM_ERROR("timed out waiting for panel to power on\n");
138 109
139 intel_panel_enable_backlight(dev, intel_crtc->pipe); 110 intel_panel_enable_backlight(dev);
140} 111}
141 112
142static void intel_disable_lvds(struct intel_encoder *encoder) 113static void intel_lvds_disable(struct intel_lvds *intel_lvds)
143{ 114{
144 struct drm_device *dev = encoder->base.dev; 115 struct drm_device *dev = intel_lvds->base.base.dev;
145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
146 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 ctl_reg, lvds_reg, stat_reg; 117 u32 ctl_reg, lvds_reg, stat_reg;
148 118
@@ -162,20 +132,32 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
162 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 132 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
163 DRM_ERROR("timed out waiting for panel to power off\n"); 133 DRM_ERROR("timed out waiting for panel to power off\n");
164 134
165 if (lvds_encoder->pfit_control) { 135 if (intel_lvds->pfit_control) {
166 I915_WRITE(PFIT_CONTROL, 0); 136 I915_WRITE(PFIT_CONTROL, 0);
167 lvds_encoder->pfit_dirty = true; 137 intel_lvds->pfit_dirty = true;
168 } 138 }
169 139
170 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 140 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
171 POSTING_READ(lvds_reg); 141 POSTING_READ(lvds_reg);
172} 142}
173 143
144static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
145{
146 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
147
148 if (mode == DRM_MODE_DPMS_ON)
149 intel_lvds_enable(intel_lvds);
150 else
151 intel_lvds_disable(intel_lvds);
152
153 /* XXX: We never power down the LVDS pairs. */
154}
155
174static int intel_lvds_mode_valid(struct drm_connector *connector, 156static int intel_lvds_mode_valid(struct drm_connector *connector,
175 struct drm_display_mode *mode) 157 struct drm_display_mode *mode)
176{ 158{
177 struct intel_connector *intel_connector = to_intel_connector(connector); 159 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
178 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 160 struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
179 161
180 if (mode->hdisplay > fixed_mode->hdisplay) 162 if (mode->hdisplay > fixed_mode->hdisplay)
181 return MODE_PANEL; 163 return MODE_PANEL;
@@ -205,8 +187,6 @@ centre_horizontally(struct drm_display_mode *mode,
205 187
206 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; 188 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
207 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 189 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
208
209 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
210} 190}
211 191
212static void 192static void
@@ -228,8 +208,6 @@ centre_vertically(struct drm_display_mode *mode,
228 208
229 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 209 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
230 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 210 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
231
232 mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
233} 211}
234 212
235static inline u32 panel_fitter_scaling(u32 source, u32 target) 213static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -246,15 +224,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
246} 224}
247 225
248static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, 226static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
249 const struct drm_display_mode *mode, 227 struct drm_display_mode *mode,
250 struct drm_display_mode *adjusted_mode) 228 struct drm_display_mode *adjusted_mode)
251{ 229{
252 struct drm_device *dev = encoder->dev; 230 struct drm_device *dev = encoder->dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 232 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
255 struct intel_connector *intel_connector = 233 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
256 &lvds_encoder->attached_connector->base; 234 struct drm_encoder *tmp_encoder;
257 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
258 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 235 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
259 int pipe; 236 int pipe;
260 237
@@ -264,8 +241,14 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
264 return false; 241 return false;
265 } 242 }
266 243
267 if (intel_encoder_check_is_cloned(&lvds_encoder->base)) 244 /* Should never happen!! */
268 return false; 245 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
246 if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
247 DRM_ERROR("Can't enable LVDS and another "
248 "encoder on the same pipe\n");
249 return false;
250 }
251 }
269 252
270 /* 253 /*
271 * We have timings from the BIOS for the panel, put them in 254 * We have timings from the BIOS for the panel, put them in
@@ -273,12 +256,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
273 * with the panel scaling set up to source from the H/VDisplay 256 * with the panel scaling set up to source from the H/VDisplay
274 * of the original mode. 257 * of the original mode.
275 */ 258 */
276 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 259 intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
277 adjusted_mode);
278 260
279 if (HAS_PCH_SPLIT(dev)) { 261 if (HAS_PCH_SPLIT(dev)) {
280 intel_pch_panel_fitting(dev, 262 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
281 intel_connector->panel.fitting_mode,
282 mode, adjusted_mode); 263 mode, adjusted_mode);
283 return true; 264 return true;
284 } 265 }
@@ -302,9 +283,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
302 for_each_pipe(pipe) 283 for_each_pipe(pipe)
303 I915_WRITE(BCLRPAT(pipe), 0); 284 I915_WRITE(BCLRPAT(pipe), 0);
304 285
305 drm_mode_set_crtcinfo(adjusted_mode, 0); 286 switch (intel_lvds->fitting_mode) {
306
307 switch (intel_connector->panel.fitting_mode) {
308 case DRM_MODE_SCALE_CENTER: 287 case DRM_MODE_SCALE_CENTER:
309 /* 288 /*
310 * For centered modes, we have to calculate border widths & 289 * For centered modes, we have to calculate border widths &
@@ -402,11 +381,11 @@ out:
402 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) 381 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
403 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 382 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
404 383
405 if (pfit_control != lvds_encoder->pfit_control || 384 if (pfit_control != intel_lvds->pfit_control ||
406 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { 385 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
407 lvds_encoder->pfit_control = pfit_control; 386 intel_lvds->pfit_control = pfit_control;
408 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; 387 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
409 lvds_encoder->pfit_dirty = true; 388 intel_lvds->pfit_dirty = true;
410 } 389 }
411 dev_priv->lvds_border_bits = border; 390 dev_priv->lvds_border_bits = border;
412 391
@@ -419,6 +398,29 @@ out:
419 return true; 398 return true;
420} 399}
421 400
401static void intel_lvds_prepare(struct drm_encoder *encoder)
402{
403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
404
405 /*
406 * Prior to Ironlake, we must disable the pipe if we want to adjust
407 * the panel fitter. However at all other times we can just reset
408 * the registers regardless.
409 */
410 if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
411 intel_lvds_disable(intel_lvds);
412}
413
414static void intel_lvds_commit(struct drm_encoder *encoder)
415{
416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
417
418 /* Always do a full power on as we do not know what state
419 * we were left in.
420 */
421 intel_lvds_enable(intel_lvds);
422}
423
422static void intel_lvds_mode_set(struct drm_encoder *encoder, 424static void intel_lvds_mode_set(struct drm_encoder *encoder,
423 struct drm_display_mode *mode, 425 struct drm_display_mode *mode,
424 struct drm_display_mode *adjusted_mode) 426 struct drm_display_mode *adjusted_mode)
@@ -455,15 +457,14 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
455 */ 457 */
456static int intel_lvds_get_modes(struct drm_connector *connector) 458static int intel_lvds_get_modes(struct drm_connector *connector)
457{ 459{
458 struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); 460 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
459 struct drm_device *dev = connector->dev; 461 struct drm_device *dev = connector->dev;
460 struct drm_display_mode *mode; 462 struct drm_display_mode *mode;
461 463
462 /* use cached edid if we have one */ 464 if (intel_lvds->edid)
463 if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) 465 return drm_add_edid_modes(connector, intel_lvds->edid);
464 return drm_add_edid_modes(connector, lvds_connector->base.edid);
465 466
466 mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); 467 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
467 if (mode == NULL) 468 if (mode == NULL)
468 return 0; 469 return 0;
469 470
@@ -473,7 +474,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
473 474
474static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) 475static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
475{ 476{
476 DRM_INFO("Skipping forced modeset for %s\n", id->ident); 477 DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
477 return 1; 478 return 1;
478} 479}
479 480
@@ -503,11 +504,10 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
503static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 504static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
504 void *unused) 505 void *unused)
505{ 506{
506 struct intel_lvds_connector *lvds_connector = 507 struct drm_i915_private *dev_priv =
507 container_of(nb, struct intel_lvds_connector, lid_notifier); 508 container_of(nb, struct drm_i915_private, lid_notifier);
508 struct drm_connector *connector = &lvds_connector->base.base; 509 struct drm_device *dev = dev_priv->dev;
509 struct drm_device *dev = connector->dev; 510 struct drm_connector *connector = dev_priv->int_lvds_connector;
510 struct drm_i915_private *dev_priv = dev->dev_private;
511 511
512 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 512 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
513 return NOTIFY_OK; 513 return NOTIFY_OK;
@@ -516,7 +516,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
516 * check and update the status of LVDS connector after receiving 516 * check and update the status of LVDS connector after receiving
517 * the LID nofication event. 517 * the LID nofication event.
518 */ 518 */
519 connector->status = connector->funcs->detect(connector, false); 519 if (connector)
520 connector->status = connector->funcs->detect(connector,
521 false);
520 522
521 /* Don't force modeset on machines where it causes a GPU lockup */ 523 /* Don't force modeset on machines where it causes a GPU lockup */
522 if (dmi_check_system(intel_no_modeset_on_lid)) 524 if (dmi_check_system(intel_no_modeset_on_lid))
@@ -532,7 +534,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
532 dev_priv->modeset_on_lid = 0; 534 dev_priv->modeset_on_lid = 0;
533 535
534 mutex_lock(&dev->mode_config.mutex); 536 mutex_lock(&dev->mode_config.mutex);
535 intel_modeset_setup_hw_state(dev, true); 537 drm_helper_resume_force_mode(dev);
536 mutex_unlock(&dev->mode_config.mutex); 538 mutex_unlock(&dev->mode_config.mutex);
537 539
538 return NOTIFY_OK; 540 return NOTIFY_OK;
@@ -547,18 +549,13 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
547 */ 549 */
548static void intel_lvds_destroy(struct drm_connector *connector) 550static void intel_lvds_destroy(struct drm_connector *connector)
549{ 551{
550 struct intel_lvds_connector *lvds_connector = 552 struct drm_device *dev = connector->dev;
551 to_lvds_connector(connector); 553 struct drm_i915_private *dev_priv = dev->dev_private;
552
553 if (lvds_connector->lid_notifier.notifier_call)
554 acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
555
556 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
557 kfree(lvds_connector->base.edid);
558 554
559 intel_panel_destroy_backlight(connector->dev); 555 intel_panel_destroy_backlight(dev);
560 intel_panel_fini(&lvds_connector->base.panel);
561 556
557 if (dev_priv->lid_notifier.notifier_call)
558 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
562 drm_sysfs_connector_remove(connector); 559 drm_sysfs_connector_remove(connector);
563 drm_connector_cleanup(connector); 560 drm_connector_cleanup(connector);
564 kfree(connector); 561 kfree(connector);
@@ -568,31 +565,29 @@ static int intel_lvds_set_property(struct drm_connector *connector,
568 struct drm_property *property, 565 struct drm_property *property,
569 uint64_t value) 566 uint64_t value)
570{ 567{
571 struct intel_connector *intel_connector = to_intel_connector(connector); 568 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
572 struct drm_device *dev = connector->dev; 569 struct drm_device *dev = connector->dev;
573 570
574 if (property == dev->mode_config.scaling_mode_property) { 571 if (property == dev->mode_config.scaling_mode_property) {
575 struct drm_crtc *crtc; 572 struct drm_crtc *crtc = intel_lvds->base.base.crtc;
576 573
577 if (value == DRM_MODE_SCALE_NONE) { 574 if (value == DRM_MODE_SCALE_NONE) {
578 DRM_DEBUG_KMS("no scaling not supported\n"); 575 DRM_DEBUG_KMS("no scaling not supported\n");
579 return -EINVAL; 576 return -EINVAL;
580 } 577 }
581 578
582 if (intel_connector->panel.fitting_mode == value) { 579 if (intel_lvds->fitting_mode == value) {
583 /* the LVDS scaling property is not changed */ 580 /* the LVDS scaling property is not changed */
584 return 0; 581 return 0;
585 } 582 }
586 intel_connector->panel.fitting_mode = value; 583 intel_lvds->fitting_mode = value;
587
588 crtc = intel_attached_encoder(connector)->base.crtc;
589 if (crtc && crtc->enabled) { 584 if (crtc && crtc->enabled) {
590 /* 585 /*
591 * If the CRTC is enabled, the display will be changed 586 * If the CRTC is enabled, the display will be changed
592 * according to the new panel fitting mode. 587 * according to the new panel fitting mode.
593 */ 588 */
594 intel_set_mode(crtc, &crtc->mode, 589 drm_crtc_helper_set_mode(crtc, &crtc->mode,
595 crtc->x, crtc->y, crtc->fb); 590 crtc->x, crtc->y, crtc->fb);
596 } 591 }
597 } 592 }
598 593
@@ -600,9 +595,11 @@ static int intel_lvds_set_property(struct drm_connector *connector,
600} 595}
601 596
602static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 597static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
598 .dpms = intel_lvds_dpms,
603 .mode_fixup = intel_lvds_mode_fixup, 599 .mode_fixup = intel_lvds_mode_fixup,
600 .prepare = intel_lvds_prepare,
604 .mode_set = intel_lvds_mode_set, 601 .mode_set = intel_lvds_mode_set,
605 .disable = intel_encoder_noop, 602 .commit = intel_lvds_commit,
606}; 603};
607 604
608static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 605static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -612,7 +609,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
612}; 609};
613 610
614static const struct drm_connector_funcs intel_lvds_connector_funcs = { 611static const struct drm_connector_funcs intel_lvds_connector_funcs = {
615 .dpms = intel_connector_dpms, 612 .dpms = drm_helper_connector_dpms,
616 .detect = intel_lvds_detect, 613 .detect = intel_lvds_detect,
617 .fill_modes = drm_helper_probe_single_connector_modes, 614 .fill_modes = drm_helper_probe_single_connector_modes,
618 .set_property = intel_lvds_set_property, 615 .set_property = intel_lvds_set_property,
@@ -625,7 +622,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
625 622
626static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 623static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
627{ 624{
628 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); 625 DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
629 return 1; 626 return 1;
630} 627}
631 628
@@ -697,14 +694,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
697 }, 694 },
698 { 695 {
699 .callback = intel_no_lvds_dmi_callback, 696 .callback = intel_no_lvds_dmi_callback,
700 .ident = "AOpen i45GMx-I",
701 .matches = {
702 DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
703 DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
704 },
705 },
706 {
707 .callback = intel_no_lvds_dmi_callback,
708 .ident = "Aopen i945GTt-VFA", 697 .ident = "Aopen i945GTt-VFA",
709 .matches = { 698 .matches = {
710 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 699 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@ -719,14 +708,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
719 }, 708 },
720 }, 709 },
721 { 710 {
722 .callback = intel_no_lvds_dmi_callback,
723 .ident = "Clientron E830",
724 .matches = {
725 DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
726 DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
727 },
728 },
729 {
730 .callback = intel_no_lvds_dmi_callback, 711 .callback = intel_no_lvds_dmi_callback,
731 .ident = "Asus EeeBox PC EB1007", 712 .ident = "Asus EeeBox PC EB1007",
732 .matches = { 713 .matches = {
@@ -734,62 +715,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
734 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), 715 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
735 }, 716 },
736 }, 717 },
737 {
738 .callback = intel_no_lvds_dmi_callback,
739 .ident = "Asus AT5NM10T-I",
740 .matches = {
741 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
742 DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
743 },
744 },
745 {
746 .callback = intel_no_lvds_dmi_callback,
747 .ident = "Hewlett-Packard HP t5740e Thin Client",
748 .matches = {
749 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
750 DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
751 },
752 },
753 {
754 .callback = intel_no_lvds_dmi_callback,
755 .ident = "Hewlett-Packard t5745",
756 .matches = {
757 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
758 DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
759 },
760 },
761 {
762 .callback = intel_no_lvds_dmi_callback,
763 .ident = "Hewlett-Packard st5747",
764 .matches = {
765 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
766 DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
767 },
768 },
769 {
770 .callback = intel_no_lvds_dmi_callback,
771 .ident = "MSI Wind Box DC500",
772 .matches = {
773 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
774 DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
775 },
776 },
777 {
778 .callback = intel_no_lvds_dmi_callback,
779 .ident = "Gigabyte GA-D525TUD",
780 .matches = {
781 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
782 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
783 },
784 },
785 {
786 .callback = intel_no_lvds_dmi_callback,
787 .ident = "Supermicro X7SPA-H",
788 .matches = {
789 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
790 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
791 },
792 },
793 718
794 { } /* terminating entry */ 719 { } /* terminating entry */
795}; 720};
@@ -872,8 +797,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
872 child->device_type != DEVICE_TYPE_LFP) 797 child->device_type != DEVICE_TYPE_LFP)
873 continue; 798 continue;
874 799
875 if (intel_gmbus_is_port_valid(child->i2c_pin)) 800 if (child->i2c_pin)
876 *i2c_pin = child->i2c_pin; 801 *i2c_pin = child->i2c_pin;
877 802
878 /* However, we cannot trust the BIOS writers to populate 803 /* However, we cannot trust the BIOS writers to populate
879 * the VBT correctly. Since LVDS requires additional 804 * the VBT correctly. Since LVDS requires additional
@@ -895,18 +820,6 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
895 return false; 820 return false;
896} 821}
897 822
898static bool intel_lvds_supported(struct drm_device *dev)
899{
900 /* With the introduction of the PCH we gained a dedicated
901 * LVDS presence pin, use it. */
902 if (HAS_PCH_SPLIT(dev))
903 return true;
904
905 /* Otherwise LVDS was only attached to mobile products,
906 * except for the inglorious 830gm */
907 return IS_MOBILE(dev) && !IS_I830(dev);
908}
909
910/** 823/**
911 * intel_lvds_init - setup LVDS connectors on this device 824 * intel_lvds_init - setup LVDS connectors on this device
912 * @dev: drm device 825 * @dev: drm device
@@ -917,23 +830,17 @@ static bool intel_lvds_supported(struct drm_device *dev)
917bool intel_lvds_init(struct drm_device *dev) 830bool intel_lvds_init(struct drm_device *dev)
918{ 831{
919 struct drm_i915_private *dev_priv = dev->dev_private; 832 struct drm_i915_private *dev_priv = dev->dev_private;
920 struct intel_lvds_encoder *lvds_encoder; 833 struct intel_lvds *intel_lvds;
921 struct intel_encoder *intel_encoder; 834 struct intel_encoder *intel_encoder;
922 struct intel_lvds_connector *lvds_connector;
923 struct intel_connector *intel_connector; 835 struct intel_connector *intel_connector;
924 struct drm_connector *connector; 836 struct drm_connector *connector;
925 struct drm_encoder *encoder; 837 struct drm_encoder *encoder;
926 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 838 struct drm_display_mode *scan; /* *modes, *bios_mode; */
927 struct drm_display_mode *fixed_mode = NULL;
928 struct edid *edid;
929 struct drm_crtc *crtc; 839 struct drm_crtc *crtc;
930 u32 lvds; 840 u32 lvds;
931 int pipe; 841 int pipe;
932 u8 pin; 842 u8 pin;
933 843
934 if (!intel_lvds_supported(dev))
935 return false;
936
937 /* Skip init on machines we know falsely report LVDS */ 844 /* Skip init on machines we know falsely report LVDS */
938 if (dmi_check_system(intel_no_lvds)) 845 if (dmi_check_system(intel_no_lvds))
939 return false; 846 return false;
@@ -953,25 +860,23 @@ bool intel_lvds_init(struct drm_device *dev)
953 } 860 }
954 } 861 }
955 862
956 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 863 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
957 if (!lvds_encoder) 864 if (!intel_lvds) {
958 return false; 865 return false;
866 }
959 867
960 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 868 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
961 if (!lvds_connector) { 869 if (!intel_connector) {
962 kfree(lvds_encoder); 870 kfree(intel_lvds);
963 return false; 871 return false;
964 } 872 }
965 873
966 lvds_encoder->attached_connector = lvds_connector;
967
968 if (!HAS_PCH_SPLIT(dev)) { 874 if (!HAS_PCH_SPLIT(dev)) {
969 lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); 875 intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
970 } 876 }
971 877
972 intel_encoder = &lvds_encoder->base; 878 intel_encoder = &intel_lvds->base;
973 encoder = &intel_encoder->base; 879 encoder = &intel_encoder->base;
974 intel_connector = &lvds_connector->base;
975 connector = &intel_connector->base; 880 connector = &intel_connector->base;
976 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 881 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
977 DRM_MODE_CONNECTOR_LVDS); 882 DRM_MODE_CONNECTOR_LVDS);
@@ -979,22 +884,13 @@ bool intel_lvds_init(struct drm_device *dev)
979 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 884 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
980 DRM_MODE_ENCODER_LVDS); 885 DRM_MODE_ENCODER_LVDS);
981 886
982 intel_encoder->enable = intel_enable_lvds;
983 intel_encoder->disable = intel_disable_lvds;
984 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
985 intel_connector->get_hw_state = intel_connector_get_hw_state;
986
987 intel_connector_attach_encoder(intel_connector, intel_encoder); 887 intel_connector_attach_encoder(intel_connector, intel_encoder);
988 intel_encoder->type = INTEL_OUTPUT_LVDS; 888 intel_encoder->type = INTEL_OUTPUT_LVDS;
989 889
990 intel_encoder->cloneable = false; 890 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
991 if (HAS_PCH_SPLIT(dev)) 891 intel_encoder->crtc_mask = (1 << 1);
992 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 892 if (INTEL_INFO(dev)->gen >= 5)
993 else if (IS_GEN4(dev)) 893 intel_encoder->crtc_mask |= (1 << 0);
994 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
995 else
996 intel_encoder->crtc_mask = (1 << 1);
997
998 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 894 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
999 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 895 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
1000 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 896 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1003,10 +899,14 @@ bool intel_lvds_init(struct drm_device *dev)
1003 899
1004 /* create the scaling mode property */ 900 /* create the scaling mode property */
1005 drm_mode_create_scaling_mode_property(dev); 901 drm_mode_create_scaling_mode_property(dev);
1006 drm_object_attach_property(&connector->base, 902 /*
903 * the initial panel fitting mode will be FULL_SCREEN.
904 */
905
906 drm_connector_attach_property(&intel_connector->base,
1007 dev->mode_config.scaling_mode_property, 907 dev->mode_config.scaling_mode_property,
1008 DRM_MODE_SCALE_ASPECT); 908 DRM_MODE_SCALE_ASPECT);
1009 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 909 intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
1010 /* 910 /*
1011 * LVDS discovery: 911 * LVDS discovery:
1012 * 1) check for EDID on DDC 912 * 1) check for EDID on DDC
@@ -1021,21 +921,19 @@ bool intel_lvds_init(struct drm_device *dev)
1021 * Attempt to get the fixed panel mode from DDC. Assume that the 921 * Attempt to get the fixed panel mode from DDC. Assume that the
1022 * preferred mode is the right one. 922 * preferred mode is the right one.
1023 */ 923 */
1024 edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); 924 intel_lvds->edid = drm_get_edid(connector,
1025 if (edid) { 925 &dev_priv->gmbus[pin].adapter);
1026 if (drm_add_edid_modes(connector, edid)) { 926 if (intel_lvds->edid) {
927 if (drm_add_edid_modes(connector,
928 intel_lvds->edid)) {
1027 drm_mode_connector_update_edid_property(connector, 929 drm_mode_connector_update_edid_property(connector,
1028 edid); 930 intel_lvds->edid);
1029 } else { 931 } else {
1030 kfree(edid); 932 kfree(intel_lvds->edid);
1031 edid = ERR_PTR(-EINVAL); 933 intel_lvds->edid = NULL;
1032 } 934 }
1033 } else {
1034 edid = ERR_PTR(-ENOENT);
1035 } 935 }
1036 lvds_connector->base.edid = edid; 936 if (!intel_lvds->edid) {
1037
1038 if (IS_ERR_OR_NULL(edid)) {
1039 /* Didn't get an EDID, so 937 /* Didn't get an EDID, so
1040 * Set wide sync ranges so we get all modes 938 * Set wide sync ranges so we get all modes
1041 * handed to valid_mode for checking 939 * handed to valid_mode for checking
@@ -1048,26 +946,22 @@ bool intel_lvds_init(struct drm_device *dev)
1048 946
1049 list_for_each_entry(scan, &connector->probed_modes, head) { 947 list_for_each_entry(scan, &connector->probed_modes, head) {
1050 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 948 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
1051 DRM_DEBUG_KMS("using preferred mode from EDID: "); 949 intel_lvds->fixed_mode =
1052 drm_mode_debug_printmodeline(scan); 950 drm_mode_duplicate(dev, scan);
1053 951 intel_find_lvds_downclock(dev,
1054 fixed_mode = drm_mode_duplicate(dev, scan); 952 intel_lvds->fixed_mode,
1055 if (fixed_mode) { 953 connector);
1056 intel_find_lvds_downclock(dev, fixed_mode, 954 goto out;
1057 connector);
1058 goto out;
1059 }
1060 } 955 }
1061 } 956 }
1062 957
1063 /* Failed to get EDID, what about VBT? */ 958 /* Failed to get EDID, what about VBT? */
1064 if (dev_priv->lfp_lvds_vbt_mode) { 959 if (dev_priv->lfp_lvds_vbt_mode) {
1065 DRM_DEBUG_KMS("using mode from VBT: "); 960 intel_lvds->fixed_mode =
1066 drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); 961 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
1067 962 if (intel_lvds->fixed_mode) {
1068 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 963 intel_lvds->fixed_mode->type |=
1069 if (fixed_mode) { 964 DRM_MODE_TYPE_PREFERRED;
1070 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1071 goto out; 965 goto out;
1072 } 966 }
1073 } 967 }
@@ -1087,40 +981,61 @@ bool intel_lvds_init(struct drm_device *dev)
1087 crtc = intel_get_crtc_for_pipe(dev, pipe); 981 crtc = intel_get_crtc_for_pipe(dev, pipe);
1088 982
1089 if (crtc && (lvds & LVDS_PORT_EN)) { 983 if (crtc && (lvds & LVDS_PORT_EN)) {
1090 fixed_mode = intel_crtc_mode_get(dev, crtc); 984 intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
1091 if (fixed_mode) { 985 if (intel_lvds->fixed_mode) {
1092 DRM_DEBUG_KMS("using current (BIOS) mode: "); 986 intel_lvds->fixed_mode->type |=
1093 drm_mode_debug_printmodeline(fixed_mode); 987 DRM_MODE_TYPE_PREFERRED;
1094 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1095 goto out; 988 goto out;
1096 } 989 }
1097 } 990 }
1098 991
1099 /* If we still don't have a mode after all that, give up. */ 992 /* If we still don't have a mode after all that, give up. */
1100 if (!fixed_mode) 993 if (!intel_lvds->fixed_mode)
1101 goto failed; 994 goto failed;
1102 995
1103out: 996out:
1104 /*
1105 * Unlock registers and just
1106 * leave them unlocked
1107 */
1108 if (HAS_PCH_SPLIT(dev)) { 997 if (HAS_PCH_SPLIT(dev)) {
998 u32 pwm;
999
1000 pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
1001
1002 /* make sure PWM is enabled and locked to the LVDS pipe */
1003 pwm = I915_READ(BLC_PWM_CPU_CTL2);
1004 if (pipe == 0 && (pwm & PWM_PIPE_B))
1005 I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
1006 if (pipe)
1007 pwm |= PWM_PIPE_B;
1008 else
1009 pwm &= ~PWM_PIPE_B;
1010 I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
1011
1012 pwm = I915_READ(BLC_PWM_PCH_CTL1);
1013 pwm |= PWM_PCH_ENABLE;
1014 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1015 /*
1016 * Unlock registers and just
1017 * leave them unlocked
1018 */
1109 I915_WRITE(PCH_PP_CONTROL, 1019 I915_WRITE(PCH_PP_CONTROL,
1110 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); 1020 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1111 } else { 1021 } else {
1022 /*
1023 * Unlock registers and just
1024 * leave them unlocked
1025 */
1112 I915_WRITE(PP_CONTROL, 1026 I915_WRITE(PP_CONTROL,
1113 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 1027 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1114 } 1028 }
1115 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1029 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1116 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1030 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1117 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1031 DRM_DEBUG_KMS("lid notifier registration failed\n");
1118 lvds_connector->lid_notifier.notifier_call = NULL; 1032 dev_priv->lid_notifier.notifier_call = NULL;
1119 } 1033 }
1034 /* keep the LVDS connector */
1035 dev_priv->int_lvds_connector = connector;
1120 drm_sysfs_connector_add(connector); 1036 drm_sysfs_connector_add(connector);
1121 1037
1122 intel_panel_init(&intel_connector->panel, fixed_mode); 1038 intel_panel_setup_backlight(dev);
1123 intel_panel_setup_backlight(connector);
1124 1039
1125 return true; 1040 return true;
1126 1041
@@ -1128,9 +1043,7 @@ failed:
1128 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1043 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1129 drm_connector_cleanup(connector); 1044 drm_connector_cleanup(connector);
1130 drm_encoder_cleanup(encoder); 1045 drm_encoder_cleanup(encoder);
1131 if (fixed_mode) 1046 kfree(intel_lvds);
1132 drm_mode_destroy(dev, fixed_mode); 1047 kfree(intel_connector);
1133 kfree(lvds_encoder);
1134 kfree(lvds_connector);
1135 return false; 1048 return false;
1136} 1049}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adc..3b26a3ba02d 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -26,27 +26,35 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/fb.h> 28#include <linux/fb.h>
29#include <drm/drm_edid.h> 29#include "drmP.h"
30#include <drm/drmP.h>
31#include <drm/drm_edid.h>
32#include "intel_drv.h" 30#include "intel_drv.h"
33#include "i915_drv.h" 31#include "i915_drv.h"
34 32
35/** 33/**
36 * intel_connector_update_modes - update connector from edid 34 * intel_ddc_probe
37 * @connector: DRM connector device to use 35 *
38 * @edid: previously read EDID information
39 */ 36 */
40int intel_connector_update_modes(struct drm_connector *connector, 37bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
41 struct edid *edid)
42{ 38{
43 int ret; 39 struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
44 40 u8 out_buf[] = { 0x0, 0x0};
45 drm_mode_connector_update_edid_property(connector, edid); 41 u8 buf[2];
46 ret = drm_add_edid_modes(connector, edid); 42 struct i2c_msg msgs[] = {
47 drm_edid_to_eld(connector, edid); 43 {
48 44 .addr = 0x50,
49 return ret; 45 .flags = 0,
46 .len = 1,
47 .buf = out_buf,
48 },
49 {
50 .addr = 0x50,
51 .flags = I2C_M_RD,
52 .len = 1,
53 .buf = buf,
54 }
55 };
56
57 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
50} 58}
51 59
52/** 60/**
@@ -60,23 +68,23 @@ int intel_ddc_get_modes(struct drm_connector *connector,
60 struct i2c_adapter *adapter) 68 struct i2c_adapter *adapter)
61{ 69{
62 struct edid *edid; 70 struct edid *edid;
63 int ret; 71 int ret = 0;
64 72
65 edid = drm_get_edid(connector, adapter); 73 edid = drm_get_edid(connector, adapter);
66 if (!edid) 74 if (edid) {
67 return 0; 75 drm_mode_connector_update_edid_property(connector, edid);
68 76 ret = drm_add_edid_modes(connector, edid);
69 ret = intel_connector_update_modes(connector, edid); 77 connector->display_info.raw_edid = NULL;
70 kfree(edid); 78 kfree(edid);
79 }
71 80
72 return ret; 81 return ret;
73} 82}
74 83
75static const struct drm_prop_enum_list force_audio_names[] = { 84static const char *force_audio_names[] = {
76 { HDMI_AUDIO_OFF_DVI, "force-dvi" }, 85 "off",
77 { HDMI_AUDIO_OFF, "off" }, 86 "auto",
78 { HDMI_AUDIO_AUTO, "auto" }, 87 "on",
79 { HDMI_AUDIO_ON, "on" },
80}; 88};
81 89
82void 90void
@@ -85,24 +93,27 @@ intel_attach_force_audio_property(struct drm_connector *connector)
85 struct drm_device *dev = connector->dev; 93 struct drm_device *dev = connector->dev;
86 struct drm_i915_private *dev_priv = dev->dev_private; 94 struct drm_i915_private *dev_priv = dev->dev_private;
87 struct drm_property *prop; 95 struct drm_property *prop;
96 int i;
88 97
89 prop = dev_priv->force_audio_property; 98 prop = dev_priv->force_audio_property;
90 if (prop == NULL) { 99 if (prop == NULL) {
91 prop = drm_property_create_enum(dev, 0, 100 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
92 "audio", 101 "audio",
93 force_audio_names,
94 ARRAY_SIZE(force_audio_names)); 102 ARRAY_SIZE(force_audio_names));
95 if (prop == NULL) 103 if (prop == NULL)
96 return; 104 return;
97 105
106 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
107 drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
108
98 dev_priv->force_audio_property = prop; 109 dev_priv->force_audio_property = prop;
99 } 110 }
100 drm_object_attach_property(&connector->base, prop, 0); 111 drm_connector_attach_property(connector, prop, 0);
101} 112}
102 113
103static const struct drm_prop_enum_list broadcast_rgb_names[] = { 114static const char *broadcast_rgb_names[] = {
104 { 0, "Full" }, 115 "Full",
105 { 1, "Limited 16:235" }, 116 "Limited 16:235",
106}; 117};
107 118
108void 119void
@@ -111,18 +122,21 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
111 struct drm_device *dev = connector->dev; 122 struct drm_device *dev = connector->dev;
112 struct drm_i915_private *dev_priv = dev->dev_private; 123 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct drm_property *prop; 124 struct drm_property *prop;
125 int i;
114 126
115 prop = dev_priv->broadcast_rgb_property; 127 prop = dev_priv->broadcast_rgb_property;
116 if (prop == NULL) { 128 if (prop == NULL) {
117 prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, 129 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
118 "Broadcast RGB", 130 "Broadcast RGB",
119 broadcast_rgb_names,
120 ARRAY_SIZE(broadcast_rgb_names)); 131 ARRAY_SIZE(broadcast_rgb_names));
121 if (prop == NULL) 132 if (prop == NULL)
122 return; 133 return;
123 134
135 for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
136 drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
137
124 dev_priv->broadcast_rgb_property = prop; 138 dev_priv->broadcast_rgb_property = prop;
125 } 139 }
126 140
127 drm_object_attach_property(&connector->base, prop, 0); 141 drm_connector_attach_property(connector, prop, 0);
128} 142}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934..b8e8158bb16 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,14 +25,12 @@
25 * 25 *
26 */ 26 */
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30#include <linux/acpi.h> 28#include <linux/acpi.h>
31#include <linux/acpi_io.h> 29#include <linux/acpi_io.h>
32#include <acpi/video.h> 30#include <acpi/video.h>
33 31
34#include <drm/drmP.h> 32#include "drmP.h"
35#include <drm/i915_drm.h> 33#include "i915_drm.h"
36#include "i915_drv.h" 34#include "i915_drv.h"
37#include "intel_drv.h" 35#include "intel_drv.h"
38 36
@@ -53,61 +51,61 @@
53#define MBOX_ASLE (1<<2) 51#define MBOX_ASLE (1<<2)
54 52
55struct opregion_header { 53struct opregion_header {
56 u8 signature[16]; 54 u8 signature[16];
57 u32 size; 55 u32 size;
58 u32 opregion_ver; 56 u32 opregion_ver;
59 u8 bios_ver[32]; 57 u8 bios_ver[32];
60 u8 vbios_ver[16]; 58 u8 vbios_ver[16];
61 u8 driver_ver[16]; 59 u8 driver_ver[16];
62 u32 mboxes; 60 u32 mboxes;
63 u8 reserved[164]; 61 u8 reserved[164];
64} __attribute__((packed)); 62} __attribute__((packed));
65 63
66/* OpRegion mailbox #1: public ACPI methods */ 64/* OpRegion mailbox #1: public ACPI methods */
67struct opregion_acpi { 65struct opregion_acpi {
68 u32 drdy; /* driver readiness */ 66 u32 drdy; /* driver readiness */
69 u32 csts; /* notification status */ 67 u32 csts; /* notification status */
70 u32 cevt; /* current event */ 68 u32 cevt; /* current event */
71 u8 rsvd1[20]; 69 u8 rsvd1[20];
72 u32 didl[8]; /* supported display devices ID list */ 70 u32 didl[8]; /* supported display devices ID list */
73 u32 cpdl[8]; /* currently presented display list */ 71 u32 cpdl[8]; /* currently presented display list */
74 u32 cadl[8]; /* currently active display list */ 72 u32 cadl[8]; /* currently active display list */
75 u32 nadl[8]; /* next active devices list */ 73 u32 nadl[8]; /* next active devices list */
76 u32 aslp; /* ASL sleep time-out */ 74 u32 aslp; /* ASL sleep time-out */
77 u32 tidx; /* toggle table index */ 75 u32 tidx; /* toggle table index */
78 u32 chpd; /* current hotplug enable indicator */ 76 u32 chpd; /* current hotplug enable indicator */
79 u32 clid; /* current lid state*/ 77 u32 clid; /* current lid state*/
80 u32 cdck; /* current docking state */ 78 u32 cdck; /* current docking state */
81 u32 sxsw; /* Sx state resume */ 79 u32 sxsw; /* Sx state resume */
82 u32 evts; /* ASL supported events */ 80 u32 evts; /* ASL supported events */
83 u32 cnot; /* current OS notification */ 81 u32 cnot; /* current OS notification */
84 u32 nrdy; /* driver status */ 82 u32 nrdy; /* driver status */
85 u8 rsvd2[60]; 83 u8 rsvd2[60];
86} __attribute__((packed)); 84} __attribute__((packed));
87 85
88/* OpRegion mailbox #2: SWSCI */ 86/* OpRegion mailbox #2: SWSCI */
89struct opregion_swsci { 87struct opregion_swsci {
90 u32 scic; /* SWSCI command|status|data */ 88 u32 scic; /* SWSCI command|status|data */
91 u32 parm; /* command parameters */ 89 u32 parm; /* command parameters */
92 u32 dslp; /* driver sleep time-out */ 90 u32 dslp; /* driver sleep time-out */
93 u8 rsvd[244]; 91 u8 rsvd[244];
94} __attribute__((packed)); 92} __attribute__((packed));
95 93
96/* OpRegion mailbox #3: ASLE */ 94/* OpRegion mailbox #3: ASLE */
97struct opregion_asle { 95struct opregion_asle {
98 u32 ardy; /* driver readiness */ 96 u32 ardy; /* driver readiness */
99 u32 aslc; /* ASLE interrupt command */ 97 u32 aslc; /* ASLE interrupt command */
100 u32 tche; /* technology enabled indicator */ 98 u32 tche; /* technology enabled indicator */
101 u32 alsi; /* current ALS illuminance reading */ 99 u32 alsi; /* current ALS illuminance reading */
102 u32 bclp; /* backlight brightness to set */ 100 u32 bclp; /* backlight brightness to set */
103 u32 pfit; /* panel fitting state */ 101 u32 pfit; /* panel fitting state */
104 u32 cblv; /* current brightness level */ 102 u32 cblv; /* current brightness level */
105 u16 bclm[20]; /* backlight level duty cycle mapping table */ 103 u16 bclm[20]; /* backlight level duty cycle mapping table */
106 u32 cpfm; /* current panel fitting mode */ 104 u32 cpfm; /* current panel fitting mode */
107 u32 epfm; /* enabled panel fitting modes */ 105 u32 epfm; /* enabled panel fitting modes */
108 u8 plut[74]; /* panel LUT and identifier */ 106 u8 plut[74]; /* panel LUT and identifier */
109 u32 pfmb; /* PWM freq and min brightness */ 107 u32 pfmb; /* PWM freq and min brightness */
110 u8 rsvd[102]; 108 u8 rsvd[102];
111} __attribute__((packed)); 109} __attribute__((packed));
112 110
113/* ASLE irq request bits */ 111/* ASLE irq request bits */
@@ -151,11 +149,9 @@ struct opregion_asle {
151static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 149static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
152{ 150{
153 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_private *dev_priv = dev->dev_private;
154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 152 struct opregion_asle *asle = dev_priv->opregion.asle;
155 u32 max; 153 u32 max;
156 154
157 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
158
159 if (!(bclp & ASLE_BCLP_VALID)) 155 if (!(bclp & ASLE_BCLP_VALID))
160 return ASLE_BACKLIGHT_FAILED; 156 return ASLE_BACKLIGHT_FAILED;
161 157
@@ -165,7 +161,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
165 161
166 max = intel_panel_get_max_backlight(dev); 162 max = intel_panel_get_max_backlight(dev);
167 intel_panel_set_backlight(dev, bclp * max / 255); 163 intel_panel_set_backlight(dev, bclp * max / 255);
168 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); 164 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
169 165
170 return 0; 166 return 0;
171} 167}
@@ -202,14 +198,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
202void intel_opregion_asle_intr(struct drm_device *dev) 198void intel_opregion_asle_intr(struct drm_device *dev)
203{ 199{
204 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
205 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 201 struct opregion_asle *asle = dev_priv->opregion.asle;
206 u32 asle_stat = 0; 202 u32 asle_stat = 0;
207 u32 asle_req; 203 u32 asle_req;
208 204
209 if (!asle) 205 if (!asle)
210 return; 206 return;
211 207
212 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; 208 asle_req = asle->aslc & ASLE_REQ_MSK;
213 209
214 if (!asle_req) { 210 if (!asle_req) {
215 DRM_DEBUG_DRIVER("non asle set request??\n"); 211 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -217,31 +213,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
217 } 213 }
218 214
219 if (asle_req & ASLE_SET_ALS_ILLUM) 215 if (asle_req & ASLE_SET_ALS_ILLUM)
220 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); 216 asle_stat |= asle_set_als_illum(dev, asle->alsi);
221 217
222 if (asle_req & ASLE_SET_BACKLIGHT) 218 if (asle_req & ASLE_SET_BACKLIGHT)
223 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 219 asle_stat |= asle_set_backlight(dev, asle->bclp);
224 220
225 if (asle_req & ASLE_SET_PFIT) 221 if (asle_req & ASLE_SET_PFIT)
226 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); 222 asle_stat |= asle_set_pfit(dev, asle->pfit);
227 223
228 if (asle_req & ASLE_SET_PWM_FREQ) 224 if (asle_req & ASLE_SET_PWM_FREQ)
229 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); 225 asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
230 226
231 iowrite32(asle_stat, &asle->aslc); 227 asle->aslc = asle_stat;
232} 228}
233 229
234void intel_opregion_gse_intr(struct drm_device *dev) 230void intel_opregion_gse_intr(struct drm_device *dev)
235{ 231{
236 struct drm_i915_private *dev_priv = dev->dev_private; 232 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 233 struct opregion_asle *asle = dev_priv->opregion.asle;
238 u32 asle_stat = 0; 234 u32 asle_stat = 0;
239 u32 asle_req; 235 u32 asle_req;
240 236
241 if (!asle) 237 if (!asle)
242 return; 238 return;
243 239
244 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; 240 asle_req = asle->aslc & ASLE_REQ_MSK;
245 241
246 if (!asle_req) { 242 if (!asle_req) {
247 DRM_DEBUG_DRIVER("non asle set request??\n"); 243 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -254,7 +250,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
254 } 250 }
255 251
256 if (asle_req & ASLE_SET_BACKLIGHT) 252 if (asle_req & ASLE_SET_BACKLIGHT)
257 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); 253 asle_stat |= asle_set_backlight(dev, asle->bclp);
258 254
259 if (asle_req & ASLE_SET_PFIT) { 255 if (asle_req & ASLE_SET_PFIT) {
260 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 256 DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -266,7 +262,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
266 asle_stat |= ASLE_PWM_FREQ_FAILED; 262 asle_stat |= ASLE_PWM_FREQ_FAILED;
267 } 263 }
268 264
269 iowrite32(asle_stat, &asle->aslc); 265 asle->aslc = asle_stat;
270} 266}
271#define ASLE_ALS_EN (1<<0) 267#define ASLE_ALS_EN (1<<0)
272#define ASLE_BLC_EN (1<<1) 268#define ASLE_BLC_EN (1<<1)
@@ -276,16 +272,15 @@ void intel_opregion_gse_intr(struct drm_device *dev)
276void intel_opregion_enable_asle(struct drm_device *dev) 272void intel_opregion_enable_asle(struct drm_device *dev)
277{ 273{
278 struct drm_i915_private *dev_priv = dev->dev_private; 274 struct drm_i915_private *dev_priv = dev->dev_private;
279 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 275 struct opregion_asle *asle = dev_priv->opregion.asle;
280 276
281 if (asle) { 277 if (asle) {
282 if (IS_MOBILE(dev)) 278 if (IS_MOBILE(dev))
283 intel_enable_asle(dev); 279 intel_enable_asle(dev);
284 280
285 iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 281 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
286 ASLE_PFMB_EN, 282 ASLE_PFMB_EN;
287 &asle->tche); 283 asle->ardy = 1;
288 iowrite32(1, &asle->ardy);
289 } 284 }
290} 285}
291 286
@@ -303,7 +298,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
303 Linux, these are handled by the dock, button and video drivers. 298 Linux, these are handled by the dock, button and video drivers.
304 */ 299 */
305 300
306 struct opregion_acpi __iomem *acpi; 301 struct opregion_acpi *acpi;
307 struct acpi_bus_event *event = data; 302 struct acpi_bus_event *event = data;
308 int ret = NOTIFY_OK; 303 int ret = NOTIFY_OK;
309 304
@@ -315,11 +310,10 @@ static int intel_opregion_video_event(struct notifier_block *nb,
315 310
316 acpi = system_opregion->acpi; 311 acpi = system_opregion->acpi;
317 312
318 if (event->type == 0x80 && 313 if (event->type == 0x80 && !(acpi->cevt & 0x1))
319 (ioread32(&acpi->cevt) & 1) == 0)
320 ret = NOTIFY_BAD; 314 ret = NOTIFY_BAD;
321 315
322 iowrite32(0, &acpi->csts); 316 acpi->csts = 0;
323 317
324 return ret; 318 return ret;
325} 319}
@@ -343,7 +337,6 @@ static void intel_didl_outputs(struct drm_device *dev)
343 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 337 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
344 unsigned long long device_id; 338 unsigned long long device_id;
345 acpi_status status; 339 acpi_status status;
346 u32 temp;
347 int i = 0; 340 int i = 0;
348 341
349 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 342 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -362,13 +355,13 @@ static void intel_didl_outputs(struct drm_device *dev)
362 } 355 }
363 356
364 if (!acpi_video_bus) { 357 if (!acpi_video_bus) {
365 pr_warn("No ACPI video bus found\n"); 358 printk(KERN_WARNING "No ACPI video bus found\n");
366 return; 359 return;
367 } 360 }
368 361
369 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { 362 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
370 if (i >= 8) { 363 if (i >= 8) {
371 dev_printk(KERN_ERR, &dev->pdev->dev, 364 dev_printk (KERN_ERR, &dev->pdev->dev,
372 "More than 8 outputs detected\n"); 365 "More than 8 outputs detected\n");
373 return; 366 return;
374 } 367 }
@@ -378,8 +371,7 @@ static void intel_didl_outputs(struct drm_device *dev)
378 if (ACPI_SUCCESS(status)) { 371 if (ACPI_SUCCESS(status)) {
379 if (!device_id) 372 if (!device_id)
380 goto blind_set; 373 goto blind_set;
381 iowrite32((u32)(device_id & 0x0f0f), 374 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
382 &opregion->acpi->didl[i]);
383 i++; 375 i++;
384 } 376 }
385 } 377 }
@@ -387,7 +379,7 @@ static void intel_didl_outputs(struct drm_device *dev)
387end: 379end:
388 /* If fewer than 8 outputs, the list must be null terminated */ 380 /* If fewer than 8 outputs, the list must be null terminated */
389 if (i < 8) 381 if (i < 8)
390 iowrite32(0, &opregion->acpi->didl[i]); 382 opregion->acpi->didl[i] = 0;
391 return; 383 return;
392 384
393blind_set: 385blind_set:
@@ -395,7 +387,7 @@ blind_set:
395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 387 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
396 int output_type = ACPI_OTHER_OUTPUT; 388 int output_type = ACPI_OTHER_OUTPUT;
397 if (i >= 8) { 389 if (i >= 8) {
398 dev_printk(KERN_ERR, &dev->pdev->dev, 390 dev_printk (KERN_ERR, &dev->pdev->dev,
399 "More than 8 outputs detected\n"); 391 "More than 8 outputs detected\n");
400 return; 392 return;
401 } 393 }
@@ -421,33 +413,12 @@ blind_set:
421 output_type = ACPI_LVDS_OUTPUT; 413 output_type = ACPI_LVDS_OUTPUT;
422 break; 414 break;
423 } 415 }
424 temp = ioread32(&opregion->acpi->didl[i]); 416 opregion->acpi->didl[i] |= (1<<31) | output_type | i;
425 iowrite32(temp | (1<<31) | output_type | i,
426 &opregion->acpi->didl[i]);
427 i++; 417 i++;
428 } 418 }
429 goto end; 419 goto end;
430} 420}
431 421
432static void intel_setup_cadls(struct drm_device *dev)
433{
434 struct drm_i915_private *dev_priv = dev->dev_private;
435 struct intel_opregion *opregion = &dev_priv->opregion;
436 int i = 0;
437 u32 disp_id;
438
439 /* Initialize the CADL field by duplicating the DIDL values.
440 * Technically, this is not always correct as display outputs may exist,
441 * but not active. This initialization is necessary for some Clevo
442 * laptops that check this field before processing the brightness and
443 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
444 * there are less than eight devices. */
445 do {
446 disp_id = ioread32(&opregion->acpi->didl[i]);
447 iowrite32(disp_id, &opregion->acpi->cadl[i]);
448 } while (++i < 8 && disp_id != 0);
449}
450
451void intel_opregion_init(struct drm_device *dev) 422void intel_opregion_init(struct drm_device *dev)
452{ 423{
453 struct drm_i915_private *dev_priv = dev->dev_private; 424 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -457,16 +428,14 @@ void intel_opregion_init(struct drm_device *dev)
457 return; 428 return;
458 429
459 if (opregion->acpi) { 430 if (opregion->acpi) {
460 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 431 if (drm_core_check_feature(dev, DRIVER_MODESET))
461 intel_didl_outputs(dev); 432 intel_didl_outputs(dev);
462 intel_setup_cadls(dev);
463 }
464 433
465 /* Notify BIOS we are ready to handle ACPI video ext notifs. 434 /* Notify BIOS we are ready to handle ACPI video ext notifs.
466 * Right now, all the events are handled by the ACPI video module. 435 * Right now, all the events are handled by the ACPI video module.
467 * We don't actually need to do anything with them. */ 436 * We don't actually need to do anything with them. */
468 iowrite32(0, &opregion->acpi->csts); 437 opregion->acpi->csts = 0;
469 iowrite32(1, &opregion->acpi->drdy); 438 opregion->acpi->drdy = 1;
470 439
471 system_opregion = opregion; 440 system_opregion = opregion;
472 register_acpi_notifier(&intel_opregion_notifier); 441 register_acpi_notifier(&intel_opregion_notifier);
@@ -485,7 +454,7 @@ void intel_opregion_fini(struct drm_device *dev)
485 return; 454 return;
486 455
487 if (opregion->acpi) { 456 if (opregion->acpi) {
488 iowrite32(0, &opregion->acpi->drdy); 457 opregion->acpi->drdy = 0;
489 458
490 system_opregion = NULL; 459 system_opregion = NULL;
491 unregister_acpi_notifier(&intel_opregion_notifier); 460 unregister_acpi_notifier(&intel_opregion_notifier);
@@ -505,9 +474,8 @@ int intel_opregion_setup(struct drm_device *dev)
505{ 474{
506 struct drm_i915_private *dev_priv = dev->dev_private; 475 struct drm_i915_private *dev_priv = dev->dev_private;
507 struct intel_opregion *opregion = &dev_priv->opregion; 476 struct intel_opregion *opregion = &dev_priv->opregion;
508 void __iomem *base; 477 void *base;
509 u32 asls, mboxes; 478 u32 asls, mboxes;
510 char buf[sizeof(OPREGION_SIGNATURE)];
511 int err = 0; 479 int err = 0;
512 480
513 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 481 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -521,9 +489,7 @@ int intel_opregion_setup(struct drm_device *dev)
521 if (!base) 489 if (!base)
522 return -ENOMEM; 490 return -ENOMEM;
523 491
524 memcpy_fromio(buf, base, sizeof(buf)); 492 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
525
526 if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
527 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 493 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
528 err = -EINVAL; 494 err = -EINVAL;
529 goto err_out; 495 goto err_out;
@@ -533,7 +499,7 @@ int intel_opregion_setup(struct drm_device *dev)
533 499
534 opregion->lid_state = base + ACPI_CLID; 500 opregion->lid_state = base + ACPI_CLID;
535 501
536 mboxes = ioread32(&opregion->header->mboxes); 502 mboxes = opregion->header->mboxes;
537 if (mboxes & MBOX_ACPI) { 503 if (mboxes & MBOX_ACPI) {
538 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 504 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
539 opregion->acpi = base + OPREGION_ACPI_OFFSET; 505 opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a..d3603808682 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,8 +25,11 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28#include <drm/drmP.h> 28
29#include <drm/i915_drm.h> 29#include <linux/seq_file.h>
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
30#include "i915_drv.h" 33#include "i915_drv.h"
31#include "i915_reg.h" 34#include "i915_reg.h"
32#include "intel_drv.h" 35#include "intel_drv.h"
@@ -114,57 +117,57 @@
114 117
115/* memory bufferd overlay registers */ 118/* memory bufferd overlay registers */
116struct overlay_registers { 119struct overlay_registers {
117 u32 OBUF_0Y; 120 u32 OBUF_0Y;
118 u32 OBUF_1Y; 121 u32 OBUF_1Y;
119 u32 OBUF_0U; 122 u32 OBUF_0U;
120 u32 OBUF_0V; 123 u32 OBUF_0V;
121 u32 OBUF_1U; 124 u32 OBUF_1U;
122 u32 OBUF_1V; 125 u32 OBUF_1V;
123 u32 OSTRIDE; 126 u32 OSTRIDE;
124 u32 YRGB_VPH; 127 u32 YRGB_VPH;
125 u32 UV_VPH; 128 u32 UV_VPH;
126 u32 HORZ_PH; 129 u32 HORZ_PH;
127 u32 INIT_PHS; 130 u32 INIT_PHS;
128 u32 DWINPOS; 131 u32 DWINPOS;
129 u32 DWINSZ; 132 u32 DWINSZ;
130 u32 SWIDTH; 133 u32 SWIDTH;
131 u32 SWIDTHSW; 134 u32 SWIDTHSW;
132 u32 SHEIGHT; 135 u32 SHEIGHT;
133 u32 YRGBSCALE; 136 u32 YRGBSCALE;
134 u32 UVSCALE; 137 u32 UVSCALE;
135 u32 OCLRC0; 138 u32 OCLRC0;
136 u32 OCLRC1; 139 u32 OCLRC1;
137 u32 DCLRKV; 140 u32 DCLRKV;
138 u32 DCLRKM; 141 u32 DCLRKM;
139 u32 SCLRKVH; 142 u32 SCLRKVH;
140 u32 SCLRKVL; 143 u32 SCLRKVL;
141 u32 SCLRKEN; 144 u32 SCLRKEN;
142 u32 OCONFIG; 145 u32 OCONFIG;
143 u32 OCMD; 146 u32 OCMD;
144 u32 RESERVED1; /* 0x6C */ 147 u32 RESERVED1; /* 0x6C */
145 u32 OSTART_0Y; 148 u32 OSTART_0Y;
146 u32 OSTART_1Y; 149 u32 OSTART_1Y;
147 u32 OSTART_0U; 150 u32 OSTART_0U;
148 u32 OSTART_0V; 151 u32 OSTART_0V;
149 u32 OSTART_1U; 152 u32 OSTART_1U;
150 u32 OSTART_1V; 153 u32 OSTART_1V;
151 u32 OTILEOFF_0Y; 154 u32 OTILEOFF_0Y;
152 u32 OTILEOFF_1Y; 155 u32 OTILEOFF_1Y;
153 u32 OTILEOFF_0U; 156 u32 OTILEOFF_0U;
154 u32 OTILEOFF_0V; 157 u32 OTILEOFF_0V;
155 u32 OTILEOFF_1U; 158 u32 OTILEOFF_1U;
156 u32 OTILEOFF_1V; 159 u32 OTILEOFF_1V;
157 u32 FASTHSCALE; /* 0xA0 */ 160 u32 FASTHSCALE; /* 0xA0 */
158 u32 UVSCALEV; /* 0xA4 */ 161 u32 UVSCALEV; /* 0xA4 */
159 u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ 162 u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
160 u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ 163 u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
161 u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; 164 u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
162 u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ 165 u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
163 u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; 166 u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
164 u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ 167 u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
165 u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; 168 u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
166 u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ 169 u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
167 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; 170 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
168}; 171};
169 172
170struct intel_overlay { 173struct intel_overlay {
@@ -186,14 +189,14 @@ struct intel_overlay {
186 void (*flip_tail)(struct intel_overlay *); 189 void (*flip_tail)(struct intel_overlay *);
187}; 190};
188 191
189static struct overlay_registers __iomem * 192static struct overlay_registers *
190intel_overlay_map_regs(struct intel_overlay *overlay) 193intel_overlay_map_regs(struct intel_overlay *overlay)
191{ 194{
192 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 195 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
193 struct overlay_registers __iomem *regs; 196 struct overlay_registers *regs;
194 197
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 198 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 199 regs = overlay->reg_bo->phys_obj->handle->vaddr;
197 else 200 else
198 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, 201 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
199 overlay->reg_bo->gtt_offset); 202 overlay->reg_bo->gtt_offset);
@@ -202,59 +205,126 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
202} 205}
203 206
204static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 207static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
205 struct overlay_registers __iomem *regs) 208 struct overlay_registers *regs)
206{ 209{
207 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 210 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
208 io_mapping_unmap(regs); 211 io_mapping_unmap(regs);
209} 212}
210 213
211static int intel_overlay_do_wait_request(struct intel_overlay *overlay, 214static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215 struct drm_i915_gem_request *request,
212 void (*tail)(struct intel_overlay *)) 216 void (*tail)(struct intel_overlay *))
213{ 217{
214 struct drm_device *dev = overlay->dev; 218 struct drm_device *dev = overlay->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private; 219 drm_i915_private_t *dev_priv = dev->dev_private;
216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
217 int ret; 220 int ret;
218 221
219 BUG_ON(overlay->last_flip_req); 222 BUG_ON(overlay->last_flip_req);
220 ret = i915_add_request(ring, NULL, &overlay->last_flip_req); 223 ret = i915_add_request(LP_RING(dev_priv), NULL, request);
221 if (ret) 224 if (ret) {
222 return ret; 225 kfree(request);
223 226 return ret;
227 }
228 overlay->last_flip_req = request->seqno;
224 overlay->flip_tail = tail; 229 overlay->flip_tail = tail;
225 ret = i915_wait_seqno(ring, overlay->last_flip_req); 230 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
226 if (ret) 231 if (ret)
227 return ret; 232 return ret;
228 i915_gem_retire_requests(dev);
229 233
230 overlay->last_flip_req = 0; 234 overlay->last_flip_req = 0;
231 return 0; 235 return 0;
232} 236}
233 237
238/* Workaround for i830 bug where pipe a must be enable to change control regs */
239static int
240i830_activate_pipe_a(struct drm_device *dev)
241{
242 drm_i915_private_t *dev_priv = dev->dev_private;
243 struct intel_crtc *crtc;
244 struct drm_crtc_helper_funcs *crtc_funcs;
245 struct drm_display_mode vesa_640x480 = {
246 DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
247 752, 800, 0, 480, 489, 492, 525, 0,
248 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
249 }, *mode;
250
251 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
252 if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
253 return 0;
254
255 /* most i8xx have pipe a forced on, so don't trust dpms mode */
256 if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
257 return 0;
258
259 crtc_funcs = crtc->base.helper_private;
260 if (crtc_funcs->dpms == NULL)
261 return 0;
262
263 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
264
265 mode = drm_mode_duplicate(dev, &vesa_640x480);
266 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
267 if(!drm_crtc_helper_set_mode(&crtc->base, mode,
268 crtc->base.x, crtc->base.y,
269 crtc->base.fb))
270 return 0;
271
272 crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
273 return 1;
274}
275
276static void
277i830_deactivate_pipe_a(struct drm_device *dev)
278{
279 drm_i915_private_t *dev_priv = dev->dev_private;
280 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
281 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
282
283 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
284}
285
234/* overlay needs to be disable in OCMD reg */ 286/* overlay needs to be disable in OCMD reg */
235static int intel_overlay_on(struct intel_overlay *overlay) 287static int intel_overlay_on(struct intel_overlay *overlay)
236{ 288{
237 struct drm_device *dev = overlay->dev; 289 struct drm_device *dev = overlay->dev;
238 struct drm_i915_private *dev_priv = dev->dev_private; 290 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 291 struct drm_i915_gem_request *request;
292 int pipe_a_quirk = 0;
240 int ret; 293 int ret;
241 294
242 BUG_ON(overlay->active); 295 BUG_ON(overlay->active);
243 overlay->active = 1; 296 overlay->active = 1;
244 297
245 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 298 if (IS_I830(dev)) {
299 pipe_a_quirk = i830_activate_pipe_a(dev);
300 if (pipe_a_quirk < 0)
301 return pipe_a_quirk;
302 }
246 303
247 ret = intel_ring_begin(ring, 4); 304 request = kzalloc(sizeof(*request), GFP_KERNEL);
248 if (ret) 305 if (request == NULL) {
249 return ret; 306 ret = -ENOMEM;
307 goto out;
308 }
250 309
251 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); 310 ret = BEGIN_LP_RING(4);
252 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); 311 if (ret) {
253 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 312 kfree(request);
254 intel_ring_emit(ring, MI_NOOP); 313 goto out;
255 intel_ring_advance(ring); 314 }
256 315
257 return intel_overlay_do_wait_request(overlay, NULL); 316 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
317 OUT_RING(overlay->flip_addr | OFC_UPDATE);
318 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
319 OUT_RING(MI_NOOP);
320 ADVANCE_LP_RING();
321
322 ret = intel_overlay_do_wait_request(overlay, request, NULL);
323out:
324 if (pipe_a_quirk)
325 i830_deactivate_pipe_a(dev);
326
327 return ret;
258} 328}
259 329
260/* overlay needs to be enabled in OCMD reg */ 330/* overlay needs to be enabled in OCMD reg */
@@ -262,14 +332,18 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
262 bool load_polyphase_filter) 332 bool load_polyphase_filter)
263{ 333{
264 struct drm_device *dev = overlay->dev; 334 struct drm_device *dev = overlay->dev;
265 drm_i915_private_t *dev_priv = dev->dev_private; 335 drm_i915_private_t *dev_priv = dev->dev_private;
266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 336 struct drm_i915_gem_request *request;
267 u32 flip_addr = overlay->flip_addr; 337 u32 flip_addr = overlay->flip_addr;
268 u32 tmp; 338 u32 tmp;
269 int ret; 339 int ret;
270 340
271 BUG_ON(!overlay->active); 341 BUG_ON(!overlay->active);
272 342
343 request = kzalloc(sizeof(*request), GFP_KERNEL);
344 if (request == NULL)
345 return -ENOMEM;
346
273 if (load_polyphase_filter) 347 if (load_polyphase_filter)
274 flip_addr |= OFC_UPDATE; 348 flip_addr |= OFC_UPDATE;
275 349
@@ -278,15 +352,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
278 if (tmp & (1 << 17)) 352 if (tmp & (1 << 17))
279 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 353 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
280 354
281 ret = intel_ring_begin(ring, 2); 355 ret = BEGIN_LP_RING(2);
282 if (ret) 356 if (ret) {
357 kfree(request);
283 return ret; 358 return ret;
359 }
360 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
361 OUT_RING(flip_addr);
362 ADVANCE_LP_RING();
284 363
285 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 364 ret = i915_add_request(LP_RING(dev_priv), NULL, request);
286 intel_ring_emit(ring, flip_addr); 365 if (ret) {
287 intel_ring_advance(ring); 366 kfree(request);
367 return ret;
368 }
288 369
289 return i915_add_request(ring, NULL, &overlay->last_flip_req); 370 overlay->last_flip_req = request->seqno;
371 return 0;
290} 372}
291 373
292static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) 374static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -320,41 +402,39 @@ static int intel_overlay_off(struct intel_overlay *overlay)
320{ 402{
321 struct drm_device *dev = overlay->dev; 403 struct drm_device *dev = overlay->dev;
322 struct drm_i915_private *dev_priv = dev->dev_private; 404 struct drm_i915_private *dev_priv = dev->dev_private;
323 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
324 u32 flip_addr = overlay->flip_addr; 405 u32 flip_addr = overlay->flip_addr;
406 struct drm_i915_gem_request *request;
325 int ret; 407 int ret;
326 408
327 BUG_ON(!overlay->active); 409 BUG_ON(!overlay->active);
328 410
411 request = kzalloc(sizeof(*request), GFP_KERNEL);
412 if (request == NULL)
413 return -ENOMEM;
414
329 /* According to intel docs the overlay hw may hang (when switching 415 /* According to intel docs the overlay hw may hang (when switching
330 * off) without loading the filter coeffs. It is however unclear whether 416 * off) without loading the filter coeffs. It is however unclear whether
331 * this applies to the disabling of the overlay or to the switching off 417 * this applies to the disabling of the overlay or to the switching off
332 * of the hw. Do it in both cases */ 418 * of the hw. Do it in both cases */
333 flip_addr |= OFC_UPDATE; 419 flip_addr |= OFC_UPDATE;
334 420
335 ret = intel_ring_begin(ring, 6); 421 ret = BEGIN_LP_RING(6);
336 if (ret) 422 if (ret) {
423 kfree(request);
337 return ret; 424 return ret;
338 425 }
339 /* wait for overlay to go idle */ 426 /* wait for overlay to go idle */
340 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 427 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
341 intel_ring_emit(ring, flip_addr); 428 OUT_RING(flip_addr);
342 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 429 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
343 /* turn overlay off */ 430 /* turn overlay off */
344 if (IS_I830(dev)) { 431 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
345 /* Workaround: Don't disable the overlay fully, since otherwise 432 OUT_RING(flip_addr);
346 * it dies on the next OVERLAY_ON cmd. */ 433 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
347 intel_ring_emit(ring, MI_NOOP); 434 ADVANCE_LP_RING();
348 intel_ring_emit(ring, MI_NOOP);
349 intel_ring_emit(ring, MI_NOOP);
350 } else {
351 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
352 intel_ring_emit(ring, flip_addr);
353 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
354 }
355 intel_ring_advance(ring);
356 435
357 return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail); 436 return intel_overlay_do_wait_request(overlay, request,
437 intel_overlay_off_tail);
358} 438}
359 439
360/* recover from an interruption due to a signal 440/* recover from an interruption due to a signal
@@ -363,16 +443,14 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
363{ 443{
364 struct drm_device *dev = overlay->dev; 444 struct drm_device *dev = overlay->dev;
365 drm_i915_private_t *dev_priv = dev->dev_private; 445 drm_i915_private_t *dev_priv = dev->dev_private;
366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
367 int ret; 446 int ret;
368 447
369 if (overlay->last_flip_req == 0) 448 if (overlay->last_flip_req == 0)
370 return 0; 449 return 0;
371 450
372 ret = i915_wait_seqno(ring, overlay->last_flip_req); 451 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
373 if (ret) 452 if (ret)
374 return ret; 453 return ret;
375 i915_gem_retire_requests(dev);
376 454
377 if (overlay->flip_tail) 455 if (overlay->flip_tail)
378 overlay->flip_tail(overlay); 456 overlay->flip_tail(overlay);
@@ -389,7 +467,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
389{ 467{
390 struct drm_device *dev = overlay->dev; 468 struct drm_device *dev = overlay->dev;
391 drm_i915_private_t *dev_priv = dev->dev_private; 469 drm_i915_private_t *dev_priv = dev->dev_private;
392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
393 int ret; 470 int ret;
394 471
395 /* Only wait if there is actually an old frame to release to 472 /* Only wait if there is actually an old frame to release to
@@ -399,16 +476,24 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
399 return 0; 476 return 0;
400 477
401 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { 478 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
479 struct drm_i915_gem_request *request;
480
402 /* synchronous slowpath */ 481 /* synchronous slowpath */
403 ret = intel_ring_begin(ring, 2); 482 request = kzalloc(sizeof(*request), GFP_KERNEL);
404 if (ret) 483 if (request == NULL)
484 return -ENOMEM;
485
486 ret = BEGIN_LP_RING(2);
487 if (ret) {
488 kfree(request);
405 return ret; 489 return ret;
490 }
406 491
407 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 492 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
408 intel_ring_emit(ring, MI_NOOP); 493 OUT_RING(MI_NOOP);
409 intel_ring_advance(ring); 494 ADVANCE_LP_RING();
410 495
411 ret = intel_overlay_do_wait_request(overlay, 496 ret = intel_overlay_do_wait_request(overlay, request,
412 intel_overlay_release_old_vid_tail); 497 intel_overlay_release_old_vid_tail);
413 if (ret) 498 if (ret)
414 return ret; 499 return ret;
@@ -498,7 +583,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
498 ret = ((offset + width + mask) >> shift) - (offset >> shift); 583 ret = ((offset + width + mask) >> shift) - (offset >> shift);
499 if (!IS_GEN2(dev)) 584 if (!IS_GEN2(dev))
500 ret <<= 1; 585 ret <<= 1;
501 ret -= 1; 586 ret -=1;
502 return ret << 2; 587 return ret << 2;
503} 588}
504 589
@@ -534,15 +619,14 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
534 0x3000, 0x0800, 0x3000 619 0x3000, 0x0800, 0x3000
535}; 620};
536 621
537static void update_polyphase_filter(struct overlay_registers __iomem *regs) 622static void update_polyphase_filter(struct overlay_registers *regs)
538{ 623{
539 memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); 624 memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
540 memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs, 625 memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
541 sizeof(uv_static_hcoeffs));
542} 626}
543 627
544static bool update_scaling_factors(struct intel_overlay *overlay, 628static bool update_scaling_factors(struct intel_overlay *overlay,
545 struct overlay_registers __iomem *regs, 629 struct overlay_registers *regs,
546 struct put_image_params *params) 630 struct put_image_params *params)
547{ 631{
548 /* fixed point with a 12 bit shift */ 632 /* fixed point with a 12 bit shift */
@@ -581,19 +665,16 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
581 overlay->old_xscale = xscale; 665 overlay->old_xscale = xscale;
582 overlay->old_yscale = yscale; 666 overlay->old_yscale = yscale;
583 667
584 iowrite32(((yscale & FRACT_MASK) << 20) | 668 regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
585 ((xscale >> FP_SHIFT) << 16) | 669 ((xscale >> FP_SHIFT) << 16) |
586 ((xscale & FRACT_MASK) << 3), 670 ((xscale & FRACT_MASK) << 3));
587 &regs->YRGBSCALE);
588 671
589 iowrite32(((yscale_UV & FRACT_MASK) << 20) | 672 regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
590 ((xscale_UV >> FP_SHIFT) << 16) | 673 ((xscale_UV >> FP_SHIFT) << 16) |
591 ((xscale_UV & FRACT_MASK) << 3), 674 ((xscale_UV & FRACT_MASK) << 3));
592 &regs->UVSCALE);
593 675
594 iowrite32((((yscale >> FP_SHIFT) << 16) | 676 regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
595 ((yscale_UV >> FP_SHIFT) << 0)), 677 ((yscale_UV >> FP_SHIFT) << 0)));
596 &regs->UVSCALEV);
597 678
598 if (scale_changed) 679 if (scale_changed)
599 update_polyphase_filter(regs); 680 update_polyphase_filter(regs);
@@ -602,32 +683,30 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
602} 683}
603 684
604static void update_colorkey(struct intel_overlay *overlay, 685static void update_colorkey(struct intel_overlay *overlay,
605 struct overlay_registers __iomem *regs) 686 struct overlay_registers *regs)
606{ 687{
607 u32 key = overlay->color_key; 688 u32 key = overlay->color_key;
608 689
609 switch (overlay->crtc->base.fb->bits_per_pixel) { 690 switch (overlay->crtc->base.fb->bits_per_pixel) {
610 case 8: 691 case 8:
611 iowrite32(0, &regs->DCLRKV); 692 regs->DCLRKV = 0;
612 iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM); 693 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
613 break; 694 break;
614 695
615 case 16: 696 case 16:
616 if (overlay->crtc->base.fb->depth == 15) { 697 if (overlay->crtc->base.fb->depth == 15) {
617 iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV); 698 regs->DCLRKV = RGB15_TO_COLORKEY(key);
618 iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE, 699 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
619 &regs->DCLRKM);
620 } else { 700 } else {
621 iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV); 701 regs->DCLRKV = RGB16_TO_COLORKEY(key);
622 iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE, 702 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
623 &regs->DCLRKM);
624 } 703 }
625 break; 704 break;
626 705
627 case 24: 706 case 24:
628 case 32: 707 case 32:
629 iowrite32(key, &regs->DCLRKV); 708 regs->DCLRKV = key;
630 iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM); 709 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
631 break; 710 break;
632 } 711 }
633} 712}
@@ -682,10 +761,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
682 struct put_image_params *params) 761 struct put_image_params *params)
683{ 762{
684 int ret, tmp_width; 763 int ret, tmp_width;
685 struct overlay_registers __iomem *regs; 764 struct overlay_registers *regs;
686 bool scale_changed = false; 765 bool scale_changed = false;
687 struct drm_device *dev = overlay->dev; 766 struct drm_device *dev = overlay->dev;
688 u32 swidth, swidthsw, sheight, ostride;
689 767
690 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 768 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
691 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 769 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -704,18 +782,16 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
704 goto out_unpin; 782 goto out_unpin;
705 783
706 if (!overlay->active) { 784 if (!overlay->active) {
707 u32 oconfig;
708 regs = intel_overlay_map_regs(overlay); 785 regs = intel_overlay_map_regs(overlay);
709 if (!regs) { 786 if (!regs) {
710 ret = -ENOMEM; 787 ret = -ENOMEM;
711 goto out_unpin; 788 goto out_unpin;
712 } 789 }
713 oconfig = OCONF_CC_OUT_8BIT; 790 regs->OCONFIG = OCONF_CC_OUT_8BIT;
714 if (IS_GEN4(overlay->dev)) 791 if (IS_GEN4(overlay->dev))
715 oconfig |= OCONF_CSC_MODE_BT709; 792 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
716 oconfig |= overlay->crtc->pipe == 0 ? 793 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
717 OCONF_PIPE_A : OCONF_PIPE_B; 794 OCONF_PIPE_A : OCONF_PIPE_B;
718 iowrite32(oconfig, &regs->OCONFIG);
719 intel_overlay_unmap_regs(overlay, regs); 795 intel_overlay_unmap_regs(overlay, regs);
720 796
721 ret = intel_overlay_on(overlay); 797 ret = intel_overlay_on(overlay);
@@ -729,46 +805,42 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
729 goto out_unpin; 805 goto out_unpin;
730 } 806 }
731 807
732 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS); 808 regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
733 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ); 809 regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
734 810
735 if (params->format & I915_OVERLAY_YUV_PACKED) 811 if (params->format & I915_OVERLAY_YUV_PACKED)
736 tmp_width = packed_width_bytes(params->format, params->src_w); 812 tmp_width = packed_width_bytes(params->format, params->src_w);
737 else 813 else
738 tmp_width = params->src_w; 814 tmp_width = params->src_w;
739 815
740 swidth = params->src_w; 816 regs->SWIDTH = params->src_w;
741 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 817 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
742 sheight = params->src_h; 818 params->offset_Y, tmp_width);
743 iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y); 819 regs->SHEIGHT = params->src_h;
744 ostride = params->stride_Y; 820 regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
821 regs->OSTRIDE = params->stride_Y;
745 822
746 if (params->format & I915_OVERLAY_YUV_PLANAR) { 823 if (params->format & I915_OVERLAY_YUV_PLANAR) {
747 int uv_hscale = uv_hsubsampling(params->format); 824 int uv_hscale = uv_hsubsampling(params->format);
748 int uv_vscale = uv_vsubsampling(params->format); 825 int uv_vscale = uv_vsubsampling(params->format);
749 u32 tmp_U, tmp_V; 826 u32 tmp_U, tmp_V;
750 swidth |= (params->src_w/uv_hscale) << 16; 827 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
751 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 828 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
752 params->src_w/uv_hscale); 829 params->src_w/uv_hscale);
753 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 830 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
754 params->src_w/uv_hscale); 831 params->src_w/uv_hscale);
755 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 832 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
756 sheight |= (params->src_h/uv_vscale) << 16; 833 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
757 iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U); 834 regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
758 iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V); 835 regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
759 ostride |= params->stride_UV << 16; 836 regs->OSTRIDE |= params->stride_UV << 16;
760 } 837 }
761 838
762 iowrite32(swidth, &regs->SWIDTH);
763 iowrite32(swidthsw, &regs->SWIDTHSW);
764 iowrite32(sheight, &regs->SHEIGHT);
765 iowrite32(ostride, &regs->OSTRIDE);
766
767 scale_changed = update_scaling_factors(overlay, regs, params); 839 scale_changed = update_scaling_factors(overlay, regs, params);
768 840
769 update_colorkey(overlay, regs); 841 update_colorkey(overlay, regs);
770 842
771 iowrite32(overlay_cmd_reg(params), &regs->OCMD); 843 regs->OCMD = overlay_cmd_reg(params);
772 844
773 intel_overlay_unmap_regs(overlay, regs); 845 intel_overlay_unmap_regs(overlay, regs);
774 846
@@ -788,7 +860,7 @@ out_unpin:
788 860
789int intel_overlay_switch_off(struct intel_overlay *overlay) 861int intel_overlay_switch_off(struct intel_overlay *overlay)
790{ 862{
791 struct overlay_registers __iomem *regs; 863 struct overlay_registers *regs;
792 struct drm_device *dev = overlay->dev; 864 struct drm_device *dev = overlay->dev;
793 int ret; 865 int ret;
794 866
@@ -807,7 +879,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
807 return ret; 879 return ret;
808 880
809 regs = intel_overlay_map_regs(overlay); 881 regs = intel_overlay_map_regs(overlay);
810 iowrite32(0, &regs->OCMD); 882 regs->OCMD = 0;
811 intel_overlay_unmap_regs(overlay, regs); 883 intel_overlay_unmap_regs(overlay, regs);
812 884
813 ret = intel_overlay_off(overlay); 885 ret = intel_overlay_off(overlay);
@@ -845,7 +917,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
845 * line with the intel documentation for the i965 917 * line with the intel documentation for the i965
846 */ 918 */
847 if (INTEL_INFO(dev)->gen >= 4) { 919 if (INTEL_INFO(dev)->gen >= 4) {
848 /* on i965 use the PGM reg to read out the autoscaler values */ 920 /* on i965 use the PGM reg to read out the autoscaler values */
849 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; 921 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
850 } else { 922 } else {
851 if (pfit_control & VERT_AUTO_SCALE) 923 if (pfit_control & VERT_AUTO_SCALE)
@@ -863,10 +935,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
863{ 935{
864 struct drm_display_mode *mode = &overlay->crtc->base.mode; 936 struct drm_display_mode *mode = &overlay->crtc->base.mode;
865 937
866 if (rec->dst_x < mode->hdisplay && 938 if (rec->dst_x < mode->crtc_hdisplay &&
867 rec->dst_x + rec->dst_width <= mode->hdisplay && 939 rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
868 rec->dst_y < mode->vdisplay && 940 rec->dst_y < mode->crtc_vdisplay &&
869 rec->dst_y + rec->dst_height <= mode->vdisplay) 941 rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
870 return 0; 942 return 0;
871 else 943 else
872 return -EINVAL; 944 return -EINVAL;
@@ -1026,7 +1098,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1026} 1098}
1027 1099
1028int intel_overlay_put_image(struct drm_device *dev, void *data, 1100int intel_overlay_put_image(struct drm_device *dev, void *data,
1029 struct drm_file *file_priv) 1101 struct drm_file *file_priv)
1030{ 1102{
1031 struct drm_intel_overlay_put_image *put_image_rec = data; 1103 struct drm_intel_overlay_put_image *put_image_rec = data;
1032 drm_i915_private_t *dev_priv = dev->dev_private; 1104 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1037,7 +1109,11 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1037 struct put_image_params *params; 1109 struct put_image_params *params;
1038 int ret; 1110 int ret;
1039 1111
1040 /* No need to check for DRIVER_MODESET - we don't set it up then. */ 1112 if (!dev_priv) {
1113 DRM_ERROR("called with no initialization\n");
1114 return -EINVAL;
1115 }
1116
1041 overlay = dev_priv->overlay; 1117 overlay = dev_priv->overlay;
1042 if (!overlay) { 1118 if (!overlay) {
1043 DRM_DEBUG("userspace bug: no overlay\n"); 1119 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1174,11 +1250,10 @@ out_free:
1174} 1250}
1175 1251
1176static void update_reg_attrs(struct intel_overlay *overlay, 1252static void update_reg_attrs(struct intel_overlay *overlay,
1177 struct overlay_registers __iomem *regs) 1253 struct overlay_registers *regs)
1178{ 1254{
1179 iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff), 1255 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
1180 &regs->OCLRC0); 1256 regs->OCLRC1 = overlay->saturation;
1181 iowrite32(overlay->saturation, &regs->OCLRC1);
1182} 1257}
1183 1258
1184static bool check_gamma_bounds(u32 gamma1, u32 gamma2) 1259static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1226,15 +1301,19 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1226} 1301}
1227 1302
1228int intel_overlay_attrs(struct drm_device *dev, void *data, 1303int intel_overlay_attrs(struct drm_device *dev, void *data,
1229 struct drm_file *file_priv) 1304 struct drm_file *file_priv)
1230{ 1305{
1231 struct drm_intel_overlay_attrs *attrs = data; 1306 struct drm_intel_overlay_attrs *attrs = data;
1232 drm_i915_private_t *dev_priv = dev->dev_private; 1307 drm_i915_private_t *dev_priv = dev->dev_private;
1233 struct intel_overlay *overlay; 1308 struct intel_overlay *overlay;
1234 struct overlay_registers __iomem *regs; 1309 struct overlay_registers *regs;
1235 int ret; 1310 int ret;
1236 1311
1237 /* No need to check for DRIVER_MODESET - we don't set it up then. */ 1312 if (!dev_priv) {
1313 DRM_ERROR("called with no initialization\n");
1314 return -EINVAL;
1315 }
1316
1238 overlay = dev_priv->overlay; 1317 overlay = dev_priv->overlay;
1239 if (!overlay) { 1318 if (!overlay) {
1240 DRM_DEBUG("userspace bug: no overlay\n"); 1319 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1314,10 +1393,10 @@ out_unlock:
1314 1393
1315void intel_setup_overlay(struct drm_device *dev) 1394void intel_setup_overlay(struct drm_device *dev)
1316{ 1395{
1317 drm_i915_private_t *dev_priv = dev->dev_private; 1396 drm_i915_private_t *dev_priv = dev->dev_private;
1318 struct intel_overlay *overlay; 1397 struct intel_overlay *overlay;
1319 struct drm_i915_gem_object *reg_bo; 1398 struct drm_i915_gem_object *reg_bo;
1320 struct overlay_registers __iomem *regs; 1399 struct overlay_registers *regs;
1321 int ret; 1400 int ret;
1322 1401
1323 if (!HAS_OVERLAY(dev)) 1402 if (!HAS_OVERLAY(dev))
@@ -1342,24 +1421,24 @@ void intel_setup_overlay(struct drm_device *dev)
1342 ret = i915_gem_attach_phys_object(dev, reg_bo, 1421 ret = i915_gem_attach_phys_object(dev, reg_bo,
1343 I915_GEM_PHYS_OVERLAY_REGS, 1422 I915_GEM_PHYS_OVERLAY_REGS,
1344 PAGE_SIZE); 1423 PAGE_SIZE);
1345 if (ret) { 1424 if (ret) {
1346 DRM_ERROR("failed to attach phys overlay regs\n"); 1425 DRM_ERROR("failed to attach phys overlay regs\n");
1347 goto out_free_bo; 1426 goto out_free_bo;
1348 } 1427 }
1349 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1428 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
1350 } else { 1429 } else {
1351 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); 1430 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
1352 if (ret) { 1431 if (ret) {
1353 DRM_ERROR("failed to pin overlay register bo\n"); 1432 DRM_ERROR("failed to pin overlay register bo\n");
1354 goto out_free_bo; 1433 goto out_free_bo;
1355 } 1434 }
1356 overlay->flip_addr = reg_bo->gtt_offset; 1435 overlay->flip_addr = reg_bo->gtt_offset;
1357 1436
1358 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); 1437 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1359 if (ret) { 1438 if (ret) {
1360 DRM_ERROR("failed to move overlay register bo into the GTT\n"); 1439 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1361 goto out_unpin_bo; 1440 goto out_unpin_bo;
1362 } 1441 }
1363 } 1442 }
1364 1443
1365 /* init all values */ 1444 /* init all values */
@@ -1372,7 +1451,7 @@ void intel_setup_overlay(struct drm_device *dev)
1372 if (!regs) 1451 if (!regs)
1373 goto out_unpin_bo; 1452 goto out_unpin_bo;
1374 1453
1375 memset_io(regs, 0, sizeof(struct overlay_registers)); 1454 memset(regs, 0, sizeof(struct overlay_registers));
1376 update_polyphase_filter(regs); 1455 update_polyphase_filter(regs);
1377 update_reg_attrs(overlay, regs); 1456 update_reg_attrs(overlay, regs);
1378 1457
@@ -1420,17 +1499,14 @@ struct intel_overlay_error_state {
1420 u32 isr; 1499 u32 isr;
1421}; 1500};
1422 1501
1423static struct overlay_registers __iomem * 1502static struct overlay_registers *
1424intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1503intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1425{ 1504{
1426 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 1505 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
1427 struct overlay_registers __iomem *regs; 1506 struct overlay_registers *regs;
1428 1507
1429 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1508 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1430 /* Cast to make sparse happy, but it's wc memory anyway, so 1509 regs = overlay->reg_bo->phys_obj->handle->vaddr;
1431 * equivalent to the wc io mapping on X86. */
1432 regs = (struct overlay_registers __iomem *)
1433 overlay->reg_bo->phys_obj->handle->vaddr;
1434 else 1510 else
1435 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1511 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1436 overlay->reg_bo->gtt_offset); 1512 overlay->reg_bo->gtt_offset);
@@ -1439,7 +1515,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1439} 1515}
1440 1516
1441static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1517static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1442 struct overlay_registers __iomem *regs) 1518 struct overlay_registers *regs)
1443{ 1519{
1444 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1520 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1445 io_mapping_unmap_atomic(regs); 1521 io_mapping_unmap_atomic(regs);
@@ -1449,7 +1525,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1449struct intel_overlay_error_state * 1525struct intel_overlay_error_state *
1450intel_overlay_capture_error_state(struct drm_device *dev) 1526intel_overlay_capture_error_state(struct drm_device *dev)
1451{ 1527{
1452 drm_i915_private_t *dev_priv = dev->dev_private; 1528 drm_i915_private_t *dev_priv = dev->dev_private;
1453 struct intel_overlay *overlay = dev_priv->overlay; 1529 struct intel_overlay *overlay = dev_priv->overlay;
1454 struct intel_overlay_error_state *error; 1530 struct intel_overlay_error_state *error;
1455 struct overlay_registers __iomem *regs; 1531 struct overlay_registers __iomem *regs;
@@ -1464,9 +1540,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1464 error->dovsta = I915_READ(DOVSTA); 1540 error->dovsta = I915_READ(DOVSTA);
1465 error->isr = I915_READ(ISR); 1541 error->isr = I915_READ(ISR);
1466 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1542 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1467 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1543 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
1468 else 1544 else
1469 error->base = overlay->reg_bo->gtt_offset; 1545 error->base = (long) overlay->reg_bo->gtt_offset;
1470 1546
1471 regs = intel_overlay_map_regs_atomic(overlay); 1547 regs = intel_overlay_map_regs_atomic(overlay);
1472 if (!regs) 1548 if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a..007f6ca309d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,9 +28,6 @@
28 * Chris Wilson <chris@chris-wilson.co.uk> 28 * Chris Wilson <chris@chris-wilson.co.uk>
29 */ 29 */
30 30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33#include <linux/moduleparam.h>
34#include "intel_drv.h" 31#include "intel_drv.h"
35 32
36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -50,13 +47,15 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
50 adjusted_mode->vtotal = fixed_mode->vtotal; 47 adjusted_mode->vtotal = fixed_mode->vtotal;
51 48
52 adjusted_mode->clock = fixed_mode->clock; 49 adjusted_mode->clock = fixed_mode->clock;
50
51 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
53} 52}
54 53
55/* adjusted_mode has been preset to be the panel's fixed mode */ 54/* adjusted_mode has been preset to be the panel's fixed mode */
56void 55void
57intel_pch_panel_fitting(struct drm_device *dev, 56intel_pch_panel_fitting(struct drm_device *dev,
58 int fitting_mode, 57 int fitting_mode,
59 const struct drm_display_mode *mode, 58 struct drm_display_mode *mode,
60 struct drm_display_mode *adjusted_mode) 59 struct drm_display_mode *adjusted_mode)
61{ 60{
62 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -85,7 +84,7 @@ intel_pch_panel_fitting(struct drm_device *dev,
85 if (scaled_width > scaled_height) { /* pillar */ 84 if (scaled_width > scaled_height) { /* pillar */
86 width = scaled_height / mode->vdisplay; 85 width = scaled_height / mode->vdisplay;
87 if (width & 1) 86 if (width & 1)
88 width++; 87 width++;
89 x = (adjusted_mode->hdisplay - width + 1) / 2; 88 x = (adjusted_mode->hdisplay - width + 1) / 2;
90 y = 0; 89 y = 0;
91 height = adjusted_mode->vdisplay; 90 height = adjusted_mode->vdisplay;
@@ -130,100 +129,72 @@ static int is_backlight_combination_mode(struct drm_device *dev)
130 return 0; 129 return 0;
131} 130}
132 131
133static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) 132static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
134{ 133{
135 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 val; 134 u32 val;
137 135
138 /* Restore the CTL value if it lost, e.g. GPU reset */ 136 /* Restore the CTL value if it lost, e.g. GPU reset */
139 137
140 if (HAS_PCH_SPLIT(dev_priv->dev)) { 138 if (HAS_PCH_SPLIT(dev_priv->dev)) {
141 val = I915_READ(BLC_PWM_PCH_CTL2); 139 val = I915_READ(BLC_PWM_PCH_CTL2);
142 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { 140 if (dev_priv->saveBLC_PWM_CTL2 == 0) {
143 dev_priv->regfile.saveBLC_PWM_CTL2 = val; 141 dev_priv->saveBLC_PWM_CTL2 = val;
144 } else if (val == 0) { 142 } else if (val == 0) {
145 val = dev_priv->regfile.saveBLC_PWM_CTL2; 143 I915_WRITE(BLC_PWM_PCH_CTL2,
146 I915_WRITE(BLC_PWM_PCH_CTL2, val); 144 dev_priv->saveBLC_PWM_CTL);
145 val = dev_priv->saveBLC_PWM_CTL;
147 } 146 }
148 } else { 147 } else {
149 val = I915_READ(BLC_PWM_CTL); 148 val = I915_READ(BLC_PWM_CTL);
150 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { 149 if (dev_priv->saveBLC_PWM_CTL == 0) {
151 dev_priv->regfile.saveBLC_PWM_CTL = val; 150 dev_priv->saveBLC_PWM_CTL = val;
152 if (INTEL_INFO(dev)->gen >= 4) 151 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
153 dev_priv->regfile.saveBLC_PWM_CTL2 =
154 I915_READ(BLC_PWM_CTL2);
155 } else if (val == 0) { 152 } else if (val == 0) {
156 val = dev_priv->regfile.saveBLC_PWM_CTL; 153 I915_WRITE(BLC_PWM_CTL,
157 I915_WRITE(BLC_PWM_CTL, val); 154 dev_priv->saveBLC_PWM_CTL);
158 if (INTEL_INFO(dev)->gen >= 4) 155 I915_WRITE(BLC_PWM_CTL2,
159 I915_WRITE(BLC_PWM_CTL2, 156 dev_priv->saveBLC_PWM_CTL2);
160 dev_priv->regfile.saveBLC_PWM_CTL2); 157 val = dev_priv->saveBLC_PWM_CTL;
161 } 158 }
162 } 159 }
163 160
164 return val; 161 return val;
165} 162}
166 163
167static u32 _intel_panel_get_max_backlight(struct drm_device *dev) 164u32 intel_panel_get_max_backlight(struct drm_device *dev)
168{ 165{
166 struct drm_i915_private *dev_priv = dev->dev_private;
169 u32 max; 167 u32 max;
170 168
171 max = i915_read_blc_pwm_ctl(dev); 169 max = i915_read_blc_pwm_ctl(dev_priv);
170 if (max == 0) {
171 /* XXX add code here to query mode clock or hardware clock
172 * and program max PWM appropriately.
173 */
174 printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
175 return 1;
176 }
172 177
173 if (HAS_PCH_SPLIT(dev)) { 178 if (HAS_PCH_SPLIT(dev)) {
174 max >>= 16; 179 max >>= 16;
175 } else { 180 } else {
176 if (INTEL_INFO(dev)->gen < 4) 181 if (IS_PINEVIEW(dev)) {
177 max >>= 17; 182 max >>= 17;
178 else 183 } else {
179 max >>= 16; 184 max >>= 16;
185 if (INTEL_INFO(dev)->gen < 4)
186 max &= ~1;
187 }
180 188
181 if (is_backlight_combination_mode(dev)) 189 if (is_backlight_combination_mode(dev))
182 max *= 0xff; 190 max *= 0xff;
183 } 191 }
184 192
185 return max;
186}
187
188u32 intel_panel_get_max_backlight(struct drm_device *dev)
189{
190 u32 max;
191
192 max = _intel_panel_get_max_backlight(dev);
193 if (max == 0) {
194 /* XXX add code here to query mode clock or hardware clock
195 * and program max PWM appropriately.
196 */
197 pr_warn_once("fixme: max PWM is zero\n");
198 return 1;
199 }
200
201 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 193 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
202 return max; 194 return max;
203} 195}
204 196
205static int i915_panel_invert_brightness; 197u32 intel_panel_get_backlight(struct drm_device *dev)
206MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
207 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
208 "report PCI device ID, subsystem vendor and subsystem device ID "
209 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
210 "It will then be included in an upcoming module version.");
211module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
212static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
213{
214 struct drm_i915_private *dev_priv = dev->dev_private;
215
216 if (i915_panel_invert_brightness < 0)
217 return val;
218
219 if (i915_panel_invert_brightness > 0 ||
220 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
221 return intel_panel_get_max_backlight(dev) - val;
222
223 return val;
224}
225
226static u32 intel_panel_get_backlight(struct drm_device *dev)
227{ 198{
228 struct drm_i915_private *dev_priv = dev->dev_private; 199 struct drm_i915_private *dev_priv = dev->dev_private;
229 u32 val; 200 u32 val;
@@ -232,18 +203,18 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
232 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 203 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
233 } else { 204 } else {
234 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 205 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
235 if (INTEL_INFO(dev)->gen < 4) 206 if (IS_PINEVIEW(dev))
236 val >>= 1; 207 val >>= 1;
237 208
238 if (is_backlight_combination_mode(dev)) { 209 if (is_backlight_combination_mode(dev)){
239 u8 lbpc; 210 u8 lbpc;
240 211
212 val &= ~1;
241 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 213 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
242 val *= lbpc; 214 val *= lbpc;
243 } 215 }
244 } 216 }
245 217
246 val = intel_panel_compute_brightness(dev, val);
247 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 218 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
248 return val; 219 return val;
249} 220}
@@ -261,12 +232,11 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
261 u32 tmp; 232 u32 tmp;
262 233
263 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 234 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
264 level = intel_panel_compute_brightness(dev, level);
265 235
266 if (HAS_PCH_SPLIT(dev)) 236 if (HAS_PCH_SPLIT(dev))
267 return intel_pch_panel_set_backlight(dev, level); 237 return intel_pch_panel_set_backlight(dev, level);
268 238
269 if (is_backlight_combination_mode(dev)) { 239 if (is_backlight_combination_mode(dev)){
270 u32 max = intel_panel_get_max_backlight(dev); 240 u32 max = intel_panel_get_max_backlight(dev);
271 u8 lbpc; 241 u8 lbpc;
272 242
@@ -276,9 +246,11 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
276 } 246 }
277 247
278 tmp = I915_READ(BLC_PWM_CTL); 248 tmp = I915_READ(BLC_PWM_CTL);
279 if (INTEL_INFO(dev)->gen < 4) 249 if (IS_PINEVIEW(dev)) {
250 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
280 level <<= 1; 251 level <<= 1;
281 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 252 } else
253 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
282 I915_WRITE(BLC_PWM_CTL, tmp | level); 254 I915_WRITE(BLC_PWM_CTL, tmp | level);
283} 255}
284 256
@@ -297,69 +269,15 @@ void intel_panel_disable_backlight(struct drm_device *dev)
297 269
298 dev_priv->backlight_enabled = false; 270 dev_priv->backlight_enabled = false;
299 intel_panel_actually_set_backlight(dev, 0); 271 intel_panel_actually_set_backlight(dev, 0);
300
301 if (INTEL_INFO(dev)->gen >= 4) {
302 uint32_t reg, tmp;
303
304 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
305
306 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
307
308 if (HAS_PCH_SPLIT(dev)) {
309 tmp = I915_READ(BLC_PWM_PCH_CTL1);
310 tmp &= ~BLM_PCH_PWM_ENABLE;
311 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
312 }
313 }
314} 272}
315 273
316void intel_panel_enable_backlight(struct drm_device *dev, 274void intel_panel_enable_backlight(struct drm_device *dev)
317 enum pipe pipe)
318{ 275{
319 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
320 277
321 if (dev_priv->backlight_level == 0) 278 if (dev_priv->backlight_level == 0)
322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 279 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
323 280
324 if (INTEL_INFO(dev)->gen >= 4) {
325 uint32_t reg, tmp;
326
327 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
328
329
330 tmp = I915_READ(reg);
331
332 /* Note that this can also get called through dpms changes. And
333 * we don't track the backlight dpms state, hence check whether
334 * we have to do anything first. */
335 if (tmp & BLM_PWM_ENABLE)
336 goto set_level;
337
338 if (dev_priv->num_pipe == 3)
339 tmp &= ~BLM_PIPE_SELECT_IVB;
340 else
341 tmp &= ~BLM_PIPE_SELECT;
342
343 tmp |= BLM_PIPE(pipe);
344 tmp &= ~BLM_PWM_ENABLE;
345
346 I915_WRITE(reg, tmp);
347 POSTING_READ(reg);
348 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
349
350 if (HAS_PCH_SPLIT(dev)) {
351 tmp = I915_READ(BLC_PWM_PCH_CTL1);
352 tmp |= BLM_PCH_PWM_ENABLE;
353 tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
354 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
355 }
356 }
357
358set_level:
359 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
360 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
361 * registers are set.
362 */
363 dev_priv->backlight_enabled = true; 281 dev_priv->backlight_enabled = true;
364 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 282 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
365} 283}
@@ -375,23 +293,26 @@ static void intel_panel_init_backlight(struct drm_device *dev)
375enum drm_connector_status 293enum drm_connector_status
376intel_panel_detect(struct drm_device *dev) 294intel_panel_detect(struct drm_device *dev)
377{ 295{
296#if 0
378 struct drm_i915_private *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = dev->dev_private;
298#endif
299
300 if (i915_panel_ignore_lid)
301 return i915_panel_ignore_lid > 0 ?
302 connector_status_connected :
303 connector_status_disconnected;
379 304
305 /* opregion lid state on HP 2540p is wrong at boot up,
306 * appears to be either the BIOS or Linux ACPI fault */
307#if 0
380 /* Assume that the BIOS does not lie through the OpRegion... */ 308 /* Assume that the BIOS does not lie through the OpRegion... */
381 if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { 309 if (dev_priv->opregion.lid_state)
382 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 310 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
383 connector_status_connected : 311 connector_status_connected :
384 connector_status_disconnected; 312 connector_status_disconnected;
385 } 313#endif
386 314
387 switch (i915_panel_ignore_lid) { 315 return connector_status_unknown;
388 case -2:
389 return connector_status_connected;
390 case -1:
391 return connector_status_disconnected;
392 default:
393 return connector_status_unknown;
394 }
395} 316}
396 317
397#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 318#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -414,21 +335,23 @@ static const struct backlight_ops intel_panel_bl_ops = {
414 .get_brightness = intel_panel_get_brightness, 335 .get_brightness = intel_panel_get_brightness,
415}; 336};
416 337
417int intel_panel_setup_backlight(struct drm_connector *connector) 338int intel_panel_setup_backlight(struct drm_device *dev)
418{ 339{
419 struct drm_device *dev = connector->dev;
420 struct drm_i915_private *dev_priv = dev->dev_private; 340 struct drm_i915_private *dev_priv = dev->dev_private;
421 struct backlight_properties props; 341 struct backlight_properties props;
342 struct drm_connector *connector;
422 343
423 intel_panel_init_backlight(dev); 344 intel_panel_init_backlight(dev);
424 345
425 memset(&props, 0, sizeof(props)); 346 if (dev_priv->int_lvds_connector)
426 props.type = BACKLIGHT_RAW; 347 connector = dev_priv->int_lvds_connector;
427 props.max_brightness = _intel_panel_get_max_backlight(dev); 348 else if (dev_priv->int_edp_connector)
428 if (props.max_brightness == 0) { 349 connector = dev_priv->int_edp_connector;
429 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); 350 else
430 return -ENODEV; 351 return -ENODEV;
431 } 352
353 props.type = BACKLIGHT_RAW;
354 props.max_brightness = intel_panel_get_max_backlight(dev);
432 dev_priv->backlight = 355 dev_priv->backlight =
433 backlight_device_register("intel_backlight", 356 backlight_device_register("intel_backlight",
434 &connector->kdev, dev, 357 &connector->kdev, dev,
@@ -451,9 +374,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
451 backlight_device_unregister(dev_priv->backlight); 374 backlight_device_unregister(dev_priv->backlight);
452} 375}
453#else 376#else
454int intel_panel_setup_backlight(struct drm_connector *connector) 377int intel_panel_setup_backlight(struct drm_device *dev)
455{ 378{
456 intel_panel_init_backlight(connector->dev); 379 intel_panel_init_backlight(dev);
457 return 0; 380 return 0;
458} 381}
459 382
@@ -462,20 +385,3 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
462 return; 385 return;
463} 386}
464#endif 387#endif
465
466int intel_panel_init(struct intel_panel *panel,
467 struct drm_display_mode *fixed_mode)
468{
469 panel->fixed_mode = fixed_mode;
470
471 return 0;
472}
473
474void intel_panel_fini(struct intel_panel *panel)
475{
476 struct intel_connector *intel_connector =
477 container_of(panel, struct intel_connector, panel);
478
479 if (panel->fixed_mode)
480 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
481}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
deleted file mode 100644
index e83a1179417..00000000000
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ /dev/null
@@ -1,4458 +0,0 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include <linux/cpufreq.h>
29#include "i915_drv.h"
30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h>
33
34#define FORCEWAKE_ACK_TIMEOUT_MS 2
35
36/* FBC, or Frame Buffer Compression, is a technique employed to compress the
37 * framebuffer contents in-memory, aiming at reducing the required bandwidth
38 * during in-memory transfers and, therefore, reduce the power packet.
39 *
40 * The benefits of FBC are mostly visible with solid backgrounds and
41 * variation-less patterns.
42 *
43 * FBC-related functionality can be enabled by the means of the
44 * i915.i915_enable_fbc parameter
45 */
46
47static bool intel_crtc_active(struct drm_crtc *crtc)
48{
49 /* Be paranoid as we can arrive here with only partial
50 * state retrieved from the hardware during setup.
51 */
52 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
53}
54
55static void i8xx_disable_fbc(struct drm_device *dev)
56{
57 struct drm_i915_private *dev_priv = dev->dev_private;
58 u32 fbc_ctl;
59
60 /* Disable compression */
61 fbc_ctl = I915_READ(FBC_CONTROL);
62 if ((fbc_ctl & FBC_CTL_EN) == 0)
63 return;
64
65 fbc_ctl &= ~FBC_CTL_EN;
66 I915_WRITE(FBC_CONTROL, fbc_ctl);
67
68 /* Wait for compressing bit to clear */
69 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
70 DRM_DEBUG_KMS("FBC idle timed out\n");
71 return;
72 }
73
74 DRM_DEBUG_KMS("disabled FBC\n");
75}
76
77static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
78{
79 struct drm_device *dev = crtc->dev;
80 struct drm_i915_private *dev_priv = dev->dev_private;
81 struct drm_framebuffer *fb = crtc->fb;
82 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
83 struct drm_i915_gem_object *obj = intel_fb->obj;
84 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
85 int cfb_pitch;
86 int plane, i;
87 u32 fbc_ctl, fbc_ctl2;
88
89 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
90 if (fb->pitches[0] < cfb_pitch)
91 cfb_pitch = fb->pitches[0];
92
93 /* FBC_CTL wants 64B units */
94 cfb_pitch = (cfb_pitch / 64) - 1;
95 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
96
97 /* Clear old tags */
98 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
99 I915_WRITE(FBC_TAG + (i * 4), 0);
100
101 /* Set it up... */
102 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
103 fbc_ctl2 |= plane;
104 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
105 I915_WRITE(FBC_FENCE_OFF, crtc->y);
106
107 /* enable it... */
108 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
109 if (IS_I945GM(dev))
110 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
111 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
112 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
113 fbc_ctl |= obj->fence_reg;
114 I915_WRITE(FBC_CONTROL, fbc_ctl);
115
116 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
117 cfb_pitch, crtc->y, intel_crtc->plane);
118}
119
120static bool i8xx_fbc_enabled(struct drm_device *dev)
121{
122 struct drm_i915_private *dev_priv = dev->dev_private;
123
124 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
125}
126
127static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
128{
129 struct drm_device *dev = crtc->dev;
130 struct drm_i915_private *dev_priv = dev->dev_private;
131 struct drm_framebuffer *fb = crtc->fb;
132 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
133 struct drm_i915_gem_object *obj = intel_fb->obj;
134 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
135 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
136 unsigned long stall_watermark = 200;
137 u32 dpfc_ctl;
138
139 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
140 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
141 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
142
143 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
144 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
145 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
146 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
147
148 /* enable it... */
149 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
150
151 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
152}
153
154static void g4x_disable_fbc(struct drm_device *dev)
155{
156 struct drm_i915_private *dev_priv = dev->dev_private;
157 u32 dpfc_ctl;
158
159 /* Disable compression */
160 dpfc_ctl = I915_READ(DPFC_CONTROL);
161 if (dpfc_ctl & DPFC_CTL_EN) {
162 dpfc_ctl &= ~DPFC_CTL_EN;
163 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
164
165 DRM_DEBUG_KMS("disabled FBC\n");
166 }
167}
168
169static bool g4x_fbc_enabled(struct drm_device *dev)
170{
171 struct drm_i915_private *dev_priv = dev->dev_private;
172
173 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
174}
175
176static void sandybridge_blit_fbc_update(struct drm_device *dev)
177{
178 struct drm_i915_private *dev_priv = dev->dev_private;
179 u32 blt_ecoskpd;
180
181 /* Make sure blitter notifies FBC of writes */
182 gen6_gt_force_wake_get(dev_priv);
183 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
184 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
185 GEN6_BLITTER_LOCK_SHIFT;
186 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
187 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
188 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
189 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
190 GEN6_BLITTER_LOCK_SHIFT);
191 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
192 POSTING_READ(GEN6_BLITTER_ECOSKPD);
193 gen6_gt_force_wake_put(dev_priv);
194}
195
196static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
197{
198 struct drm_device *dev = crtc->dev;
199 struct drm_i915_private *dev_priv = dev->dev_private;
200 struct drm_framebuffer *fb = crtc->fb;
201 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
202 struct drm_i915_gem_object *obj = intel_fb->obj;
203 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
204 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
205 unsigned long stall_watermark = 200;
206 u32 dpfc_ctl;
207
208 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
209 dpfc_ctl &= DPFC_RESERVED;
210 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
211 /* Set persistent mode for front-buffer rendering, ala X. */
212 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
213 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
214 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
215
216 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
217 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
218 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
219 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
220 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
221 /* enable it... */
222 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
223
224 if (IS_GEN6(dev)) {
225 I915_WRITE(SNB_DPFC_CTL_SA,
226 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
227 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
228 sandybridge_blit_fbc_update(dev);
229 }
230
231 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
232}
233
234static void ironlake_disable_fbc(struct drm_device *dev)
235{
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 u32 dpfc_ctl;
238
239 /* Disable compression */
240 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
241 if (dpfc_ctl & DPFC_CTL_EN) {
242 dpfc_ctl &= ~DPFC_CTL_EN;
243 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
244
245 DRM_DEBUG_KMS("disabled FBC\n");
246 }
247}
248
249static bool ironlake_fbc_enabled(struct drm_device *dev)
250{
251 struct drm_i915_private *dev_priv = dev->dev_private;
252
253 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
254}
255
256bool intel_fbc_enabled(struct drm_device *dev)
257{
258 struct drm_i915_private *dev_priv = dev->dev_private;
259
260 if (!dev_priv->display.fbc_enabled)
261 return false;
262
263 return dev_priv->display.fbc_enabled(dev);
264}
265
266static void intel_fbc_work_fn(struct work_struct *__work)
267{
268 struct intel_fbc_work *work =
269 container_of(to_delayed_work(__work),
270 struct intel_fbc_work, work);
271 struct drm_device *dev = work->crtc->dev;
272 struct drm_i915_private *dev_priv = dev->dev_private;
273
274 mutex_lock(&dev->struct_mutex);
275 if (work == dev_priv->fbc_work) {
276 /* Double check that we haven't switched fb without cancelling
277 * the prior work.
278 */
279 if (work->crtc->fb == work->fb) {
280 dev_priv->display.enable_fbc(work->crtc,
281 work->interval);
282
283 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
284 dev_priv->cfb_fb = work->crtc->fb->base.id;
285 dev_priv->cfb_y = work->crtc->y;
286 }
287
288 dev_priv->fbc_work = NULL;
289 }
290 mutex_unlock(&dev->struct_mutex);
291
292 kfree(work);
293}
294
295static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
296{
297 if (dev_priv->fbc_work == NULL)
298 return;
299
300 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
301
302 /* Synchronisation is provided by struct_mutex and checking of
303 * dev_priv->fbc_work, so we can perform the cancellation
304 * entirely asynchronously.
305 */
306 if (cancel_delayed_work(&dev_priv->fbc_work->work))
307 /* tasklet was killed before being run, clean up */
308 kfree(dev_priv->fbc_work);
309
310 /* Mark the work as no longer wanted so that if it does
311 * wake-up (because the work was already running and waiting
312 * for our mutex), it will discover that is no longer
313 * necessary to run.
314 */
315 dev_priv->fbc_work = NULL;
316}
317
318void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
319{
320 struct intel_fbc_work *work;
321 struct drm_device *dev = crtc->dev;
322 struct drm_i915_private *dev_priv = dev->dev_private;
323
324 if (!dev_priv->display.enable_fbc)
325 return;
326
327 intel_cancel_fbc_work(dev_priv);
328
329 work = kzalloc(sizeof *work, GFP_KERNEL);
330 if (work == NULL) {
331 dev_priv->display.enable_fbc(crtc, interval);
332 return;
333 }
334
335 work->crtc = crtc;
336 work->fb = crtc->fb;
337 work->interval = interval;
338 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
339
340 dev_priv->fbc_work = work;
341
342 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
343
344 /* Delay the actual enabling to let pageflipping cease and the
345 * display to settle before starting the compression. Note that
346 * this delay also serves a second purpose: it allows for a
347 * vblank to pass after disabling the FBC before we attempt
348 * to modify the control registers.
349 *
350 * A more complicated solution would involve tracking vblanks
351 * following the termination of the page-flipping sequence
352 * and indeed performing the enable as a co-routine and not
353 * waiting synchronously upon the vblank.
354 */
355 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
356}
357
358void intel_disable_fbc(struct drm_device *dev)
359{
360 struct drm_i915_private *dev_priv = dev->dev_private;
361
362 intel_cancel_fbc_work(dev_priv);
363
364 if (!dev_priv->display.disable_fbc)
365 return;
366
367 dev_priv->display.disable_fbc(dev);
368 dev_priv->cfb_plane = -1;
369}
370
371/**
372 * intel_update_fbc - enable/disable FBC as needed
373 * @dev: the drm_device
374 *
375 * Set up the framebuffer compression hardware at mode set time. We
376 * enable it if possible:
377 * - plane A only (on pre-965)
378 * - no pixel mulitply/line duplication
379 * - no alpha buffer discard
380 * - no dual wide
381 * - framebuffer <= 2048 in width, 1536 in height
382 *
383 * We can't assume that any compression will take place (worst case),
384 * so the compressed buffer has to be the same size as the uncompressed
385 * one. It also must reside (along with the line length buffer) in
386 * stolen memory.
387 *
388 * We need to enable/disable FBC on a global basis.
389 */
390void intel_update_fbc(struct drm_device *dev)
391{
392 struct drm_i915_private *dev_priv = dev->dev_private;
393 struct drm_crtc *crtc = NULL, *tmp_crtc;
394 struct intel_crtc *intel_crtc;
395 struct drm_framebuffer *fb;
396 struct intel_framebuffer *intel_fb;
397 struct drm_i915_gem_object *obj;
398 int enable_fbc;
399
400 if (!i915_powersave)
401 return;
402
403 if (!I915_HAS_FBC(dev))
404 return;
405
406 /*
407 * If FBC is already on, we just have to verify that we can
408 * keep it that way...
409 * Need to disable if:
410 * - more than one pipe is active
411 * - changing FBC params (stride, fence, mode)
412 * - new fb is too large to fit in compressed buffer
413 * - going to an unsupported config (interlace, pixel multiply, etc.)
414 */
415 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
416 if (intel_crtc_active(tmp_crtc) &&
417 !to_intel_crtc(tmp_crtc)->primary_disabled) {
418 if (crtc) {
419 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
420 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
421 goto out_disable;
422 }
423 crtc = tmp_crtc;
424 }
425 }
426
427 if (!crtc || crtc->fb == NULL) {
428 DRM_DEBUG_KMS("no output, disabling\n");
429 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
430 goto out_disable;
431 }
432
433 intel_crtc = to_intel_crtc(crtc);
434 fb = crtc->fb;
435 intel_fb = to_intel_framebuffer(fb);
436 obj = intel_fb->obj;
437
438 enable_fbc = i915_enable_fbc;
439 if (enable_fbc < 0) {
440 DRM_DEBUG_KMS("fbc set to per-chip default\n");
441 enable_fbc = 1;
442 if (INTEL_INFO(dev)->gen <= 6)
443 enable_fbc = 0;
444 }
445 if (!enable_fbc) {
446 DRM_DEBUG_KMS("fbc disabled per module param\n");
447 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
448 goto out_disable;
449 }
450 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
451 DRM_DEBUG_KMS("framebuffer too large, disabling "
452 "compression\n");
453 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
454 goto out_disable;
455 }
456 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
457 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
458 DRM_DEBUG_KMS("mode incompatible with compression, "
459 "disabling\n");
460 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
461 goto out_disable;
462 }
463 if ((crtc->mode.hdisplay > 2048) ||
464 (crtc->mode.vdisplay > 1536)) {
465 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
466 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
467 goto out_disable;
468 }
469 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
470 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
471 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
472 goto out_disable;
473 }
474
475 /* The use of a CPU fence is mandatory in order to detect writes
476 * by the CPU to the scanout and trigger updates to the FBC.
477 */
478 if (obj->tiling_mode != I915_TILING_X ||
479 obj->fence_reg == I915_FENCE_REG_NONE) {
480 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
481 dev_priv->no_fbc_reason = FBC_NOT_TILED;
482 goto out_disable;
483 }
484
485 /* If the kernel debugger is active, always disable compression */
486 if (in_dbg_master())
487 goto out_disable;
488
489 /* If the scanout has not changed, don't modify the FBC settings.
490 * Note that we make the fundamental assumption that the fb->obj
491 * cannot be unpinned (and have its GTT offset and fence revoked)
492 * without first being decoupled from the scanout and FBC disabled.
493 */
494 if (dev_priv->cfb_plane == intel_crtc->plane &&
495 dev_priv->cfb_fb == fb->base.id &&
496 dev_priv->cfb_y == crtc->y)
497 return;
498
499 if (intel_fbc_enabled(dev)) {
500 /* We update FBC along two paths, after changing fb/crtc
501 * configuration (modeswitching) and after page-flipping
502 * finishes. For the latter, we know that not only did
503 * we disable the FBC at the start of the page-flip
504 * sequence, but also more than one vblank has passed.
505 *
506 * For the former case of modeswitching, it is possible
507 * to switch between two FBC valid configurations
508 * instantaneously so we do need to disable the FBC
509 * before we can modify its control registers. We also
510 * have to wait for the next vblank for that to take
511 * effect. However, since we delay enabling FBC we can
512 * assume that a vblank has passed since disabling and
513 * that we can safely alter the registers in the deferred
514 * callback.
515 *
516 * In the scenario that we go from a valid to invalid
517 * and then back to valid FBC configuration we have
518 * no strict enforcement that a vblank occurred since
519 * disabling the FBC. However, along all current pipe
520 * disabling paths we do need to wait for a vblank at
521 * some point. And we wait before enabling FBC anyway.
522 */
523 DRM_DEBUG_KMS("disabling active FBC for update\n");
524 intel_disable_fbc(dev);
525 }
526
527 intel_enable_fbc(crtc, 500);
528 return;
529
530out_disable:
531 /* Multiple disables should be harmless */
532 if (intel_fbc_enabled(dev)) {
533 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
534 intel_disable_fbc(dev);
535 }
536}
537
538static void i915_pineview_get_mem_freq(struct drm_device *dev)
539{
540 drm_i915_private_t *dev_priv = dev->dev_private;
541 u32 tmp;
542
543 tmp = I915_READ(CLKCFG);
544
545 switch (tmp & CLKCFG_FSB_MASK) {
546 case CLKCFG_FSB_533:
547 dev_priv->fsb_freq = 533; /* 133*4 */
548 break;
549 case CLKCFG_FSB_800:
550 dev_priv->fsb_freq = 800; /* 200*4 */
551 break;
552 case CLKCFG_FSB_667:
553 dev_priv->fsb_freq = 667; /* 167*4 */
554 break;
555 case CLKCFG_FSB_400:
556 dev_priv->fsb_freq = 400; /* 100*4 */
557 break;
558 }
559
560 switch (tmp & CLKCFG_MEM_MASK) {
561 case CLKCFG_MEM_533:
562 dev_priv->mem_freq = 533;
563 break;
564 case CLKCFG_MEM_667:
565 dev_priv->mem_freq = 667;
566 break;
567 case CLKCFG_MEM_800:
568 dev_priv->mem_freq = 800;
569 break;
570 }
571
572 /* detect pineview DDR3 setting */
573 tmp = I915_READ(CSHRDDR3CTL);
574 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
575}
576
577static void i915_ironlake_get_mem_freq(struct drm_device *dev)
578{
579 drm_i915_private_t *dev_priv = dev->dev_private;
580 u16 ddrpll, csipll;
581
582 ddrpll = I915_READ16(DDRMPLL1);
583 csipll = I915_READ16(CSIPLL0);
584
585 switch (ddrpll & 0xff) {
586 case 0xc:
587 dev_priv->mem_freq = 800;
588 break;
589 case 0x10:
590 dev_priv->mem_freq = 1066;
591 break;
592 case 0x14:
593 dev_priv->mem_freq = 1333;
594 break;
595 case 0x18:
596 dev_priv->mem_freq = 1600;
597 break;
598 default:
599 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
600 ddrpll & 0xff);
601 dev_priv->mem_freq = 0;
602 break;
603 }
604
605 dev_priv->ips.r_t = dev_priv->mem_freq;
606
607 switch (csipll & 0x3ff) {
608 case 0x00c:
609 dev_priv->fsb_freq = 3200;
610 break;
611 case 0x00e:
612 dev_priv->fsb_freq = 3733;
613 break;
614 case 0x010:
615 dev_priv->fsb_freq = 4266;
616 break;
617 case 0x012:
618 dev_priv->fsb_freq = 4800;
619 break;
620 case 0x014:
621 dev_priv->fsb_freq = 5333;
622 break;
623 case 0x016:
624 dev_priv->fsb_freq = 5866;
625 break;
626 case 0x018:
627 dev_priv->fsb_freq = 6400;
628 break;
629 default:
630 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
631 csipll & 0x3ff);
632 dev_priv->fsb_freq = 0;
633 break;
634 }
635
636 if (dev_priv->fsb_freq == 3200) {
637 dev_priv->ips.c_m = 0;
638 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
639 dev_priv->ips.c_m = 1;
640 } else {
641 dev_priv->ips.c_m = 2;
642 }
643}
644
645static const struct cxsr_latency cxsr_latency_table[] = {
646 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
647 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
648 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
649 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
650 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
651
652 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
653 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
654 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
655 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
656 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
657
658 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
659 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
660 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
661 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
662 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
663
664 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
665 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
666 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
667 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
668 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
669
670 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
671 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
672 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
673 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
674 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
675
676 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
677 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
678 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
679 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
680 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
681};
682
683static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
684 int is_ddr3,
685 int fsb,
686 int mem)
687{
688 const struct cxsr_latency *latency;
689 int i;
690
691 if (fsb == 0 || mem == 0)
692 return NULL;
693
694 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
695 latency = &cxsr_latency_table[i];
696 if (is_desktop == latency->is_desktop &&
697 is_ddr3 == latency->is_ddr3 &&
698 fsb == latency->fsb_freq && mem == latency->mem_freq)
699 return latency;
700 }
701
702 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
703
704 return NULL;
705}
706
707static void pineview_disable_cxsr(struct drm_device *dev)
708{
709 struct drm_i915_private *dev_priv = dev->dev_private;
710
711 /* deactivate cxsr */
712 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
713}
714
715/*
716 * Latency for FIFO fetches is dependent on several factors:
717 * - memory configuration (speed, channels)
718 * - chipset
719 * - current MCH state
720 * It can be fairly high in some situations, so here we assume a fairly
721 * pessimal value. It's a tradeoff between extra memory fetches (if we
722 * set this value too high, the FIFO will fetch frequently to stay full)
723 * and power consumption (set it too low to save power and we might see
724 * FIFO underruns and display "flicker").
725 *
726 * A value of 5us seems to be a good balance; safe for very low end
727 * platforms but not overly aggressive on lower latency configs.
728 */
729static const int latency_ns = 5000;
730
731static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
732{
733 struct drm_i915_private *dev_priv = dev->dev_private;
734 uint32_t dsparb = I915_READ(DSPARB);
735 int size;
736
737 size = dsparb & 0x7f;
738 if (plane)
739 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
740
741 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
742 plane ? "B" : "A", size);
743
744 return size;
745}
746
747static int i85x_get_fifo_size(struct drm_device *dev, int plane)
748{
749 struct drm_i915_private *dev_priv = dev->dev_private;
750 uint32_t dsparb = I915_READ(DSPARB);
751 int size;
752
753 size = dsparb & 0x1ff;
754 if (plane)
755 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
756 size >>= 1; /* Convert to cachelines */
757
758 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
759 plane ? "B" : "A", size);
760
761 return size;
762}
763
764static int i845_get_fifo_size(struct drm_device *dev, int plane)
765{
766 struct drm_i915_private *dev_priv = dev->dev_private;
767 uint32_t dsparb = I915_READ(DSPARB);
768 int size;
769
770 size = dsparb & 0x7f;
771 size >>= 2; /* Convert to cachelines */
772
773 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
774 plane ? "B" : "A",
775 size);
776
777 return size;
778}
779
780static int i830_get_fifo_size(struct drm_device *dev, int plane)
781{
782 struct drm_i915_private *dev_priv = dev->dev_private;
783 uint32_t dsparb = I915_READ(DSPARB);
784 int size;
785
786 size = dsparb & 0x7f;
787 size >>= 1; /* Convert to cachelines */
788
789 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
790 plane ? "B" : "A", size);
791
792 return size;
793}
794
/* Pineview has different values for various configs */
/*
 * Positional initializer order below: fifo_size, max_wm, default_wm,
 * guard_size, cacheline_size — inferred from how the fields are read
 * back in this file (e.g. pineview_display_wm.fifo_size); confirm
 * against struct intel_watermark_params.
 */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
/* Same FIFO, but the HPLL-off default watermark. */
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
/*
 * G4x/Valleyview watermark parameters.  Field order: fifo_size,
 * max_wm, default_wm, guard_size, cacheline_size (presumed from
 * usage in this file — confirm against struct intel_watermark_params).
 */
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,	/* default_wm == max_wm on G4x */
	2,		/* guard_size */
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,		/* guard_size */
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,		/* guard_size */
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,		/* guard_size */
	G4X_FIFO_LINE_SIZE,
};
/*
 * Gen2-gen4 watermark parameters.  Field order: fifo_size, max_wm,
 * default_wm, guard_size, cacheline_size (presumed from usage in this
 * file — confirm against struct intel_watermark_params).
 */
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,		/* guard_size */
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,		/* default_wm */
	2,		/* guard_size */
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,		/* default_wm */
	2,		/* guard_size */
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,		/* default_wm */
	2,		/* guard_size */
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,		/* default_wm */
	2,		/* guard_size */
	I830_FIFO_LINE_SIZE
};
887
/*
 * Ironlake watermark parameters (WM0 and self-refresh variants).
 * Field order: fifo_size, max_wm, default_wm, guard_size,
 * cacheline_size (presumed from usage in this file — confirm against
 * struct intel_watermark_params).
 */
static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,		/* guard_size */
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,		/* guard_size */
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,		/* guard_size */
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,		/* guard_size */
	ILK_FIFO_LINE_SIZE
};
916
/*
 * Sandybridge watermark parameters (WM0 and self-refresh variants).
 * Field order: fifo_size, max_wm, default_wm, guard_size,
 * cacheline_size (presumed from usage in this file — confirm against
 * struct intel_watermark_params).
 */
static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,		/* guard_size */
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,		/* guard_size */
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,		/* guard_size */
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,		/* guard_size */
	SNB_FIFO_LINE_SIZE
};
945
946
947/**
948 * intel_calculate_wm - calculate watermark level
949 * @clock_in_khz: pixel clock
950 * @wm: chip FIFO params
951 * @pixel_size: display pixel size
952 * @latency_ns: memory latency for the platform
953 *
954 * Calculate the watermark level (the level at which the display plane will
955 * start fetching from memory again). Each chip has a different display
956 * FIFO size and allocation, so the caller needs to figure that out and pass
957 * in the correct intel_watermark_params structure.
958 *
959 * As the pixel clock runs, the FIFO will be drained at a rate that depends
960 * on the pixel size. When it reaches the watermark level, it'll start
961 * fetching FIFO line sized based chunks from memory until the FIFO fills
962 * past the watermark point. If the FIFO drains completely, a FIFO underrun
963 * will occur, and a display engine hang could result.
964 */
965static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
966 const struct intel_watermark_params *wm,
967 int fifo_size,
968 int pixel_size,
969 unsigned long latency_ns)
970{
971 long entries_required, wm_size;
972
973 /*
974 * Note: we need to make sure we don't overflow for various clock &
975 * latency values.
976 * clocks go from a few thousand to several hundred thousand.
977 * latency is usually a few thousand
978 */
979 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
980 1000;
981 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
982
983 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
984
985 wm_size = fifo_size - (entries_required + wm->guard_size);
986
987 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
988
989 /* Don't promote wm_size to unsigned... */
990 if (wm_size > (long)wm->max_wm)
991 wm_size = wm->max_wm;
992 if (wm_size <= 0)
993 wm_size = wm->default_wm;
994 return wm_size;
995}
996
997static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
998{
999 struct drm_crtc *crtc, *enabled = NULL;
1000
1001 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1002 if (intel_crtc_active(crtc)) {
1003 if (enabled)
1004 return NULL;
1005 enabled = crtc;
1006 }
1007 }
1008
1009 return enabled;
1010}
1011
/*
 * pineview_update_wm - program Pineview self-refresh (CxSR) watermarks.
 *
 * Looks up the platform's CxSR latencies from the FSB/memory-frequency
 * configuration.  If the combination is unknown, or more than one CRTC
 * is active, self-refresh is disabled.  Otherwise the display/cursor SR
 * and HPLL-off watermarks are computed, programmed into DSPFW1/DSPFW3,
 * and self-refresh is enabled.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	/* Latency table lookup keyed on G-variant, DDR3 flag, FSB and
	 * memory frequency. */
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	/* Self-refresh is only usable with a single active pipe. */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): cursor wm params are combined with the
		 * *display* FIFO size here — looks deliberate but worth
		 * confirming against the Pineview docs. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr — only after all watermarks are in place */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
1080
/*
 * g4x_compute_wm0 - compute the WM0 (normal usage) watermarks for one pipe.
 * @plane: plane/pipe index
 * @display/@cursor: FIFO parameters for the display plane and cursor
 * @display_latency_ns/@cursor_latency_ns: memory latencies in ns
 * @plane_wm/@cursor_wm: results (always written)
 *
 * Returns false — with guard-size fallback watermarks — when the
 * plane's CRTC is not active, true otherwise.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* NOTE(review): the "hdisplay * 8" TLB-miss padding heuristic is
	 * not derived here — confirm against the hardware docs. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size; /* 64: presumably cursor width in pixels — confirm */
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
1131
1132/*
1133 * Check the wm result.
1134 *
1135 * If any calculated watermark values is larger than the maximum value that
1136 * can be programmed into the associated watermark register, that watermark
1137 * must be disabled.
1138 */
1139static bool g4x_check_srwm(struct drm_device *dev,
1140 int display_wm, int cursor_wm,
1141 const struct intel_watermark_params *display,
1142 const struct intel_watermark_params *cursor)
1143{
1144 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1145 display_wm, cursor_wm);
1146
1147 if (display_wm > display->max_wm) {
1148 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1149 display_wm, display->max_wm);
1150 return false;
1151 }
1152
1153 if (cursor_wm > cursor->max_wm) {
1154 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1155 cursor_wm, cursor->max_wm);
1156 return false;
1157 }
1158
1159 if (!(display_wm || cursor_wm)) {
1160 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1161 return false;
1162 }
1163
1164 return true;
1165}
1166
/*
 * g4x_compute_srwm - compute self-refresh watermarks for G4x.
 * @plane: plane whose CRTC supplies mode timings and framebuffer
 * @latency_ns: self-refresh memory latency; 0 disables self-refresh
 * @display_wm/@cursor_wm: results (always written)
 *
 * Returns true when the computed values pass g4x_check_srwm().
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	/* Zero latency: self-refresh is unusable. */
	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64; /* 64: presumably cursor width — confirm */
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
1212
/*
 * vlv_compute_drain_latency - compute memory-arbiter drain latencies
 * for one Valleyview plane and its cursor.
 * @plane_prec_mult/@cursor_prec_mult: chosen precision multiplier
 *	(DRAIN_LATENCY_PRECISION_32 when more than 256 bytes are
 *	consumed per microsecond, else DRAIN_LATENCY_PRECISION_16)
 * @plane_dl/@cursor_dl: computed drain latency values
 *
 * Returns false (outputs untouched) when the plane's CRTC is inactive.
 */
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	/* Bytes consumed per microsecond by the plane. */
	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
1244
1245/*
1246 * Update drain latency registers of memory arbiter
1247 *
1248 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1249 * to be programmed. Each plane has a drain latency multiplier and a drain
1250 * latency value.
1251 */
1252
1253static void vlv_update_drain_latency(struct drm_device *dev)
1254{
1255 struct drm_i915_private *dev_priv = dev->dev_private;
1256 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1257 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1258 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1259 either 16 or 32 */
1260
1261 /* For plane A, Cursor A */
1262 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1263 &cursor_prec_mult, &cursora_dl)) {
1264 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1265 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1266 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1267 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1268
1269 I915_WRITE(VLV_DDL1, cursora_prec |
1270 (cursora_dl << DDL_CURSORA_SHIFT) |
1271 planea_prec | planea_dl);
1272 }
1273
1274 /* For plane B, Cursor B */
1275 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1276 &cursor_prec_mult, &cursorb_dl)) {
1277 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1278 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1279 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1280 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1281
1282 I915_WRITE(VLV_DDL2, cursorb_prec |
1283 (cursorb_dl << DDL_CURSORB_SHIFT) |
1284 planeb_prec | planeb_dl);
1285 }
1286}
1287
/* True iff exactly one plane bit is set in @mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
1289
/*
 * valleyview_update_wm - program Valleyview watermarks.
 *
 * Programs drain latencies and per-pipe WM0 values; when exactly one
 * plane is enabled, additionally computes self-refresh watermarks (the
 * cursor SR is computed with doubled latency — the unused output of
 * each g4x_compute_srwm() call is ignored) and enables FW_CSPWRDWNEN.
 */
static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh only with a single enabled plane. */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
1348
/*
 * g4x_update_wm - program G4x watermarks: per-pipe WM0 values plus,
 * for single-plane configurations, self-refresh watermarks.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh only with a single enabled plane. */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
1400
/*
 * i965_update_wm - program gen4 watermarks.
 *
 * Plane watermarks are hardwired to 8; only the display self-refresh
 * and cursor SR watermarks are computed, and only when exactly one
 * CRTC is active.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Cursor SR: 64 here is presumably the cursor width in
		 * pixels — confirm. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
1465
/*
 * i9xx_update_wm - program gen2/gen3-style FIFO watermarks.
 *
 * Computes plane A/B watermarks from the per-plane FIFO split and
 * writes them to FW_BLC/FW_BLC2.  When exactly one CRTC is active and
 * the chip supports FW_BLC self-refresh, a self-refresh watermark is
 * also computed and self-refresh re-enabled afterwards.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick FIFO parameters matching the chip generation. */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	/* Plane A.  On gen2 the fetch size is forced to 4 bytes/pixel. */
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	/* Plane B; "enabled" ends up non-NULL only for single-pipe configs. */
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;	/* both pipes active */
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are programmed. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
1582
1583static void i830_update_wm(struct drm_device *dev)
1584{
1585 struct drm_i915_private *dev_priv = dev->dev_private;
1586 struct drm_crtc *crtc;
1587 uint32_t fwater_lo;
1588 int planea_wm;
1589
1590 crtc = single_enabled_crtc(dev);
1591 if (crtc == NULL)
1592 return;
1593
1594 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1595 dev_priv->display.get_fifo_size(dev, 0),
1596 4, latency_ns);
1597 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1598 fwater_lo |= (3<<8) | planea_wm;
1599
1600 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1601
1602 I915_WRITE(FW_BLC, fwater_lo);
1603}
1604
/*
 * WM0 (LP0) latencies for Ironlake, in nanoseconds — fed to
 * g4x_compute_wm0() as the display/cursor latency arguments.
 */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
1607
1608/*
1609 * Check the wm result.
1610 *
1611 * If any calculated watermark values is larger than the maximum value that
1612 * can be programmed into the associated watermark register, that watermark
1613 * must be disabled.
1614 */
1615static bool ironlake_check_srwm(struct drm_device *dev, int level,
1616 int fbc_wm, int display_wm, int cursor_wm,
1617 const struct intel_watermark_params *display,
1618 const struct intel_watermark_params *cursor)
1619{
1620 struct drm_i915_private *dev_priv = dev->dev_private;
1621
1622 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1623 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1624
1625 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1626 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1627 fbc_wm, SNB_FBC_MAX_SRWM, level);
1628
1629 /* fbc has it's own way to disable FBC WM */
1630 I915_WRITE(DISP_ARB_CTL,
1631 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1632 return false;
1633 }
1634
1635 if (display_wm > display->max_wm) {
1636 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1637 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1638 return false;
1639 }
1640
1641 if (cursor_wm > cursor->max_wm) {
1642 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1643 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1644 return false;
1645 }
1646
1647 if (!(fbc_wm || display_wm || cursor_wm)) {
1648 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1649 return false;
1650 }
1651
1652 return true;
1653}
1654
1655/*
1656 * Compute watermark values of WM[1-3],
1657 */
1658static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1659 int latency_ns,
1660 const struct intel_watermark_params *display,
1661 const struct intel_watermark_params *cursor,
1662 int *fbc_wm, int *display_wm, int *cursor_wm)
1663{
1664 struct drm_crtc *crtc;
1665 unsigned long line_time_us;
1666 int hdisplay, htotal, pixel_size, clock;
1667 int line_count, line_size;
1668 int small, large;
1669 int entries;
1670
1671 if (!latency_ns) {
1672 *fbc_wm = *display_wm = *cursor_wm = 0;
1673 return false;
1674 }
1675
1676 crtc = intel_get_crtc_for_plane(dev, plane);
1677 hdisplay = crtc->mode.hdisplay;
1678 htotal = crtc->mode.htotal;
1679 clock = crtc->mode.clock;
1680 pixel_size = crtc->fb->bits_per_pixel / 8;
1681
1682 line_time_us = (htotal * 1000) / clock;
1683 line_count = (latency_ns / line_time_us + 1000) / 1000;
1684 line_size = hdisplay * pixel_size;
1685
1686 /* Use the minimum of the small and large buffer method for primary */
1687 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1688 large = line_count * line_size;
1689
1690 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1691 *display_wm = entries + display->guard_size;
1692
1693 /*
1694 * Spec says:
1695 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1696 */
1697 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1698
1699 /* calculate the self-refresh watermark for display cursor */
1700 entries = line_count * pixel_size * 64;
1701 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1702 *cursor_wm = entries + cursor->guard_size;
1703
1704 return ironlake_check_srwm(dev, level,
1705 *fbc_wm, *display_wm, *cursor_wm,
1706 display, cursor);
1707}
1708
/*
 * ironlake_update_wm - program Ironlake watermarks: WM0 for each pipe
 * and, for single-plane configurations, the WM1/WM2 self-refresh
 * levels.  WM3 is left at 0 (see comment at the end).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;	/* bitmask -> plane index */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
1791
/*
 * sandybridge_update_wm - recompute and program display FIFO watermarks (SNB)
 * @dev: drm device
 *
 * Programs the WM0 (normal-operation) watermarks for pipes A and B, then,
 * only when exactly one display plane is active and sprite scaling is off,
 * computes and programs the three self-refresh (LP) watermark levels.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with a valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		/* Read-modify-write so the sprite WM field is preserved */
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;	/* convert bitmask to pipe number */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
1893
1894static void ivybridge_update_wm(struct drm_device *dev)
1895{
1896 struct drm_i915_private *dev_priv = dev->dev_private;
1897 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1898 u32 val;
1899 int fbc_wm, plane_wm, cursor_wm;
1900 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1901 unsigned int enabled;
1902
1903 enabled = 0;
1904 if (g4x_compute_wm0(dev, 0,
1905 &sandybridge_display_wm_info, latency,
1906 &sandybridge_cursor_wm_info, latency,
1907 &plane_wm, &cursor_wm)) {
1908 val = I915_READ(WM0_PIPEA_ILK);
1909 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1910 I915_WRITE(WM0_PIPEA_ILK, val |
1911 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1912 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1913 " plane %d, " "cursor: %d\n",
1914 plane_wm, cursor_wm);
1915 enabled |= 1;
1916 }
1917
1918 if (g4x_compute_wm0(dev, 1,
1919 &sandybridge_display_wm_info, latency,
1920 &sandybridge_cursor_wm_info, latency,
1921 &plane_wm, &cursor_wm)) {
1922 val = I915_READ(WM0_PIPEB_ILK);
1923 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1924 I915_WRITE(WM0_PIPEB_ILK, val |
1925 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1926 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1927 " plane %d, cursor: %d\n",
1928 plane_wm, cursor_wm);
1929 enabled |= 2;
1930 }
1931
1932 if (g4x_compute_wm0(dev, 2,
1933 &sandybridge_display_wm_info, latency,
1934 &sandybridge_cursor_wm_info, latency,
1935 &plane_wm, &cursor_wm)) {
1936 val = I915_READ(WM0_PIPEC_IVB);
1937 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1938 I915_WRITE(WM0_PIPEC_IVB, val |
1939 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1940 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1941 " plane %d, cursor: %d\n",
1942 plane_wm, cursor_wm);
1943 enabled |= 3;
1944 }
1945
1946 /*
1947 * Calculate and update the self-refresh watermark only when one
1948 * display plane is used.
1949 *
1950 * SNB support 3 levels of watermark.
1951 *
1952 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
1953 * and disabled in the descending order
1954 *
1955 */
1956 I915_WRITE(WM3_LP_ILK, 0);
1957 I915_WRITE(WM2_LP_ILK, 0);
1958 I915_WRITE(WM1_LP_ILK, 0);
1959
1960 if (!single_plane_enabled(enabled) ||
1961 dev_priv->sprite_scaling_enabled)
1962 return;
1963 enabled = ffs(enabled) - 1;
1964
1965 /* WM1 */
1966 if (!ironlake_compute_srwm(dev, 1, enabled,
1967 SNB_READ_WM1_LATENCY() * 500,
1968 &sandybridge_display_srwm_info,
1969 &sandybridge_cursor_srwm_info,
1970 &fbc_wm, &plane_wm, &cursor_wm))
1971 return;
1972
1973 I915_WRITE(WM1_LP_ILK,
1974 WM1_LP_SR_EN |
1975 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1976 (fbc_wm << WM1_LP_FBC_SHIFT) |
1977 (plane_wm << WM1_LP_SR_SHIFT) |
1978 cursor_wm);
1979
1980 /* WM2 */
1981 if (!ironlake_compute_srwm(dev, 2, enabled,
1982 SNB_READ_WM2_LATENCY() * 500,
1983 &sandybridge_display_srwm_info,
1984 &sandybridge_cursor_srwm_info,
1985 &fbc_wm, &plane_wm, &cursor_wm))
1986 return;
1987
1988 I915_WRITE(WM2_LP_ILK,
1989 WM2_LP_EN |
1990 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1991 (fbc_wm << WM1_LP_FBC_SHIFT) |
1992 (plane_wm << WM1_LP_SR_SHIFT) |
1993 cursor_wm);
1994
1995 /* WM3, note we have to correct the cursor latency */
1996 if (!ironlake_compute_srwm(dev, 3, enabled,
1997 SNB_READ_WM3_LATENCY() * 500,
1998 &sandybridge_display_srwm_info,
1999 &sandybridge_cursor_srwm_info,
2000 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2001 !ironlake_compute_srwm(dev, 3, enabled,
2002 2 * SNB_READ_WM3_LATENCY() * 500,
2003 &sandybridge_display_srwm_info,
2004 &sandybridge_cursor_srwm_info,
2005 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2006 return;
2007
2008 I915_WRITE(WM3_LP_ILK,
2009 WM3_LP_EN |
2010 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2011 (fbc_wm << WM1_LP_FBC_SHIFT) |
2012 (plane_wm << WM1_LP_SR_SHIFT) |
2013 cursor_wm);
2014}
2015
2016static void
2017haswell_update_linetime_wm(struct drm_device *dev, int pipe,
2018 struct drm_display_mode *mode)
2019{
2020 struct drm_i915_private *dev_priv = dev->dev_private;
2021 u32 temp;
2022
2023 temp = I915_READ(PIPE_WM_LINETIME(pipe));
2024 temp &= ~PIPE_WM_LINETIME_MASK;
2025
2026 /* The WM are computed with base on how long it takes to fill a single
2027 * row at the given clock rate, multiplied by 8.
2028 * */
2029 temp |= PIPE_WM_LINETIME_TIME(
2030 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
2031
2032 /* IPS watermarks are only used by pipe A, and are ignored by
2033 * pipes B and C. They are calculated similarly to the common
2034 * linetime values, except that we are using CD clock frequency
2035 * in MHz instead of pixel rate for the division.
2036 *
2037 * This is a placeholder for the IPS watermark calculation code.
2038 */
2039
2040 I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
2041}
2042
2043static bool
2044sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2045 uint32_t sprite_width, int pixel_size,
2046 const struct intel_watermark_params *display,
2047 int display_latency_ns, int *sprite_wm)
2048{
2049 struct drm_crtc *crtc;
2050 int clock;
2051 int entries, tlb_miss;
2052
2053 crtc = intel_get_crtc_for_plane(dev, plane);
2054 if (!intel_crtc_active(crtc)) {
2055 *sprite_wm = display->guard_size;
2056 return false;
2057 }
2058
2059 clock = crtc->mode.clock;
2060
2061 /* Use the small buffer method to calculate the sprite watermark */
2062 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2063 tlb_miss = display->fifo_size*display->cacheline_size -
2064 sprite_width * 8;
2065 if (tlb_miss > 0)
2066 entries += tlb_miss;
2067 entries = DIV_ROUND_UP(entries, display->cacheline_size);
2068 *sprite_wm = entries + display->guard_size;
2069 if (*sprite_wm > (int)display->max_wm)
2070 *sprite_wm = display->max_wm;
2071
2072 return true;
2073}
2074
2075static bool
2076sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2077 uint32_t sprite_width, int pixel_size,
2078 const struct intel_watermark_params *display,
2079 int latency_ns, int *sprite_wm)
2080{
2081 struct drm_crtc *crtc;
2082 unsigned long line_time_us;
2083 int clock;
2084 int line_count, line_size;
2085 int small, large;
2086 int entries;
2087
2088 if (!latency_ns) {
2089 *sprite_wm = 0;
2090 return false;
2091 }
2092
2093 crtc = intel_get_crtc_for_plane(dev, plane);
2094 clock = crtc->mode.clock;
2095 if (!clock) {
2096 *sprite_wm = 0;
2097 return false;
2098 }
2099
2100 line_time_us = (sprite_width * 1000) / clock;
2101 if (!line_time_us) {
2102 *sprite_wm = 0;
2103 return false;
2104 }
2105
2106 line_count = (latency_ns / line_time_us + 1000) / 1000;
2107 line_size = sprite_width * pixel_size;
2108
2109 /* Use the minimum of the small and large buffer method for primary */
2110 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
2111 large = line_count * line_size;
2112
2113 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
2114 *sprite_wm = entries + display->guard_size;
2115
2116 return *sprite_wm > 0x3ff ? false : true;
2117}
2118
/*
 * sandybridge_update_sprite_wm - program sprite watermarks for a pipe
 * @dev: drm device
 * @pipe: pipe number (0..2; 2 only exists via WM0_PIPEC_IVB)
 * @sprite_width: sprite width in pixels
 * @pixel_size: bytes per pixel
 *
 * Programs the sprite field of the pipe's WM0 register, then the LP1
 * sprite self-refresh watermark, and on Ivybridge the LP2/LP3 sprite
 * watermarks as well.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	/* Read-modify-write so the plane/cursor WM fields are preserved */
	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
2197
2198/**
2199 * intel_update_watermarks - update FIFO watermark values based on current modes
2200 *
2201 * Calculate watermark values for the various WM regs based on current mode
2202 * and plane configuration.
2203 *
2204 * There are several cases to deal with here:
2205 * - normal (i.e. non-self-refresh)
2206 * - self-refresh (SR) mode
2207 * - lines are large relative to FIFO size (buffer can hold up to 2)
2208 * - lines are small relative to FIFO size (buffer can hold more than 2
2209 * lines), so need to account for TLB latency
2210 *
2211 * The normal calculation is:
2212 * watermark = dotclock * bytes per pixel * latency
2213 * where latency is platform & configuration dependent (we assume pessimal
2214 * values here).
2215 *
2216 * The SR calculation is:
2217 * watermark = (trunc(latency/line time)+1) * surface width *
2218 * bytes per pixel
2219 * where
2220 * line time = htotal / dotclock
2221 * surface width = hdisplay for normal plane and 64 for cursor
2222 * and latency is assumed to be high, as above.
2223 *
2224 * The final value programmed to the register should always be rounded up,
2225 * and include an extra 2 entries to account for clock crossings.
2226 *
2227 * We don't use the sprite, so we can ignore that. And on Crestline we have
2228 * to set the non-SR watermarks to 8.
2229 */
2230void intel_update_watermarks(struct drm_device *dev)
2231{
2232 struct drm_i915_private *dev_priv = dev->dev_private;
2233
2234 if (dev_priv->display.update_wm)
2235 dev_priv->display.update_wm(dev);
2236}
2237
2238void intel_update_linetime_watermarks(struct drm_device *dev,
2239 int pipe, struct drm_display_mode *mode)
2240{
2241 struct drm_i915_private *dev_priv = dev->dev_private;
2242
2243 if (dev_priv->display.update_linetime_wm)
2244 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2245}
2246
2247void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2248 uint32_t sprite_width, int pixel_size)
2249{
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2251
2252 if (dev_priv->display.update_sprite_wm)
2253 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2254 pixel_size);
2255}
2256
2257static struct drm_i915_gem_object *
2258intel_alloc_context_page(struct drm_device *dev)
2259{
2260 struct drm_i915_gem_object *ctx;
2261 int ret;
2262
2263 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2264
2265 ctx = i915_gem_alloc_object(dev, 4096);
2266 if (!ctx) {
2267 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2268 return NULL;
2269 }
2270
2271 ret = i915_gem_object_pin(ctx, 4096, true, false);
2272 if (ret) {
2273 DRM_ERROR("failed to pin power context: %d\n", ret);
2274 goto err_unref;
2275 }
2276
2277 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2278 if (ret) {
2279 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2280 goto err_unpin;
2281 }
2282
2283 return ctx;
2284
2285err_unpin:
2286 i915_gem_object_unpin(ctx);
2287err_unref:
2288 drm_gem_object_unreference(&ctx->base);
2289 mutex_unlock(&dev->struct_mutex);
2290 return NULL;
2291}
2292
2293/**
2294 * Lock protecting IPS related data structures
2295 */
2296DEFINE_SPINLOCK(mchdev_lock);
2297
2298/* Global for IPS driver to get at the current i915 device. Protected by
2299 * mchdev_lock. */
2300static struct drm_i915_private *i915_mch_dev;
2301
/*
 * ironlake_set_drps - request a new GPU frequency point via MEMSWCTL
 * @dev: drm device
 * @val: frequency point to select (see ironlake_enable_drps for the
 *	 fmin..fmax range this is expected to stay within)
 *
 * Caller must hold mchdev_lock. Returns false without doing anything if
 * the hardware is still processing a previous frequency-change command.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Issue the change-frequency command with the requested point... */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* ...then set the status bit to mark the command as submitted */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2325
/*
 * ironlake_enable_drps - enable software-controlled GPU frequency scaling
 * @dev: drm device
 *
 * Programs the render-standby evaluation intervals and thresholds, reads
 * the min/start/max frequency points from MEMMODECTL, switches the
 * hardware into software frequency-control mode and selects the starting
 * frequency. Takes mchdev_lock itself; interrupt delivery is set up
 * separately in ironlake_irq_postinstall.
 */
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Starting voltage for the fstart frequency point */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency selection over to software control */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the energy/busyness counters used by the IPS math
	 * (0x112e0/0x112e4/0x112e8/0x112f4 are undocumented counter regs;
	 * presumably power-meter counters - TODO confirm against the PRM) */
	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->ips.last_time2);

	spin_unlock_irq(&mchdev_lock);
}
2393
/*
 * ironlake_disable_drps - tear down software GPU frequency scaling
 * @dev: drm device
 *
 * Disables/acks the frequency-change interrupts, drops back to the
 * starting frequency recorded by ironlake_enable_drps, and marks the
 * last command as completed. Takes mchdev_lock itself.
 */
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
2419
2420/* There's a funny hw issue where the hw returns all 0 when reading from
2421 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2422 * ourselves, instead of doing a rmw cycle (which might result in us clearing
2423 * all limits and the gpu stuck at whatever frequency it is at atm).
2424 */
2425static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2426{
2427 u32 limits;
2428
2429 limits = 0;
2430
2431 if (*val >= dev_priv->rps.max_delay)
2432 *val = dev_priv->rps.max_delay;
2433 limits |= dev_priv->rps.max_delay << 24;
2434
2435 /* Only set the down limit when we've reached the lowest level to avoid
2436 * getting more interrupts, otherwise leave this clear. This prevents a
2437 * race in the hw when coming out of rc6: There's a tiny window where
2438 * the hw runs at the minimal clock before selecting the desired
2439 * frequency, if the down threshold expires in that window we will not
2440 * receive a down interrupt. */
2441 if (*val <= dev_priv->rps.min_delay) {
2442 *val = dev_priv->rps.min_delay;
2443 limits |= dev_priv->rps.min_delay << 16;
2444 }
2445
2446 return limits;
2447}
2448
/*
 * gen6_set_rps - request a new RPS frequency level
 * @dev: drm device
 * @val: frequency level to request (in 50MHz units, per the trace call
 *	 below); clamped to [rps.min_delay, rps.max_delay] by
 *	 gen6_rps_limits()
 *
 * Caller must hold dev_priv->rps.hw_lock. No-op if the level is already
 * current.
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits = gen6_rps_limits(dev_priv, &val);

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_delay);
	WARN_ON(val < dev_priv->rps.min_delay);

	if (val == dev_priv->rps.cur_delay)
		return;

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(val) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_delay = val;

	trace_intel_gpu_freq_change(val * 50);
}
2477
/*
 * gen6_disable_rps - disable RC6/RPS and quiesce PM interrupts
 * @dev: drm device
 *
 * Turns off RC6 and the turbo request, masks all PM interrupts, clears
 * the cached pm_iir state under rps.lock and acks anything still pending
 * in PMIIR.
 */
static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps.lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps.lock);

	/* Ack any interrupts that are still pending */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
2497
2498int intel_enable_rc6(const struct drm_device *dev)
2499{
2500 /* Respect the kernel parameter if it is set */
2501 if (i915_enable_rc6 >= 0)
2502 return i915_enable_rc6;
2503
2504 /* Disable RC6 on Ironlake */
2505 if (INTEL_INFO(dev)->gen == 5)
2506 return 0;
2507
2508 if (IS_HASWELL(dev)) {
2509 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
2510 return INTEL_RC6_ENABLE;
2511 }
2512
2513 /* snb/ivb have more than one rc6 state. */
2514 if (INTEL_INFO(dev)->gen == 6) {
2515 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2516 return INTEL_RC6_ENABLE;
2517 }
2518
2519 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2520 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2521}
2522
2523static void gen6_enable_rps(struct drm_device *dev)
2524{
2525 struct drm_i915_private *dev_priv = dev->dev_private;
2526 struct intel_ring_buffer *ring;
2527 u32 rp_state_cap;
2528 u32 gt_perf_status;
2529 u32 rc6vids, pcu_mbox, rc6_mask = 0;
2530 u32 gtfifodbg;
2531 int rc6_mode;
2532 int i, ret;
2533
2534 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2535
2536 /* Here begins a magic sequence of register writes to enable
2537 * auto-downclocking.
2538 *
2539 * Perhaps there might be some value in exposing these to
2540 * userspace...
2541 */
2542 I915_WRITE(GEN6_RC_STATE, 0);
2543
2544 /* Clear the DBG now so we don't confuse earlier errors */
2545 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2546 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2547 I915_WRITE(GTFIFODBG, gtfifodbg);
2548 }
2549
2550 gen6_gt_force_wake_get(dev_priv);
2551
2552 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2553 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2554
2555 /* In units of 100MHz */
2556 dev_priv->rps.max_delay = rp_state_cap & 0xff;
2557 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2558 dev_priv->rps.cur_delay = 0;
2559
2560 /* disable the counters and set deterministic thresholds */
2561 I915_WRITE(GEN6_RC_CONTROL, 0);
2562
2563 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2564 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2565 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2566 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2567 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2568
2569 for_each_ring(ring, dev_priv, i)
2570 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2571
2572 I915_WRITE(GEN6_RC_SLEEP, 0);
2573 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2574 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2575 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2576 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2577
2578 /* Check if we are enabling RC6 */
2579 rc6_mode = intel_enable_rc6(dev_priv->dev);
2580 if (rc6_mode & INTEL_RC6_ENABLE)
2581 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2582
2583 /* We don't use those on Haswell */
2584 if (!IS_HASWELL(dev)) {
2585 if (rc6_mode & INTEL_RC6p_ENABLE)
2586 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2587
2588 if (rc6_mode & INTEL_RC6pp_ENABLE)
2589 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2590 }
2591
2592 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2593 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
2594 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
2595 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
2596
2597 I915_WRITE(GEN6_RC_CONTROL,
2598 rc6_mask |
2599 GEN6_RC_CTL_EI_MODE(1) |
2600 GEN6_RC_CTL_HW_ENABLE);
2601
2602 I915_WRITE(GEN6_RPNSWREQ,
2603 GEN6_FREQUENCY(10) |
2604 GEN6_OFFSET(0) |
2605 GEN6_AGGRESSIVE_TURBO);
2606 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2607 GEN6_FREQUENCY(12));
2608
2609 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2610 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2611 dev_priv->rps.max_delay << 24 |
2612 dev_priv->rps.min_delay << 16);
2613
2614 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2615 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
2616 I915_WRITE(GEN6_RP_UP_EI, 66000);
2617 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
2618
2619 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2620 I915_WRITE(GEN6_RP_CONTROL,
2621 GEN6_RP_MEDIA_TURBO |
2622 GEN6_RP_MEDIA_HW_NORMAL_MODE |
2623 GEN6_RP_MEDIA_IS_GFX |
2624 GEN6_RP_ENABLE |
2625 GEN6_RP_UP_BUSY_AVG |
2626 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2627
2628 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2629 if (!ret) {
2630 pcu_mbox = 0;
2631 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2632 if (ret && pcu_mbox & (1<<31)) { /* OC supported */
2633 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2634 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2635 }
2636 } else {
2637 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2638 }
2639
2640 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
2641
2642 /* requires MSI enabled */
2643 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
2644 spin_lock_irq(&dev_priv->rps.lock);
2645 WARN_ON(dev_priv->rps.pm_iir != 0);
2646 I915_WRITE(GEN6_PMIMR, 0);
2647 spin_unlock_irq(&dev_priv->rps.lock);
2648 /* enable all PM interrupts */
2649 I915_WRITE(GEN6_PMINTRMSK, 0);
2650
2651 rc6vids = 0;
2652 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
2653 if (IS_GEN6(dev) && ret) {
2654 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
2655 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
2656 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
2657 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
2658 rc6vids &= 0xffff00;
2659 rc6vids |= GEN6_ENCODE_RC6_VID(450);
2660 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
2661 if (ret)
2662 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
2663 }
2664
2665 gen6_gt_force_wake_put(dev_priv);
2666}
2667
/*
 * gen6_update_ring_freq - program the GPU-to-ring frequency table
 * @dev: drm device
 *
 * For every GPU frequency between rps.max_delay and rps.min_delay, tells
 * the PCU which IA (CPU) reference frequency to pair with it, scaling the
 * IA frequency down as the GPU frequency drops.
 *
 * Caller must hold dev_priv->rps.hw_lock.
 */
static void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;	/* GPU levels below this use the floor IA freq */
	int gpu_freq;
	unsigned int ia_freq, max_ia_freq;
	int scaling_factor = 180;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		/* Round to the PCU's 100MHz granularity and shift into place */
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq | gpu_freq);
	}
}
2714
2715void ironlake_teardown_rc6(struct drm_device *dev)
2716{
2717 struct drm_i915_private *dev_priv = dev->dev_private;
2718
2719 if (dev_priv->ips.renderctx) {
2720 i915_gem_object_unpin(dev_priv->ips.renderctx);
2721 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
2722 dev_priv->ips.renderctx = NULL;
2723 }
2724
2725 if (dev_priv->ips.pwrctx) {
2726 i915_gem_object_unpin(dev_priv->ips.pwrctx);
2727 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
2728 dev_priv->ips.pwrctx = NULL;
2729 }
2730}
2731
/*
 * ironlake_disable_rc6 - turn off render-standby (RC6) if it is active
 * @dev: drm device
 *
 * If a power context is programmed, wakes the GPU out of RC6, waits for
 * render-standby to report "on", then clears the power context and the
 * software wake override.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
2749
2750static int ironlake_setup_rc6(struct drm_device *dev)
2751{
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753
2754 if (dev_priv->ips.renderctx == NULL)
2755 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
2756 if (!dev_priv->ips.renderctx)
2757 return -ENOMEM;
2758
2759 if (dev_priv->ips.pwrctx == NULL)
2760 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
2761 if (!dev_priv->ips.pwrctx) {
2762 ironlake_teardown_rc6(dev);
2763 return -ENOMEM;
2764 }
2765
2766 return 0;
2767}
2768
/*
 * ironlake_enable_rc6 - enable render-standby (RC6) on Ironlake
 * @dev: drm device
 *
 * Allocates the render/power context pages, emits a MI_SET_CONTEXT batch
 * pointing the hardware at the render context, waits for it to land, and
 * finally arms the power context so the GPU can power down the render
 * unit. Caller must hold dev->struct_mutex. Any failure tears the
 * contexts down again and leaves RC6 off.
 */
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	/* The waits below must not be interrupted by signals */
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	/* Arm the power context; clear the software RC6-exit override */
	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
2830
2831static unsigned long intel_pxfreq(u32 vidfreq)
2832{
2833 unsigned long freq;
2834 int div = (vidfreq & 0x3f0000) >> 16;
2835 int post = (vidfreq & 0x3000) >> 12;
2836 int pre = (vidfreq & 0x7);
2837
2838 if (!pre)
2839 return 0;
2840
2841 freq = ((div * 133333) / ((1<<post) * pre));
2842
2843 return freq;
2844}
2845
/*
 * Chipset power coefficients used by __i915_chipset_val(), selected by
 * matching @i against dev_priv->ips.c_m and @t against dev_priv->ips.r_t.
 * @m and @c are the slope/intercept of the power estimate.
 * NOTE(review): i/t presumably encode memory channel mode and DDR speed
 * (MT/s) — confirm against where ips.c_m / ips.r_t are initialized.
 */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
2859
/*
 * Estimate current chipset power draw from the DMI, DDR and CSI energy
 * counters using the cparams coefficient table.  Caches the result in
 * dev_priv->ips.chipset_power.  Caller must hold mchdev_lock.
 */
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	/* NOTE(review): wrap handling uses ~0UL (ULONG_MAX) although the
	 * summed counters are three u32s — confirm the intended counter
	 * width on 64-bit kernels. */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	/* Pick slope/intercept for the current memory configuration. */
	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	/* power = (m * counts/ms + c) / 10 */
	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
2913
2914unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2915{
2916 unsigned long val;
2917
2918 if (dev_priv->info->gen != 5)
2919 return 0;
2920
2921 spin_lock_irq(&mchdev_lock);
2922
2923 val = __i915_chipset_val(dev_priv);
2924
2925 spin_unlock_irq(&mchdev_lock);
2926
2927 return val;
2928}
2929
2930unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2931{
2932 unsigned long m, x, b;
2933 u32 tsfs;
2934
2935 tsfs = I915_READ(TSFS);
2936
2937 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2938 x = I915_READ8(TR1);
2939
2940 b = tsfs & TSFS_INTR_MASK;
2941
2942 return ((m * x) / 127) - b;
2943}
2944
/*
 * Translate a P-state voltage ID (PXVID, 7 bits) into a voltage via a
 * fixed lookup table; mobile parts use the vm column, desktop parts vd.
 * NOTE(review): callers mask pxvid to 0x7f, matching the 128 entries
 * here — confirm no unmasked caller exists.
 */
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
3085
/*
 * Refresh the cached graphics power figure (dev_priv->ips.gfx_power)
 * from the GFXEC energy counter and elapsed raw-monotonic time.
 * Caller must hold mchdev_lock.
 */
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->ips.last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	/* NOTE(review): wrap handling uses ~0UL although GFXEC is read as a
	 * 32-bit counter; on 64-bit kernels this overstates the wrapped
	 * delta — confirm intended behavior. */
	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
3120
3121void i915_update_gfx_val(struct drm_i915_private *dev_priv)
3122{
3123 if (dev_priv->info->gen != 5)
3124 return;
3125
3126 spin_lock_irq(&mchdev_lock);
3127
3128 __i915_update_gfx_val(dev_priv);
3129
3130 spin_unlock_irq(&mchdev_lock);
3131}
3132
/*
 * Estimate instantaneous graphics power from the current P-state's
 * voltage ID and the thermal value, add the counter-based average
 * (refreshed as a side effect), and return the sum.  Caller must hold
 * mchdev_lock.
 */
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	/* Voltage ID for the currently selected P-state (bits 30:24). */
	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	/* Refresh ips.gfx_power before adding it in. */
	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
3169
3170unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
3171{
3172 unsigned long val;
3173
3174 if (dev_priv->info->gen != 5)
3175 return 0;
3176
3177 spin_lock_irq(&mchdev_lock);
3178
3179 val = __i915_gfx_val(dev_priv);
3180
3181 spin_unlock_irq(&mchdev_lock);
3182
3183 return val;
3184}
3185
3186/**
3187 * i915_read_mch_val - return value for IPS use
3188 *
3189 * Calculate and return a value for the IPS driver to use when deciding whether
3190 * we have thermal and power headroom to increase CPU or GPU power budget.
3191 */
3192unsigned long i915_read_mch_val(void)
3193{
3194 struct drm_i915_private *dev_priv;
3195 unsigned long chipset_val, graphics_val, ret = 0;
3196
3197 spin_lock_irq(&mchdev_lock);
3198 if (!i915_mch_dev)
3199 goto out_unlock;
3200 dev_priv = i915_mch_dev;
3201
3202 chipset_val = __i915_chipset_val(dev_priv);
3203 graphics_val = __i915_gfx_val(dev_priv);
3204
3205 ret = chipset_val + graphics_val;
3206
3207out_unlock:
3208 spin_unlock_irq(&mchdev_lock);
3209
3210 return ret;
3211}
3212EXPORT_SYMBOL_GPL(i915_read_mch_val);
3213
3214/**
3215 * i915_gpu_raise - raise GPU frequency limit
3216 *
3217 * Raise the limit; IPS indicates we have thermal headroom.
3218 */
3219bool i915_gpu_raise(void)
3220{
3221 struct drm_i915_private *dev_priv;
3222 bool ret = true;
3223
3224 spin_lock_irq(&mchdev_lock);
3225 if (!i915_mch_dev) {
3226 ret = false;
3227 goto out_unlock;
3228 }
3229 dev_priv = i915_mch_dev;
3230
3231 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
3232 dev_priv->ips.max_delay--;
3233
3234out_unlock:
3235 spin_unlock_irq(&mchdev_lock);
3236
3237 return ret;
3238}
3239EXPORT_SYMBOL_GPL(i915_gpu_raise);
3240
3241/**
3242 * i915_gpu_lower - lower GPU frequency limit
3243 *
3244 * IPS indicates we're close to a thermal limit, so throttle back the GPU
3245 * frequency maximum.
3246 */
3247bool i915_gpu_lower(void)
3248{
3249 struct drm_i915_private *dev_priv;
3250 bool ret = true;
3251
3252 spin_lock_irq(&mchdev_lock);
3253 if (!i915_mch_dev) {
3254 ret = false;
3255 goto out_unlock;
3256 }
3257 dev_priv = i915_mch_dev;
3258
3259 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
3260 dev_priv->ips.max_delay++;
3261
3262out_unlock:
3263 spin_unlock_irq(&mchdev_lock);
3264
3265 return ret;
3266}
3267EXPORT_SYMBOL_GPL(i915_gpu_lower);
3268
3269/**
3270 * i915_gpu_busy - indicate GPU business to IPS
3271 *
3272 * Tell the IPS driver whether or not the GPU is busy.
3273 */
3274bool i915_gpu_busy(void)
3275{
3276 struct drm_i915_private *dev_priv;
3277 struct intel_ring_buffer *ring;
3278 bool ret = false;
3279 int i;
3280
3281 spin_lock_irq(&mchdev_lock);
3282 if (!i915_mch_dev)
3283 goto out_unlock;
3284 dev_priv = i915_mch_dev;
3285
3286 for_each_ring(ring, dev_priv, i)
3287 ret |= !list_empty(&ring->request_list);
3288
3289out_unlock:
3290 spin_unlock_irq(&mchdev_lock);
3291
3292 return ret;
3293}
3294EXPORT_SYMBOL_GPL(i915_gpu_busy);
3295
3296/**
3297 * i915_gpu_turbo_disable - disable graphics turbo
3298 *
3299 * Disable graphics turbo by resetting the max frequency and setting the
3300 * current frequency to the default.
3301 */
3302bool i915_gpu_turbo_disable(void)
3303{
3304 struct drm_i915_private *dev_priv;
3305 bool ret = true;
3306
3307 spin_lock_irq(&mchdev_lock);
3308 if (!i915_mch_dev) {
3309 ret = false;
3310 goto out_unlock;
3311 }
3312 dev_priv = i915_mch_dev;
3313
3314 dev_priv->ips.max_delay = dev_priv->ips.fstart;
3315
3316 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
3317 ret = false;
3318
3319out_unlock:
3320 spin_unlock_irq(&mchdev_lock);
3321
3322 return ret;
3323}
3324EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3325
3326/**
3327 * Tells the intel_ips driver that the i915 driver is now loaded, if
3328 * IPS got loaded first.
3329 *
3330 * This awkward dance is so that neither module has to depend on the
3331 * other in order for IPS to do the appropriate communication of
3332 * GPU turbo limits to i915.
3333 */
3334static void
3335ips_ping_for_i915_load(void)
3336{
3337 void (*link)(void);
3338
3339 link = symbol_get(ips_link_to_i915_driver);
3340 if (link) {
3341 link();
3342 symbol_put(ips_link_to_i915_driver);
3343 }
3344}
3345
/*
 * Publish this device to the intel-ips driver by setting i915_mch_dev
 * under mchdev_lock, then ping intel-ips in case it loaded first.
 */
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}
3356
/* Unpublish the device so intel-ips callbacks become no-ops. */
void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
/*
 * intel_init_emon - program the energy monitor (EMON) unit
 * @dev: drm device
 *
 * Loads the empirically-derived event/energy weight tables, derives
 * per-P-state weights from the PXVFREQ registers, enables PMON, and
 * caches the LCFUSE correction factor in dev_priv->ips.corr for the
 * power calculations in __i915_gfx_val().
 */
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* weight ~ vid^2 * freq, scaled into a byte */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
3433
/*
 * intel_disable_gt_powersave - tear down GPU power-saving features
 * @dev: drm device
 *
 * Ironlake-M: disables DRPS and RC6.  Gen6+ (except ValleyView):
 * cancels any pending deferred RPS enable, then disables RPS under
 * rps.hw_lock.
 */
void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
		/* Flush the deferred enable before tearing RPS down. */
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
		mutex_lock(&dev_priv->rps.hw_lock);
		gen6_disable_rps(dev);
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
3448
/*
 * Deferred-work body scheduled by intel_enable_gt_powersave(): enables
 * RPS and programs the ring frequency table under rps.hw_lock.
 */
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_enable_rps(dev);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
3461
/*
 * intel_enable_gt_powersave - enable GPU power-saving features
 * @dev: drm device
 *
 * Ironlake-M: enables DRPS, RC6 and the energy monitor immediately.
 * Gen6/7 (except ValleyView): defers RPS setup to a delayed work item
 * because PCU communication is slow.
 */
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 */
		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
				      round_jiffies_up_relative(HZ));
	}
}
3480
/* PCH (Ibex Peak) clock gating setup shared by ILK-era platforms. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
3492
/*
 * Ironlake clock gating and chicken-bit workarounds: FBC and memory
 * self-refresh enabling bits, LP watermark reset, pipelined-flush
 * disable, then PCH setup via ibx_init_clock_gating().
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	ibx_init_clock_gating(dev);
}
3558
/* PCH (Cougar Point) clock gating setup used by SNB/IVB platforms. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
	/* WADP0ClockGatingDisable */
	for_each_pipe(pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
3583
/*
 * Sandybridge clock gating and workaround programming: chicken-bit
 * workarounds, UCGCTL unit gating disables, FBC/self-refresh bits,
 * trickle-feed disable per pipe, then PCH setup via
 * cpt_init_clock_gating().
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * Also apply WaDisableVDSUnitClockGating and
	 * WaDisableRCPBUnitClockGating.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* The default value should be 0x200 according to docs, but the two
	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));

	cpt_init_clock_gating(dev);
}
3677
3678static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3679{
3680 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3681
3682 reg &= ~GEN7_FF_SCHED_MASK;
3683 reg |= GEN7_FF_TS_SCHED_HW;
3684 reg |= GEN7_FF_VS_SCHED_HW;
3685 reg |= GEN7_FF_DS_SCHED_HW;
3686
3687 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3688}
3689
/* PCH (LynxPoint) clock gating; only the LP variant needs the write. */
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);
}
3703
/*
 * Haswell clock gating and workaround programming, ending with PCH
 * setup via lpt_init_clock_gating().  Several workarounds are shared
 * with Ivy Bridge (see comments naming IVB explicitly).
 */
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	/* XXX: This is a workaround for early silicon revisions and should be
	 * removed later.
	 */
	I915_WRITE(WM_DBG,
			I915_READ(WM_DBG) |
			WM_DBG_DISALLOW_MULTIPLE_LP |
			WM_DBG_DISALLOW_SPRITE |
			WM_DBG_DISALLOW_MAXFIFO);

	lpt_init_clock_gating(dev);
}
3761
/*
 * Ivy Bridge clock gating and workaround programming, including GT1/GT2
 * register-offset variants, snoop-control setup, then PCH setup via
 * cpt_init_clock_gating().
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t snpcr;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
	else
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));


	/* WaForceL3Serialization */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* Set medium snoop behaviour in the MBC unit. */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	cpt_init_clock_gating(dev);
}
3858
3859static void valleyview_init_clock_gating(struct drm_device *dev)
3860{
3861 struct drm_i915_private *dev_priv = dev->dev_private;
3862 int pipe;
3863
3864 I915_WRITE(WM3_LP_ILK, 0);
3865 I915_WRITE(WM2_LP_ILK, 0);
3866 I915_WRITE(WM1_LP_ILK, 0);
3867
3868 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3869
3870 /* WaDisableEarlyCull */
3871 I915_WRITE(_3D_CHICKEN3,
3872 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3873
3874 /* WaDisableBackToBackFlipFix */
3875 I915_WRITE(IVB_CHICKEN3,
3876 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3877 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3878
3879 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3880 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3881
3882 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3883 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3884 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3885
3886 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3887 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
3888 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3889
3890 /* WaForceL3Serialization */
3891 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3892 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3893
3894 /* WaDisableDopClockGating */
3895 I915_WRITE(GEN7_ROW_CHICKEN2,
3896 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3897
3898 /* WaForceL3Serialization */
3899 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3900 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3901
3902 /* This is required by WaCatErrorRejectionIssue */
3903 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3904 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3905 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3906
3907 /* WaMbcDriverBootEnable */
3908 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3909 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3910
3911
3912 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3913 * gating disable must be set. Failure to set it results in
3914 * flickering pixels due to Z write ordering failures after
3915 * some amount of runtime in the Mesa "fire" demo, and Unigine
3916 * Sanctuary and Tropics, and apparently anything else with
3917 * alpha test or pixel discard.
3918 *
3919 * According to the spec, bit 11 (RCCUNIT) must also be set,
3920 * but we didn't debug actual testcases to find it out.
3921 *
3922 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3923 * This implements the WaDisableRCZUnitClockGating workaround.
3924 *
3925 * Also apply WaDisableVDSUnitClockGating and
3926 * WaDisableRCPBUnitClockGating.
3927 */
3928 I915_WRITE(GEN6_UCGCTL2,
3929 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3930 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
3931 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3932 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3933 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3934
3935 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
3936
3937 for_each_pipe(pipe) {
3938 I915_WRITE(DSPCNTR(pipe),
3939 I915_READ(DSPCNTR(pipe)) |
3940 DISPPLANE_TRICKLE_FEED_DISABLE);
3941 intel_flush_display_plane(dev_priv, pipe);
3942 }
3943
3944 I915_WRITE(CACHE_MODE_1,
3945 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3946
3947 /*
3948 * On ValleyView, the GUnit needs to signal the GT
3949 * when flip and other events complete. So enable
3950 * all the GUnit->GT interrupts here
3951 */
3952 I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
3953 PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
3954 SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
3955 PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
3956 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3957 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3958 PLANEA_FLIPDONE_INT_EN);
3959
3960 /*
3961 * WaDisableVLVClockGating_VBIIssue
 * Disable clock gating on the GCFG unit to prevent a delay
3963 * in the reporting of vblank events.
3964 */
3965 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
3966}
3967
3968static void g4x_init_clock_gating(struct drm_device *dev)
3969{
3970 struct drm_i915_private *dev_priv = dev->dev_private;
3971 uint32_t dspclk_gate;
3972
3973 I915_WRITE(RENCLK_GATE_D1, 0);
3974 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3975 GS_UNIT_CLOCK_GATE_DISABLE |
3976 CL_UNIT_CLOCK_GATE_DISABLE);
3977 I915_WRITE(RAMCLK_GATE_D, 0);
3978 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3979 OVRUNIT_CLOCK_GATE_DISABLE |
3980 OVCUNIT_CLOCK_GATE_DISABLE;
3981 if (IS_GM45(dev))
3982 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3983 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3984
3985 /* WaDisableRenderCachePipelinedFlush */
3986 I915_WRITE(CACHE_MODE_0,
3987 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3988}
3989
/*
 * Clock gating setup for 965GM (Crestline): keep the RCC unit ungated
 * and clear every other gating control touched here to zero.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the 16-bit write. */
	I915_WRITE16(DEUC, 0);
}
4000
/*
 * Clock gating setup for 946GZ/965G (Broadwater): disable render clock
 * gating for the RCZ, RCC, RCPB, ISC and FBC units; clear RENCLK_GATE_D2.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
4012
/*
 * Clock gating setup for gen3 parts: enable PLL-off in D3 plus GFX and
 * dot clock gating in D_STATE, with extra ECOSKPD tweaks.
 */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	/* Read-modify-write: preserve whatever else is set in D_STATE. */
	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	/* Pineview only: restrict ECO gating to CX (masked-bit write). */
	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
4028
/* Clock gating setup for 85x: only the SV unit is kept out of gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
4035
/* Clock gating setup for 830: keep the overlay request unit ungated. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
4042
/*
 * Dispatch to the platform-specific clock gating setup selected in
 * intel_init_pm().  Assumes init_clock_gating has been populated for
 * every supported platform before this is called.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}
4049
/* Starting with Haswell, we have different power wells for
 * different parts of the GPU. This attempts to enable them all.
 */
void intel_init_power_wells(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long power_wells[] = {
		HSW_PWR_WELL_CTL1,
		HSW_PWR_WELL_CTL2,
		HSW_PWR_WELL_CTL4
	};
	int i;

	/* Power wells in this form only exist on Haswell. */
	if (!IS_HASWELL(dev))
		return;

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
		int well = I915_READ(power_wells[i]);

		/* Only act on wells that are not already powered up. */
		if ((well & HSW_PWR_WELL_STATE) == 0) {
			/* NOTE(review): masking with & rather than setting
			 * with | looks suspicious for an "enable" write —
			 * confirm against the Haswell power well spec. */
			I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
			/* Poll up to 20ms for the well to report powered-up. */
			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
		}
	}

	mutex_unlock(&dev->struct_mutex);
}
4080
/* Set up chip specific power management-related functions.
 *
 * Populates dev_priv->display with the FBC, memory-frequency, watermark
 * and clock-gating vfuncs for the running platform.  The platform checks
 * below are ordered from most to least specific, so the order of the
 * if/else chain is semantically significant.
 */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Framebuffer compression hooks, per platform family. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			/* Watermarks require a valid SR latency; without it
			 * CxSR (and wm updates) are disabled entirely. */
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = ivybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		} else if (IS_HASWELL(dev)) {
			/* Haswell reuses the SNB watermark code plus a
			 * linetime watermark hook of its own. */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		/* Pineview CxSR depends on a known DDR/FSB latency pair. */
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}
}
4204
4205static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
4206{
4207 u32 gt_thread_status_mask;
4208
4209 if (IS_HASWELL(dev_priv->dev))
4210 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
4211 else
4212 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
4213
4214 /* w/a for a sporadic read returning 0 by waiting for the GT
4215 * thread to wake up.
4216 */
4217 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
4218 DRM_ERROR("GT thread status wait timed out\n");
4219}
4220
/* Drop any forcewake request by writing 0 to the legacy FORCEWAKE reg. */
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
4226
4227static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4228{
4229 u32 forcewake_ack;
4230
4231 if (IS_HASWELL(dev_priv->dev))
4232 forcewake_ack = FORCEWAKE_ACK_HSW;
4233 else
4234 forcewake_ack = FORCEWAKE_ACK;
4235
4236 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
4237 FORCEWAKE_ACK_TIMEOUT_MS))
4238 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4239
4240 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
4241 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4242
4243 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4244 FORCEWAKE_ACK_TIMEOUT_MS))
4245 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4246
4247 __gen6_gt_wait_for_thread_c0(dev_priv);
4248}
4249
/* Clear all multithreaded forcewake request bits (masked-bit write). */
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
4255
4256static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4257{
4258 u32 forcewake_ack;
4259
4260 if (IS_HASWELL(dev_priv->dev))
4261 forcewake_ack = FORCEWAKE_ACK_HSW;
4262 else
4263 forcewake_ack = FORCEWAKE_MT_ACK;
4264
4265 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
4266 FORCEWAKE_ACK_TIMEOUT_MS))
4267 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4268
4269 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4270 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4271
4272 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4273 FORCEWAKE_ACK_TIMEOUT_MS))
4274 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4275
4276 __gen6_gt_wait_for_thread_c0(dev_priv);
4277}
4278
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	/* Refcounted under gt_lock: only the first getter touches hw. */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->gt.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
4294
/*
 * Check GTFIFODBG for dropped MMIO accesses; warn and clear the error
 * bits if any are set.  Also serves as a posting read for callers.
 */
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;
	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	    "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
4303
/* Release the legacy forcewake request and check for dropped MMIO. */
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
4310
/* Release the multithreaded forcewake request (masked-bit write). */
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
4317
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	/* Refcounted under gt_lock: only the last putter touches hw. */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->gt.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
4330
/*
 * Ensure at least one free GT FIFO entry (beyond the reserved pool)
 * before an MMIO write.  Polls the free-entry count for up to ~5ms
 * when the cached count dips into the reserved range.
 *
 * Returns the number of timeouts encountered (0 on success); the
 * cached gt_fifo_count is decremented for the caller's pending write.
 */
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* Still no headroom after the full poll: report a timeout. */
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->gt_fifo_count = fifo;
	}
	dev_priv->gt_fifo_count--;

	return ret;
}
4350
/* Clear all ValleyView forcewake request bits (masked-bit write). */
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
}
4355
/*
 * ValleyView forcewake acquire: same clear-ack / request / wait-ack
 * dance as the gen6 variants, using the VLV-specific registers.
 *
 * NOTE(review): unlike the gen6/MT variants there is no posting read
 * after the FORCEWAKE_VLV write here — confirm whether one is needed.
 */
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}
4370
/* Release the ValleyView forcewake request and check for dropped MMIO. */
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
4377
/*
 * Reset the platform's forcewake state to "released".  IVB/HSW clear
 * both the legacy and the multithreaded registers since either may
 * have been used (e.g. by the BIOS).
 */
void intel_gt_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}
4390
/*
 * One-time GT setup: initialize the forcewake lock, reset forcewake
 * state, select the per-platform forcewake get/put vfuncs, and arm the
 * deferred RPS powersave worker.  Platforms not matched below (gen < 6,
 * minus VLV) leave the vfuncs unset.
 */
void intel_gt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->gt_lock);

	intel_gt_reset(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->gt.force_wake_get = vlv_force_wake_get;
		dev_priv->gt.force_wake_put = vlv_force_wake_put;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_GEN6(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
	}
	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
}
4412
/*
 * Read a value from the SNB+ PCODE mailbox.
 *
 * @mbox: mailbox command to issue
 * @val:  in/out — the value written to GEN6_PCODE_DATA before the
 *        command is issued, replaced by the value PCODE returns
 *
 * Caller must hold rps.hw_lock.  Returns 0 on success, -EAGAIN if the
 * mailbox is busy, -ETIMEDOUT if PCODE does not complete within 500ms.
 */
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Mailbox still busy from a previous command: bail out. */
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	/* Protocol: load DATA, then set READY | command to kick PCODE. */
	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* READY clears when PCODE has consumed the command. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
4436
/*
 * Write a value through the SNB+ PCODE mailbox.
 *
 * @mbox: mailbox command to issue
 * @val:  value placed in GEN6_PCODE_DATA for the command
 *
 * Caller must hold rps.hw_lock.  Returns 0 on success, -EAGAIN if the
 * mailbox is busy, -ETIMEDOUT if PCODE does not complete within 500ms.
 */
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Mailbox still busy from a previous command: bail out. */
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	/* Protocol: load DATA, then set READY | command to kick PCODE. */
	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* READY clears when PCODE has consumed the command. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae253e04c39..c30626ea9f9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -27,60 +27,39 @@
27 * 27 *
28 */ 28 */
29 29
30#include <drm/drmP.h> 30#include "drmP.h"
31#include "drm.h"
31#include "i915_drv.h" 32#include "i915_drv.h"
32#include <drm/i915_drm.h> 33#include "i915_drm.h"
33#include "i915_trace.h" 34#include "i915_trace.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35 36
36/*
37 * 965+ support PIPE_CONTROL commands, which provide finer grained control
38 * over cache flushing.
39 */
40struct pipe_control {
41 struct drm_i915_gem_object *obj;
42 volatile u32 *cpu_page;
43 u32 gtt_offset;
44};
45
46static inline int ring_space(struct intel_ring_buffer *ring) 37static inline int ring_space(struct intel_ring_buffer *ring)
47{ 38{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); 39 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
49 if (space < 0) 40 if (space < 0)
50 space += ring->size; 41 space += ring->size;
51 return space; 42 return space;
52} 43}
53 44
54static int 45static u32 i915_gem_get_seqno(struct drm_device *dev)
55gen2_render_ring_flush(struct intel_ring_buffer *ring,
56 u32 invalidate_domains,
57 u32 flush_domains)
58{ 46{
59 u32 cmd; 47 drm_i915_private_t *dev_priv = dev->dev_private;
60 int ret; 48 u32 seqno;
61 49
62 cmd = MI_FLUSH; 50 seqno = dev_priv->next_seqno;
63 if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
64 cmd |= MI_NO_WRITE_FLUSH;
65 51
66 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 52 /* reserve 0 for non-seqno */
67 cmd |= MI_READ_FLUSH; 53 if (++dev_priv->next_seqno == 0)
54 dev_priv->next_seqno = 1;
68 55
69 ret = intel_ring_begin(ring, 2); 56 return seqno;
70 if (ret)
71 return ret;
72
73 intel_ring_emit(ring, cmd);
74 intel_ring_emit(ring, MI_NOOP);
75 intel_ring_advance(ring);
76
77 return 0;
78} 57}
79 58
80static int 59static int
81gen4_render_ring_flush(struct intel_ring_buffer *ring, 60render_ring_flush(struct intel_ring_buffer *ring,
82 u32 invalidate_domains, 61 u32 invalidate_domains,
83 u32 flush_domains) 62 u32 flush_domains)
84{ 63{
85 struct drm_device *dev = ring->dev; 64 struct drm_device *dev = ring->dev;
86 u32 cmd; 65 u32 cmd;
@@ -115,8 +94,17 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
115 */ 94 */
116 95
117 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 96 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
118 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) 97 if ((invalidate_domains|flush_domains) &
98 I915_GEM_DOMAIN_RENDER)
119 cmd &= ~MI_NO_WRITE_FLUSH; 99 cmd &= ~MI_NO_WRITE_FLUSH;
100 if (INTEL_INFO(dev)->gen < 4) {
101 /*
102 * On the 965, the sampler cache always gets flushed
103 * and this bit is reserved.
104 */
105 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
106 cmd |= MI_READ_FLUSH;
107 }
120 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 108 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121 cmd |= MI_EXE_FLUSH; 109 cmd |= MI_EXE_FLUSH;
122 110
@@ -135,209 +123,6 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
135 return 0; 123 return 0;
136} 124}
137 125
138/**
139 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
140 * implementing two workarounds on gen6. From section 1.4.7.1
141 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142 *
143 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
144 * produced by non-pipelined state commands), software needs to first
145 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
146 * 0.
147 *
148 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
149 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150 *
151 * And the workaround for these two requires this workaround first:
152 *
153 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
154 * BEFORE the pipe-control with a post-sync op and no write-cache
155 * flushes.
156 *
157 * And this last workaround is tricky because of the requirements on
158 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
159 * volume 2 part 1:
160 *
161 * "1 of the following must also be set:
162 * - Render Target Cache Flush Enable ([12] of DW1)
163 * - Depth Cache Flush Enable ([0] of DW1)
164 * - Stall at Pixel Scoreboard ([1] of DW1)
165 * - Depth Stall ([13] of DW1)
166 * - Post-Sync Operation ([13] of DW1)
167 * - Notify Enable ([8] of DW1)"
168 *
169 * The cache flushes require the workaround flush that triggered this
170 * one, so we can't use it. Depth stall would trigger the same.
171 * Post-sync nonzero is what triggered this second workaround, so we
172 * can't use that one either. Notify enable is IRQs, which aren't
173 * really our business. That leaves only stall at scoreboard.
174 */
175static int
176intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177{
178 struct pipe_control *pc = ring->private;
179 u32 scratch_addr = pc->gtt_offset + 128;
180 int ret;
181
182
183 ret = intel_ring_begin(ring, 6);
184 if (ret)
185 return ret;
186
187 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
188 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
189 PIPE_CONTROL_STALL_AT_SCOREBOARD);
190 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
191 intel_ring_emit(ring, 0); /* low dword */
192 intel_ring_emit(ring, 0); /* high dword */
193 intel_ring_emit(ring, MI_NOOP);
194 intel_ring_advance(ring);
195
196 ret = intel_ring_begin(ring, 6);
197 if (ret)
198 return ret;
199
200 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
201 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
202 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
203 intel_ring_emit(ring, 0);
204 intel_ring_emit(ring, 0);
205 intel_ring_emit(ring, MI_NOOP);
206 intel_ring_advance(ring);
207
208 return 0;
209}
210
211static int
212gen6_render_ring_flush(struct intel_ring_buffer *ring,
213 u32 invalidate_domains, u32 flush_domains)
214{
215 u32 flags = 0;
216 struct pipe_control *pc = ring->private;
217 u32 scratch_addr = pc->gtt_offset + 128;
218 int ret;
219
220 /* Force SNB workarounds for PIPE_CONTROL flushes */
221 ret = intel_emit_post_sync_nonzero_flush(ring);
222 if (ret)
223 return ret;
224
225 /* Just flush everything. Experiments have shown that reducing the
226 * number of bits based on the write domains has little performance
227 * impact.
228 */
229 if (flush_domains) {
230 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
231 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
232 /*
233 * Ensure that any following seqno writes only happen
234 * when the render cache is indeed flushed.
235 */
236 flags |= PIPE_CONTROL_CS_STALL;
237 }
238 if (invalidate_domains) {
239 flags |= PIPE_CONTROL_TLB_INVALIDATE;
240 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
241 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
244 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
245 /*
246 * TLB invalidate requires a post-sync write.
247 */
248 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
249 }
250
251 ret = intel_ring_begin(ring, 4);
252 if (ret)
253 return ret;
254
255 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
256 intel_ring_emit(ring, flags);
257 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
258 intel_ring_emit(ring, 0);
259 intel_ring_advance(ring);
260
261 return 0;
262}
263
264static int
265gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
266{
267 int ret;
268
269 ret = intel_ring_begin(ring, 4);
270 if (ret)
271 return ret;
272
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
275 PIPE_CONTROL_STALL_AT_SCOREBOARD);
276 intel_ring_emit(ring, 0);
277 intel_ring_emit(ring, 0);
278 intel_ring_advance(ring);
279
280 return 0;
281}
282
283static int
284gen7_render_ring_flush(struct intel_ring_buffer *ring,
285 u32 invalidate_domains, u32 flush_domains)
286{
287 u32 flags = 0;
288 struct pipe_control *pc = ring->private;
289 u32 scratch_addr = pc->gtt_offset + 128;
290 int ret;
291
292 /*
293 * Ensure that any following seqno writes only happen when the render
294 * cache is indeed flushed.
295 *
296 * Workaround: 4th PIPE_CONTROL command (except the ones with only
297 * read-cache invalidate bits set) must have the CS_STALL bit set. We
298 * don't try to be clever and just set it unconditionally.
299 */
300 flags |= PIPE_CONTROL_CS_STALL;
301
302 /* Just flush everything. Experiments have shown that reducing the
303 * number of bits based on the write domains has little performance
304 * impact.
305 */
306 if (flush_domains) {
307 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
308 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
309 }
310 if (invalidate_domains) {
311 flags |= PIPE_CONTROL_TLB_INVALIDATE;
312 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
313 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
314 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
315 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
317 /*
318 * TLB invalidate requires a post-sync write.
319 */
320 flags |= PIPE_CONTROL_QW_WRITE;
321
322 /* Workaround: we must issue a pipe_control with CS-stall bit
323 * set before a pipe_control command that has the state cache
324 * invalidate bit set. */
325 gen7_render_ring_cs_stall_wa(ring);
326 }
327
328 ret = intel_ring_begin(ring, 4);
329 if (ret)
330 return ret;
331
332 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
333 intel_ring_emit(ring, flags);
334 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
335 intel_ring_emit(ring, 0);
336 intel_ring_advance(ring);
337
338 return 0;
339}
340
341static void ring_write_tail(struct intel_ring_buffer *ring, 126static void ring_write_tail(struct intel_ring_buffer *ring,
342 u32 value) 127 u32 value)
343{ 128{
@@ -356,20 +141,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
356 141
357static int init_ring_common(struct intel_ring_buffer *ring) 142static int init_ring_common(struct intel_ring_buffer *ring)
358{ 143{
359 struct drm_device *dev = ring->dev; 144 drm_i915_private_t *dev_priv = ring->dev->dev_private;
360 drm_i915_private_t *dev_priv = dev->dev_private;
361 struct drm_i915_gem_object *obj = ring->obj; 145 struct drm_i915_gem_object *obj = ring->obj;
362 int ret = 0;
363 u32 head; 146 u32 head;
364 147
365 if (HAS_FORCE_WAKE(dev))
366 gen6_gt_force_wake_get(dev_priv);
367
368 /* Stop the ring if it's running. */ 148 /* Stop the ring if it's running. */
369 I915_WRITE_CTL(ring, 0); 149 I915_WRITE_CTL(ring, 0);
370 I915_WRITE_HEAD(ring, 0); 150 I915_WRITE_HEAD(ring, 0);
371 ring->write_tail(ring, 0); 151 ring->write_tail(ring, 0);
372 152
153 /* Initialize the ring. */
154 I915_WRITE_START(ring, obj->gtt_offset);
373 head = I915_READ_HEAD(ring) & HEAD_ADDR; 155 head = I915_READ_HEAD(ring) & HEAD_ADDR;
374 156
375 /* G45 ring initialization fails to reset head to zero */ 157 /* G45 ring initialization fails to reset head to zero */
@@ -395,19 +177,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
395 } 177 }
396 } 178 }
397 179
398 /* Initialize the ring. This must happen _after_ we've cleared the ring
399 * registers with the above sequence (the readback of the HEAD registers
400 * also enforces ordering), otherwise the hw might lose the new ring
401 * register values. */
402 I915_WRITE_START(ring, obj->gtt_offset);
403 I915_WRITE_CTL(ring, 180 I915_WRITE_CTL(ring,
404 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 181 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
405 | RING_VALID); 182 | RING_REPORT_64K | RING_VALID);
406 183
407 /* If the head is still not zero, the ring is dead */ 184 /* If the head is still not zero, the ring is dead */
408 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 185 if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
409 I915_READ_START(ring) == obj->gtt_offset && 186 I915_READ_START(ring) != obj->gtt_offset ||
410 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 187 (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
411 DRM_ERROR("%s initialization failed " 188 DRM_ERROR("%s initialization failed "
412 "ctl %08x head %08x tail %08x start %08x\n", 189 "ctl %08x head %08x tail %08x start %08x\n",
413 ring->name, 190 ring->name,
@@ -415,8 +192,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
415 I915_READ_HEAD(ring), 192 I915_READ_HEAD(ring),
416 I915_READ_TAIL(ring), 193 I915_READ_TAIL(ring),
417 I915_READ_START(ring)); 194 I915_READ_START(ring));
418 ret = -EIO; 195 return -EIO;
419 goto out;
420 } 196 }
421 197
422 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 198 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -425,16 +201,21 @@ static int init_ring_common(struct intel_ring_buffer *ring)
425 ring->head = I915_READ_HEAD(ring); 201 ring->head = I915_READ_HEAD(ring);
426 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 202 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
427 ring->space = ring_space(ring); 203 ring->space = ring_space(ring);
428 ring->last_retired_head = -1;
429 } 204 }
430 205
431out: 206 return 0;
432 if (HAS_FORCE_WAKE(dev))
433 gen6_gt_force_wake_put(dev_priv);
434
435 return ret;
436} 207}
437 208
209/*
210 * 965+ support PIPE_CONTROL commands, which provide finer grained control
211 * over cache flushing.
212 */
213struct pipe_control {
214 struct drm_i915_gem_object *obj;
215 volatile u32 *cpu_page;
216 u32 gtt_offset;
217};
218
438static int 219static int
439init_pipe_control(struct intel_ring_buffer *ring) 220init_pipe_control(struct intel_ring_buffer *ring)
440{ 221{
@@ -458,12 +239,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
458 239
459 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 240 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
460 241
461 ret = i915_gem_object_pin(obj, 4096, true, false); 242 ret = i915_gem_object_pin(obj, 4096, true);
462 if (ret) 243 if (ret)
463 goto err_unref; 244 goto err_unref;
464 245
465 pc->gtt_offset = obj->gtt_offset; 246 pc->gtt_offset = obj->gtt_offset;
466 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 247 pc->cpu_page = kmap(obj->pages[0]);
467 if (pc->cpu_page == NULL) 248 if (pc->cpu_page == NULL)
468 goto err_unpin; 249 goto err_unpin;
469 250
@@ -490,8 +271,7 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
490 return; 271 return;
491 272
492 obj = pc->obj; 273 obj = pc->obj;
493 274 kunmap(obj->pages[0]);
494 kunmap(sg_page(obj->pages->sgl));
495 i915_gem_object_unpin(obj); 275 i915_gem_object_unpin(obj);
496 drm_gem_object_unreference(&obj->base); 276 drm_gem_object_unreference(&obj->base);
497 277
@@ -506,153 +286,123 @@ static int init_render_ring(struct intel_ring_buffer *ring)
506 int ret = init_ring_common(ring); 286 int ret = init_ring_common(ring);
507 287
508 if (INTEL_INFO(dev)->gen > 3) { 288 if (INTEL_INFO(dev)->gen > 3) {
509 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 289 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
290 if (IS_GEN6(dev) || IS_GEN7(dev))
291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292 I915_WRITE(MI_MODE, mode);
510 if (IS_GEN7(dev)) 293 if (IS_GEN7(dev))
511 I915_WRITE(GFX_MODE_GEN7, 294 I915_WRITE(GFX_MODE_GEN7,
512 _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 295 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
513 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 296 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
514 } 297 }
515 298
516 if (INTEL_INFO(dev)->gen >= 5) { 299 if (INTEL_INFO(dev)->gen >= 6) {
300 } else if (IS_GEN5(dev)) {
517 ret = init_pipe_control(ring); 301 ret = init_pipe_control(ring);
518 if (ret) 302 if (ret)
519 return ret; 303 return ret;
520 } 304 }
521 305
522 if (IS_GEN6(dev)) {
523 /* From the Sandybridge PRM, volume 1 part 3, page 24:
524 * "If this bit is set, STCunit will have LRA as replacement
525 * policy. [...] This bit must be reset. LRA replacement
526 * policy is not supported."
527 */
528 I915_WRITE(CACHE_MODE_0,
529 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
530
531 /* This is not explicitly set for GEN6, so read the register.
532 * see intel_ring_mi_set_context() for why we care.
533 * TODO: consider explicitly setting the bit for GEN5
534 */
535 ring->itlb_before_ctx_switch =
536 !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
537 }
538
539 if (INTEL_INFO(dev)->gen >= 6)
540 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
541
542 if (HAS_L3_GPU_CACHE(dev))
543 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
544
545 return ret; 306 return ret;
546} 307}
547 308
548static void render_ring_cleanup(struct intel_ring_buffer *ring) 309static void render_ring_cleanup(struct intel_ring_buffer *ring)
549{ 310{
550 struct drm_device *dev = ring->dev;
551
552 if (!ring->private) 311 if (!ring->private)
553 return; 312 return;
554 313
555 if (HAS_BROKEN_CS_TLB(dev))
556 drm_gem_object_unreference(to_gem_object(ring->private));
557
558 cleanup_pipe_control(ring); 314 cleanup_pipe_control(ring);
559} 315}
560 316
561static void 317static void
562update_mboxes(struct intel_ring_buffer *ring, 318update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
563 u32 mmio_offset)
564{ 319{
565 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 320 struct drm_device *dev = ring->dev;
566 intel_ring_emit(ring, mmio_offset); 321 struct drm_i915_private *dev_priv = dev->dev_private;
567 intel_ring_emit(ring, ring->outstanding_lazy_request); 322 int id;
323
324 /*
325 * cs -> 1 = vcs, 0 = bcs
326 * vcs -> 1 = bcs, 0 = cs,
327 * bcs -> 1 = cs, 0 = vcs.
328 */
329 id = ring - dev_priv->ring;
330 id += 2 - i;
331 id %= 3;
332
333 intel_ring_emit(ring,
334 MI_SEMAPHORE_MBOX |
335 MI_SEMAPHORE_REGISTER |
336 MI_SEMAPHORE_UPDATE);
337 intel_ring_emit(ring, seqno);
338 intel_ring_emit(ring,
339 RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
568} 340}
569 341
570/**
571 * gen6_add_request - Update the semaphore mailbox registers
572 *
573 * @ring - ring that is adding a request
574 * @seqno - return seqno stuck into the ring
575 *
576 * Update the mailbox registers in the *other* rings with the current seqno.
577 * This acts like a signal in the canonical semaphore.
578 */
579static int 342static int
580gen6_add_request(struct intel_ring_buffer *ring) 343gen6_add_request(struct intel_ring_buffer *ring,
344 u32 *result)
581{ 345{
582 u32 mbox1_reg; 346 u32 seqno;
583 u32 mbox2_reg;
584 int ret; 347 int ret;
585 348
586 ret = intel_ring_begin(ring, 10); 349 ret = intel_ring_begin(ring, 10);
587 if (ret) 350 if (ret)
588 return ret; 351 return ret;
589 352
590 mbox1_reg = ring->signal_mbox[0]; 353 seqno = i915_gem_get_seqno(ring->dev);
591 mbox2_reg = ring->signal_mbox[1]; 354 update_semaphore(ring, 0, seqno);
355 update_semaphore(ring, 1, seqno);
592 356
593 update_mboxes(ring, mbox1_reg);
594 update_mboxes(ring, mbox2_reg);
595 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 357 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
596 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 358 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
597 intel_ring_emit(ring, ring->outstanding_lazy_request); 359 intel_ring_emit(ring, seqno);
598 intel_ring_emit(ring, MI_USER_INTERRUPT); 360 intel_ring_emit(ring, MI_USER_INTERRUPT);
599 intel_ring_advance(ring); 361 intel_ring_advance(ring);
600 362
363 *result = seqno;
601 return 0; 364 return 0;
602} 365}
603 366
604/** 367int
605 * intel_ring_sync - sync the waiter to the signaller on seqno 368intel_ring_sync(struct intel_ring_buffer *ring,
606 * 369 struct intel_ring_buffer *to,
607 * @waiter - ring that is waiting 370 u32 seqno)
608 * @signaller - ring which has, or will signal
609 * @seqno - seqno which the waiter will block on
610 */
611static int
612gen6_ring_sync(struct intel_ring_buffer *waiter,
613 struct intel_ring_buffer *signaller,
614 u32 seqno)
615{ 371{
616 int ret; 372 int ret;
617 u32 dw1 = MI_SEMAPHORE_MBOX |
618 MI_SEMAPHORE_COMPARE |
619 MI_SEMAPHORE_REGISTER;
620 373
621 /* Throughout all of the GEM code, seqno passed implies our current 374 ret = intel_ring_begin(ring, 4);
622 * seqno is >= the last seqno executed. However for hardware the
623 * comparison is strictly greater than.
624 */
625 seqno -= 1;
626
627 WARN_ON(signaller->semaphore_register[waiter->id] ==
628 MI_SEMAPHORE_SYNC_INVALID);
629
630 ret = intel_ring_begin(waiter, 4);
631 if (ret) 375 if (ret)
632 return ret; 376 return ret;
633 377
634 intel_ring_emit(waiter, 378 intel_ring_emit(ring,
635 dw1 | signaller->semaphore_register[waiter->id]); 379 MI_SEMAPHORE_MBOX |
636 intel_ring_emit(waiter, seqno); 380 MI_SEMAPHORE_REGISTER |
637 intel_ring_emit(waiter, 0); 381 intel_ring_sync_index(ring, to) << 17 |
638 intel_ring_emit(waiter, MI_NOOP); 382 MI_SEMAPHORE_COMPARE);
639 intel_ring_advance(waiter); 383 intel_ring_emit(ring, seqno);
384 intel_ring_emit(ring, 0);
385 intel_ring_emit(ring, MI_NOOP);
386 intel_ring_advance(ring);
640 387
641 return 0; 388 return 0;
642} 389}
643 390
644#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 391#define PIPE_CONTROL_FLUSH(ring__, addr__) \
645do { \ 392do { \
646 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ 393 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
647 PIPE_CONTROL_DEPTH_STALL); \ 394 PIPE_CONTROL_DEPTH_STALL | 2); \
648 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ 395 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
649 intel_ring_emit(ring__, 0); \ 396 intel_ring_emit(ring__, 0); \
650 intel_ring_emit(ring__, 0); \ 397 intel_ring_emit(ring__, 0); \
651} while (0) 398} while (0)
652 399
653static int 400static int
654pc_render_add_request(struct intel_ring_buffer *ring) 401pc_render_add_request(struct intel_ring_buffer *ring,
402 u32 *result)
655{ 403{
404 struct drm_device *dev = ring->dev;
405 u32 seqno = i915_gem_get_seqno(dev);
656 struct pipe_control *pc = ring->private; 406 struct pipe_control *pc = ring->private;
657 u32 scratch_addr = pc->gtt_offset + 128; 407 u32 scratch_addr = pc->gtt_offset + 128;
658 int ret; 408 int ret;
@@ -669,11 +419,10 @@ pc_render_add_request(struct intel_ring_buffer *ring)
669 if (ret) 419 if (ret)
670 return ret; 420 return ret;
671 421
672 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 422 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
673 PIPE_CONTROL_WRITE_FLUSH | 423 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
674 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
675 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 424 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
676 intel_ring_emit(ring, ring->outstanding_lazy_request); 425 intel_ring_emit(ring, seqno);
677 intel_ring_emit(ring, 0); 426 intel_ring_emit(ring, 0);
678 PIPE_CONTROL_FLUSH(ring, scratch_addr); 427 PIPE_CONTROL_FLUSH(ring, scratch_addr);
679 scratch_addr += 128; /* write to separate cachelines */ 428 scratch_addr += 128; /* write to separate cachelines */
@@ -686,152 +435,123 @@ pc_render_add_request(struct intel_ring_buffer *ring)
686 PIPE_CONTROL_FLUSH(ring, scratch_addr); 435 PIPE_CONTROL_FLUSH(ring, scratch_addr);
687 scratch_addr += 128; 436 scratch_addr += 128;
688 PIPE_CONTROL_FLUSH(ring, scratch_addr); 437 PIPE_CONTROL_FLUSH(ring, scratch_addr);
689 438 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
690 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 439 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
691 PIPE_CONTROL_WRITE_FLUSH |
692 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
693 PIPE_CONTROL_NOTIFY); 440 PIPE_CONTROL_NOTIFY);
694 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 441 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
695 intel_ring_emit(ring, ring->outstanding_lazy_request); 442 intel_ring_emit(ring, seqno);
696 intel_ring_emit(ring, 0); 443 intel_ring_emit(ring, 0);
697 intel_ring_advance(ring); 444 intel_ring_advance(ring);
698 445
446 *result = seqno;
699 return 0; 447 return 0;
700} 448}
701 449
702static u32 450static int
703gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 451render_ring_add_request(struct intel_ring_buffer *ring,
452 u32 *result)
704{ 453{
705 /* Workaround to force correct ordering between irq and seqno writes on 454 struct drm_device *dev = ring->dev;
706 * ivb (and maybe also on snb) by reading from a CS register (like 455 u32 seqno = i915_gem_get_seqno(dev);
707 * ACTHD) before reading the status page. */ 456 int ret;
708 if (!lazy_coherency) 457
709 intel_ring_get_active_head(ring); 458 ret = intel_ring_begin(ring, 4);
710 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 459 if (ret)
460 return ret;
461
462 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
463 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
464 intel_ring_emit(ring, seqno);
465 intel_ring_emit(ring, MI_USER_INTERRUPT);
466 intel_ring_advance(ring);
467
468 *result = seqno;
469 return 0;
711} 470}
712 471
713static u32 472static u32
714ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 473ring_get_seqno(struct intel_ring_buffer *ring)
715{ 474{
716 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 475 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
717} 476}
718 477
719static u32 478static u32
720pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 479pc_render_get_seqno(struct intel_ring_buffer *ring)
721{ 480{
722 struct pipe_control *pc = ring->private; 481 struct pipe_control *pc = ring->private;
723 return pc->cpu_page[0]; 482 return pc->cpu_page[0];
724} 483}
725 484
726static bool 485static void
727gen5_ring_get_irq(struct intel_ring_buffer *ring) 486ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
728{ 487{
729 struct drm_device *dev = ring->dev; 488 dev_priv->gt_irq_mask &= ~mask;
730 drm_i915_private_t *dev_priv = dev->dev_private; 489 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
731 unsigned long flags; 490 POSTING_READ(GTIMR);
732
733 if (!dev->irq_enabled)
734 return false;
735
736 spin_lock_irqsave(&dev_priv->irq_lock, flags);
737 if (ring->irq_refcount++ == 0) {
738 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
739 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
740 POSTING_READ(GTIMR);
741 }
742 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
743
744 return true;
745} 491}
746 492
747static void 493static void
748gen5_ring_put_irq(struct intel_ring_buffer *ring) 494ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
749{ 495{
750 struct drm_device *dev = ring->dev; 496 dev_priv->gt_irq_mask |= mask;
751 drm_i915_private_t *dev_priv = dev->dev_private; 497 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
752 unsigned long flags; 498 POSTING_READ(GTIMR);
753
754 spin_lock_irqsave(&dev_priv->irq_lock, flags);
755 if (--ring->irq_refcount == 0) {
756 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
757 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
758 POSTING_READ(GTIMR);
759 }
760 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
761} 499}
762 500
763static bool 501static void
764i9xx_ring_get_irq(struct intel_ring_buffer *ring) 502i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
765{ 503{
766 struct drm_device *dev = ring->dev; 504 dev_priv->irq_mask &= ~mask;
767 drm_i915_private_t *dev_priv = dev->dev_private; 505 I915_WRITE(IMR, dev_priv->irq_mask);
768 unsigned long flags; 506 POSTING_READ(IMR);
769
770 if (!dev->irq_enabled)
771 return false;
772
773 spin_lock_irqsave(&dev_priv->irq_lock, flags);
774 if (ring->irq_refcount++ == 0) {
775 dev_priv->irq_mask &= ~ring->irq_enable_mask;
776 I915_WRITE(IMR, dev_priv->irq_mask);
777 POSTING_READ(IMR);
778 }
779 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
780
781 return true;
782} 507}
783 508
784static void 509static void
785i9xx_ring_put_irq(struct intel_ring_buffer *ring) 510i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
786{ 511{
787 struct drm_device *dev = ring->dev; 512 dev_priv->irq_mask |= mask;
788 drm_i915_private_t *dev_priv = dev->dev_private; 513 I915_WRITE(IMR, dev_priv->irq_mask);
789 unsigned long flags; 514 POSTING_READ(IMR);
790
791 spin_lock_irqsave(&dev_priv->irq_lock, flags);
792 if (--ring->irq_refcount == 0) {
793 dev_priv->irq_mask |= ring->irq_enable_mask;
794 I915_WRITE(IMR, dev_priv->irq_mask);
795 POSTING_READ(IMR);
796 }
797 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
798} 515}
799 516
800static bool 517static bool
801i8xx_ring_get_irq(struct intel_ring_buffer *ring) 518render_ring_get_irq(struct intel_ring_buffer *ring)
802{ 519{
803 struct drm_device *dev = ring->dev; 520 struct drm_device *dev = ring->dev;
804 drm_i915_private_t *dev_priv = dev->dev_private; 521 drm_i915_private_t *dev_priv = dev->dev_private;
805 unsigned long flags;
806 522
807 if (!dev->irq_enabled) 523 if (!dev->irq_enabled)
808 return false; 524 return false;
809 525
810 spin_lock_irqsave(&dev_priv->irq_lock, flags); 526 spin_lock(&ring->irq_lock);
811 if (ring->irq_refcount++ == 0) { 527 if (ring->irq_refcount++ == 0) {
812 dev_priv->irq_mask &= ~ring->irq_enable_mask; 528 if (HAS_PCH_SPLIT(dev))
813 I915_WRITE16(IMR, dev_priv->irq_mask); 529 ironlake_enable_irq(dev_priv,
814 POSTING_READ16(IMR); 530 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
531 else
532 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
815 } 533 }
816 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 534 spin_unlock(&ring->irq_lock);
817 535
818 return true; 536 return true;
819} 537}
820 538
821static void 539static void
822i8xx_ring_put_irq(struct intel_ring_buffer *ring) 540render_ring_put_irq(struct intel_ring_buffer *ring)
823{ 541{
824 struct drm_device *dev = ring->dev; 542 struct drm_device *dev = ring->dev;
825 drm_i915_private_t *dev_priv = dev->dev_private; 543 drm_i915_private_t *dev_priv = dev->dev_private;
826 unsigned long flags;
827 544
828 spin_lock_irqsave(&dev_priv->irq_lock, flags); 545 spin_lock(&ring->irq_lock);
829 if (--ring->irq_refcount == 0) { 546 if (--ring->irq_refcount == 0) {
830 dev_priv->irq_mask |= ring->irq_enable_mask; 547 if (HAS_PCH_SPLIT(dev))
831 I915_WRITE16(IMR, dev_priv->irq_mask); 548 ironlake_disable_irq(dev_priv,
832 POSTING_READ16(IMR); 549 GT_USER_INTERRUPT |
550 GT_PIPE_NOTIFY);
551 else
552 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
833 } 553 }
834 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 554 spin_unlock(&ring->irq_lock);
835} 555}
836 556
837void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 557void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -845,13 +565,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
845 */ 565 */
846 if (IS_GEN7(dev)) { 566 if (IS_GEN7(dev)) {
847 switch (ring->id) { 567 switch (ring->id) {
848 case RCS: 568 case RING_RENDER:
849 mmio = RENDER_HWS_PGA_GEN7; 569 mmio = RENDER_HWS_PGA_GEN7;
850 break; 570 break;
851 case BCS: 571 case RING_BLT:
852 mmio = BLT_HWS_PGA_GEN7; 572 mmio = BLT_HWS_PGA_GEN7;
853 break; 573 break;
854 case VCS: 574 case RING_BSD:
855 mmio = BSD_HWS_PGA_GEN7; 575 mmio = BSD_HWS_PGA_GEN7;
856 break; 576 break;
857 } 577 }
@@ -883,80 +603,101 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
883} 603}
884 604
885static int 605static int
886i9xx_add_request(struct intel_ring_buffer *ring) 606ring_add_request(struct intel_ring_buffer *ring,
607 u32 *result)
887{ 608{
609 u32 seqno;
888 int ret; 610 int ret;
889 611
890 ret = intel_ring_begin(ring, 4); 612 ret = intel_ring_begin(ring, 4);
891 if (ret) 613 if (ret)
892 return ret; 614 return ret;
893 615
616 seqno = i915_gem_get_seqno(ring->dev);
617
894 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 618 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
895 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 619 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
896 intel_ring_emit(ring, ring->outstanding_lazy_request); 620 intel_ring_emit(ring, seqno);
897 intel_ring_emit(ring, MI_USER_INTERRUPT); 621 intel_ring_emit(ring, MI_USER_INTERRUPT);
898 intel_ring_advance(ring); 622 intel_ring_advance(ring);
899 623
624 *result = seqno;
900 return 0; 625 return 0;
901} 626}
902 627
903static bool 628static bool
904gen6_ring_get_irq(struct intel_ring_buffer *ring) 629gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
905{ 630{
906 struct drm_device *dev = ring->dev; 631 struct drm_device *dev = ring->dev;
907 drm_i915_private_t *dev_priv = dev->dev_private; 632 drm_i915_private_t *dev_priv = dev->dev_private;
908 unsigned long flags;
909 633
910 if (!dev->irq_enabled) 634 if (!dev->irq_enabled)
911 return false; 635 return false;
912 636
913 /* It looks like we need to prevent the gt from suspending while waiting 637 spin_lock(&ring->irq_lock);
914 * for an notifiy irq, otherwise irqs seem to get lost on at least the
915 * blt/bsd rings on ivb. */
916 gen6_gt_force_wake_get(dev_priv);
917
918 spin_lock_irqsave(&dev_priv->irq_lock, flags);
919 if (ring->irq_refcount++ == 0) { 638 if (ring->irq_refcount++ == 0) {
920 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 639 ring->irq_mask &= ~rflag;
921 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | 640 I915_WRITE_IMR(ring, ring->irq_mask);
922 GEN6_RENDER_L3_PARITY_ERROR)); 641 ironlake_enable_irq(dev_priv, gflag);
923 else
924 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
925 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
926 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
927 POSTING_READ(GTIMR);
928 } 642 }
929 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 643 spin_unlock(&ring->irq_lock);
930 644
931 return true; 645 return true;
932} 646}
933 647
934static void 648static void
935gen6_ring_put_irq(struct intel_ring_buffer *ring) 649gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
936{ 650{
937 struct drm_device *dev = ring->dev; 651 struct drm_device *dev = ring->dev;
938 drm_i915_private_t *dev_priv = dev->dev_private; 652 drm_i915_private_t *dev_priv = dev->dev_private;
939 unsigned long flags;
940 653
941 spin_lock_irqsave(&dev_priv->irq_lock, flags); 654 spin_lock(&ring->irq_lock);
942 if (--ring->irq_refcount == 0) { 655 if (--ring->irq_refcount == 0) {
943 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) 656 ring->irq_mask |= rflag;
944 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 657 I915_WRITE_IMR(ring, ring->irq_mask);
658 ironlake_disable_irq(dev_priv, gflag);
659 }
660 spin_unlock(&ring->irq_lock);
661}
662
663static bool
664bsd_ring_get_irq(struct intel_ring_buffer *ring)
665{
666 struct drm_device *dev = ring->dev;
667 drm_i915_private_t *dev_priv = dev->dev_private;
668
669 if (!dev->irq_enabled)
670 return false;
671
672 spin_lock(&ring->irq_lock);
673 if (ring->irq_refcount++ == 0) {
674 if (IS_G4X(dev))
675 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
945 else 676 else
946 I915_WRITE_IMR(ring, ~0); 677 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
947 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
948 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
949 POSTING_READ(GTIMR);
950 } 678 }
951 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 679 spin_unlock(&ring->irq_lock);
680
681 return true;
682}
683static void
684bsd_ring_put_irq(struct intel_ring_buffer *ring)
685{
686 struct drm_device *dev = ring->dev;
687 drm_i915_private_t *dev_priv = dev->dev_private;
952 688
953 gen6_gt_force_wake_put(dev_priv); 689 spin_lock(&ring->irq_lock);
690 if (--ring->irq_refcount == 0) {
691 if (IS_G4X(dev))
692 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
693 else
694 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
695 }
696 spin_unlock(&ring->irq_lock);
954} 697}
955 698
956static int 699static int
957i965_dispatch_execbuffer(struct intel_ring_buffer *ring, 700ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
958 u32 offset, u32 length,
959 unsigned flags)
960{ 701{
961 int ret; 702 int ret;
962 703
@@ -965,82 +706,46 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
965 return ret; 706 return ret;
966 707
967 intel_ring_emit(ring, 708 intel_ring_emit(ring,
968 MI_BATCH_BUFFER_START | 709 MI_BATCH_BUFFER_START | (2 << 6) |
969 MI_BATCH_GTT | 710 MI_BATCH_NON_SECURE_I965);
970 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
971 intel_ring_emit(ring, offset); 711 intel_ring_emit(ring, offset);
972 intel_ring_advance(ring); 712 intel_ring_advance(ring);
973 713
974 return 0; 714 return 0;
975} 715}
976 716
977/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
978#define I830_BATCH_LIMIT (256*1024)
979static int 717static int
980i830_dispatch_execbuffer(struct intel_ring_buffer *ring, 718render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
981 u32 offset, u32 len, 719 u32 offset, u32 len)
982 unsigned flags)
983{ 720{
721 struct drm_device *dev = ring->dev;
984 int ret; 722 int ret;
985 723
986 if (flags & I915_DISPATCH_PINNED) { 724 if (IS_I830(dev) || IS_845G(dev)) {
987 ret = intel_ring_begin(ring, 4); 725 ret = intel_ring_begin(ring, 4);
988 if (ret) 726 if (ret)
989 return ret; 727 return ret;
990 728
991 intel_ring_emit(ring, MI_BATCH_BUFFER); 729 intel_ring_emit(ring, MI_BATCH_BUFFER);
992 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 730 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
993 intel_ring_emit(ring, offset + len - 8); 731 intel_ring_emit(ring, offset + len - 8);
994 intel_ring_emit(ring, MI_NOOP); 732 intel_ring_emit(ring, 0);
995 intel_ring_advance(ring);
996 } else { 733 } else {
997 struct drm_i915_gem_object *obj = ring->private; 734 ret = intel_ring_begin(ring, 2);
998 u32 cs_offset = obj->gtt_offset;
999
1000 if (len > I830_BATCH_LIMIT)
1001 return -ENOSPC;
1002
1003 ret = intel_ring_begin(ring, 9+3);
1004 if (ret) 735 if (ret)
1005 return ret; 736 return ret;
1006 /* Blit the batch (which has now all relocs applied) to the stable batch
1007 * scratch bo area (so that the CS never stumbles over its tlb
1008 * invalidation bug) ... */
1009 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1010 XY_SRC_COPY_BLT_WRITE_ALPHA |
1011 XY_SRC_COPY_BLT_WRITE_RGB);
1012 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1013 intel_ring_emit(ring, 0);
1014 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1015 intel_ring_emit(ring, cs_offset);
1016 intel_ring_emit(ring, 0);
1017 intel_ring_emit(ring, 4096);
1018 intel_ring_emit(ring, offset);
1019 intel_ring_emit(ring, MI_FLUSH);
1020 737
1021 /* ... and execute it. */ 738 if (INTEL_INFO(dev)->gen >= 4) {
1022 intel_ring_emit(ring, MI_BATCH_BUFFER); 739 intel_ring_emit(ring,
1023 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 740 MI_BATCH_BUFFER_START | (2 << 6) |
1024 intel_ring_emit(ring, cs_offset + len - 8); 741 MI_BATCH_NON_SECURE_I965);
1025 intel_ring_advance(ring); 742 intel_ring_emit(ring, offset);
743 } else {
744 intel_ring_emit(ring,
745 MI_BATCH_BUFFER_START | (2 << 6));
746 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
747 }
1026 } 748 }
1027
1028 return 0;
1029}
1030
1031static int
1032i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1033 u32 offset, u32 len,
1034 unsigned flags)
1035{
1036 int ret;
1037
1038 ret = intel_ring_begin(ring, 2);
1039 if (ret)
1040 return ret;
1041
1042 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1043 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1044 intel_ring_advance(ring); 749 intel_ring_advance(ring);
1045 750
1046 return 0; 751 return 0;
@@ -1048,21 +753,25 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1048 753
1049static void cleanup_status_page(struct intel_ring_buffer *ring) 754static void cleanup_status_page(struct intel_ring_buffer *ring)
1050{ 755{
756 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1051 struct drm_i915_gem_object *obj; 757 struct drm_i915_gem_object *obj;
1052 758
1053 obj = ring->status_page.obj; 759 obj = ring->status_page.obj;
1054 if (obj == NULL) 760 if (obj == NULL)
1055 return; 761 return;
1056 762
1057 kunmap(sg_page(obj->pages->sgl)); 763 kunmap(obj->pages[0]);
1058 i915_gem_object_unpin(obj); 764 i915_gem_object_unpin(obj);
1059 drm_gem_object_unreference(&obj->base); 765 drm_gem_object_unreference(&obj->base);
1060 ring->status_page.obj = NULL; 766 ring->status_page.obj = NULL;
767
768 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
1061} 769}
1062 770
1063static int init_status_page(struct intel_ring_buffer *ring) 771static int init_status_page(struct intel_ring_buffer *ring)
1064{ 772{
1065 struct drm_device *dev = ring->dev; 773 struct drm_device *dev = ring->dev;
774 drm_i915_private_t *dev_priv = dev->dev_private;
1066 struct drm_i915_gem_object *obj; 775 struct drm_i915_gem_object *obj;
1067 int ret; 776 int ret;
1068 777
@@ -1075,15 +784,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
1075 784
1076 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 785 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1077 786
1078 ret = i915_gem_object_pin(obj, 4096, true, false); 787 ret = i915_gem_object_pin(obj, 4096, true);
1079 if (ret != 0) { 788 if (ret != 0) {
1080 goto err_unref; 789 goto err_unref;
1081 } 790 }
1082 791
1083 ring->status_page.gfx_addr = obj->gtt_offset; 792 ring->status_page.gfx_addr = obj->gtt_offset;
1084 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 793 ring->status_page.page_addr = kmap(obj->pages[0]);
1085 if (ring->status_page.page_addr == NULL) { 794 if (ring->status_page.page_addr == NULL) {
1086 ret = -ENOMEM; 795 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
1087 goto err_unpin; 796 goto err_unpin;
1088 } 797 }
1089 ring->status_page.obj = obj; 798 ring->status_page.obj = obj;
@@ -1103,53 +812,25 @@ err:
1103 return ret; 812 return ret;
1104} 813}
1105 814
1106static int init_phys_hws_pga(struct intel_ring_buffer *ring) 815int intel_init_ring_buffer(struct drm_device *dev,
1107{ 816 struct intel_ring_buffer *ring)
1108 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1109 u32 addr;
1110
1111 if (!dev_priv->status_page_dmah) {
1112 dev_priv->status_page_dmah =
1113 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1114 if (!dev_priv->status_page_dmah)
1115 return -ENOMEM;
1116 }
1117
1118 addr = dev_priv->status_page_dmah->busaddr;
1119 if (INTEL_INFO(ring->dev)->gen >= 4)
1120 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1121 I915_WRITE(HWS_PGA, addr);
1122
1123 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1124 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1125
1126 return 0;
1127}
1128
1129static int intel_init_ring_buffer(struct drm_device *dev,
1130 struct intel_ring_buffer *ring)
1131{ 817{
1132 struct drm_i915_gem_object *obj; 818 struct drm_i915_gem_object *obj;
1133 struct drm_i915_private *dev_priv = dev->dev_private;
1134 int ret; 819 int ret;
1135 820
1136 ring->dev = dev; 821 ring->dev = dev;
1137 INIT_LIST_HEAD(&ring->active_list); 822 INIT_LIST_HEAD(&ring->active_list);
1138 INIT_LIST_HEAD(&ring->request_list); 823 INIT_LIST_HEAD(&ring->request_list);
1139 ring->size = 32 * PAGE_SIZE; 824 INIT_LIST_HEAD(&ring->gpu_write_list);
1140 memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1141 825
1142 init_waitqueue_head(&ring->irq_queue); 826 init_waitqueue_head(&ring->irq_queue);
827 spin_lock_init(&ring->irq_lock);
828 ring->irq_mask = ~0;
1143 829
1144 if (I915_NEED_GFX_HWS(dev)) { 830 if (I915_NEED_GFX_HWS(dev)) {
1145 ret = init_status_page(ring); 831 ret = init_status_page(ring);
1146 if (ret) 832 if (ret)
1147 return ret; 833 return ret;
1148 } else {
1149 BUG_ON(ring->id != RCS);
1150 ret = init_phys_hws_pga(ring);
1151 if (ret)
1152 return ret;
1153 } 834 }
1154 835
1155 obj = i915_gem_alloc_object(dev, ring->size); 836 obj = i915_gem_alloc_object(dev, ring->size);
@@ -1161,23 +842,24 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1161 842
1162 ring->obj = obj; 843 ring->obj = obj;
1163 844
1164 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); 845 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1165 if (ret) 846 if (ret)
1166 goto err_unref; 847 goto err_unref;
1167 848
1168 ret = i915_gem_object_set_to_gtt_domain(obj, true); 849 ring->map.size = ring->size;
1169 if (ret) 850 ring->map.offset = dev->agp->base + obj->gtt_offset;
1170 goto err_unpin; 851 ring->map.type = 0;
852 ring->map.flags = 0;
853 ring->map.mtrr = 0;
1171 854
1172 ring->virtual_start = 855 drm_core_ioremap_wc(&ring->map, dev);
1173 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 856 if (ring->map.handle == NULL) {
1174 ring->size);
1175 if (ring->virtual_start == NULL) {
1176 DRM_ERROR("Failed to map ringbuffer.\n"); 857 DRM_ERROR("Failed to map ringbuffer.\n");
1177 ret = -EINVAL; 858 ret = -EINVAL;
1178 goto err_unpin; 859 goto err_unpin;
1179 } 860 }
1180 861
862 ring->virtual_start = ring->map.handle;
1181 ret = ring->init(ring); 863 ret = ring->init(ring);
1182 if (ret) 864 if (ret)
1183 goto err_unmap; 865 goto err_unmap;
@@ -1187,13 +869,13 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1187 * of the buffer. 869 * of the buffer.
1188 */ 870 */
1189 ring->effective_size = ring->size; 871 ring->effective_size = ring->size;
1190 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 872 if (IS_I830(ring->dev))
1191 ring->effective_size -= 128; 873 ring->effective_size -= 128;
1192 874
1193 return 0; 875 return 0;
1194 876
1195err_unmap: 877err_unmap:
1196 iounmap(ring->virtual_start); 878 drm_core_ioremapfree(&ring->map, dev);
1197err_unpin: 879err_unpin:
1198 i915_gem_object_unpin(obj); 880 i915_gem_object_unpin(obj);
1199err_unref: 881err_unref:
@@ -1214,14 +896,14 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1214 896
1215 /* Disable the ring buffer. The ring must be idle at this point */ 897 /* Disable the ring buffer. The ring must be idle at this point */
1216 dev_priv = ring->dev->dev_private; 898 dev_priv = ring->dev->dev_private;
1217 ret = intel_ring_idle(ring); 899 ret = intel_wait_ring_idle(ring);
1218 if (ret) 900 if (ret)
1219 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 901 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1220 ring->name, ret); 902 ring->name, ret);
1221 903
1222 I915_WRITE_CTL(ring, 0); 904 I915_WRITE_CTL(ring, 0);
1223 905
1224 iounmap(ring->virtual_start); 906 drm_core_ioremapfree(&ring->map, ring->dev);
1225 907
1226 i915_gem_object_unpin(ring->obj); 908 i915_gem_object_unpin(ring->obj);
1227 drm_gem_object_unreference(&ring->obj->base); 909 drm_gem_object_unreference(&ring->obj->base);
@@ -1233,93 +915,50 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1233 cleanup_status_page(ring); 915 cleanup_status_page(ring);
1234} 916}
1235 917
1236static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 918static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1237{
1238 int ret;
1239
1240 ret = i915_wait_seqno(ring, seqno);
1241 if (!ret)
1242 i915_gem_retire_requests_ring(ring);
1243
1244 return ret;
1245}
1246
1247static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1248{ 919{
1249 struct drm_i915_gem_request *request; 920 unsigned int *virt;
1250 u32 seqno = 0; 921 int rem = ring->size - ring->tail;
1251 int ret;
1252
1253 i915_gem_retire_requests_ring(ring);
1254 922
1255 if (ring->last_retired_head != -1) { 923 if (ring->space < rem) {
1256 ring->head = ring->last_retired_head; 924 int ret = intel_wait_ring_buffer(ring, rem);
1257 ring->last_retired_head = -1; 925 if (ret)
1258 ring->space = ring_space(ring); 926 return ret;
1259 if (ring->space >= n)
1260 return 0;
1261 } 927 }
1262 928
1263 list_for_each_entry(request, &ring->request_list, list) { 929 virt = (unsigned int *)(ring->virtual_start + ring->tail);
1264 int space; 930 rem /= 8;
1265 931 while (rem--) {
1266 if (request->tail == -1) 932 *virt++ = MI_NOOP;
1267 continue; 933 *virt++ = MI_NOOP;
1268
1269 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1270 if (space < 0)
1271 space += ring->size;
1272 if (space >= n) {
1273 seqno = request->seqno;
1274 break;
1275 }
1276
1277 /* Consume this request in case we need more space than
1278 * is available and so need to prevent a race between
1279 * updating last_retired_head and direct reads of
1280 * I915_RING_HEAD. It also provides a nice sanity check.
1281 */
1282 request->tail = -1;
1283 } 934 }
1284 935
1285 if (seqno == 0) 936 ring->tail = 0;
1286 return -ENOSPC;
1287
1288 ret = intel_ring_wait_seqno(ring, seqno);
1289 if (ret)
1290 return ret;
1291
1292 if (WARN_ON(ring->last_retired_head == -1))
1293 return -ENOSPC;
1294
1295 ring->head = ring->last_retired_head;
1296 ring->last_retired_head = -1;
1297 ring->space = ring_space(ring); 937 ring->space = ring_space(ring);
1298 if (WARN_ON(ring->space < n))
1299 return -ENOSPC;
1300 938
1301 return 0; 939 return 0;
1302} 940}
1303 941
1304static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) 942int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1305{ 943{
1306 struct drm_device *dev = ring->dev; 944 struct drm_device *dev = ring->dev;
1307 struct drm_i915_private *dev_priv = dev->dev_private; 945 struct drm_i915_private *dev_priv = dev->dev_private;
1308 unsigned long end; 946 unsigned long end;
1309 int ret; 947 u32 head;
1310 948
1311 ret = intel_ring_wait_request(ring, n); 949 /* If the reported head position has wrapped or hasn't advanced,
1312 if (ret != -ENOSPC) 950 * fallback to the slow and accurate path.
1313 return ret; 951 */
952 head = intel_read_status_page(ring, 4);
953 if (head > ring->head) {
954 ring->head = head;
955 ring->space = ring_space(ring);
956 if (ring->space >= n)
957 return 0;
958 }
1314 959
1315 trace_i915_ring_wait_begin(ring); 960 trace_i915_ring_wait_begin(ring);
1316 /* With GEM the hangcheck timer should kick us out of the loop, 961 end = jiffies + 3 * HZ;
1317 * leaving it early runs the risk of corrupting GEM state (due
1318 * to running on almost untested codepaths). But on resume
1319 * timers don't work yet, so prevent a complete hang in that
1320 * case by choosing an insanely large timeout. */
1321 end = jiffies + 60 * HZ;
1322
1323 do { 962 do {
1324 ring->head = I915_READ_HEAD(ring); 963 ring->head = I915_READ_HEAD(ring);
1325 ring->space = ring_space(ring); 964 ring->space = ring_space(ring);
@@ -1335,84 +974,22 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1335 } 974 }
1336 975
1337 msleep(1); 976 msleep(1);
1338 977 if (atomic_read(&dev_priv->mm.wedged))
1339 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 978 return -EAGAIN;
1340 if (ret)
1341 return ret;
1342 } while (!time_after(jiffies, end)); 979 } while (!time_after(jiffies, end));
1343 trace_i915_ring_wait_end(ring); 980 trace_i915_ring_wait_end(ring);
1344 return -EBUSY; 981 return -EBUSY;
1345} 982}
1346 983
1347static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1348{
1349 uint32_t __iomem *virt;
1350 int rem = ring->size - ring->tail;
1351
1352 if (ring->space < rem) {
1353 int ret = ring_wait_for_space(ring, rem);
1354 if (ret)
1355 return ret;
1356 }
1357
1358 virt = ring->virtual_start + ring->tail;
1359 rem /= 4;
1360 while (rem--)
1361 iowrite32(MI_NOOP, virt++);
1362
1363 ring->tail = 0;
1364 ring->space = ring_space(ring);
1365
1366 return 0;
1367}
1368
1369int intel_ring_idle(struct intel_ring_buffer *ring)
1370{
1371 u32 seqno;
1372 int ret;
1373
1374 /* We need to add any requests required to flush the objects and ring */
1375 if (ring->outstanding_lazy_request) {
1376 ret = i915_add_request(ring, NULL, NULL);
1377 if (ret)
1378 return ret;
1379 }
1380
1381 /* Wait upon the last request to be completed */
1382 if (list_empty(&ring->request_list))
1383 return 0;
1384
1385 seqno = list_entry(ring->request_list.prev,
1386 struct drm_i915_gem_request,
1387 list)->seqno;
1388
1389 return i915_wait_seqno(ring, seqno);
1390}
1391
1392static int
1393intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1394{
1395 if (ring->outstanding_lazy_request)
1396 return 0;
1397
1398 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1399}
1400
1401int intel_ring_begin(struct intel_ring_buffer *ring, 984int intel_ring_begin(struct intel_ring_buffer *ring,
1402 int num_dwords) 985 int num_dwords)
1403{ 986{
1404 drm_i915_private_t *dev_priv = ring->dev->dev_private; 987 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1405 int n = 4*num_dwords; 988 int n = 4*num_dwords;
1406 int ret; 989 int ret;
1407 990
1408 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 991 if (unlikely(atomic_read(&dev_priv->mm.wedged)))
1409 if (ret) 992 return -EIO;
1410 return ret;
1411
1412 /* Preallocate the olr before touching the ring */
1413 ret = intel_ring_alloc_seqno(ring);
1414 if (ret)
1415 return ret;
1416 993
1417 if (unlikely(ring->tail + n > ring->effective_size)) { 994 if (unlikely(ring->tail + n > ring->effective_size)) {
1418 ret = intel_wrap_ring_buffer(ring); 995 ret = intel_wrap_ring_buffer(ring);
@@ -1421,7 +998,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1421 } 998 }
1422 999
1423 if (unlikely(ring->space < n)) { 1000 if (unlikely(ring->space < n)) {
1424 ret = ring_wait_for_space(ring, n); 1001 ret = intel_wait_ring_buffer(ring, n);
1425 if (unlikely(ret)) 1002 if (unlikely(ret))
1426 return ret; 1003 return ret;
1427 } 1004 }
@@ -1432,46 +1009,64 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1432 1009
1433void intel_ring_advance(struct intel_ring_buffer *ring) 1010void intel_ring_advance(struct intel_ring_buffer *ring)
1434{ 1011{
1435 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1436
1437 ring->tail &= ring->size - 1; 1012 ring->tail &= ring->size - 1;
1438 if (dev_priv->stop_rings & intel_ring_flag(ring))
1439 return;
1440 ring->write_tail(ring, ring->tail); 1013 ring->write_tail(ring, ring->tail);
1441} 1014}
1442 1015
1016static const struct intel_ring_buffer render_ring = {
1017 .name = "render ring",
1018 .id = RING_RENDER,
1019 .mmio_base = RENDER_RING_BASE,
1020 .size = 32 * PAGE_SIZE,
1021 .init = init_render_ring,
1022 .write_tail = ring_write_tail,
1023 .flush = render_ring_flush,
1024 .add_request = render_ring_add_request,
1025 .get_seqno = ring_get_seqno,
1026 .irq_get = render_ring_get_irq,
1027 .irq_put = render_ring_put_irq,
1028 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
1029 .cleanup = render_ring_cleanup,
1030};
1031
1032/* ring buffer for bit-stream decoder */
1033
1034static const struct intel_ring_buffer bsd_ring = {
1035 .name = "bsd ring",
1036 .id = RING_BSD,
1037 .mmio_base = BSD_RING_BASE,
1038 .size = 32 * PAGE_SIZE,
1039 .init = init_ring_common,
1040 .write_tail = ring_write_tail,
1041 .flush = bsd_ring_flush,
1042 .add_request = ring_add_request,
1043 .get_seqno = ring_get_seqno,
1044 .irq_get = bsd_ring_get_irq,
1045 .irq_put = bsd_ring_put_irq,
1046 .dispatch_execbuffer = ring_dispatch_execbuffer,
1047};
1048
1443 1049
1444static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1050static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1445 u32 value) 1051 u32 value)
1446{ 1052{
1447 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1053 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1448 1054
1449 /* Every tail move must follow the sequence below */ 1055 /* Every tail move must follow the sequence below */
1450 1056 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1451 /* Disable notification that the ring is IDLE. The GT 1057 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1452 * will then assume that it is busy and bring it out of rc6. 1058 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1453 */ 1059 I915_WRITE(GEN6_BSD_RNCID, 0x0);
1454 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1060
1455 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1061 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1456 1062 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1457 /* Clear the context id. Here be magic! */ 1063 50))
1458 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 1064 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1459 1065
1460 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 1066 I915_WRITE_TAIL(ring, value);
1461 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 1067 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1462 GEN6_BSD_SLEEP_INDICATOR) == 0, 1068 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1463 50)) 1069 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1464 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1465
1466 /* Now that the ring is fully powered up, update the tail */
1467 I915_WRITE_TAIL(ring, value);
1468 POSTING_READ(RING_TAIL(ring->mmio_base));
1469
1470 /* Let the ring send IDLE messages to the GT again,
1471 * and so let it sleep to conserve power when idle.
1472 */
1473 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1474 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1475} 1070}
1476 1071
1477static int gen6_ring_flush(struct intel_ring_buffer *ring, 1072static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1485,17 +1080,10 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1485 return ret; 1080 return ret;
1486 1081
1487 cmd = MI_FLUSH_DW; 1082 cmd = MI_FLUSH_DW;
1488 /*
1489 * Bspec vol 1c.5 - video engine command streamer:
1490 * "If ENABLED, all TLBs will be invalidated once the flush
1491 * operation is complete. This bit is only valid when the
1492 * Post-Sync Operation field is a value of 1h or 3h."
1493 */
1494 if (invalidate & I915_GEM_GPU_DOMAINS) 1083 if (invalidate & I915_GEM_GPU_DOMAINS)
1495 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | 1084 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1496 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1497 intel_ring_emit(ring, cmd); 1085 intel_ring_emit(ring, cmd);
1498 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1086 intel_ring_emit(ring, 0);
1499 intel_ring_emit(ring, 0); 1087 intel_ring_emit(ring, 0);
1500 intel_ring_emit(ring, MI_NOOP); 1088 intel_ring_emit(ring, MI_NOOP);
1501 intel_ring_advance(ring); 1089 intel_ring_advance(ring);
@@ -1503,157 +1091,220 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1503} 1091}
1504 1092
1505static int 1093static int
1506hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1094gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1507 u32 offset, u32 len, 1095 u32 offset, u32 len)
1508 unsigned flags)
1509{ 1096{
1510 int ret; 1097 int ret;
1511 1098
1512 ret = intel_ring_begin(ring, 2); 1099 ret = intel_ring_begin(ring, 2);
1513 if (ret) 1100 if (ret)
1514 return ret; 1101 return ret;
1515 1102
1516 intel_ring_emit(ring, 1103 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1517 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 1104 /* bit0-7 is the length on GEN6+ */
1518 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 1105 intel_ring_emit(ring, offset);
1519 /* bit0-7 is the length on GEN6+ */ 1106 intel_ring_advance(ring);
1520 intel_ring_emit(ring, offset);
1521 intel_ring_advance(ring);
1522 1107
1523 return 0; 1108 return 0;
1524} 1109}
1525 1110
1526static int 1111static bool
1527gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1112gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1528 u32 offset, u32 len,
1529 unsigned flags)
1530{ 1113{
1531 int ret; 1114 return gen6_ring_get_irq(ring,
1115 GT_USER_INTERRUPT,
1116 GEN6_RENDER_USER_INTERRUPT);
1117}
1532 1118
1533 ret = intel_ring_begin(ring, 2); 1119static void
1534 if (ret) 1120gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1535 return ret; 1121{
1122 return gen6_ring_put_irq(ring,
1123 GT_USER_INTERRUPT,
1124 GEN6_RENDER_USER_INTERRUPT);
1125}
1536 1126
1537 intel_ring_emit(ring, 1127static bool
1538 MI_BATCH_BUFFER_START | 1128gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1539 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1129{
1540 /* bit0-7 is the length on GEN6+ */ 1130 return gen6_ring_get_irq(ring,
1541 intel_ring_emit(ring, offset); 1131 GT_GEN6_BSD_USER_INTERRUPT,
1542 intel_ring_advance(ring); 1132 GEN6_BSD_USER_INTERRUPT);
1133}
1543 1134
1544 return 0; 1135static void
1136gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1137{
1138 return gen6_ring_put_irq(ring,
1139 GT_GEN6_BSD_USER_INTERRUPT,
1140 GEN6_BSD_USER_INTERRUPT);
1545} 1141}
1546 1142
1143/* ring buffer for Video Codec for Gen6+ */
1144static const struct intel_ring_buffer gen6_bsd_ring = {
1145 .name = "gen6 bsd ring",
1146 .id = RING_BSD,
1147 .mmio_base = GEN6_BSD_RING_BASE,
1148 .size = 32 * PAGE_SIZE,
1149 .init = init_ring_common,
1150 .write_tail = gen6_bsd_ring_write_tail,
1151 .flush = gen6_ring_flush,
1152 .add_request = gen6_add_request,
1153 .get_seqno = ring_get_seqno,
1154 .irq_get = gen6_bsd_ring_get_irq,
1155 .irq_put = gen6_bsd_ring_put_irq,
1156 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1157};
1158
1547/* Blitter support (SandyBridge+) */ 1159/* Blitter support (SandyBridge+) */
1548 1160
1161static bool
1162blt_ring_get_irq(struct intel_ring_buffer *ring)
1163{
1164 return gen6_ring_get_irq(ring,
1165 GT_BLT_USER_INTERRUPT,
1166 GEN6_BLITTER_USER_INTERRUPT);
1167}
1168
1169static void
1170blt_ring_put_irq(struct intel_ring_buffer *ring)
1171{
1172 gen6_ring_put_irq(ring,
1173 GT_BLT_USER_INTERRUPT,
1174 GEN6_BLITTER_USER_INTERRUPT);
1175}
1176
1177
1178/* Workaround for some stepping of SNB,
1179 * each time when BLT engine ring tail moved,
1180 * the first command in the ring to be parsed
1181 * should be MI_BATCH_BUFFER_START
1182 */
1183#define NEED_BLT_WORKAROUND(dev) \
1184 (IS_GEN6(dev) && (dev->pdev->revision < 8))
1185
1186static inline struct drm_i915_gem_object *
1187to_blt_workaround(struct intel_ring_buffer *ring)
1188{
1189 return ring->private;
1190}
1191
1192static int blt_ring_init(struct intel_ring_buffer *ring)
1193{
1194 if (NEED_BLT_WORKAROUND(ring->dev)) {
1195 struct drm_i915_gem_object *obj;
1196 u32 *ptr;
1197 int ret;
1198
1199 obj = i915_gem_alloc_object(ring->dev, 4096);
1200 if (obj == NULL)
1201 return -ENOMEM;
1202
1203 ret = i915_gem_object_pin(obj, 4096, true);
1204 if (ret) {
1205 drm_gem_object_unreference(&obj->base);
1206 return ret;
1207 }
1208
1209 ptr = kmap(obj->pages[0]);
1210 *ptr++ = MI_BATCH_BUFFER_END;
1211 *ptr++ = MI_NOOP;
1212 kunmap(obj->pages[0]);
1213
1214 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1215 if (ret) {
1216 i915_gem_object_unpin(obj);
1217 drm_gem_object_unreference(&obj->base);
1218 return ret;
1219 }
1220
1221 ring->private = obj;
1222 }
1223
1224 return init_ring_common(ring);
1225}
1226
1227static int blt_ring_begin(struct intel_ring_buffer *ring,
1228 int num_dwords)
1229{
1230 if (ring->private) {
1231 int ret = intel_ring_begin(ring, num_dwords+2);
1232 if (ret)
1233 return ret;
1234
1235 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1236 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1237
1238 return 0;
1239 } else
1240 return intel_ring_begin(ring, 4);
1241}
1242
1549static int blt_ring_flush(struct intel_ring_buffer *ring, 1243static int blt_ring_flush(struct intel_ring_buffer *ring,
1550 u32 invalidate, u32 flush) 1244 u32 invalidate, u32 flush)
1551{ 1245{
1552 uint32_t cmd; 1246 uint32_t cmd;
1553 int ret; 1247 int ret;
1554 1248
1555 ret = intel_ring_begin(ring, 4); 1249 ret = blt_ring_begin(ring, 4);
1556 if (ret) 1250 if (ret)
1557 return ret; 1251 return ret;
1558 1252
1559 cmd = MI_FLUSH_DW; 1253 cmd = MI_FLUSH_DW;
1560 /*
1561 * Bspec vol 1c.3 - blitter engine command streamer:
1562 * "If ENABLED, all TLBs will be invalidated once the flush
1563 * operation is complete. This bit is only valid when the
1564 * Post-Sync Operation field is a value of 1h or 3h."
1565 */
1566 if (invalidate & I915_GEM_DOMAIN_RENDER) 1254 if (invalidate & I915_GEM_DOMAIN_RENDER)
1567 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | 1255 cmd |= MI_INVALIDATE_TLB;
1568 MI_FLUSH_DW_OP_STOREDW;
1569 intel_ring_emit(ring, cmd); 1256 intel_ring_emit(ring, cmd);
1570 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1257 intel_ring_emit(ring, 0);
1571 intel_ring_emit(ring, 0); 1258 intel_ring_emit(ring, 0);
1572 intel_ring_emit(ring, MI_NOOP); 1259 intel_ring_emit(ring, MI_NOOP);
1573 intel_ring_advance(ring); 1260 intel_ring_advance(ring);
1574 return 0; 1261 return 0;
1575} 1262}
1576 1263
1264static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1265{
1266 if (!ring->private)
1267 return;
1268
1269 i915_gem_object_unpin(ring->private);
1270 drm_gem_object_unreference(ring->private);
1271 ring->private = NULL;
1272}
1273
1274static const struct intel_ring_buffer gen6_blt_ring = {
1275 .name = "blt ring",
1276 .id = RING_BLT,
1277 .mmio_base = BLT_RING_BASE,
1278 .size = 32 * PAGE_SIZE,
1279 .init = blt_ring_init,
1280 .write_tail = ring_write_tail,
1281 .flush = blt_ring_flush,
1282 .add_request = gen6_add_request,
1283 .get_seqno = ring_get_seqno,
1284 .irq_get = blt_ring_get_irq,
1285 .irq_put = blt_ring_put_irq,
1286 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1287 .cleanup = blt_ring_cleanup,
1288};
1289
1577int intel_init_render_ring_buffer(struct drm_device *dev) 1290int intel_init_render_ring_buffer(struct drm_device *dev)
1578{ 1291{
1579 drm_i915_private_t *dev_priv = dev->dev_private; 1292 drm_i915_private_t *dev_priv = dev->dev_private;
1580 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1293 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1581 1294
1582 ring->name = "render ring"; 1295 *ring = render_ring;
1583 ring->id = RCS;
1584 ring->mmio_base = RENDER_RING_BASE;
1585
1586 if (INTEL_INFO(dev)->gen >= 6) { 1296 if (INTEL_INFO(dev)->gen >= 6) {
1587 ring->add_request = gen6_add_request; 1297 ring->add_request = gen6_add_request;
1588 ring->flush = gen7_render_ring_flush; 1298 ring->irq_get = gen6_render_ring_get_irq;
1589 if (INTEL_INFO(dev)->gen == 6) 1299 ring->irq_put = gen6_render_ring_put_irq;
1590 ring->flush = gen6_render_ring_flush;
1591 ring->irq_get = gen6_ring_get_irq;
1592 ring->irq_put = gen6_ring_put_irq;
1593 ring->irq_enable_mask = GT_USER_INTERRUPT;
1594 ring->get_seqno = gen6_ring_get_seqno;
1595 ring->sync_to = gen6_ring_sync;
1596 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1597 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1598 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1599 ring->signal_mbox[0] = GEN6_VRSYNC;
1600 ring->signal_mbox[1] = GEN6_BRSYNC;
1601 } else if (IS_GEN5(dev)) { 1300 } else if (IS_GEN5(dev)) {
1602 ring->add_request = pc_render_add_request; 1301 ring->add_request = pc_render_add_request;
1603 ring->flush = gen4_render_ring_flush;
1604 ring->get_seqno = pc_render_get_seqno; 1302 ring->get_seqno = pc_render_get_seqno;
1605 ring->irq_get = gen5_ring_get_irq;
1606 ring->irq_put = gen5_ring_put_irq;
1607 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1608 } else {
1609 ring->add_request = i9xx_add_request;
1610 if (INTEL_INFO(dev)->gen < 4)
1611 ring->flush = gen2_render_ring_flush;
1612 else
1613 ring->flush = gen4_render_ring_flush;
1614 ring->get_seqno = ring_get_seqno;
1615 if (IS_GEN2(dev)) {
1616 ring->irq_get = i8xx_ring_get_irq;
1617 ring->irq_put = i8xx_ring_put_irq;
1618 } else {
1619 ring->irq_get = i9xx_ring_get_irq;
1620 ring->irq_put = i9xx_ring_put_irq;
1621 }
1622 ring->irq_enable_mask = I915_USER_INTERRUPT;
1623 } 1303 }
1624 ring->write_tail = ring_write_tail;
1625 if (IS_HASWELL(dev))
1626 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1627 else if (INTEL_INFO(dev)->gen >= 6)
1628 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1629 else if (INTEL_INFO(dev)->gen >= 4)
1630 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1631 else if (IS_I830(dev) || IS_845G(dev))
1632 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1633 else
1634 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1635 ring->init = init_render_ring;
1636 ring->cleanup = render_ring_cleanup;
1637 1304
1638 /* Workaround batchbuffer to combat CS tlb bug. */ 1305 if (!I915_NEED_GFX_HWS(dev)) {
1639 if (HAS_BROKEN_CS_TLB(dev)) { 1306 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1640 struct drm_i915_gem_object *obj; 1307 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1641 int ret;
1642
1643 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1644 if (obj == NULL) {
1645 DRM_ERROR("Failed to allocate batch bo\n");
1646 return -ENOMEM;
1647 }
1648
1649 ret = i915_gem_object_pin(obj, 0, true, false);
1650 if (ret != 0) {
1651 drm_gem_object_unreference(&obj->base);
1652 DRM_ERROR("Failed to ping batch bo\n");
1653 return ret;
1654 }
1655
1656 ring->private = obj;
1657 } 1308 }
1658 1309
1659 return intel_init_ring_buffer(dev, ring); 1310 return intel_init_ring_buffer(dev, ring);
@@ -1663,66 +1314,44 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1663{ 1314{
1664 drm_i915_private_t *dev_priv = dev->dev_private; 1315 drm_i915_private_t *dev_priv = dev->dev_private;
1665 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1316 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1666 int ret;
1667
1668 ring->name = "render ring";
1669 ring->id = RCS;
1670 ring->mmio_base = RENDER_RING_BASE;
1671 1317
1318 *ring = render_ring;
1672 if (INTEL_INFO(dev)->gen >= 6) { 1319 if (INTEL_INFO(dev)->gen >= 6) {
1673 /* non-kms not supported on gen6+ */ 1320 ring->add_request = gen6_add_request;
1674 return -ENODEV; 1321 ring->irq_get = gen6_render_ring_get_irq;
1322 ring->irq_put = gen6_render_ring_put_irq;
1323 } else if (IS_GEN5(dev)) {
1324 ring->add_request = pc_render_add_request;
1325 ring->get_seqno = pc_render_get_seqno;
1675 } 1326 }
1676 1327
1677 /* Note: gem is not supported on gen5/ilk without kms (the corresponding 1328 if (!I915_NEED_GFX_HWS(dev))
1678 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up 1329 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1679 * the special gen5 functions. */
1680 ring->add_request = i9xx_add_request;
1681 if (INTEL_INFO(dev)->gen < 4)
1682 ring->flush = gen2_render_ring_flush;
1683 else
1684 ring->flush = gen4_render_ring_flush;
1685 ring->get_seqno = ring_get_seqno;
1686 if (IS_GEN2(dev)) {
1687 ring->irq_get = i8xx_ring_get_irq;
1688 ring->irq_put = i8xx_ring_put_irq;
1689 } else {
1690 ring->irq_get = i9xx_ring_get_irq;
1691 ring->irq_put = i9xx_ring_put_irq;
1692 }
1693 ring->irq_enable_mask = I915_USER_INTERRUPT;
1694 ring->write_tail = ring_write_tail;
1695 if (INTEL_INFO(dev)->gen >= 4)
1696 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1697 else if (IS_I830(dev) || IS_845G(dev))
1698 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1699 else
1700 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1701 ring->init = init_render_ring;
1702 ring->cleanup = render_ring_cleanup;
1703 1330
1704 ring->dev = dev; 1331 ring->dev = dev;
1705 INIT_LIST_HEAD(&ring->active_list); 1332 INIT_LIST_HEAD(&ring->active_list);
1706 INIT_LIST_HEAD(&ring->request_list); 1333 INIT_LIST_HEAD(&ring->request_list);
1334 INIT_LIST_HEAD(&ring->gpu_write_list);
1707 1335
1708 ring->size = size; 1336 ring->size = size;
1709 ring->effective_size = ring->size; 1337 ring->effective_size = ring->size;
1710 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1338 if (IS_I830(ring->dev))
1711 ring->effective_size -= 128; 1339 ring->effective_size -= 128;
1712 1340
1713 ring->virtual_start = ioremap_wc(start, size); 1341 ring->map.offset = start;
1714 if (ring->virtual_start == NULL) { 1342 ring->map.size = size;
1343 ring->map.type = 0;
1344 ring->map.flags = 0;
1345 ring->map.mtrr = 0;
1346
1347 drm_core_ioremap_wc(&ring->map, dev);
1348 if (ring->map.handle == NULL) {
1715 DRM_ERROR("can not ioremap virtual address for" 1349 DRM_ERROR("can not ioremap virtual address for"
1716 " ring buffer\n"); 1350 " ring buffer\n");
1717 return -ENOMEM; 1351 return -ENOMEM;
1718 } 1352 }
1719 1353
1720 if (!I915_NEED_GFX_HWS(dev)) { 1354 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1721 ret = init_phys_hws_pga(ring);
1722 if (ret)
1723 return ret;
1724 }
1725
1726 return 0; 1355 return 0;
1727} 1356}
1728 1357
@@ -1731,45 +1360,10 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1731 drm_i915_private_t *dev_priv = dev->dev_private; 1360 drm_i915_private_t *dev_priv = dev->dev_private;
1732 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 1361 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1733 1362
1734 ring->name = "bsd ring"; 1363 if (IS_GEN6(dev) || IS_GEN7(dev))
1735 ring->id = VCS; 1364 *ring = gen6_bsd_ring;
1736 1365 else
1737 ring->write_tail = ring_write_tail; 1366 *ring = bsd_ring;
1738 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1739 ring->mmio_base = GEN6_BSD_RING_BASE;
1740 /* gen6 bsd needs a special wa for tail updates */
1741 if (IS_GEN6(dev))
1742 ring->write_tail = gen6_bsd_ring_write_tail;
1743 ring->flush = gen6_ring_flush;
1744 ring->add_request = gen6_add_request;
1745 ring->get_seqno = gen6_ring_get_seqno;
1746 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1747 ring->irq_get = gen6_ring_get_irq;
1748 ring->irq_put = gen6_ring_put_irq;
1749 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1750 ring->sync_to = gen6_ring_sync;
1751 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1752 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1753 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1754 ring->signal_mbox[0] = GEN6_RVSYNC;
1755 ring->signal_mbox[1] = GEN6_BVSYNC;
1756 } else {
1757 ring->mmio_base = BSD_RING_BASE;
1758 ring->flush = bsd_ring_flush;
1759 ring->add_request = i9xx_add_request;
1760 ring->get_seqno = ring_get_seqno;
1761 if (IS_GEN5(dev)) {
1762 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1763 ring->irq_get = gen5_ring_get_irq;
1764 ring->irq_put = gen5_ring_put_irq;
1765 } else {
1766 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1767 ring->irq_get = i9xx_ring_get_irq;
1768 ring->irq_put = i9xx_ring_put_irq;
1769 }
1770 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1771 }
1772 ring->init = init_ring_common;
1773 1367
1774 return intel_init_ring_buffer(dev, ring); 1368 return intel_init_ring_buffer(dev, ring);
1775} 1369}
@@ -1779,63 +1373,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1779 drm_i915_private_t *dev_priv = dev->dev_private; 1373 drm_i915_private_t *dev_priv = dev->dev_private;
1780 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 1374 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1781 1375
1782 ring->name = "blitter ring"; 1376 *ring = gen6_blt_ring;
1783 ring->id = BCS;
1784
1785 ring->mmio_base = BLT_RING_BASE;
1786 ring->write_tail = ring_write_tail;
1787 ring->flush = blt_ring_flush;
1788 ring->add_request = gen6_add_request;
1789 ring->get_seqno = gen6_ring_get_seqno;
1790 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1791 ring->irq_get = gen6_ring_get_irq;
1792 ring->irq_put = gen6_ring_put_irq;
1793 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1794 ring->sync_to = gen6_ring_sync;
1795 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1796 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1797 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1798 ring->signal_mbox[0] = GEN6_RBSYNC;
1799 ring->signal_mbox[1] = GEN6_VBSYNC;
1800 ring->init = init_ring_common;
1801 1377
1802 return intel_init_ring_buffer(dev, ring); 1378 return intel_init_ring_buffer(dev, ring);
1803} 1379}
1804
1805int
1806intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
1807{
1808 int ret;
1809
1810 if (!ring->gpu_caches_dirty)
1811 return 0;
1812
1813 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
1814 if (ret)
1815 return ret;
1816
1817 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
1818
1819 ring->gpu_caches_dirty = false;
1820 return 0;
1821}
1822
1823int
1824intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
1825{
1826 uint32_t flush_domains;
1827 int ret;
1828
1829 flush_domains = 0;
1830 if (ring->gpu_caches_dirty)
1831 flush_domains = I915_GEM_GPU_DOMAINS;
1832
1833 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1834 if (ret)
1835 return ret;
1836
1837 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1838
1839 ring->gpu_caches_dirty = false;
1840 return 0;
1841}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd0572..39ac2b634ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,19 +1,15 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4/* 4enum {
5 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" 5 RCS = 0x0,
6 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" 6 VCS,
7 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" 7 BCS,
8 * 8 I915_NUM_RINGS,
9 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same 9};
10 * cacheline, the Head Pointer must not be greater than the Tail
11 * Pointer."
12 */
13#define I915_RING_FREE_SPACE 64
14 10
15struct intel_hw_status_page { 11struct intel_hw_status_page {
16 u32 *page_addr; 12 u32 __iomem *page_addr;
17 unsigned int gfx_addr; 13 unsigned int gfx_addr;
18 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
19}; 15};
@@ -40,11 +36,10 @@ struct intel_hw_status_page {
40struct intel_ring_buffer { 36struct intel_ring_buffer {
41 const char *name; 37 const char *name;
42 enum intel_ring_id { 38 enum intel_ring_id {
43 RCS = 0x0, 39 RING_RENDER = 0x1,
44 VCS, 40 RING_BSD = 0x2,
45 BCS, 41 RING_BLT = 0x4,
46 } id; 42 } id;
47#define I915_NUM_RINGS 3
48 u32 mmio_base; 43 u32 mmio_base;
49 void __iomem *virtual_start; 44 void __iomem *virtual_start;
50 struct drm_device *dev; 45 struct drm_device *dev;
@@ -57,19 +52,12 @@ struct intel_ring_buffer {
57 int effective_size; 52 int effective_size;
58 struct intel_hw_status_page status_page; 53 struct intel_hw_status_page status_page;
59 54
60 /** We track the position of the requests in the ring buffer, and 55 spinlock_t irq_lock;
61 * when each is retired we increment last_retired_head as the GPU 56 u32 irq_refcount;
62 * must have finished processing the request and so we know we 57 u32 irq_mask;
63 * can advance the ringbuffer up to that position. 58 u32 irq_seqno; /* last seq seem at irq time */
64 *
65 * last_retired_head is set to -1 after the value is consumed so
66 * we can detect new retirements.
67 */
68 u32 last_retired_head;
69
70 u32 irq_refcount; /* protected by dev_priv->irq_lock */
71 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
72 u32 trace_irq_seqno; 59 u32 trace_irq_seqno;
60 u32 waiting_seqno;
73 u32 sync_seqno[I915_NUM_RINGS-1]; 61 u32 sync_seqno[I915_NUM_RINGS-1];
74 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 62 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
75 void (*irq_put)(struct intel_ring_buffer *ring); 63 void (*irq_put)(struct intel_ring_buffer *ring);
@@ -81,27 +69,13 @@ struct intel_ring_buffer {
81 int __must_check (*flush)(struct intel_ring_buffer *ring, 69 int __must_check (*flush)(struct intel_ring_buffer *ring,
82 u32 invalidate_domains, 70 u32 invalidate_domains,
83 u32 flush_domains); 71 u32 flush_domains);
84 int (*add_request)(struct intel_ring_buffer *ring); 72 int (*add_request)(struct intel_ring_buffer *ring,
85 /* Some chipsets are not quite as coherent as advertised and need 73 u32 *seqno);
86 * an expensive kick to force a true read of the up-to-date seqno. 74 u32 (*get_seqno)(struct intel_ring_buffer *ring);
87 * However, the up-to-date seqno is not always required and the last
88 * seen value is good enough. Note that the seqno will always be
89 * monotonic, even if not coherent.
90 */
91 u32 (*get_seqno)(struct intel_ring_buffer *ring,
92 bool lazy_coherency);
93 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 75 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
94 u32 offset, u32 length, 76 u32 offset, u32 length);
95 unsigned flags);
96#define I915_DISPATCH_SECURE 0x1
97#define I915_DISPATCH_PINNED 0x2
98 void (*cleanup)(struct intel_ring_buffer *ring); 77 void (*cleanup)(struct intel_ring_buffer *ring);
99 int (*sync_to)(struct intel_ring_buffer *ring,
100 struct intel_ring_buffer *to,
101 u32 seqno);
102 78
103 u32 semaphore_register[3]; /*our mbox written by others */
104 u32 signal_mbox[2]; /* mboxes this ring signals to */
105 /** 79 /**
106 * List of objects currently involved in rendering from the 80 * List of objects currently involved in rendering from the
107 * ringbuffer. 81 * ringbuffer.
@@ -121,35 +95,25 @@ struct intel_ring_buffer {
121 struct list_head request_list; 95 struct list_head request_list;
122 96
123 /** 97 /**
98 * List of objects currently pending a GPU write flush.
99 *
100 * All elements on this list will belong to either the
101 * active_list or flushing_list, last_rendering_seqno can
102 * be used to differentiate between the two elements.
103 */
104 struct list_head gpu_write_list;
105
106 /**
124 * Do we have some not yet emitted requests outstanding? 107 * Do we have some not yet emitted requests outstanding?
125 */ 108 */
126 u32 outstanding_lazy_request; 109 u32 outstanding_lazy_request;
127 bool gpu_caches_dirty;
128 110
129 wait_queue_head_t irq_queue; 111 wait_queue_head_t irq_queue;
130 112 drm_local_map_t map;
131 /**
132 * Do an explicit TLB flush before MI_SET_CONTEXT
133 */
134 bool itlb_before_ctx_switch;
135 struct i915_hw_context *default_context;
136 struct drm_i915_gem_object *last_context_obj;
137 113
138 void *private; 114 void *private;
139}; 115};
140 116
141static inline bool
142intel_ring_initialized(struct intel_ring_buffer *ring)
143{
144 return ring->obj != NULL;
145}
146
147static inline unsigned
148intel_ring_flag(struct intel_ring_buffer *ring)
149{
150 return 1 << ring->id;
151}
152
153static inline u32 117static inline u32
154intel_ring_sync_index(struct intel_ring_buffer *ring, 118intel_ring_sync_index(struct intel_ring_buffer *ring,
155 struct intel_ring_buffer *other) 119 struct intel_ring_buffer *other)
@@ -173,9 +137,7 @@ static inline u32
173intel_read_status_page(struct intel_ring_buffer *ring, 137intel_read_status_page(struct intel_ring_buffer *ring,
174 int reg) 138 int reg)
175{ 139{
176 /* Ensure that the compiler doesn't optimize away the load. */ 140 return ioread32(ring->status_page.page_addr + reg);
177 barrier();
178 return ring->status_page.page_addr[reg];
179} 141}
180 142
181/** 143/**
@@ -193,24 +155,34 @@ intel_read_status_page(struct intel_ring_buffer *ring,
193 * 155 *
194 * The area from dword 0x20 to 0x3ff is available for driver usage. 156 * The area from dword 0x20 to 0x3ff is available for driver usage.
195 */ 157 */
158#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
159#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
196#define I915_GEM_HWS_INDEX 0x20 160#define I915_GEM_HWS_INDEX 0x20
197#define I915_GEM_HWS_SCRATCH_INDEX 0x30 161#define I915_BREADCRUMB_INDEX 0x21
198#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
199 162
200void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 163void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
201 164
165int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
166static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
167{
168 return intel_wait_ring_buffer(ring, ring->size - 8);
169}
170
202int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 171int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
172
203static inline void intel_ring_emit(struct intel_ring_buffer *ring, 173static inline void intel_ring_emit(struct intel_ring_buffer *ring,
204 u32 data) 174 u32 data)
205{ 175{
206 iowrite32(data, ring->virtual_start + ring->tail); 176 iowrite32(data, ring->virtual_start + ring->tail);
207 ring->tail += 4; 177 ring->tail += 4;
208} 178}
179
209void intel_ring_advance(struct intel_ring_buffer *ring); 180void intel_ring_advance(struct intel_ring_buffer *ring);
210int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
211 181
212int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 182u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
213int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 183int intel_ring_sync(struct intel_ring_buffer *ring,
184 struct intel_ring_buffer *to,
185 u32 seqno);
214 186
215int intel_init_render_ring_buffer(struct drm_device *dev); 187int intel_init_render_ring_buffer(struct drm_device *dev);
216int intel_init_bsd_ring_buffer(struct drm_device *dev); 188int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -219,17 +191,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
219u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 191u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
220void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 192void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
221 193
222static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
223{
224 return ring->tail;
225}
226
227static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
228{
229 BUG_ON(ring->outstanding_lazy_request == 0);
230 return ring->outstanding_lazy_request;
231}
232
233static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 194static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
234{ 195{
235 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) 196 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36..66e47a0100c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -28,28 +28,27 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/export.h> 31#include "drmP.h"
32#include <drm/drmP.h> 32#include "drm.h"
33#include <drm/drm_crtc.h> 33#include "drm_crtc.h"
34#include <drm/drm_edid.h> 34#include "drm_edid.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36#include <drm/i915_drm.h> 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "intel_sdvo_regs.h" 38#include "intel_sdvo_regs.h"
39 39
40#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) 40#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
41#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) 41#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
42#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) 42#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
43#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0) 43#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
44 44
45#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ 45#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
46 SDVO_TV_MASK) 46 SDVO_TV_MASK)
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) 49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
52#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
53 52
54 53
55static const char *tv_format_names[] = { 54static const char *tv_format_names[] = {
@@ -73,7 +72,7 @@ struct intel_sdvo {
73 struct i2c_adapter ddc; 72 struct i2c_adapter ddc;
74 73
75 /* Register for the SDVO device: SDVOB or SDVOC */ 74 /* Register for the SDVO device: SDVOB or SDVOC */
76 uint32_t sdvo_reg; 75 int sdvo_reg;
77 76
78 /* Active outputs controlled by this SDVO output */ 77 /* Active outputs controlled by this SDVO output */
79 uint16_t controlled_output; 78 uint16_t controlled_output;
@@ -96,7 +95,7 @@ struct intel_sdvo {
96 /* 95 /*
97 * Hotplug activation bits for this device 96 * Hotplug activation bits for this device
98 */ 97 */
99 uint16_t hotplug_active; 98 uint8_t hotplug_active[2];
100 99
101 /** 100 /**
102 * This is used to select the color range of RBG outputs in HDMI mode. 101 * This is used to select the color range of RBG outputs in HDMI mode.
@@ -113,9 +112,6 @@ struct intel_sdvo {
113 */ 112 */
114 bool is_tv; 113 bool is_tv;
115 114
116 /* On different gens SDVOB is at different places. */
117 bool is_sdvob;
118
119 /* This is for current tv format name */ 115 /* This is for current tv format name */
120 int tv_format_index; 116 int tv_format_index;
121 117
@@ -140,10 +136,8 @@ struct intel_sdvo {
140 /* DDC bus used by this SDVO encoder */ 136 /* DDC bus used by this SDVO encoder */
141 uint8_t ddc_bus; 137 uint8_t ddc_bus;
142 138
143 /* 139 /* Input timings for adjusted_mode */
144 * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd 140 struct intel_sdvo_dtd input_dtd;
145 */
146 uint8_t dtd_sdvo_flags;
147}; 141};
148 142
149struct intel_sdvo_connector { 143struct intel_sdvo_connector {
@@ -152,7 +146,7 @@ struct intel_sdvo_connector {
152 /* Mark the type of connector */ 146 /* Mark the type of connector */
153 uint16_t output_flag; 147 uint16_t output_flag;
154 148
155 enum hdmi_force_audio force_audio; 149 int force_audio;
156 150
157 /* This contains all current supported TV format */ 151 /* This contains all current supported TV format */
158 u8 tv_format_supported[TV_FORMAT_NUM]; 152 u8 tv_format_supported[TV_FORMAT_NUM];
@@ -294,120 +288,121 @@ static const struct _sdvo_cmd_name {
294 u8 cmd; 288 u8 cmd;
295 const char *name; 289 const char *name;
296} sdvo_cmd_names[] = { 290} sdvo_cmd_names[] = {
297 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), 291 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
298 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), 292 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
299 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), 293 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
300 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), 294 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
301 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), 295 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
302 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), 296 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
303 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), 297 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
304 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), 298 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
305 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), 299 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
306 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), 300 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
307 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), 301 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
308 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), 302 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
309 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), 303 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), 304 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), 305 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
312 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), 306 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
313 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), 307 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
314 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), 308 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
315 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), 309 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
316 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), 310 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
317 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), 311 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
318 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), 312 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
319 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), 313 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
320 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), 314 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
321 SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), 315 SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
322 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), 316 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
323 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), 317 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
324 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), 318 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
325 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), 319 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
326 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), 320 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
327 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), 321 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), 322 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), 323 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), 324 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
331 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), 325 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), 326 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
333 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), 327 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
334 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), 328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
335 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), 329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
336 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), 330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
337 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), 331 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
338 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), 332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
339 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), 333 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
340 334
341 /* Add the op code for SDVO enhancements */ 335 /* Add the op code for SDVO enhancements */
342 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), 336 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
343 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), 337 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
344 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), 338 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
345 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), 339 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
346 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), 340 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
347 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), 341 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
348 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), 342 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
349 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), 343 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
350 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), 344 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
351 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), 345 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
352 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), 346 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), 347 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
354 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), 348 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
355 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), 349 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
356 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), 350 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
357 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), 351 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
358 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), 352 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
359 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), 353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
360 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), 354 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
361 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), 355 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
362 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), 356 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
363 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), 357 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
364 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), 358 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
365 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), 359 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
366 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), 360 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
367 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), 361 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
368 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), 362 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
369 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), 363 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
370 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), 364 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
371 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), 365 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
372 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), 366 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
373 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), 367 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
374 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), 368 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
375 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), 369 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), 370 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
377 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), 371 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
378 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), 372 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
379 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), 373 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
380 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), 374 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
381 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), 375 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
382 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), 376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
383 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), 377 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
384 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), 378 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
385 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), 379 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
386 380
387 /* HDMI op code */ 381 /* HDMI op code */
388 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), 382 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
389 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), 383 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
390 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), 384 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
391 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), 385 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
392 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), 386 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
393 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), 387 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
394 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), 388 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
395 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), 389 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
396 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), 390 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
397 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), 391 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
398 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), 392 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
399 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), 393 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
400 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), 394 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
401 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), 395 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
402 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), 396 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
403 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), 397 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
404 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), 398 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
405 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), 399 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
406 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), 400 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
407 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 401 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
408}; 402};
409 403
410#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC") 404#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
405#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
411 406
412static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, 407static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
413 const void *args, int args_len) 408 const void *args, int args_len)
@@ -444,20 +439,9 @@ static const char *cmd_status_names[] = {
444static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, 439static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
445 const void *args, int args_len) 440 const void *args, int args_len)
446{ 441{
447 u8 *buf, status; 442 u8 buf[args_len*2 + 2], status;
448 struct i2c_msg *msgs; 443 struct i2c_msg msgs[args_len + 3];
449 int i, ret = true; 444 int i, ret;
450
451 /* Would be simpler to allocate both in one go ? */
452 buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
453 if (!buf)
454 return false;
455
456 msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
457 if (!msgs) {
458 kfree(buf);
459 return false;
460 }
461 445
462 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); 446 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
463 447
@@ -491,25 +475,21 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
491 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); 475 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
492 if (ret < 0) { 476 if (ret < 0) {
493 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 477 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
494 ret = false; 478 return false;
495 goto out;
496 } 479 }
497 if (ret != i+3) { 480 if (ret != i+3) {
498 /* failure in I2C transfer */ 481 /* failure in I2C transfer */
499 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); 482 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
500 ret = false; 483 return false;
501 } 484 }
502 485
503out: 486 return true;
504 kfree(msgs);
505 kfree(buf);
506 return ret;
507} 487}
508 488
509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 489static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
510 void *response, int response_len) 490 void *response, int response_len)
511{ 491{
512 u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ 492 u8 retry = 5;
513 u8 status; 493 u8 status;
514 int i; 494 int i;
515 495
@@ -522,27 +502,14 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
522 * command to be complete. 502 * command to be complete.
523 * 503 *
524 * Check 5 times in case the hardware failed to read the docs. 504 * Check 5 times in case the hardware failed to read the docs.
525 *
526 * Also beware that the first response by many devices is to
527 * reply PENDING and stall for time. TVs are notorious for
528 * requiring longer than specified to complete their replies.
529 * Originally (in the DDX long ago), the delay was only ever 15ms
530 * with an additional delay of 30ms applied for TVs added later after
531 * many experiments. To accommodate both sets of delays, we do a
532 * sequence of slow checks if the device is falling behind and fails
533 * to reply within 5*15µs.
534 */ 505 */
535 if (!intel_sdvo_read_byte(intel_sdvo, 506 if (!intel_sdvo_read_byte(intel_sdvo,
536 SDVO_I2C_CMD_STATUS, 507 SDVO_I2C_CMD_STATUS,
537 &status)) 508 &status))
538 goto log_fail; 509 goto log_fail;
539 510
540 while (status == SDVO_CMD_STATUS_PENDING && --retry) { 511 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
541 if (retry < 10) 512 udelay(15);
542 msleep(15);
543 else
544 udelay(15);
545
546 if (!intel_sdvo_read_byte(intel_sdvo, 513 if (!intel_sdvo_read_byte(intel_sdvo,
547 SDVO_I2C_CMD_STATUS, 514 SDVO_I2C_CMD_STATUS,
548 &status)) 515 &status))
@@ -645,14 +612,6 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
645 &outputs, sizeof(outputs)); 612 &outputs, sizeof(outputs));
646} 613}
647 614
648static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
649 u16 *outputs)
650{
651 return intel_sdvo_get_value(intel_sdvo,
652 SDVO_CMD_GET_ACTIVE_OUTPUTS,
653 outputs, sizeof(*outputs));
654}
655
656static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, 615static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
657 int mode) 616 int mode)
658{ 617{
@@ -770,26 +729,21 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
770 uint16_t width, height; 729 uint16_t width, height;
771 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; 730 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
772 uint16_t h_sync_offset, v_sync_offset; 731 uint16_t h_sync_offset, v_sync_offset;
773 int mode_clock;
774 732
775 width = mode->hdisplay; 733 width = mode->crtc_hdisplay;
776 height = mode->vdisplay; 734 height = mode->crtc_vdisplay;
777 735
778 /* do some mode translations */ 736 /* do some mode translations */
779 h_blank_len = mode->htotal - mode->hdisplay; 737 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
780 h_sync_len = mode->hsync_end - mode->hsync_start; 738 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
781
782 v_blank_len = mode->vtotal - mode->vdisplay;
783 v_sync_len = mode->vsync_end - mode->vsync_start;
784 739
785 h_sync_offset = mode->hsync_start - mode->hdisplay; 740 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
786 v_sync_offset = mode->vsync_start - mode->vdisplay; 741 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
787 742
788 mode_clock = mode->clock; 743 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
789 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; 744 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
790 mode_clock /= 10;
791 dtd->part1.clock = mode_clock;
792 745
746 dtd->part1.clock = mode->clock / 10;
793 dtd->part1.h_active = width & 0xff; 747 dtd->part1.h_active = width & 0xff;
794 dtd->part1.h_blank = h_blank_len & 0xff; 748 dtd->part1.h_blank = h_blank_len & 0xff;
795 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | 749 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -808,12 +762,10 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
808 ((v_sync_len & 0x30) >> 4); 762 ((v_sync_len & 0x30) >> 4);
809 763
810 dtd->part2.dtd_flags = 0x18; 764 dtd->part2.dtd_flags = 0x18;
811 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
812 dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
813 if (mode->flags & DRM_MODE_FLAG_PHSYNC) 765 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
814 dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE; 766 dtd->part2.dtd_flags |= 0x2;
815 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 767 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
816 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; 768 dtd->part2.dtd_flags |= 0x4;
817 769
818 dtd->part2.sdvo_flags = 0; 770 dtd->part2.sdvo_flags = 0;
819 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; 771 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -847,11 +799,9 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
847 mode->clock = dtd->part1.clock * 10; 799 mode->clock = dtd->part1.clock * 10;
848 800
849 mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); 801 mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
850 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) 802 if (dtd->part2.dtd_flags & 0x2)
851 mode->flags |= DRM_MODE_FLAG_INTERLACE;
852 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
853 mode->flags |= DRM_MODE_FLAG_PHSYNC; 803 mode->flags |= DRM_MODE_FLAG_PHSYNC;
854 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) 804 if (dtd->part2.dtd_flags & 0x4)
855 mode->flags |= DRM_MODE_FLAG_PVSYNC; 805 mode->flags |= DRM_MODE_FLAG_PVSYNC;
856} 806}
857 807
@@ -907,38 +857,31 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
907} 857}
908#endif 858#endif
909 859
910static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, 860static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
911 unsigned if_index, uint8_t tx_rate,
912 uint8_t *data, unsigned length)
913{ 861{
914 uint8_t set_buf_index[2] = { if_index, 0 }; 862 struct dip_infoframe avi_if = {
915 uint8_t hbuf_size, tmp[8]; 863 .type = DIP_TYPE_AVI,
916 int i; 864 .ver = DIP_VERSION_AVI,
865 .len = DIP_LEN_AVI,
866 };
867 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
868 uint8_t set_buf_index[2] = { 1, 0 };
869 uint64_t *data = (uint64_t *)&avi_if;
870 unsigned i;
871
872 intel_dip_infoframe_csum(&avi_if);
917 873
918 if (!intel_sdvo_set_value(intel_sdvo, 874 if (!intel_sdvo_set_value(intel_sdvo,
919 SDVO_CMD_SET_HBUF_INDEX, 875 SDVO_CMD_SET_HBUF_INDEX,
920 set_buf_index, 2)) 876 set_buf_index, 2))
921 return false; 877 return false;
922 878
923 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, 879 for (i = 0; i < sizeof(avi_if); i += 8) {
924 &hbuf_size, 1))
925 return false;
926
927 /* Buffer size is 0 based, hooray! */
928 hbuf_size++;
929
930 DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
931 if_index, length, hbuf_size);
932
933 for (i = 0; i < hbuf_size; i += 8) {
934 memset(tmp, 0, 8);
935 if (i < length)
936 memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
937
938 if (!intel_sdvo_set_value(intel_sdvo, 880 if (!intel_sdvo_set_value(intel_sdvo,
939 SDVO_CMD_SET_HBUF_DATA, 881 SDVO_CMD_SET_HBUF_DATA,
940 tmp, 8)) 882 data, 8))
941 return false; 883 return false;
884 data++;
942 } 885 }
943 886
944 return intel_sdvo_set_value(intel_sdvo, 887 return intel_sdvo_set_value(intel_sdvo,
@@ -946,28 +889,6 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
946 &tx_rate, 1); 889 &tx_rate, 1);
947} 890}
948 891
949static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
950{
951 struct dip_infoframe avi_if = {
952 .type = DIP_TYPE_AVI,
953 .ver = DIP_VERSION_AVI,
954 .len = DIP_LEN_AVI,
955 };
956 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
957
958 intel_dip_infoframe_csum(&avi_if);
959
960 /* sdvo spec says that the ecc is handled by the hw, and it looks like
961 * we must not send the ecc field, either. */
962 memcpy(sdvo_data, &avi_if, 3);
963 sdvo_data[3] = avi_if.checksum;
964 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
965
966 return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
967 SDVO_HBUF_TX_VSYNC,
968 sdvo_data, sizeof(sdvo_data));
969}
970
971static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) 892static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
972{ 893{
973 struct intel_sdvo_tv_format format; 894 struct intel_sdvo_tv_format format;
@@ -985,7 +906,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
985 906
986static bool 907static bool
987intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, 908intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
988 const struct drm_display_mode *mode) 909 struct drm_display_mode *mode)
989{ 910{
990 struct intel_sdvo_dtd output_dtd; 911 struct intel_sdvo_dtd output_dtd;
991 912
@@ -1000,15 +921,11 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
1000 return true; 921 return true;
1001} 922}
1002 923
1003/* Asks the sdvo controller for the preferred input mode given the output mode.
1004 * Unfortunately we have to set up the full output mode to do that. */
1005static bool 924static bool
1006intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, 925intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1007 const struct drm_display_mode *mode, 926 struct drm_display_mode *mode,
1008 struct drm_display_mode *adjusted_mode) 927 struct drm_display_mode *adjusted_mode)
1009{ 928{
1010 struct intel_sdvo_dtd input_dtd;
1011
1012 /* Reset the input timing to the screen. Assume always input 0. */ 929 /* Reset the input timing to the screen. Assume always input 0. */
1013 if (!intel_sdvo_set_target_input(intel_sdvo)) 930 if (!intel_sdvo_set_target_input(intel_sdvo))
1014 return false; 931 return false;
@@ -1020,17 +937,17 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1020 return false; 937 return false;
1021 938
1022 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, 939 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
1023 &input_dtd)) 940 &intel_sdvo->input_dtd))
1024 return false; 941 return false;
1025 942
1026 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 943 intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
1027 intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
1028 944
945 drm_mode_set_crtcinfo(adjusted_mode, 0);
1029 return true; 946 return true;
1030} 947}
1031 948
1032static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, 949static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1033 const struct drm_display_mode *mode, 950 struct drm_display_mode *mode,
1034 struct drm_display_mode *adjusted_mode) 951 struct drm_display_mode *adjusted_mode)
1035{ 952{
1036 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 953 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@@ -1045,17 +962,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1045 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) 962 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1046 return false; 963 return false;
1047 964
1048 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 965 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1049 mode, 966 mode,
1050 adjusted_mode); 967 adjusted_mode);
1051 } else if (intel_sdvo->is_lvds) { 968 } else if (intel_sdvo->is_lvds) {
1052 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 969 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1053 intel_sdvo->sdvo_lvds_fixed_mode)) 970 intel_sdvo->sdvo_lvds_fixed_mode))
1054 return false; 971 return false;
1055 972
1056 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 973 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1057 mode, 974 mode,
1058 adjusted_mode); 975 adjusted_mode);
1059 } 976 }
1060 977
1061 /* Make the CRTC code factor in the SDVO pixel multiplier. The 978 /* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1078,7 +995,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1078 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 995 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1079 u32 sdvox; 996 u32 sdvox;
1080 struct intel_sdvo_in_out_map in_out; 997 struct intel_sdvo_in_out_map in_out;
1081 struct intel_sdvo_dtd input_dtd, output_dtd; 998 struct intel_sdvo_dtd input_dtd;
1082 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 999 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1083 int rate; 1000 int rate;
1084 1001
@@ -1103,15 +1020,20 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1103 intel_sdvo->attached_output)) 1020 intel_sdvo->attached_output))
1104 return; 1021 return;
1105 1022
1106 /* lvds has a special fixed output timing. */ 1023 /* We have tried to get input timing in mode_fixup, and filled into
1107 if (intel_sdvo->is_lvds) 1024 * adjusted_mode.
1108 intel_sdvo_get_dtd_from_mode(&output_dtd, 1025 */
1109 intel_sdvo->sdvo_lvds_fixed_mode); 1026 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
1110 else 1027 input_dtd = intel_sdvo->input_dtd;
1111 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1028 } else {
1112 if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) 1029 /* Set the output timing to the screen */
1113 DRM_INFO("Setting output timings on %s failed\n", 1030 if (!intel_sdvo_set_target_output(intel_sdvo,
1114 SDVO_NAME(intel_sdvo)); 1031 intel_sdvo->attached_output))
1032 return;
1033
1034 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1035 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1036 }
1115 1037
1116 /* Set the input timing to the screen. Assume always input 0. */ 1038 /* Set the input timing to the screen. Assume always input 0. */
1117 if (!intel_sdvo_set_target_input(intel_sdvo)) 1039 if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1129,15 +1051,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1129 !intel_sdvo_set_tv_format(intel_sdvo)) 1051 !intel_sdvo_set_tv_format(intel_sdvo))
1130 return; 1052 return;
1131 1053
1132 /* We have tried to get input timing in mode_fixup, and filled into 1054 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1133 * adjusted_mode.
1134 */
1135 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1136 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1137 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1138 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1139 DRM_INFO("Setting input timings on %s failed\n",
1140 SDVO_NAME(intel_sdvo));
1141 1055
1142 switch (pixel_multiplier) { 1056 switch (pixel_multiplier) {
1143 default: 1057 default:
@@ -1192,168 +1106,51 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1192 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1106 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1193} 1107}
1194 1108
1195static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) 1109static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1196{
1197 struct intel_sdvo_connector *intel_sdvo_connector =
1198 to_intel_sdvo_connector(&connector->base);
1199 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
1200 u16 active_outputs;
1201
1202 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1203
1204 if (active_outputs & intel_sdvo_connector->output_flag)
1205 return true;
1206 else
1207 return false;
1208}
1209
1210static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1211 enum pipe *pipe)
1212{ 1110{
1213 struct drm_device *dev = encoder->base.dev; 1111 struct drm_device *dev = encoder->dev;
1214 struct drm_i915_private *dev_priv = dev->dev_private; 1112 struct drm_i915_private *dev_priv = dev->dev_private;
1215 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1113 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1216 u32 tmp; 1114 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1217
1218 tmp = I915_READ(intel_sdvo->sdvo_reg);
1219
1220 if (!(tmp & SDVO_ENABLE))
1221 return false;
1222
1223 if (HAS_PCH_CPT(dev))
1224 *pipe = PORT_TO_PIPE_CPT(tmp);
1225 else
1226 *pipe = PORT_TO_PIPE(tmp);
1227
1228 return true;
1229}
1230
1231static void intel_disable_sdvo(struct intel_encoder *encoder)
1232{
1233 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1234 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1235 u32 temp; 1115 u32 temp;
1236 1116
1237 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1238 if (0)
1239 intel_sdvo_set_encoder_power_state(intel_sdvo,
1240 DRM_MODE_DPMS_OFF);
1241
1242 temp = I915_READ(intel_sdvo->sdvo_reg);
1243 if ((temp & SDVO_ENABLE) != 0) {
1244 /* HW workaround for IBX, we need to move the port to
1245 * transcoder A before disabling it. */
1246 if (HAS_PCH_IBX(encoder->base.dev)) {
1247 struct drm_crtc *crtc = encoder->base.crtc;
1248 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1249
1250 if (temp & SDVO_PIPE_B_SELECT) {
1251 temp &= ~SDVO_PIPE_B_SELECT;
1252 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1253 POSTING_READ(intel_sdvo->sdvo_reg);
1254
1255 /* Again we need to write this twice. */
1256 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1257 POSTING_READ(intel_sdvo->sdvo_reg);
1258
1259 /* Transcoder selection bits only update
1260 * effectively on vblank. */
1261 if (crtc)
1262 intel_wait_for_vblank(encoder->base.dev, pipe);
1263 else
1264 msleep(50);
1265 }
1266 }
1267
1268 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1269 }
1270}
1271
1272static void intel_enable_sdvo(struct intel_encoder *encoder)
1273{
1274 struct drm_device *dev = encoder->base.dev;
1275 struct drm_i915_private *dev_priv = dev->dev_private;
1276 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1277 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1278 u32 temp;
1279 bool input1, input2;
1280 int i;
1281 u8 status;
1282
1283 temp = I915_READ(intel_sdvo->sdvo_reg);
1284 if ((temp & SDVO_ENABLE) == 0) {
1285 /* HW workaround for IBX, we need to move the port
1286 * to transcoder A before disabling it. */
1287 if (HAS_PCH_IBX(dev)) {
1288 struct drm_crtc *crtc = encoder->base.crtc;
1289 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1290
1291 /* Restore the transcoder select bit. */
1292 if (pipe == PIPE_B)
1293 temp |= SDVO_PIPE_B_SELECT;
1294 }
1295
1296 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1297 }
1298 for (i = 0; i < 2; i++)
1299 intel_wait_for_vblank(dev, intel_crtc->pipe);
1300
1301 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1302 /* Warn if the device reported failure to sync.
1303 * A lot of SDVO devices fail to notify of sync, but it's
1304 * a given it the status is a success, we succeeded.
1305 */
1306 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1307 DRM_DEBUG_KMS("First %s output reported failure to "
1308 "sync\n", SDVO_NAME(intel_sdvo));
1309 }
1310
1311 if (0)
1312 intel_sdvo_set_encoder_power_state(intel_sdvo,
1313 DRM_MODE_DPMS_ON);
1314 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1315}
1316
1317static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1318{
1319 struct drm_crtc *crtc;
1320 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1321
1322 /* dvo supports only 2 dpms states. */
1323 if (mode != DRM_MODE_DPMS_ON)
1324 mode = DRM_MODE_DPMS_OFF;
1325
1326 if (mode == connector->dpms)
1327 return;
1328
1329 connector->dpms = mode;
1330
1331 /* Only need to change hw state when actually enabled */
1332 crtc = intel_sdvo->base.base.crtc;
1333 if (!crtc) {
1334 intel_sdvo->base.connectors_active = false;
1335 return;
1336 }
1337
1338 if (mode != DRM_MODE_DPMS_ON) { 1117 if (mode != DRM_MODE_DPMS_ON) {
1339 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1118 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1340 if (0) 1119 if (0)
1341 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1120 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1342 1121
1343 intel_sdvo->base.connectors_active = false; 1122 if (mode == DRM_MODE_DPMS_OFF) {
1344 1123 temp = I915_READ(intel_sdvo->sdvo_reg);
1345 intel_crtc_update_dpms(crtc); 1124 if ((temp & SDVO_ENABLE) != 0) {
1125 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1126 }
1127 }
1346 } else { 1128 } else {
1347 intel_sdvo->base.connectors_active = true; 1129 bool input1, input2;
1348 1130 int i;
1349 intel_crtc_update_dpms(crtc); 1131 u8 status;
1132
1133 temp = I915_READ(intel_sdvo->sdvo_reg);
1134 if ((temp & SDVO_ENABLE) == 0)
1135 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1136 for (i = 0; i < 2; i++)
1137 intel_wait_for_vblank(dev, intel_crtc->pipe);
1138
1139 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1140 /* Warn if the device reported failure to sync.
1141 * A lot of SDVO devices fail to notify of sync, but it's
1142 * a given it the status is a success, we succeeded.
1143 */
1144 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1145 DRM_DEBUG_KMS("First %s output reported failure to "
1146 "sync\n", SDVO_NAME(intel_sdvo));
1147 }
1350 1148
1351 if (0) 1149 if (0)
1352 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1150 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1353 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); 1151 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1354 } 1152 }
1355 1153 return;
1356 intel_modeset_check_state(connector->dev);
1357} 1154}
1358 1155
1359static int intel_sdvo_mode_valid(struct drm_connector *connector, 1156static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1418,36 +1215,27 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1418 return true; 1215 return true;
1419} 1216}
1420 1217
1421static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) 1218static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1422{ 1219{
1423 struct drm_device *dev = intel_sdvo->base.base.dev; 1220 u8 response[2];
1424 uint16_t hotplug;
1425 1221
1426 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1222 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1427 * on the line. */ 1223 &response, 2) && response[0];
1428 if (IS_I945G(dev) || IS_I945GM(dev))
1429 return 0;
1430
1431 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1432 &hotplug, sizeof(hotplug)))
1433 return 0;
1434
1435 return hotplug;
1436} 1224}
1437 1225
1438static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) 1226static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1439{ 1227{
1440 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1228 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1441 1229
1442 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, 1230 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
1443 &intel_sdvo->hotplug_active, 2);
1444} 1231}
1445 1232
1446static bool 1233static bool
1447intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) 1234intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
1448{ 1235{
1449 /* Is there more than one type of output? */ 1236 /* Is there more than one type of output? */
1450 return hweight16(intel_sdvo->caps.output_flags) > 1; 1237 int caps = intel_sdvo->caps.output_flags & 0xf;
1238 return caps & -caps;
1451} 1239}
1452 1240
1453static struct edid * 1241static struct edid *
@@ -1464,12 +1252,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
1464 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1252 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1465 1253
1466 return drm_get_edid(connector, 1254 return drm_get_edid(connector,
1467 intel_gmbus_get_adapter(dev_priv, 1255 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1468 dev_priv->crt_ddc_pin));
1469} 1256}
1470 1257
1471static enum drm_connector_status 1258enum drm_connector_status
1472intel_sdvo_tmds_sink_detect(struct drm_connector *connector) 1259intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1473{ 1260{
1474 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1261 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1475 enum drm_connector_status status; 1262 enum drm_connector_status status;
@@ -1516,30 +1303,19 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1516 } 1303 }
1517 } else 1304 } else
1518 status = connector_status_disconnected; 1305 status = connector_status_disconnected;
1306 connector->display_info.raw_edid = NULL;
1519 kfree(edid); 1307 kfree(edid);
1520 } 1308 }
1521 1309
1522 if (status == connector_status_connected) { 1310 if (status == connector_status_connected) {
1523 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1311 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1524 if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO) 1312 if (intel_sdvo_connector->force_audio)
1525 intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON); 1313 intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
1526 } 1314 }
1527 1315
1528 return status; 1316 return status;
1529} 1317}
1530 1318
1531static bool
1532intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
1533 struct edid *edid)
1534{
1535 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1536 bool connector_is_digital = !!IS_DIGITAL(sdvo);
1537
1538 DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
1539 connector_is_digital, monitor_is_digital);
1540 return connector_is_digital == monitor_is_digital;
1541}
1542
1543static enum drm_connector_status 1319static enum drm_connector_status
1544intel_sdvo_detect(struct drm_connector *connector, bool force) 1320intel_sdvo_detect(struct drm_connector *connector, bool force)
1545{ 1321{
@@ -1548,9 +1324,16 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1548 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1324 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1549 enum drm_connector_status ret; 1325 enum drm_connector_status ret;
1550 1326
1551 if (!intel_sdvo_get_value(intel_sdvo, 1327 if (!intel_sdvo_write_cmd(intel_sdvo,
1552 SDVO_CMD_GET_ATTACHED_DISPLAYS, 1328 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1553 &response, 2)) 1329 return connector_status_unknown;
1330
1331 /* add 30ms delay when the output type might be TV */
1332 if (intel_sdvo->caps.output_flags &
1333 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1334 mdelay(30);
1335
1336 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1554 return connector_status_unknown; 1337 return connector_status_unknown;
1555 1338
1556 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", 1339 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1568,7 +1351,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1568 if ((intel_sdvo_connector->output_flag & response) == 0) 1351 if ((intel_sdvo_connector->output_flag & response) == 0)
1569 ret = connector_status_disconnected; 1352 ret = connector_status_disconnected;
1570 else if (IS_TMDS(intel_sdvo_connector)) 1353 else if (IS_TMDS(intel_sdvo_connector))
1571 ret = intel_sdvo_tmds_sink_detect(connector); 1354 ret = intel_sdvo_hdmi_sink_detect(connector);
1572 else { 1355 else {
1573 struct edid *edid; 1356 struct edid *edid;
1574 1357
@@ -1577,12 +1360,11 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1577 if (edid == NULL) 1360 if (edid == NULL)
1578 edid = intel_sdvo_get_analog_edid(connector); 1361 edid = intel_sdvo_get_analog_edid(connector);
1579 if (edid != NULL) { 1362 if (edid != NULL) {
1580 if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, 1363 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1581 edid))
1582 ret = connector_status_connected;
1583 else
1584 ret = connector_status_disconnected; 1364 ret = connector_status_disconnected;
1585 1365 else
1366 ret = connector_status_connected;
1367 connector->display_info.raw_edid = NULL;
1586 kfree(edid); 1368 kfree(edid);
1587 } else 1369 } else
1588 ret = connector_status_connected; 1370 ret = connector_status_connected;
@@ -1622,12 +1404,16 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1622 edid = intel_sdvo_get_analog_edid(connector); 1404 edid = intel_sdvo_get_analog_edid(connector);
1623 1405
1624 if (edid != NULL) { 1406 if (edid != NULL) {
1625 if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), 1407 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1626 edid)) { 1408 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1409 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1410
1411 if (connector_is_digital == monitor_is_digital) {
1627 drm_mode_connector_update_edid_property(connector, edid); 1412 drm_mode_connector_update_edid_property(connector, edid);
1628 drm_add_edid_modes(connector, edid); 1413 drm_add_edid_modes(connector, edid);
1629 } 1414 }
1630 1415
1416 connector->display_info.raw_edid = NULL;
1631 kfree(edid); 1417 kfree(edid);
1632 } 1418 }
1633} 1419}
@@ -1765,6 +1551,9 @@ end:
1765 intel_sdvo->sdvo_lvds_fixed_mode = 1551 intel_sdvo->sdvo_lvds_fixed_mode =
1766 drm_mode_duplicate(connector->dev, newmode); 1552 drm_mode_duplicate(connector->dev, newmode);
1767 1553
1554 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
1555 0);
1556
1768 intel_sdvo->is_lvds = true; 1557 intel_sdvo->is_lvds = true;
1769 break; 1558 break;
1770 } 1559 }
@@ -1839,7 +1628,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1839 intel_sdvo_destroy_enhance_property(connector); 1628 intel_sdvo_destroy_enhance_property(connector);
1840 drm_sysfs_connector_remove(connector); 1629 drm_sysfs_connector_remove(connector);
1841 drm_connector_cleanup(connector); 1630 drm_connector_cleanup(connector);
1842 kfree(intel_sdvo_connector); 1631 kfree(connector);
1843} 1632}
1844 1633
1845static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) 1634static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1854,7 +1643,6 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1854 edid = intel_sdvo_get_edid(connector); 1643 edid = intel_sdvo_get_edid(connector);
1855 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) 1644 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1856 has_audio = drm_detect_monitor_audio(edid); 1645 has_audio = drm_detect_monitor_audio(edid);
1857 kfree(edid);
1858 1646
1859 return has_audio; 1647 return has_audio;
1860} 1648}
@@ -1871,7 +1659,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1871 uint8_t cmd; 1659 uint8_t cmd;
1872 int ret; 1660 int ret;
1873 1661
1874 ret = drm_object_property_set_value(&connector->base, property, val); 1662 ret = drm_connector_property_set_value(connector, property, val);
1875 if (ret) 1663 if (ret)
1876 return ret; 1664 return ret;
1877 1665
@@ -1884,10 +1672,10 @@ intel_sdvo_set_property(struct drm_connector *connector,
1884 1672
1885 intel_sdvo_connector->force_audio = i; 1673 intel_sdvo_connector->force_audio = i;
1886 1674
1887 if (i == HDMI_AUDIO_AUTO) 1675 if (i == 0)
1888 has_audio = intel_sdvo_detect_hdmi_audio(connector); 1676 has_audio = intel_sdvo_detect_hdmi_audio(connector);
1889 else 1677 else
1890 has_audio = (i == HDMI_AUDIO_ON); 1678 has_audio = i > 0;
1891 1679
1892 if (has_audio == intel_sdvo->has_hdmi_audio) 1680 if (has_audio == intel_sdvo->has_hdmi_audio)
1893 return 0; 1681 return 0;
@@ -1926,7 +1714,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1926 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { 1714 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
1927 temp_value = val; 1715 temp_value = val;
1928 if (intel_sdvo_connector->left == property) { 1716 if (intel_sdvo_connector->left == property) {
1929 drm_object_property_set_value(&connector->base, 1717 drm_connector_property_set_value(connector,
1930 intel_sdvo_connector->right, val); 1718 intel_sdvo_connector->right, val);
1931 if (intel_sdvo_connector->left_margin == temp_value) 1719 if (intel_sdvo_connector->left_margin == temp_value)
1932 return 0; 1720 return 0;
@@ -1938,7 +1726,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1938 cmd = SDVO_CMD_SET_OVERSCAN_H; 1726 cmd = SDVO_CMD_SET_OVERSCAN_H;
1939 goto set_value; 1727 goto set_value;
1940 } else if (intel_sdvo_connector->right == property) { 1728 } else if (intel_sdvo_connector->right == property) {
1941 drm_object_property_set_value(&connector->base, 1729 drm_connector_property_set_value(connector,
1942 intel_sdvo_connector->left, val); 1730 intel_sdvo_connector->left, val);
1943 if (intel_sdvo_connector->right_margin == temp_value) 1731 if (intel_sdvo_connector->right_margin == temp_value)
1944 return 0; 1732 return 0;
@@ -1950,7 +1738,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1950 cmd = SDVO_CMD_SET_OVERSCAN_H; 1738 cmd = SDVO_CMD_SET_OVERSCAN_H;
1951 goto set_value; 1739 goto set_value;
1952 } else if (intel_sdvo_connector->top == property) { 1740 } else if (intel_sdvo_connector->top == property) {
1953 drm_object_property_set_value(&connector->base, 1741 drm_connector_property_set_value(connector,
1954 intel_sdvo_connector->bottom, val); 1742 intel_sdvo_connector->bottom, val);
1955 if (intel_sdvo_connector->top_margin == temp_value) 1743 if (intel_sdvo_connector->top_margin == temp_value)
1956 return 0; 1744 return 0;
@@ -1962,7 +1750,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1962 cmd = SDVO_CMD_SET_OVERSCAN_V; 1750 cmd = SDVO_CMD_SET_OVERSCAN_V;
1963 goto set_value; 1751 goto set_value;
1964 } else if (intel_sdvo_connector->bottom == property) { 1752 } else if (intel_sdvo_connector->bottom == property) {
1965 drm_object_property_set_value(&connector->base, 1753 drm_connector_property_set_value(connector,
1966 intel_sdvo_connector->top, val); 1754 intel_sdvo_connector->top, val);
1967 if (intel_sdvo_connector->bottom_margin == temp_value) 1755 if (intel_sdvo_connector->bottom_margin == temp_value)
1968 return 0; 1756 return 0;
@@ -1999,8 +1787,8 @@ set_value:
1999done: 1787done:
2000 if (intel_sdvo->base.base.crtc) { 1788 if (intel_sdvo->base.base.crtc) {
2001 struct drm_crtc *crtc = intel_sdvo->base.base.crtc; 1789 struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
2002 intel_set_mode(crtc, &crtc->mode, 1790 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
2003 crtc->x, crtc->y, crtc->fb); 1791 crtc->y, crtc->fb);
2004 } 1792 }
2005 1793
2006 return 0; 1794 return 0;
@@ -2008,13 +1796,15 @@ done:
2008} 1796}
2009 1797
2010static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { 1798static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
1799 .dpms = intel_sdvo_dpms,
2011 .mode_fixup = intel_sdvo_mode_fixup, 1800 .mode_fixup = intel_sdvo_mode_fixup,
1801 .prepare = intel_encoder_prepare,
2012 .mode_set = intel_sdvo_mode_set, 1802 .mode_set = intel_sdvo_mode_set,
2013 .disable = intel_encoder_noop, 1803 .commit = intel_encoder_commit,
2014}; 1804};
2015 1805
2016static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 1806static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2017 .dpms = intel_sdvo_dpms, 1807 .dpms = drm_helper_connector_dpms,
2018 .detect = intel_sdvo_detect, 1808 .detect = intel_sdvo_detect,
2019 .fill_modes = drm_helper_probe_single_connector_modes, 1809 .fill_modes = drm_helper_probe_single_connector_modes,
2020 .set_property = intel_sdvo_set_property, 1810 .set_property = intel_sdvo_set_property,
@@ -2092,7 +1882,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2092{ 1882{
2093 struct sdvo_device_mapping *mapping; 1883 struct sdvo_device_mapping *mapping;
2094 1884
2095 if (sdvo->is_sdvob) 1885 if (IS_SDVOB(reg))
2096 mapping = &(dev_priv->sdvo_mappings[0]); 1886 mapping = &(dev_priv->sdvo_mappings[0]);
2097 else 1887 else
2098 mapping = &(dev_priv->sdvo_mappings[1]); 1888 mapping = &(dev_priv->sdvo_mappings[1]);
@@ -2108,31 +1898,26 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2108 struct intel_sdvo *sdvo, u32 reg) 1898 struct intel_sdvo *sdvo, u32 reg)
2109{ 1899{
2110 struct sdvo_device_mapping *mapping; 1900 struct sdvo_device_mapping *mapping;
2111 u8 pin; 1901 u8 pin, speed;
2112 1902
2113 if (sdvo->is_sdvob) 1903 if (IS_SDVOB(reg))
2114 mapping = &dev_priv->sdvo_mappings[0]; 1904 mapping = &dev_priv->sdvo_mappings[0];
2115 else 1905 else
2116 mapping = &dev_priv->sdvo_mappings[1]; 1906 mapping = &dev_priv->sdvo_mappings[1];
2117 1907
2118 if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) 1908 pin = GMBUS_PORT_DPB;
1909 speed = GMBUS_RATE_1MHZ >> 8;
1910 if (mapping->initialized) {
2119 pin = mapping->i2c_pin; 1911 pin = mapping->i2c_pin;
2120 else 1912 speed = mapping->i2c_speed;
2121 pin = GMBUS_PORT_DPB; 1913 }
2122
2123 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
2124
2125 /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
2126 * our code totally fails once we start using gmbus. Hence fall back to
2127 * bit banging for now. */
2128 intel_gmbus_force_bit(sdvo->i2c, true);
2129}
2130 1914
2131/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ 1915 if (pin < GMBUS_NUM_PORTS) {
2132static void 1916 sdvo->i2c = &dev_priv->gmbus[pin].adapter;
2133intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) 1917 intel_gmbus_set_speed(sdvo->i2c, speed);
2134{ 1918 intel_gmbus_force_bit(sdvo->i2c, true);
2135 intel_gmbus_force_bit(sdvo->i2c, false); 1919 } else
1920 sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
2136} 1921}
2137 1922
2138static bool 1923static bool
@@ -2142,12 +1927,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
2142} 1927}
2143 1928
2144static u8 1929static u8
2145intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) 1930intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
2146{ 1931{
2147 struct drm_i915_private *dev_priv = dev->dev_private; 1932 struct drm_i915_private *dev_priv = dev->dev_private;
2148 struct sdvo_device_mapping *my_mapping, *other_mapping; 1933 struct sdvo_device_mapping *my_mapping, *other_mapping;
2149 1934
2150 if (sdvo->is_sdvob) { 1935 if (IS_SDVOB(sdvo_reg)) {
2151 my_mapping = &dev_priv->sdvo_mappings[0]; 1936 my_mapping = &dev_priv->sdvo_mappings[0];
2152 other_mapping = &dev_priv->sdvo_mappings[1]; 1937 other_mapping = &dev_priv->sdvo_mappings[1];
2153 } else { 1938 } else {
@@ -2172,7 +1957,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2172 /* No SDVO device info is found for another DVO port, 1957 /* No SDVO device info is found for another DVO port,
2173 * so use mapping assumption we had before BIOS parsing. 1958 * so use mapping assumption we had before BIOS parsing.
2174 */ 1959 */
2175 if (sdvo->is_sdvob) 1960 if (IS_SDVOB(sdvo_reg))
2176 return 0x70; 1961 return 0x70;
2177 else 1962 else
2178 return 0x72; 1963 return 0x72;
@@ -2190,10 +1975,9 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2190 drm_connector_helper_add(&connector->base.base, 1975 drm_connector_helper_add(&connector->base.base,
2191 &intel_sdvo_connector_helper_funcs); 1976 &intel_sdvo_connector_helper_funcs);
2192 1977
2193 connector->base.base.interlace_allowed = 1; 1978 connector->base.base.interlace_allowed = 0;
2194 connector->base.base.doublescan_allowed = 0; 1979 connector->base.base.doublescan_allowed = 0;
2195 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 1980 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2196 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2197 1981
2198 intel_connector_attach_encoder(&connector->base, &encoder->base); 1982 intel_connector_attach_encoder(&connector->base, &encoder->base);
2199 drm_sysfs_connector_add(&connector->base.base); 1983 drm_sysfs_connector_add(&connector->base.base);
@@ -2232,18 +2016,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2232 2016
2233 intel_connector = &intel_sdvo_connector->base; 2017 intel_connector = &intel_sdvo_connector->base;
2234 connector = &intel_connector->base; 2018 connector = &intel_connector->base;
2235 if (intel_sdvo_get_hotplug_support(intel_sdvo) & 2019 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
2236 intel_sdvo_connector->output_flag) {
2237 connector->polled = DRM_CONNECTOR_POLL_HPD; 2020 connector->polled = DRM_CONNECTOR_POLL_HPD;
2238 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; 2021 intel_sdvo->hotplug_active[0] |= 1 << device;
2239 /* Some SDVO devices have one-shot hotplug interrupts. 2022 /* Some SDVO devices have one-shot hotplug interrupts.
2240 * Ensure that they get re-enabled when an interrupt happens. 2023 * Ensure that they get re-enabled when an interrupt happens.
2241 */ 2024 */
2242 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2025 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2243 intel_sdvo_enable_hotplug(intel_encoder); 2026 intel_sdvo_enable_hotplug(intel_encoder);
2244 } else {
2245 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2246 } 2027 }
2028 else
2029 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2247 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2030 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2248 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2031 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2249 2032
@@ -2251,6 +2034,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2251 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2034 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2252 intel_sdvo->is_hdmi = true; 2035 intel_sdvo->is_hdmi = true;
2253 } 2036 }
2037 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2038 (1 << INTEL_ANALOG_CLONE_BIT));
2254 2039
2255 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2040 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2256 if (intel_sdvo->is_hdmi) 2041 if (intel_sdvo->is_hdmi)
@@ -2281,6 +2066,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2281 2066
2282 intel_sdvo->is_tv = true; 2067 intel_sdvo->is_tv = true;
2283 intel_sdvo->base.needs_tv_clock = true; 2068 intel_sdvo->base.needs_tv_clock = true;
2069 intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2284 2070
2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2071 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2286 2072
@@ -2323,6 +2109,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2323 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2109 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2324 } 2110 }
2325 2111
2112 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2113 (1 << INTEL_ANALOG_CLONE_BIT));
2114
2326 intel_sdvo_connector_init(intel_sdvo_connector, 2115 intel_sdvo_connector_init(intel_sdvo_connector,
2327 intel_sdvo); 2116 intel_sdvo);
2328 return true; 2117 return true;
@@ -2353,6 +2142,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2353 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2142 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2354 } 2143 }
2355 2144
2145 intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
2146 (1 << INTEL_SDVO_LVDS_CLONE_BIT));
2147
2356 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2148 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2357 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2149 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2358 goto err; 2150 goto err;
@@ -2390,10 +2182,6 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2390 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) 2182 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
2391 return false; 2183 return false;
2392 2184
2393 if (flags & SDVO_OUTPUT_YPRPB0)
2394 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
2395 return false;
2396
2397 if (flags & SDVO_OUTPUT_RGB0) 2185 if (flags & SDVO_OUTPUT_RGB0)
2398 if (!intel_sdvo_analog_init(intel_sdvo, 0)) 2186 if (!intel_sdvo_analog_init(intel_sdvo, 0))
2399 return false; 2187 return false;
@@ -2420,23 +2208,11 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2420 bytes[0], bytes[1]); 2208 bytes[0], bytes[1]);
2421 return false; 2209 return false;
2422 } 2210 }
2423 intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2211 intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
2424 2212
2425 return true; 2213 return true;
2426} 2214}
2427 2215
2428static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2429{
2430 struct drm_device *dev = intel_sdvo->base.base.dev;
2431 struct drm_connector *connector, *tmp;
2432
2433 list_for_each_entry_safe(connector, tmp,
2434 &dev->mode_config.connector_list, head) {
2435 if (intel_attached_encoder(connector) == &intel_sdvo->base)
2436 intel_sdvo_destroy(connector);
2437 }
2438}
2439
2440static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, 2216static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2441 struct intel_sdvo_connector *intel_sdvo_connector, 2217 struct intel_sdvo_connector *intel_sdvo_connector,
2442 int type) 2218 int type)
@@ -2477,7 +2253,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2477 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2253 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2478 2254
2479 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; 2255 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
2480 drm_object_attach_property(&intel_sdvo_connector->base.base.base, 2256 drm_connector_attach_property(&intel_sdvo_connector->base.base,
2481 intel_sdvo_connector->tv_format, 0); 2257 intel_sdvo_connector->tv_format, 0);
2482 return true; 2258 return true;
2483 2259
@@ -2491,15 +2267,17 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2491 intel_sdvo_connector->max_##name = data_value[0]; \ 2267 intel_sdvo_connector->max_##name = data_value[0]; \
2492 intel_sdvo_connector->cur_##name = response; \ 2268 intel_sdvo_connector->cur_##name = response; \
2493 intel_sdvo_connector->name = \ 2269 intel_sdvo_connector->name = \
2494 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2270 drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
2495 if (!intel_sdvo_connector->name) return false; \ 2271 if (!intel_sdvo_connector->name) return false; \
2496 drm_object_attach_property(&connector->base, \ 2272 intel_sdvo_connector->name->values[0] = 0; \
2273 intel_sdvo_connector->name->values[1] = data_value[0]; \
2274 drm_connector_attach_property(connector, \
2497 intel_sdvo_connector->name, \ 2275 intel_sdvo_connector->name, \
2498 intel_sdvo_connector->cur_##name); \ 2276 intel_sdvo_connector->cur_##name); \
2499 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2277 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
2500 data_value[0], data_value[1], response); \ 2278 data_value[0], data_value[1], response); \
2501 } \ 2279 } \
2502} while (0) 2280} while(0)
2503 2281
2504static bool 2282static bool
2505intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, 2283intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
@@ -2526,20 +2304,26 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2526 intel_sdvo_connector->left_margin = data_value[0] - response; 2304 intel_sdvo_connector->left_margin = data_value[0] - response;
2527 intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; 2305 intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
2528 intel_sdvo_connector->left = 2306 intel_sdvo_connector->left =
2529 drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); 2307 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2308 "left_margin", 2);
2530 if (!intel_sdvo_connector->left) 2309 if (!intel_sdvo_connector->left)
2531 return false; 2310 return false;
2532 2311
2533 drm_object_attach_property(&connector->base, 2312 intel_sdvo_connector->left->values[0] = 0;
2313 intel_sdvo_connector->left->values[1] = data_value[0];
2314 drm_connector_attach_property(connector,
2534 intel_sdvo_connector->left, 2315 intel_sdvo_connector->left,
2535 intel_sdvo_connector->left_margin); 2316 intel_sdvo_connector->left_margin);
2536 2317
2537 intel_sdvo_connector->right = 2318 intel_sdvo_connector->right =
2538 drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); 2319 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2320 "right_margin", 2);
2539 if (!intel_sdvo_connector->right) 2321 if (!intel_sdvo_connector->right)
2540 return false; 2322 return false;
2541 2323
2542 drm_object_attach_property(&connector->base, 2324 intel_sdvo_connector->right->values[0] = 0;
2325 intel_sdvo_connector->right->values[1] = data_value[0];
2326 drm_connector_attach_property(connector,
2543 intel_sdvo_connector->right, 2327 intel_sdvo_connector->right,
2544 intel_sdvo_connector->right_margin); 2328 intel_sdvo_connector->right_margin);
2545 DRM_DEBUG_KMS("h_overscan: max %d, " 2329 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2562,22 +2346,26 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2562 intel_sdvo_connector->top_margin = data_value[0] - response; 2346 intel_sdvo_connector->top_margin = data_value[0] - response;
2563 intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; 2347 intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
2564 intel_sdvo_connector->top = 2348 intel_sdvo_connector->top =
2565 drm_property_create_range(dev, 0, 2349 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2566 "top_margin", 0, data_value[0]); 2350 "top_margin", 2);
2567 if (!intel_sdvo_connector->top) 2351 if (!intel_sdvo_connector->top)
2568 return false; 2352 return false;
2569 2353
2570 drm_object_attach_property(&connector->base, 2354 intel_sdvo_connector->top->values[0] = 0;
2355 intel_sdvo_connector->top->values[1] = data_value[0];
2356 drm_connector_attach_property(connector,
2571 intel_sdvo_connector->top, 2357 intel_sdvo_connector->top,
2572 intel_sdvo_connector->top_margin); 2358 intel_sdvo_connector->top_margin);
2573 2359
2574 intel_sdvo_connector->bottom = 2360 intel_sdvo_connector->bottom =
2575 drm_property_create_range(dev, 0, 2361 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2576 "bottom_margin", 0, data_value[0]); 2362 "bottom_margin", 2);
2577 if (!intel_sdvo_connector->bottom) 2363 if (!intel_sdvo_connector->bottom)
2578 return false; 2364 return false;
2579 2365
2580 drm_object_attach_property(&connector->base, 2366 intel_sdvo_connector->bottom->values[0] = 0;
2367 intel_sdvo_connector->bottom->values[1] = data_value[0];
2368 drm_connector_attach_property(connector,
2581 intel_sdvo_connector->bottom, 2369 intel_sdvo_connector->bottom,
2582 intel_sdvo_connector->bottom_margin); 2370 intel_sdvo_connector->bottom_margin);
2583 DRM_DEBUG_KMS("v_overscan: max %d, " 2371 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2605,11 +2393,13 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2605 intel_sdvo_connector->max_dot_crawl = 1; 2393 intel_sdvo_connector->max_dot_crawl = 1;
2606 intel_sdvo_connector->cur_dot_crawl = response & 0x1; 2394 intel_sdvo_connector->cur_dot_crawl = response & 0x1;
2607 intel_sdvo_connector->dot_crawl = 2395 intel_sdvo_connector->dot_crawl =
2608 drm_property_create_range(dev, 0, "dot_crawl", 0, 1); 2396 drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
2609 if (!intel_sdvo_connector->dot_crawl) 2397 if (!intel_sdvo_connector->dot_crawl)
2610 return false; 2398 return false;
2611 2399
2612 drm_object_attach_property(&connector->base, 2400 intel_sdvo_connector->dot_crawl->values[0] = 0;
2401 intel_sdvo_connector->dot_crawl->values[1] = 1;
2402 drm_connector_attach_property(connector,
2613 intel_sdvo_connector->dot_crawl, 2403 intel_sdvo_connector->dot_crawl,
2614 intel_sdvo_connector->cur_dot_crawl); 2404 intel_sdvo_connector->cur_dot_crawl);
2615 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2405 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2654,7 +2444,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2654 2444
2655 if (IS_TV(intel_sdvo_connector)) 2445 if (IS_TV(intel_sdvo_connector))
2656 return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); 2446 return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
2657 else if (IS_LVDS(intel_sdvo_connector)) 2447 else if(IS_LVDS(intel_sdvo_connector))
2658 return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); 2448 return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
2659 else 2449 else
2660 return true; 2450 return true;
@@ -2697,12 +2487,11 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
2697 return i2c_add_adapter(&sdvo->ddc) == 0; 2487 return i2c_add_adapter(&sdvo->ddc) == 0;
2698} 2488}
2699 2489
2700bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) 2490bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2701{ 2491{
2702 struct drm_i915_private *dev_priv = dev->dev_private; 2492 struct drm_i915_private *dev_priv = dev->dev_private;
2703 struct intel_encoder *intel_encoder; 2493 struct intel_encoder *intel_encoder;
2704 struct intel_sdvo *intel_sdvo; 2494 struct intel_sdvo *intel_sdvo;
2705 u32 hotplug_mask;
2706 int i; 2495 int i;
2707 2496
2708 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2497 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
@@ -2710,11 +2499,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2710 return false; 2499 return false;
2711 2500
2712 intel_sdvo->sdvo_reg = sdvo_reg; 2501 intel_sdvo->sdvo_reg = sdvo_reg;
2713 intel_sdvo->is_sdvob = is_sdvob; 2502 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
2714 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2715 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2503 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2716 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) 2504 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
2717 goto err_i2c_bus; 2505 kfree(intel_sdvo);
2506 return false;
2507 }
2718 2508
2719 /* encoder type will be decided later */ 2509 /* encoder type will be decided later */
2720 intel_encoder = &intel_sdvo->base; 2510 intel_encoder = &intel_sdvo->base;
@@ -2726,68 +2516,48 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2726 u8 byte; 2516 u8 byte;
2727 2517
2728 if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { 2518 if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
2729 DRM_DEBUG_KMS("No SDVO device found on %s\n", 2519 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2730 SDVO_NAME(intel_sdvo)); 2520 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2731 goto err; 2521 goto err;
2732 } 2522 }
2733 } 2523 }
2734 2524
2735 hotplug_mask = 0; 2525 if (IS_SDVOB(sdvo_reg))
2736 if (IS_G4X(dev)) { 2526 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2737 hotplug_mask = intel_sdvo->is_sdvob ? 2527 else
2738 SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; 2528 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2739 } else if (IS_GEN4(dev)) {
2740 hotplug_mask = intel_sdvo->is_sdvob ?
2741 SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
2742 } else {
2743 hotplug_mask = intel_sdvo->is_sdvob ?
2744 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2745 }
2746 2529
2747 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); 2530 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
2748 2531
2749 intel_encoder->disable = intel_disable_sdvo;
2750 intel_encoder->enable = intel_enable_sdvo;
2751 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2752
2753 /* In default case sdvo lvds is false */ 2532 /* In default case sdvo lvds is false */
2754 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2533 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2755 goto err; 2534 goto err;
2756 2535
2536 /* Set up hotplug command - note paranoia about contents of reply.
2537 * We assume that the hardware is in a sane state, and only touch
2538 * the bits we think we understand.
2539 */
2540 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
2541 &intel_sdvo->hotplug_active, 2);
2542 intel_sdvo->hotplug_active[0] &= ~0x3;
2543
2757 if (intel_sdvo_output_setup(intel_sdvo, 2544 if (intel_sdvo_output_setup(intel_sdvo,
2758 intel_sdvo->caps.output_flags) != true) { 2545 intel_sdvo->caps.output_flags) != true) {
2759 DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", 2546 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2760 SDVO_NAME(intel_sdvo)); 2547 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2761 /* Output_setup can leave behind connectors! */ 2548 goto err;
2762 goto err_output;
2763 } 2549 }
2764 2550
2765 /*
2766 * Cloning SDVO with anything is often impossible, since the SDVO
2767 * encoder can request a special input timing mode. And even if that's
2768 * not the case we have evidence that cloning a plain unscaled mode with
2769 * VGA doesn't really work. Furthermore the cloning flags are way too
2770 * simplistic anyway to express such constraints, so just give up on
2771 * cloning for SDVO encoders.
2772 */
2773 intel_sdvo->base.cloneable = false;
2774
2775 /* Only enable the hotplug irq if we need it, to work around noisy
2776 * hotplug lines.
2777 */
2778 if (intel_sdvo->hotplug_active)
2779 dev_priv->hotplug_supported_mask |= hotplug_mask;
2780
2781 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 2551 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
2782 2552
2783 /* Set the input timing to the screen. Assume always input 0. */ 2553 /* Set the input timing to the screen. Assume always input 0. */
2784 if (!intel_sdvo_set_target_input(intel_sdvo)) 2554 if (!intel_sdvo_set_target_input(intel_sdvo))
2785 goto err_output; 2555 goto err;
2786 2556
2787 if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, 2557 if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
2788 &intel_sdvo->pixel_clock_min, 2558 &intel_sdvo->pixel_clock_min,
2789 &intel_sdvo->pixel_clock_max)) 2559 &intel_sdvo->pixel_clock_max))
2790 goto err_output; 2560 goto err;
2791 2561
2792 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " 2562 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
2793 "clock range %dMHz - %dMHz, " 2563 "clock range %dMHz - %dMHz, "
@@ -2807,14 +2577,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2807 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 2577 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
2808 return true; 2578 return true;
2809 2579
2810err_output:
2811 intel_sdvo_output_cleanup(intel_sdvo);
2812
2813err: 2580err:
2814 drm_encoder_cleanup(&intel_encoder->base); 2581 drm_encoder_cleanup(&intel_encoder->base);
2815 i2c_del_adapter(&intel_sdvo->ddc); 2582 i2c_del_adapter(&intel_sdvo->ddc);
2816err_i2c_bus:
2817 intel_sdvo_unselect_i2c_bus(intel_sdvo);
2818 kfree(intel_sdvo); 2583 kfree(intel_sdvo);
2819 2584
2820 return false; 2585 return false;
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6ecd9..4f4e23bc2d1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright © 2006-2007 Intel Corporation 2 * Copyright © 2006-2007 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -46,68 +46,63 @@
46#define SDVO_OUTPUT_LAST (14) 46#define SDVO_OUTPUT_LAST (14)
47 47
48struct intel_sdvo_caps { 48struct intel_sdvo_caps {
49 u8 vendor_id; 49 u8 vendor_id;
50 u8 device_id; 50 u8 device_id;
51 u8 device_rev_id; 51 u8 device_rev_id;
52 u8 sdvo_version_major; 52 u8 sdvo_version_major;
53 u8 sdvo_version_minor; 53 u8 sdvo_version_minor;
54 unsigned int sdvo_inputs_mask:2; 54 unsigned int sdvo_inputs_mask:2;
55 unsigned int smooth_scaling:1; 55 unsigned int smooth_scaling:1;
56 unsigned int sharp_scaling:1; 56 unsigned int sharp_scaling:1;
57 unsigned int up_scaling:1; 57 unsigned int up_scaling:1;
58 unsigned int down_scaling:1; 58 unsigned int down_scaling:1;
59 unsigned int stall_support:1; 59 unsigned int stall_support:1;
60 unsigned int pad:1; 60 unsigned int pad:1;
61 u16 output_flags; 61 u16 output_flags;
62} __attribute__((packed)); 62} __attribute__((packed));
63 63
64/* Note: SDVO detailed timing flags match EDID misc flags. */
65#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
66#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
67#define DTD_FLAG_INTERLACE (1 << 7)
68
69/** This matches the EDID DTD structure, more or less */ 64/** This matches the EDID DTD structure, more or less */
70struct intel_sdvo_dtd { 65struct intel_sdvo_dtd {
71 struct { 66 struct {
72 u16 clock; /**< pixel clock, in 10kHz units */ 67 u16 clock; /**< pixel clock, in 10kHz units */
73 u8 h_active; /**< lower 8 bits (pixels) */ 68 u8 h_active; /**< lower 8 bits (pixels) */
74 u8 h_blank; /**< lower 8 bits (pixels) */ 69 u8 h_blank; /**< lower 8 bits (pixels) */
75 u8 h_high; /**< upper 4 bits each h_active, h_blank */ 70 u8 h_high; /**< upper 4 bits each h_active, h_blank */
76 u8 v_active; /**< lower 8 bits (lines) */ 71 u8 v_active; /**< lower 8 bits (lines) */
77 u8 v_blank; /**< lower 8 bits (lines) */ 72 u8 v_blank; /**< lower 8 bits (lines) */
78 u8 v_high; /**< upper 4 bits each v_active, v_blank */ 73 u8 v_high; /**< upper 4 bits each v_active, v_blank */
79 } part1; 74 } part1;
80 75
81 struct { 76 struct {
82 u8 h_sync_off; /**< lower 8 bits, from hblank start */ 77 u8 h_sync_off; /**< lower 8 bits, from hblank start */
83 u8 h_sync_width; /**< lower 8 bits (pixels) */ 78 u8 h_sync_width; /**< lower 8 bits (pixels) */
84 /** lower 4 bits each vsync offset, vsync width */ 79 /** lower 4 bits each vsync offset, vsync width */
85 u8 v_sync_off_width; 80 u8 v_sync_off_width;
86 /** 81 /**
87 * 2 high bits of hsync offset, 2 high bits of hsync width, 82 * 2 high bits of hsync offset, 2 high bits of hsync width,
88 * bits 4-5 of vsync offset, and 2 high bits of vsync width. 83 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
89 */ 84 */
90 u8 sync_off_width_high; 85 u8 sync_off_width_high;
91 u8 dtd_flags; 86 u8 dtd_flags;
92 u8 sdvo_flags; 87 u8 sdvo_flags;
93 /** bits 6-7 of vsync offset at bits 6-7 */ 88 /** bits 6-7 of vsync offset at bits 6-7 */
94 u8 v_sync_off_high; 89 u8 v_sync_off_high;
95 u8 reserved; 90 u8 reserved;
96 } part2; 91 } part2;
97} __attribute__((packed)); 92} __attribute__((packed));
98 93
99struct intel_sdvo_pixel_clock_range { 94struct intel_sdvo_pixel_clock_range {
100 u16 min; /**< pixel clock, in 10kHz units */ 95 u16 min; /**< pixel clock, in 10kHz units */
101 u16 max; /**< pixel clock, in 10kHz units */ 96 u16 max; /**< pixel clock, in 10kHz units */
102} __attribute__((packed)); 97} __attribute__((packed));
103 98
104struct intel_sdvo_preferred_input_timing_args { 99struct intel_sdvo_preferred_input_timing_args {
105 u16 clock; 100 u16 clock;
106 u16 width; 101 u16 width;
107 u16 height; 102 u16 height;
108 u8 interlace:1; 103 u8 interlace:1;
109 u8 scaled:1; 104 u8 scaled:1;
110 u8 pad:6; 105 u8 pad:6;
111} __attribute__((packed)); 106} __attribute__((packed));
112 107
113/* I2C registers for SDVO */ 108/* I2C registers for SDVO */
@@ -159,9 +154,9 @@ struct intel_sdvo_preferred_input_timing_args {
159 */ 154 */
160#define SDVO_CMD_GET_TRAINED_INPUTS 0x03 155#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
161struct intel_sdvo_get_trained_inputs_response { 156struct intel_sdvo_get_trained_inputs_response {
162 unsigned int input0_trained:1; 157 unsigned int input0_trained:1;
163 unsigned int input1_trained:1; 158 unsigned int input1_trained:1;
164 unsigned int pad:6; 159 unsigned int pad:6;
165} __attribute__((packed)); 160} __attribute__((packed));
166 161
167/** Returns a struct intel_sdvo_output_flags of active outputs. */ 162/** Returns a struct intel_sdvo_output_flags of active outputs. */
@@ -182,7 +177,7 @@ struct intel_sdvo_get_trained_inputs_response {
182 */ 177 */
183#define SDVO_CMD_GET_IN_OUT_MAP 0x06 178#define SDVO_CMD_GET_IN_OUT_MAP 0x06
184struct intel_sdvo_in_out_map { 179struct intel_sdvo_in_out_map {
185 u16 in0, in1; 180 u16 in0, in1;
186}; 181};
187 182
188/** 183/**
@@ -215,10 +210,10 @@ struct intel_sdvo_in_out_map {
215 210
216#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f 211#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
217struct intel_sdvo_get_interrupt_event_source_response { 212struct intel_sdvo_get_interrupt_event_source_response {
218 u16 interrupt_status; 213 u16 interrupt_status;
219 unsigned int ambient_light_interrupt:1; 214 unsigned int ambient_light_interrupt:1;
220 unsigned int hdmi_audio_encrypt_change:1; 215 unsigned int hdmi_audio_encrypt_change:1;
221 unsigned int pad:6; 216 unsigned int pad:6;
222} __attribute__((packed)); 217} __attribute__((packed));
223 218
224/** 219/**
@@ -230,8 +225,8 @@ struct intel_sdvo_get_interrupt_event_source_response {
230 */ 225 */
231#define SDVO_CMD_SET_TARGET_INPUT 0x10 226#define SDVO_CMD_SET_TARGET_INPUT 0x10
232struct intel_sdvo_set_target_input_args { 227struct intel_sdvo_set_target_input_args {
233 unsigned int target_1:1; 228 unsigned int target_1:1;
234 unsigned int pad:7; 229 unsigned int pad:7;
235} __attribute__((packed)); 230} __attribute__((packed));
236 231
237/** 232/**
@@ -319,57 +314,57 @@ struct intel_sdvo_set_target_input_args {
319#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 314#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
320/** 6 bytes of bit flags for TV formats shared by all TV format functions */ 315/** 6 bytes of bit flags for TV formats shared by all TV format functions */
321struct intel_sdvo_tv_format { 316struct intel_sdvo_tv_format {
322 unsigned int ntsc_m:1; 317 unsigned int ntsc_m:1;
323 unsigned int ntsc_j:1; 318 unsigned int ntsc_j:1;
324 unsigned int ntsc_443:1; 319 unsigned int ntsc_443:1;
325 unsigned int pal_b:1; 320 unsigned int pal_b:1;
326 unsigned int pal_d:1; 321 unsigned int pal_d:1;
327 unsigned int pal_g:1; 322 unsigned int pal_g:1;
328 unsigned int pal_h:1; 323 unsigned int pal_h:1;
329 unsigned int pal_i:1; 324 unsigned int pal_i:1;
330 325
331 unsigned int pal_m:1; 326 unsigned int pal_m:1;
332 unsigned int pal_n:1; 327 unsigned int pal_n:1;
333 unsigned int pal_nc:1; 328 unsigned int pal_nc:1;
334 unsigned int pal_60:1; 329 unsigned int pal_60:1;
335 unsigned int secam_b:1; 330 unsigned int secam_b:1;
336 unsigned int secam_d:1; 331 unsigned int secam_d:1;
337 unsigned int secam_g:1; 332 unsigned int secam_g:1;
338 unsigned int secam_k:1; 333 unsigned int secam_k:1;
339 334
340 unsigned int secam_k1:1; 335 unsigned int secam_k1:1;
341 unsigned int secam_l:1; 336 unsigned int secam_l:1;
342 unsigned int secam_60:1; 337 unsigned int secam_60:1;
343 unsigned int hdtv_std_smpte_240m_1080i_59:1; 338 unsigned int hdtv_std_smpte_240m_1080i_59:1;
344 unsigned int hdtv_std_smpte_240m_1080i_60:1; 339 unsigned int hdtv_std_smpte_240m_1080i_60:1;
345 unsigned int hdtv_std_smpte_260m_1080i_59:1; 340 unsigned int hdtv_std_smpte_260m_1080i_59:1;
346 unsigned int hdtv_std_smpte_260m_1080i_60:1; 341 unsigned int hdtv_std_smpte_260m_1080i_60:1;
347 unsigned int hdtv_std_smpte_274m_1080i_50:1; 342 unsigned int hdtv_std_smpte_274m_1080i_50:1;
348 343
349 unsigned int hdtv_std_smpte_274m_1080i_59:1; 344 unsigned int hdtv_std_smpte_274m_1080i_59:1;
350 unsigned int hdtv_std_smpte_274m_1080i_60:1; 345 unsigned int hdtv_std_smpte_274m_1080i_60:1;
351 unsigned int hdtv_std_smpte_274m_1080p_23:1; 346 unsigned int hdtv_std_smpte_274m_1080p_23:1;
352 unsigned int hdtv_std_smpte_274m_1080p_24:1; 347 unsigned int hdtv_std_smpte_274m_1080p_24:1;
353 unsigned int hdtv_std_smpte_274m_1080p_25:1; 348 unsigned int hdtv_std_smpte_274m_1080p_25:1;
354 unsigned int hdtv_std_smpte_274m_1080p_29:1; 349 unsigned int hdtv_std_smpte_274m_1080p_29:1;
355 unsigned int hdtv_std_smpte_274m_1080p_30:1; 350 unsigned int hdtv_std_smpte_274m_1080p_30:1;
356 unsigned int hdtv_std_smpte_274m_1080p_50:1; 351 unsigned int hdtv_std_smpte_274m_1080p_50:1;
357 352
358 unsigned int hdtv_std_smpte_274m_1080p_59:1; 353 unsigned int hdtv_std_smpte_274m_1080p_59:1;
359 unsigned int hdtv_std_smpte_274m_1080p_60:1; 354 unsigned int hdtv_std_smpte_274m_1080p_60:1;
360 unsigned int hdtv_std_smpte_295m_1080i_50:1; 355 unsigned int hdtv_std_smpte_295m_1080i_50:1;
361 unsigned int hdtv_std_smpte_295m_1080p_50:1; 356 unsigned int hdtv_std_smpte_295m_1080p_50:1;
362 unsigned int hdtv_std_smpte_296m_720p_59:1; 357 unsigned int hdtv_std_smpte_296m_720p_59:1;
363 unsigned int hdtv_std_smpte_296m_720p_60:1; 358 unsigned int hdtv_std_smpte_296m_720p_60:1;
364 unsigned int hdtv_std_smpte_296m_720p_50:1; 359 unsigned int hdtv_std_smpte_296m_720p_50:1;
365 unsigned int hdtv_std_smpte_293m_480p_59:1; 360 unsigned int hdtv_std_smpte_293m_480p_59:1;
366 361
367 unsigned int hdtv_std_smpte_170m_480i_59:1; 362 unsigned int hdtv_std_smpte_170m_480i_59:1;
368 unsigned int hdtv_std_iturbt601_576i_50:1; 363 unsigned int hdtv_std_iturbt601_576i_50:1;
369 unsigned int hdtv_std_iturbt601_576p_50:1; 364 unsigned int hdtv_std_iturbt601_576p_50:1;
370 unsigned int hdtv_std_eia_7702a_480i_60:1; 365 unsigned int hdtv_std_eia_7702a_480i_60:1;
371 unsigned int hdtv_std_eia_7702a_480p_60:1; 366 unsigned int hdtv_std_eia_7702a_480p_60:1;
372 unsigned int pad:3; 367 unsigned int pad:3;
373} __attribute__((packed)); 368} __attribute__((packed));
374 369
375#define SDVO_CMD_GET_TV_FORMAT 0x28 370#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -379,53 +374,53 @@ struct intel_sdvo_tv_format {
379/** Returns the resolutiosn that can be used with the given TV format */ 374/** Returns the resolutiosn that can be used with the given TV format */
380#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 375#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
381struct intel_sdvo_sdtv_resolution_request { 376struct intel_sdvo_sdtv_resolution_request {
382 unsigned int ntsc_m:1; 377 unsigned int ntsc_m:1;
383 unsigned int ntsc_j:1; 378 unsigned int ntsc_j:1;
384 unsigned int ntsc_443:1; 379 unsigned int ntsc_443:1;
385 unsigned int pal_b:1; 380 unsigned int pal_b:1;
386 unsigned int pal_d:1; 381 unsigned int pal_d:1;
387 unsigned int pal_g:1; 382 unsigned int pal_g:1;
388 unsigned int pal_h:1; 383 unsigned int pal_h:1;
389 unsigned int pal_i:1; 384 unsigned int pal_i:1;
390 385
391 unsigned int pal_m:1; 386 unsigned int pal_m:1;
392 unsigned int pal_n:1; 387 unsigned int pal_n:1;
393 unsigned int pal_nc:1; 388 unsigned int pal_nc:1;
394 unsigned int pal_60:1; 389 unsigned int pal_60:1;
395 unsigned int secam_b:1; 390 unsigned int secam_b:1;
396 unsigned int secam_d:1; 391 unsigned int secam_d:1;
397 unsigned int secam_g:1; 392 unsigned int secam_g:1;
398 unsigned int secam_k:1; 393 unsigned int secam_k:1;
399 394
400 unsigned int secam_k1:1; 395 unsigned int secam_k1:1;
401 unsigned int secam_l:1; 396 unsigned int secam_l:1;
402 unsigned int secam_60:1; 397 unsigned int secam_60:1;
403 unsigned int pad:5; 398 unsigned int pad:5;
404} __attribute__((packed)); 399} __attribute__((packed));
405 400
406struct intel_sdvo_sdtv_resolution_reply { 401struct intel_sdvo_sdtv_resolution_reply {
407 unsigned int res_320x200:1; 402 unsigned int res_320x200:1;
408 unsigned int res_320x240:1; 403 unsigned int res_320x240:1;
409 unsigned int res_400x300:1; 404 unsigned int res_400x300:1;
410 unsigned int res_640x350:1; 405 unsigned int res_640x350:1;
411 unsigned int res_640x400:1; 406 unsigned int res_640x400:1;
412 unsigned int res_640x480:1; 407 unsigned int res_640x480:1;
413 unsigned int res_704x480:1; 408 unsigned int res_704x480:1;
414 unsigned int res_704x576:1; 409 unsigned int res_704x576:1;
415 410
416 unsigned int res_720x350:1; 411 unsigned int res_720x350:1;
417 unsigned int res_720x400:1; 412 unsigned int res_720x400:1;
418 unsigned int res_720x480:1; 413 unsigned int res_720x480:1;
419 unsigned int res_720x540:1; 414 unsigned int res_720x540:1;
420 unsigned int res_720x576:1; 415 unsigned int res_720x576:1;
421 unsigned int res_768x576:1; 416 unsigned int res_768x576:1;
422 unsigned int res_800x600:1; 417 unsigned int res_800x600:1;
423 unsigned int res_832x624:1; 418 unsigned int res_832x624:1;
424 419
425 unsigned int res_920x766:1; 420 unsigned int res_920x766:1;
426 unsigned int res_1024x768:1; 421 unsigned int res_1024x768:1;
427 unsigned int res_1280x1024:1; 422 unsigned int res_1280x1024:1;
428 unsigned int pad:5; 423 unsigned int pad:5;
429} __attribute__((packed)); 424} __attribute__((packed));
430 425
431/* Get supported resolution with squire pixel aspect ratio that can be 426/* Get supported resolution with squire pixel aspect ratio that can be
@@ -433,90 +428,90 @@ struct intel_sdvo_sdtv_resolution_reply {
433#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 428#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
434 429
435struct intel_sdvo_hdtv_resolution_request { 430struct intel_sdvo_hdtv_resolution_request {
436 unsigned int hdtv_std_smpte_240m_1080i_59:1; 431 unsigned int hdtv_std_smpte_240m_1080i_59:1;
437 unsigned int hdtv_std_smpte_240m_1080i_60:1; 432 unsigned int hdtv_std_smpte_240m_1080i_60:1;
438 unsigned int hdtv_std_smpte_260m_1080i_59:1; 433 unsigned int hdtv_std_smpte_260m_1080i_59:1;
439 unsigned int hdtv_std_smpte_260m_1080i_60:1; 434 unsigned int hdtv_std_smpte_260m_1080i_60:1;
440 unsigned int hdtv_std_smpte_274m_1080i_50:1; 435 unsigned int hdtv_std_smpte_274m_1080i_50:1;
441 unsigned int hdtv_std_smpte_274m_1080i_59:1; 436 unsigned int hdtv_std_smpte_274m_1080i_59:1;
442 unsigned int hdtv_std_smpte_274m_1080i_60:1; 437 unsigned int hdtv_std_smpte_274m_1080i_60:1;
443 unsigned int hdtv_std_smpte_274m_1080p_23:1; 438 unsigned int hdtv_std_smpte_274m_1080p_23:1;
444 439
445 unsigned int hdtv_std_smpte_274m_1080p_24:1; 440 unsigned int hdtv_std_smpte_274m_1080p_24:1;
446 unsigned int hdtv_std_smpte_274m_1080p_25:1; 441 unsigned int hdtv_std_smpte_274m_1080p_25:1;
447 unsigned int hdtv_std_smpte_274m_1080p_29:1; 442 unsigned int hdtv_std_smpte_274m_1080p_29:1;
448 unsigned int hdtv_std_smpte_274m_1080p_30:1; 443 unsigned int hdtv_std_smpte_274m_1080p_30:1;
449 unsigned int hdtv_std_smpte_274m_1080p_50:1; 444 unsigned int hdtv_std_smpte_274m_1080p_50:1;
450 unsigned int hdtv_std_smpte_274m_1080p_59:1; 445 unsigned int hdtv_std_smpte_274m_1080p_59:1;
451 unsigned int hdtv_std_smpte_274m_1080p_60:1; 446 unsigned int hdtv_std_smpte_274m_1080p_60:1;
452 unsigned int hdtv_std_smpte_295m_1080i_50:1; 447 unsigned int hdtv_std_smpte_295m_1080i_50:1;
453 448
454 unsigned int hdtv_std_smpte_295m_1080p_50:1; 449 unsigned int hdtv_std_smpte_295m_1080p_50:1;
455 unsigned int hdtv_std_smpte_296m_720p_59:1; 450 unsigned int hdtv_std_smpte_296m_720p_59:1;
456 unsigned int hdtv_std_smpte_296m_720p_60:1; 451 unsigned int hdtv_std_smpte_296m_720p_60:1;
457 unsigned int hdtv_std_smpte_296m_720p_50:1; 452 unsigned int hdtv_std_smpte_296m_720p_50:1;
458 unsigned int hdtv_std_smpte_293m_480p_59:1; 453 unsigned int hdtv_std_smpte_293m_480p_59:1;
459 unsigned int hdtv_std_smpte_170m_480i_59:1; 454 unsigned int hdtv_std_smpte_170m_480i_59:1;
460 unsigned int hdtv_std_iturbt601_576i_50:1; 455 unsigned int hdtv_std_iturbt601_576i_50:1;
461 unsigned int hdtv_std_iturbt601_576p_50:1; 456 unsigned int hdtv_std_iturbt601_576p_50:1;
462 457
463 unsigned int hdtv_std_eia_7702a_480i_60:1; 458 unsigned int hdtv_std_eia_7702a_480i_60:1;
464 unsigned int hdtv_std_eia_7702a_480p_60:1; 459 unsigned int hdtv_std_eia_7702a_480p_60:1;
465 unsigned int pad:6; 460 unsigned int pad:6;
466} __attribute__((packed)); 461} __attribute__((packed));
467 462
468struct intel_sdvo_hdtv_resolution_reply { 463struct intel_sdvo_hdtv_resolution_reply {
469 unsigned int res_640x480:1; 464 unsigned int res_640x480:1;
470 unsigned int res_800x600:1; 465 unsigned int res_800x600:1;
471 unsigned int res_1024x768:1; 466 unsigned int res_1024x768:1;
472 unsigned int res_1280x960:1; 467 unsigned int res_1280x960:1;
473 unsigned int res_1400x1050:1; 468 unsigned int res_1400x1050:1;
474 unsigned int res_1600x1200:1; 469 unsigned int res_1600x1200:1;
475 unsigned int res_1920x1440:1; 470 unsigned int res_1920x1440:1;
476 unsigned int res_2048x1536:1; 471 unsigned int res_2048x1536:1;
477 472
478 unsigned int res_2560x1920:1; 473 unsigned int res_2560x1920:1;
479 unsigned int res_3200x2400:1; 474 unsigned int res_3200x2400:1;
480 unsigned int res_3840x2880:1; 475 unsigned int res_3840x2880:1;
481 unsigned int pad1:5; 476 unsigned int pad1:5;
482 477
483 unsigned int res_848x480:1; 478 unsigned int res_848x480:1;
484 unsigned int res_1064x600:1; 479 unsigned int res_1064x600:1;
485 unsigned int res_1280x720:1; 480 unsigned int res_1280x720:1;
486 unsigned int res_1360x768:1; 481 unsigned int res_1360x768:1;
487 unsigned int res_1704x960:1; 482 unsigned int res_1704x960:1;
488 unsigned int res_1864x1050:1; 483 unsigned int res_1864x1050:1;
489 unsigned int res_1920x1080:1; 484 unsigned int res_1920x1080:1;
490 unsigned int res_2128x1200:1; 485 unsigned int res_2128x1200:1;
491 486
492 unsigned int res_2560x1400:1; 487 unsigned int res_2560x1400:1;
493 unsigned int res_2728x1536:1; 488 unsigned int res_2728x1536:1;
494 unsigned int res_3408x1920:1; 489 unsigned int res_3408x1920:1;
495 unsigned int res_4264x2400:1; 490 unsigned int res_4264x2400:1;
496 unsigned int res_5120x2880:1; 491 unsigned int res_5120x2880:1;
497 unsigned int pad2:3; 492 unsigned int pad2:3;
498 493
499 unsigned int res_768x480:1; 494 unsigned int res_768x480:1;
500 unsigned int res_960x600:1; 495 unsigned int res_960x600:1;
501 unsigned int res_1152x720:1; 496 unsigned int res_1152x720:1;
502 unsigned int res_1124x768:1; 497 unsigned int res_1124x768:1;
503 unsigned int res_1536x960:1; 498 unsigned int res_1536x960:1;
504 unsigned int res_1680x1050:1; 499 unsigned int res_1680x1050:1;
505 unsigned int res_1728x1080:1; 500 unsigned int res_1728x1080:1;
506 unsigned int res_1920x1200:1; 501 unsigned int res_1920x1200:1;
507 502
508 unsigned int res_2304x1440:1; 503 unsigned int res_2304x1440:1;
509 unsigned int res_2456x1536:1; 504 unsigned int res_2456x1536:1;
510 unsigned int res_3072x1920:1; 505 unsigned int res_3072x1920:1;
511 unsigned int res_3840x2400:1; 506 unsigned int res_3840x2400:1;
512 unsigned int res_4608x2880:1; 507 unsigned int res_4608x2880:1;
513 unsigned int pad3:3; 508 unsigned int pad3:3;
514 509
515 unsigned int res_1280x1024:1; 510 unsigned int res_1280x1024:1;
516 unsigned int pad4:7; 511 unsigned int pad4:7;
517 512
518 unsigned int res_1280x768:1; 513 unsigned int res_1280x768:1;
519 unsigned int pad5:7; 514 unsigned int pad5:7;
520} __attribute__((packed)); 515} __attribute__((packed));
521 516
522/* Get supported power state returns info for encoder and monitor, rely on 517/* Get supported power state returns info for encoder and monitor, rely on
@@ -544,25 +539,25 @@ struct intel_sdvo_hdtv_resolution_reply {
544 * The high fields are bits 8:9 of the 10-bit values. 539 * The high fields are bits 8:9 of the 10-bit values.
545 */ 540 */
546struct sdvo_panel_power_sequencing { 541struct sdvo_panel_power_sequencing {
547 u8 t0; 542 u8 t0;
548 u8 t1; 543 u8 t1;
549 u8 t2; 544 u8 t2;
550 u8 t3; 545 u8 t3;
551 u8 t4; 546 u8 t4;
552 547
553 unsigned int t0_high:2; 548 unsigned int t0_high:2;
554 unsigned int t1_high:2; 549 unsigned int t1_high:2;
555 unsigned int t2_high:2; 550 unsigned int t2_high:2;
556 unsigned int t3_high:2; 551 unsigned int t3_high:2;
557 552
558 unsigned int t4_high:2; 553 unsigned int t4_high:2;
559 unsigned int pad:6; 554 unsigned int pad:6;
560} __attribute__((packed)); 555} __attribute__((packed));
561 556
562#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 557#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
563struct sdvo_max_backlight_reply { 558struct sdvo_max_backlight_reply {
564 u8 max_value; 559 u8 max_value;
565 u8 default_value; 560 u8 default_value;
566} __attribute__((packed)); 561} __attribute__((packed));
567 562
568#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 563#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
@@ -570,16 +565,16 @@ struct sdvo_max_backlight_reply {
570 565
571#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 566#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
572struct sdvo_get_ambient_light_reply { 567struct sdvo_get_ambient_light_reply {
573 u16 trip_low; 568 u16 trip_low;
574 u16 trip_high; 569 u16 trip_high;
575 u16 value; 570 u16 value;
576} __attribute__((packed)); 571} __attribute__((packed));
577#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 572#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
578struct sdvo_set_ambient_light_reply { 573struct sdvo_set_ambient_light_reply {
579 u16 trip_low; 574 u16 trip_low;
580 u16 trip_high; 575 u16 trip_high;
581 unsigned int enable:1; 576 unsigned int enable:1;
582 unsigned int pad:7; 577 unsigned int pad:7;
583} __attribute__((packed)); 578} __attribute__((packed));
584 579
585/* Set display power state */ 580/* Set display power state */
@@ -591,23 +586,23 @@ struct sdvo_set_ambient_light_reply {
591 586
592#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 587#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
593struct intel_sdvo_enhancements_reply { 588struct intel_sdvo_enhancements_reply {
594 unsigned int flicker_filter:1; 589 unsigned int flicker_filter:1;
595 unsigned int flicker_filter_adaptive:1; 590 unsigned int flicker_filter_adaptive:1;
596 unsigned int flicker_filter_2d:1; 591 unsigned int flicker_filter_2d:1;
597 unsigned int saturation:1; 592 unsigned int saturation:1;
598 unsigned int hue:1; 593 unsigned int hue:1;
599 unsigned int brightness:1; 594 unsigned int brightness:1;
600 unsigned int contrast:1; 595 unsigned int contrast:1;
601 unsigned int overscan_h:1; 596 unsigned int overscan_h:1;
602 597
603 unsigned int overscan_v:1; 598 unsigned int overscan_v:1;
604 unsigned int hpos:1; 599 unsigned int hpos:1;
605 unsigned int vpos:1; 600 unsigned int vpos:1;
606 unsigned int sharpness:1; 601 unsigned int sharpness:1;
607 unsigned int dot_crawl:1; 602 unsigned int dot_crawl:1;
608 unsigned int dither:1; 603 unsigned int dither:1;
609 unsigned int tv_chroma_filter:1; 604 unsigned int tv_chroma_filter:1;
610 unsigned int tv_luma_filter:1; 605 unsigned int tv_luma_filter:1;
611} __attribute__((packed)); 606} __attribute__((packed));
612 607
613/* Picture enhancement limits below are dependent on the current TV format, 608/* Picture enhancement limits below are dependent on the current TV format,
@@ -628,8 +623,8 @@ struct intel_sdvo_enhancements_reply {
628#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 623#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
629#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 624#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
630struct intel_sdvo_enhancement_limits_reply { 625struct intel_sdvo_enhancement_limits_reply {
631 u16 max_value; 626 u16 max_value;
632 u16 default_value; 627 u16 default_value;
633} __attribute__((packed)); 628} __attribute__((packed));
634 629
635#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f 630#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
@@ -670,8 +665,8 @@ struct intel_sdvo_enhancement_limits_reply {
670#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 665#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
671#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 666#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
672struct intel_sdvo_enhancements_arg { 667struct intel_sdvo_enhancements_arg {
673 u16 value; 668 u16 value;
674} __attribute__((packed)); 669}__attribute__((packed));
675 670
676#define SDVO_CMD_GET_DOT_CRAWL 0x70 671#define SDVO_CMD_GET_DOT_CRAWL 0x70
677#define SDVO_CMD_SET_DOT_CRAWL 0x71 672#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -708,8 +703,6 @@ struct intel_sdvo_enhancements_arg {
708#define SDVO_CMD_SET_AUDIO_STAT 0x91 703#define SDVO_CMD_SET_AUDIO_STAT 0x91
709#define SDVO_CMD_GET_AUDIO_STAT 0x92 704#define SDVO_CMD_GET_AUDIO_STAT 0x92
710#define SDVO_CMD_SET_HBUF_INDEX 0x93 705#define SDVO_CMD_SET_HBUF_INDEX 0x93
711 #define SDVO_HBUF_INDEX_ELD 0
712 #define SDVO_HBUF_INDEX_AVI_IF 1
713#define SDVO_CMD_GET_HBUF_INDEX 0x94 706#define SDVO_CMD_GET_HBUF_INDEX 0x94
714#define SDVO_CMD_GET_HBUF_INFO 0x95 707#define SDVO_CMD_GET_HBUF_INFO 0x95
715#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 708#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
@@ -724,7 +717,7 @@ struct intel_sdvo_enhancements_arg {
724#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c 717#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
725#define SDVO_NEED_TO_STALL (1 << 7) 718#define SDVO_NEED_TO_STALL (1 << 7)
726 719
727struct intel_sdvo_encode { 720struct intel_sdvo_encode{
728 u8 dvi_rev; 721 u8 dvi_rev;
729 u8 hdmi_rev; 722 u8 hdmi_rev;
730} __attribute__ ((packed)); 723} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
deleted file mode 100644
index d7b060e0a23..00000000000
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ /dev/null
@@ -1,730 +0,0 @@
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Jesse Barnes <jbarnes@virtuousgeek.org>
25 *
26 * New plane/sprite handling.
27 *
28 * The older chips had a separate interface for programming plane related
29 * registers; newer ones are much simpler and we can use the new DRM plane
30 * support.
31 */
32#include <drm/drmP.h>
33#include <drm/drm_crtc.h>
34#include <drm/drm_fourcc.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39static void
40ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
41 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
42 unsigned int crtc_w, unsigned int crtc_h,
43 uint32_t x, uint32_t y,
44 uint32_t src_w, uint32_t src_h)
45{
46 struct drm_device *dev = plane->dev;
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct intel_plane *intel_plane = to_intel_plane(plane);
49 int pipe = intel_plane->pipe;
50 u32 sprctl, sprscale = 0;
51 unsigned long sprsurf_offset, linear_offset;
52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
53
54 sprctl = I915_READ(SPRCTL(pipe));
55
56 /* Mask out pixel format bits in case we change it */
57 sprctl &= ~SPRITE_PIXFORMAT_MASK;
58 sprctl &= ~SPRITE_RGB_ORDER_RGBX;
59 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
60 sprctl &= ~SPRITE_TILED;
61
62 switch (fb->pixel_format) {
63 case DRM_FORMAT_XBGR8888:
64 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
65 break;
66 case DRM_FORMAT_XRGB8888:
67 sprctl |= SPRITE_FORMAT_RGBX888;
68 break;
69 case DRM_FORMAT_YUYV:
70 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
71 break;
72 case DRM_FORMAT_YVYU:
73 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
74 break;
75 case DRM_FORMAT_UYVY:
76 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
77 break;
78 case DRM_FORMAT_VYUY:
79 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
80 break;
81 default:
82 BUG();
83 }
84
85 if (obj->tiling_mode != I915_TILING_NONE)
86 sprctl |= SPRITE_TILED;
87
88 /* must disable */
89 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
90 sprctl |= SPRITE_ENABLE;
91
92 /* Sizes are 0 based */
93 src_w--;
94 src_h--;
95 crtc_w--;
96 crtc_h--;
97
98 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
99
100 /*
101 * IVB workaround: must disable low power watermarks for at least
102 * one frame before enabling scaling. LP watermarks can be re-enabled
103 * when scaling is disabled.
104 */
105 if (crtc_w != src_w || crtc_h != src_h) {
106 if (!dev_priv->sprite_scaling_enabled) {
107 dev_priv->sprite_scaling_enabled = true;
108 intel_update_watermarks(dev);
109 intel_wait_for_vblank(dev, pipe);
110 }
111 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
112 } else {
113 if (dev_priv->sprite_scaling_enabled) {
114 dev_priv->sprite_scaling_enabled = false;
115 /* potentially re-enable LP watermarks */
116 intel_update_watermarks(dev);
117 }
118 }
119
120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
122
123 linear_offset = y * fb->pitches[0] + x * pixel_size;
124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y,
126 pixel_size, fb->pitches[0]);
127 linear_offset -= sprsurf_offset;
128
129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
130 * register */
131 if (IS_HASWELL(dev))
132 I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
133 else if (obj->tiling_mode != I915_TILING_NONE)
134 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
135 else
136 I915_WRITE(SPRLINOFF(pipe), linear_offset);
137
138 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
139 if (intel_plane->can_scale)
140 I915_WRITE(SPRSCALE(pipe), sprscale);
141 I915_WRITE(SPRCTL(pipe), sprctl);
142 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
143 POSTING_READ(SPRSURF(pipe));
144}
145
146static void
147ivb_disable_plane(struct drm_plane *plane)
148{
149 struct drm_device *dev = plane->dev;
150 struct drm_i915_private *dev_priv = dev->dev_private;
151 struct intel_plane *intel_plane = to_intel_plane(plane);
152 int pipe = intel_plane->pipe;
153
154 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
155 /* Can't leave the scaler enabled... */
156 if (intel_plane->can_scale)
157 I915_WRITE(SPRSCALE(pipe), 0);
158 /* Activate double buffered register update */
159 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
160 POSTING_READ(SPRSURF(pipe));
161
162 dev_priv->sprite_scaling_enabled = false;
163 intel_update_watermarks(dev);
164}
165
166static int
167ivb_update_colorkey(struct drm_plane *plane,
168 struct drm_intel_sprite_colorkey *key)
169{
170 struct drm_device *dev = plane->dev;
171 struct drm_i915_private *dev_priv = dev->dev_private;
172 struct intel_plane *intel_plane;
173 u32 sprctl;
174 int ret = 0;
175
176 intel_plane = to_intel_plane(plane);
177
178 I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
179 I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
180 I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
181
182 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
183 sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
184 if (key->flags & I915_SET_COLORKEY_DESTINATION)
185 sprctl |= SPRITE_DEST_KEY;
186 else if (key->flags & I915_SET_COLORKEY_SOURCE)
187 sprctl |= SPRITE_SOURCE_KEY;
188 I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
189
190 POSTING_READ(SPRKEYMSK(intel_plane->pipe));
191
192 return ret;
193}
194
195static void
196ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
197{
198 struct drm_device *dev = plane->dev;
199 struct drm_i915_private *dev_priv = dev->dev_private;
200 struct intel_plane *intel_plane;
201 u32 sprctl;
202
203 intel_plane = to_intel_plane(plane);
204
205 key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
206 key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
207 key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
208 key->flags = 0;
209
210 sprctl = I915_READ(SPRCTL(intel_plane->pipe));
211
212 if (sprctl & SPRITE_DEST_KEY)
213 key->flags = I915_SET_COLORKEY_DESTINATION;
214 else if (sprctl & SPRITE_SOURCE_KEY)
215 key->flags = I915_SET_COLORKEY_SOURCE;
216 else
217 key->flags = I915_SET_COLORKEY_NONE;
218}
219
220static void
221ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
222 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
223 unsigned int crtc_w, unsigned int crtc_h,
224 uint32_t x, uint32_t y,
225 uint32_t src_w, uint32_t src_h)
226{
227 struct drm_device *dev = plane->dev;
228 struct drm_i915_private *dev_priv = dev->dev_private;
229 struct intel_plane *intel_plane = to_intel_plane(plane);
230 int pipe = intel_plane->pipe;
231 unsigned long dvssurf_offset, linear_offset;
232 u32 dvscntr, dvsscale;
233 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
234
235 dvscntr = I915_READ(DVSCNTR(pipe));
236
237 /* Mask out pixel format bits in case we change it */
238 dvscntr &= ~DVS_PIXFORMAT_MASK;
239 dvscntr &= ~DVS_RGB_ORDER_XBGR;
240 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
241 dvscntr &= ~DVS_TILED;
242
243 switch (fb->pixel_format) {
244 case DRM_FORMAT_XBGR8888:
245 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
246 break;
247 case DRM_FORMAT_XRGB8888:
248 dvscntr |= DVS_FORMAT_RGBX888;
249 break;
250 case DRM_FORMAT_YUYV:
251 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
252 break;
253 case DRM_FORMAT_YVYU:
254 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
255 break;
256 case DRM_FORMAT_UYVY:
257 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
258 break;
259 case DRM_FORMAT_VYUY:
260 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
261 break;
262 default:
263 BUG();
264 }
265
266 if (obj->tiling_mode != I915_TILING_NONE)
267 dvscntr |= DVS_TILED;
268
269 if (IS_GEN6(dev))
270 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
271 dvscntr |= DVS_ENABLE;
272
273 /* Sizes are 0 based */
274 src_w--;
275 src_h--;
276 crtc_w--;
277 crtc_h--;
278
279 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
280
281 dvsscale = 0;
282 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
283 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
284
285 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
286 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
287
288 linear_offset = y * fb->pitches[0] + x * pixel_size;
289 dvssurf_offset =
290 intel_gen4_compute_offset_xtiled(&x, &y,
291 pixel_size, fb->pitches[0]);
292 linear_offset -= dvssurf_offset;
293
294 if (obj->tiling_mode != I915_TILING_NONE)
295 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
296 else
297 I915_WRITE(DVSLINOFF(pipe), linear_offset);
298
299 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
300 I915_WRITE(DVSSCALE(pipe), dvsscale);
301 I915_WRITE(DVSCNTR(pipe), dvscntr);
302 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
303 POSTING_READ(DVSSURF(pipe));
304}
305
306static void
307ilk_disable_plane(struct drm_plane *plane)
308{
309 struct drm_device *dev = plane->dev;
310 struct drm_i915_private *dev_priv = dev->dev_private;
311 struct intel_plane *intel_plane = to_intel_plane(plane);
312 int pipe = intel_plane->pipe;
313
314 I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
315 /* Disable the scaler */
316 I915_WRITE(DVSSCALE(pipe), 0);
317 /* Flush double buffered register updates */
318 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
319 POSTING_READ(DVSSURF(pipe));
320}
321
322static void
323intel_enable_primary(struct drm_crtc *crtc)
324{
325 struct drm_device *dev = crtc->dev;
326 struct drm_i915_private *dev_priv = dev->dev_private;
327 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
328 int reg = DSPCNTR(intel_crtc->plane);
329
330 if (!intel_crtc->primary_disabled)
331 return;
332
333 intel_crtc->primary_disabled = false;
334 intel_update_fbc(dev);
335
336 I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
337}
338
339static void
340intel_disable_primary(struct drm_crtc *crtc)
341{
342 struct drm_device *dev = crtc->dev;
343 struct drm_i915_private *dev_priv = dev->dev_private;
344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
345 int reg = DSPCNTR(intel_crtc->plane);
346
347 if (intel_crtc->primary_disabled)
348 return;
349
350 I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
351
352 intel_crtc->primary_disabled = true;
353 intel_update_fbc(dev);
354}
355
356static int
357ilk_update_colorkey(struct drm_plane *plane,
358 struct drm_intel_sprite_colorkey *key)
359{
360 struct drm_device *dev = plane->dev;
361 struct drm_i915_private *dev_priv = dev->dev_private;
362 struct intel_plane *intel_plane;
363 u32 dvscntr;
364 int ret = 0;
365
366 intel_plane = to_intel_plane(plane);
367
368 I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
369 I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
370 I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
371
372 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
373 dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
374 if (key->flags & I915_SET_COLORKEY_DESTINATION)
375 dvscntr |= DVS_DEST_KEY;
376 else if (key->flags & I915_SET_COLORKEY_SOURCE)
377 dvscntr |= DVS_SOURCE_KEY;
378 I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
379
380 POSTING_READ(DVSKEYMSK(intel_plane->pipe));
381
382 return ret;
383}
384
385static void
386ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
387{
388 struct drm_device *dev = plane->dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
390 struct intel_plane *intel_plane;
391 u32 dvscntr;
392
393 intel_plane = to_intel_plane(plane);
394
395 key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
396 key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
397 key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
398 key->flags = 0;
399
400 dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
401
402 if (dvscntr & DVS_DEST_KEY)
403 key->flags = I915_SET_COLORKEY_DESTINATION;
404 else if (dvscntr & DVS_SOURCE_KEY)
405 key->flags = I915_SET_COLORKEY_SOURCE;
406 else
407 key->flags = I915_SET_COLORKEY_NONE;
408}
409
/*
 * Update the sprite plane on @crtc: validate the request, clamp it to the
 * visible area, pin the new framebuffer object, program the hardware via
 * the per-generation update_plane hook, and unpin the previously shown
 * object afterwards.
 *
 * Returns 0 on success (including the "fully offscreen, nothing shown"
 * case) or a negative error code on invalid input / pin failure.
 */
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		   unsigned int crtc_w, unsigned int crtc_h,
		   uint32_t src_x, uint32_t src_y,
		   uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj, *old_obj;
	int pipe = intel_plane->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	int ret = 0;
	/* src_* arrive in 16.16 fixed point; integer part only is used. */
	int x = src_x >> 16, y = src_y >> 16;
	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
	bool disable_primary = false;

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* Remember the currently pinned object so we can unpin it later. */
	old_obj = intel_plane->obj;

	src_w = src_w >> 16;
	src_h = src_h >> 16;

	/* Pipe must be running... */
	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
		return -EINVAL;

	/* Sprite origin must lie within the primary plane's area. */
	if (crtc_x >= primary_w || crtc_y >= primary_h)
		return -EINVAL;

	/* Don't modify another pipe's plane */
	if (intel_plane->pipe != intel_crtc->pipe)
		return -EINVAL;

	/* Sprite planes can be linear or x-tiled surfaces */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
	case I915_TILING_X:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Clamp the width & height into the visible area. Note we don't
	 * try to scale the source if part of the visible region is offscreen.
	 * The caller must handle that by adjusting source offset and size.
	 */
	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
		crtc_w += crtc_x;
		crtc_x = 0;
	}
	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
		goto out;
	if ((crtc_x + crtc_w) > primary_w)
		crtc_w = primary_w - crtc_x;

	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
		crtc_h += crtc_y;
		crtc_y = 0;
	}
	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
		goto out;
	if (crtc_y + crtc_h > primary_h)
		crtc_h = primary_h - crtc_y;

	if (!crtc_w || !crtc_h) /* Again, nothing to display */
		goto out;

	/*
	 * We may not have a scaler, eg. HSW does not have it any more
	 */
	if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
		return -EINVAL;

	/*
	 * We can take a larger source and scale it down, but
	 * only so much... 16x is the max on SNB.
	 * (crtc_w/crtc_h are non-zero here — checked above — so no
	 * division by zero.)
	 */
	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
		return -EINVAL;

	/*
	 * If the sprite is completely covering the primary plane,
	 * we can disable the primary and save power.
	 */
	if ((crtc_x == 0) && (crtc_y == 0) &&
	    (crtc_w == primary_w) && (crtc_h == primary_h))
		disable_primary = true;

	mutex_lock(&dev->struct_mutex);

	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret)
		goto out_unlock;

	intel_plane->obj = obj;

	/*
	 * Be sure to re-enable the primary before the sprite is no longer
	 * covering it fully.
	 */
	if (!disable_primary)
		intel_enable_primary(crtc);

	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
				  crtc_w, crtc_h, x, y, src_w, src_h);

	if (disable_primary)
		intel_disable_primary(crtc);

	/* Unpin old obj after new one is active to avoid ugliness */
	if (old_obj) {
		/*
		 * It's fairly common to simply update the position of
		 * an existing object. In that case, we don't need to
		 * wait for vblank to avoid ugliness, we only need to
		 * do the pin & ref bookkeeping.
		 */
		if (old_obj != obj) {
			/* Drop the lock around the blocking vblank wait. */
			mutex_unlock(&dev->struct_mutex);
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
			mutex_lock(&dev->struct_mutex);
		}
		intel_unpin_fb_obj(old_obj);
	}

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
548
549static int
550intel_disable_plane(struct drm_plane *plane)
551{
552 struct drm_device *dev = plane->dev;
553 struct intel_plane *intel_plane = to_intel_plane(plane);
554 int ret = 0;
555
556 if (plane->crtc)
557 intel_enable_primary(plane->crtc);
558 intel_plane->disable_plane(plane);
559
560 if (!intel_plane->obj)
561 goto out;
562
563 mutex_lock(&dev->struct_mutex);
564 intel_unpin_fb_obj(intel_plane->obj);
565 intel_plane->obj = NULL;
566 mutex_unlock(&dev->struct_mutex);
567out:
568
569 return ret;
570}
571
/* Tear down a sprite plane: disable it, unregister it, free its state. */
static void intel_destroy_plane(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_disable_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
579
580int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
581 struct drm_file *file_priv)
582{
583 struct drm_intel_sprite_colorkey *set = data;
584 struct drm_mode_object *obj;
585 struct drm_plane *plane;
586 struct intel_plane *intel_plane;
587 int ret = 0;
588
589 if (!drm_core_check_feature(dev, DRIVER_MODESET))
590 return -ENODEV;
591
592 /* Make sure we don't try to enable both src & dest simultaneously */
593 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
594 return -EINVAL;
595
596 mutex_lock(&dev->mode_config.mutex);
597
598 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
599 if (!obj) {
600 ret = -EINVAL;
601 goto out_unlock;
602 }
603
604 plane = obj_to_plane(obj);
605 intel_plane = to_intel_plane(plane);
606 ret = intel_plane->update_colorkey(plane, set);
607
608out_unlock:
609 mutex_unlock(&dev->mode_config.mutex);
610 return ret;
611}
612
613int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
614 struct drm_file *file_priv)
615{
616 struct drm_intel_sprite_colorkey *get = data;
617 struct drm_mode_object *obj;
618 struct drm_plane *plane;
619 struct intel_plane *intel_plane;
620 int ret = 0;
621
622 if (!drm_core_check_feature(dev, DRIVER_MODESET))
623 return -ENODEV;
624
625 mutex_lock(&dev->mode_config.mutex);
626
627 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
628 if (!obj) {
629 ret = -EINVAL;
630 goto out_unlock;
631 }
632
633 plane = obj_to_plane(obj);
634 intel_plane = to_intel_plane(plane);
635 intel_plane->get_colorkey(plane, get);
636
637out_unlock:
638 mutex_unlock(&dev->mode_config.mutex);
639 return ret;
640}
641
/* DRM plane vtable shared by all i915 sprite planes. */
static const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = intel_update_plane,
	.disable_plane = intel_disable_plane,
	.destroy = intel_destroy_plane,
};
647
648static uint32_t ilk_plane_formats[] = {
649 DRM_FORMAT_XRGB8888,
650 DRM_FORMAT_YUYV,
651 DRM_FORMAT_YVYU,
652 DRM_FORMAT_UYVY,
653 DRM_FORMAT_VYUY,
654};
655
656static uint32_t snb_plane_formats[] = {
657 DRM_FORMAT_XBGR8888,
658 DRM_FORMAT_XRGB8888,
659 DRM_FORMAT_YUYV,
660 DRM_FORMAT_YVYU,
661 DRM_FORMAT_UYVY,
662 DRM_FORMAT_VYUY,
663};
664
665int
666intel_plane_init(struct drm_device *dev, enum pipe pipe)
667{
668 struct intel_plane *intel_plane;
669 unsigned long possible_crtcs;
670 const uint32_t *plane_formats;
671 int num_plane_formats;
672 int ret;
673
674 if (INTEL_INFO(dev)->gen < 5)
675 return -ENODEV;
676
677 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
678 if (!intel_plane)
679 return -ENOMEM;
680
681 switch (INTEL_INFO(dev)->gen) {
682 case 5:
683 case 6:
684 intel_plane->can_scale = true;
685 intel_plane->max_downscale = 16;
686 intel_plane->update_plane = ilk_update_plane;
687 intel_plane->disable_plane = ilk_disable_plane;
688 intel_plane->update_colorkey = ilk_update_colorkey;
689 intel_plane->get_colorkey = ilk_get_colorkey;
690
691 if (IS_GEN6(dev)) {
692 plane_formats = snb_plane_formats;
693 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
694 } else {
695 plane_formats = ilk_plane_formats;
696 num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
697 }
698 break;
699
700 case 7:
701 if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
702 intel_plane->can_scale = false;
703 else
704 intel_plane->can_scale = true;
705 intel_plane->max_downscale = 2;
706 intel_plane->update_plane = ivb_update_plane;
707 intel_plane->disable_plane = ivb_disable_plane;
708 intel_plane->update_colorkey = ivb_update_colorkey;
709 intel_plane->get_colorkey = ivb_get_colorkey;
710
711 plane_formats = snb_plane_formats;
712 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
713 break;
714
715 default:
716 kfree(intel_plane);
717 return -ENODEV;
718 }
719
720 intel_plane->pipe = pipe;
721 possible_crtcs = (1 << pipe);
722 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
723 &intel_plane_funcs,
724 plane_formats, num_plane_formats,
725 false);
726 if (ret)
727 kfree(intel_plane);
728
729 return ret;
730}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c127..210d570fd51 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -30,11 +30,12 @@
30 * Integrated TV-out support for the 915GM and 945GM. 30 * Integrated TV-out support for the 915GM and 945GM.
31 */ 31 */
32 32
33#include <drm/drmP.h> 33#include "drmP.h"
34#include <drm/drm_crtc.h> 34#include "drm.h"
35#include <drm/drm_edid.h> 35#include "drm_crtc.h"
36#include "drm_edid.h"
36#include "intel_drv.h" 37#include "intel_drv.h"
37#include <drm/i915_drm.h> 38#include "i915_drm.h"
38#include "i915_drv.h" 39#include "i915_drv.h"
39 40
40enum tv_margin { 41enum tv_margin {
@@ -193,10 +194,10 @@ static const u32 filter_table[] = {
193 * 194 *
194 * if (f >= 1) { 195 * if (f >= 1) {
195 * exp = 0x7; 196 * exp = 0x7;
196 * mant = 1 << 8; 197 * mant = 1 << 8;
197 * } else { 198 * } else {
198 * for (exp = 0; exp < 3 && f < 0.5; exp++) 199 * for (exp = 0; exp < 3 && f < 0.5; exp++)
199 * f *= 2.0; 200 * f *= 2.0;
200 * mant = (f * (1 << 9) + 0.5); 201 * mant = (f * (1 << 9) + 0.5);
201 * if (mant >= (1 << 9)) 202 * if (mant >= (1 << 9))
202 * mant = (1 << 9) - 1; 203 * mant = (1 << 9) - 1;
@@ -416,7 +417,7 @@ static const struct tv_mode tv_modes[] = {
416 { 417 {
417 .name = "NTSC-M", 418 .name = "NTSC-M",
418 .clock = 108000, 419 .clock = 108000,
419 .refresh = 59940, 420 .refresh = 29970,
420 .oversample = TV_OVERSAMPLE_8X, 421 .oversample = TV_OVERSAMPLE_8X,
421 .component_only = 0, 422 .component_only = 0,
422 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ 423 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
@@ -429,7 +430,7 @@ static const struct tv_mode tv_modes[] = {
429 .vsync_start_f1 = 6, .vsync_start_f2 = 7, 430 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
430 .vsync_len = 6, 431 .vsync_len = 6,
431 432
432 .veq_ena = true, .veq_start_f1 = 0, 433 .veq_ena = true, .veq_start_f1 = 0,
433 .veq_start_f2 = 1, .veq_len = 18, 434 .veq_start_f2 = 1, .veq_len = 18,
434 435
435 .vi_end_f1 = 20, .vi_end_f2 = 21, 436 .vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -459,7 +460,7 @@ static const struct tv_mode tv_modes[] = {
459 { 460 {
460 .name = "NTSC-443", 461 .name = "NTSC-443",
461 .clock = 108000, 462 .clock = 108000,
462 .refresh = 59940, 463 .refresh = 29970,
463 .oversample = TV_OVERSAMPLE_8X, 464 .oversample = TV_OVERSAMPLE_8X,
464 .component_only = 0, 465 .component_only = 0,
465 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ 466 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
@@ -471,7 +472,7 @@ static const struct tv_mode tv_modes[] = {
471 .vsync_start_f1 = 6, .vsync_start_f2 = 7, 472 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
472 .vsync_len = 6, 473 .vsync_len = 6,
473 474
474 .veq_ena = true, .veq_start_f1 = 0, 475 .veq_ena = true, .veq_start_f1 = 0,
475 .veq_start_f2 = 1, .veq_len = 18, 476 .veq_start_f2 = 1, .veq_len = 18,
476 477
477 .vi_end_f1 = 20, .vi_end_f2 = 21, 478 .vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -501,7 +502,7 @@ static const struct tv_mode tv_modes[] = {
501 { 502 {
502 .name = "NTSC-J", 503 .name = "NTSC-J",
503 .clock = 108000, 504 .clock = 108000,
504 .refresh = 59940, 505 .refresh = 29970,
505 .oversample = TV_OVERSAMPLE_8X, 506 .oversample = TV_OVERSAMPLE_8X,
506 .component_only = 0, 507 .component_only = 0,
507 508
@@ -514,7 +515,7 @@ static const struct tv_mode tv_modes[] = {
514 .vsync_start_f1 = 6, .vsync_start_f2 = 7, 515 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
515 .vsync_len = 6, 516 .vsync_len = 6,
516 517
517 .veq_ena = true, .veq_start_f1 = 0, 518 .veq_ena = true, .veq_start_f1 = 0,
518 .veq_start_f2 = 1, .veq_len = 18, 519 .veq_start_f2 = 1, .veq_len = 18,
519 520
520 .vi_end_f1 = 20, .vi_end_f2 = 21, 521 .vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -544,7 +545,7 @@ static const struct tv_mode tv_modes[] = {
544 { 545 {
545 .name = "PAL-M", 546 .name = "PAL-M",
546 .clock = 108000, 547 .clock = 108000,
547 .refresh = 59940, 548 .refresh = 29970,
548 .oversample = TV_OVERSAMPLE_8X, 549 .oversample = TV_OVERSAMPLE_8X,
549 .component_only = 0, 550 .component_only = 0,
550 551
@@ -557,7 +558,7 @@ static const struct tv_mode tv_modes[] = {
557 .vsync_start_f1 = 6, .vsync_start_f2 = 7, 558 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
558 .vsync_len = 6, 559 .vsync_len = 6,
559 560
560 .veq_ena = true, .veq_start_f1 = 0, 561 .veq_ena = true, .veq_start_f1 = 0,
561 .veq_start_f2 = 1, .veq_len = 18, 562 .veq_start_f2 = 1, .veq_len = 18,
562 563
563 .vi_end_f1 = 20, .vi_end_f2 = 21, 564 .vi_end_f1 = 20, .vi_end_f2 = 21,
@@ -588,7 +589,7 @@ static const struct tv_mode tv_modes[] = {
588 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 589 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
589 .name = "PAL-N", 590 .name = "PAL-N",
590 .clock = 108000, 591 .clock = 108000,
591 .refresh = 50000, 592 .refresh = 25000,
592 .oversample = TV_OVERSAMPLE_8X, 593 .oversample = TV_OVERSAMPLE_8X,
593 .component_only = 0, 594 .component_only = 0,
594 595
@@ -601,14 +602,14 @@ static const struct tv_mode tv_modes[] = {
601 .vsync_start_f1 = 6, .vsync_start_f2 = 7, 602 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
602 .vsync_len = 6, 603 .vsync_len = 6,
603 604
604 .veq_ena = true, .veq_start_f1 = 0, 605 .veq_ena = true, .veq_start_f1 = 0,
605 .veq_start_f2 = 1, .veq_len = 18, 606 .veq_start_f2 = 1, .veq_len = 18,
606 607
607 .vi_end_f1 = 24, .vi_end_f2 = 25, 608 .vi_end_f1 = 24, .vi_end_f2 = 25,
608 .nbr_end = 286, 609 .nbr_end = 286,
609 610
610 .burst_ena = true, 611 .burst_ena = true,
611 .hburst_start = 73, .hburst_len = 34, 612 .hburst_start = 73, .hburst_len = 34,
612 .vburst_start_f1 = 8, .vburst_end_f1 = 285, 613 .vburst_start_f1 = 8, .vburst_end_f1 = 285,
613 .vburst_start_f2 = 8, .vburst_end_f2 = 286, 614 .vburst_start_f2 = 8, .vburst_end_f2 = 286,
614 .vburst_start_f3 = 9, .vburst_end_f3 = 286, 615 .vburst_start_f3 = 9, .vburst_end_f3 = 286,
@@ -633,7 +634,7 @@ static const struct tv_mode tv_modes[] = {
633 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 634 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
634 .name = "PAL", 635 .name = "PAL",
635 .clock = 108000, 636 .clock = 108000,
636 .refresh = 50000, 637 .refresh = 25000,
637 .oversample = TV_OVERSAMPLE_8X, 638 .oversample = TV_OVERSAMPLE_8X,
638 .component_only = 0, 639 .component_only = 0,
639 640
@@ -645,7 +646,7 @@ static const struct tv_mode tv_modes[] = {
645 .vsync_start_f1 = 5, .vsync_start_f2 = 6, 646 .vsync_start_f1 = 5, .vsync_start_f2 = 6,
646 .vsync_len = 5, 647 .vsync_len = 5,
647 648
648 .veq_ena = true, .veq_start_f1 = 0, 649 .veq_ena = true, .veq_start_f1 = 0,
649 .veq_start_f2 = 1, .veq_len = 15, 650 .veq_start_f2 = 1, .veq_len = 15,
650 651
651 .vi_end_f1 = 24, .vi_end_f2 = 25, 652 .vi_end_f1 = 24, .vi_end_f2 = 25,
@@ -673,8 +674,8 @@ static const struct tv_mode tv_modes[] = {
673 .filter_table = filter_table, 674 .filter_table = filter_table,
674 }, 675 },
675 { 676 {
676 .name = "480p", 677 .name = "480p@59.94Hz",
677 .clock = 107520, 678 .clock = 107520,
678 .refresh = 59940, 679 .refresh = 59940,
679 .oversample = TV_OVERSAMPLE_4X, 680 .oversample = TV_OVERSAMPLE_4X,
680 .component_only = 1, 681 .component_only = 1,
@@ -682,7 +683,31 @@ static const struct tv_mode tv_modes[] = {
682 .hsync_end = 64, .hblank_end = 122, 683 .hsync_end = 64, .hblank_end = 122,
683 .hblank_start = 842, .htotal = 857, 684 .hblank_start = 842, .htotal = 857,
684 685
685 .progressive = true, .trilevel_sync = false, 686 .progressive = true,.trilevel_sync = false,
687
688 .vsync_start_f1 = 12, .vsync_start_f2 = 12,
689 .vsync_len = 12,
690
691 .veq_ena = false,
692
693 .vi_end_f1 = 44, .vi_end_f2 = 44,
694 .nbr_end = 479,
695
696 .burst_ena = false,
697
698 .filter_table = filter_table,
699 },
700 {
701 .name = "480p@60Hz",
702 .clock = 107520,
703 .refresh = 60000,
704 .oversample = TV_OVERSAMPLE_4X,
705 .component_only = 1,
706
707 .hsync_end = 64, .hblank_end = 122,
708 .hblank_start = 842, .htotal = 856,
709
710 .progressive = true,.trilevel_sync = false,
686 711
687 .vsync_start_f1 = 12, .vsync_start_f2 = 12, 712 .vsync_start_f1 = 12, .vsync_start_f2 = 12,
688 .vsync_len = 12, 713 .vsync_len = 12,
@@ -698,7 +723,7 @@ static const struct tv_mode tv_modes[] = {
698 }, 723 },
699 { 724 {
700 .name = "576p", 725 .name = "576p",
701 .clock = 107520, 726 .clock = 107520,
702 .refresh = 50000, 727 .refresh = 50000,
703 .oversample = TV_OVERSAMPLE_4X, 728 .oversample = TV_OVERSAMPLE_4X,
704 .component_only = 1, 729 .component_only = 1,
@@ -706,7 +731,7 @@ static const struct tv_mode tv_modes[] = {
706 .hsync_end = 64, .hblank_end = 139, 731 .hsync_end = 64, .hblank_end = 139,
707 .hblank_start = 859, .htotal = 863, 732 .hblank_start = 859, .htotal = 863,
708 733
709 .progressive = true, .trilevel_sync = false, 734 .progressive = true, .trilevel_sync = false,
710 735
711 .vsync_start_f1 = 10, .vsync_start_f2 = 10, 736 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
712 .vsync_len = 10, 737 .vsync_len = 10,
@@ -730,7 +755,31 @@ static const struct tv_mode tv_modes[] = {
730 .hsync_end = 80, .hblank_end = 300, 755 .hsync_end = 80, .hblank_end = 300,
731 .hblank_start = 1580, .htotal = 1649, 756 .hblank_start = 1580, .htotal = 1649,
732 757
733 .progressive = true, .trilevel_sync = true, 758 .progressive = true, .trilevel_sync = true,
759
760 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
761 .vsync_len = 10,
762
763 .veq_ena = false,
764
765 .vi_end_f1 = 29, .vi_end_f2 = 29,
766 .nbr_end = 719,
767
768 .burst_ena = false,
769
770 .filter_table = filter_table,
771 },
772 {
773 .name = "720p@59.94Hz",
774 .clock = 148800,
775 .refresh = 59940,
776 .oversample = TV_OVERSAMPLE_2X,
777 .component_only = 1,
778
779 .hsync_end = 80, .hblank_end = 300,
780 .hblank_start = 1580, .htotal = 1651,
781
782 .progressive = true, .trilevel_sync = true,
734 783
735 .vsync_start_f1 = 10, .vsync_start_f2 = 10, 784 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
736 .vsync_len = 10, 785 .vsync_len = 10,
@@ -754,7 +803,7 @@ static const struct tv_mode tv_modes[] = {
754 .hsync_end = 80, .hblank_end = 300, 803 .hsync_end = 80, .hblank_end = 300,
755 .hblank_start = 1580, .htotal = 1979, 804 .hblank_start = 1580, .htotal = 1979,
756 805
757 .progressive = true, .trilevel_sync = true, 806 .progressive = true, .trilevel_sync = true,
758 807
759 .vsync_start_f1 = 10, .vsync_start_f2 = 10, 808 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
760 .vsync_len = 10, 809 .vsync_len = 10,
@@ -772,19 +821,19 @@ static const struct tv_mode tv_modes[] = {
772 { 821 {
773 .name = "1080i@50Hz", 822 .name = "1080i@50Hz",
774 .clock = 148800, 823 .clock = 148800,
775 .refresh = 50000, 824 .refresh = 25000,
776 .oversample = TV_OVERSAMPLE_2X, 825 .oversample = TV_OVERSAMPLE_2X,
777 .component_only = 1, 826 .component_only = 1,
778 827
779 .hsync_end = 88, .hblank_end = 235, 828 .hsync_end = 88, .hblank_end = 235,
780 .hblank_start = 2155, .htotal = 2639, 829 .hblank_start = 2155, .htotal = 2639,
781 830
782 .progressive = false, .trilevel_sync = true, 831 .progressive = false, .trilevel_sync = true,
783 832
784 .vsync_start_f1 = 4, .vsync_start_f2 = 5, 833 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
785 .vsync_len = 10, 834 .vsync_len = 10,
786 835
787 .veq_ena = true, .veq_start_f1 = 4, 836 .veq_ena = true, .veq_start_f1 = 4,
788 .veq_start_f2 = 4, .veq_len = 10, 837 .veq_start_f2 = 4, .veq_len = 10,
789 838
790 839
@@ -798,19 +847,19 @@ static const struct tv_mode tv_modes[] = {
798 { 847 {
799 .name = "1080i@60Hz", 848 .name = "1080i@60Hz",
800 .clock = 148800, 849 .clock = 148800,
801 .refresh = 60000, 850 .refresh = 30000,
802 .oversample = TV_OVERSAMPLE_2X, 851 .oversample = TV_OVERSAMPLE_2X,
803 .component_only = 1, 852 .component_only = 1,
804 853
805 .hsync_end = 88, .hblank_end = 235, 854 .hsync_end = 88, .hblank_end = 235,
806 .hblank_start = 2155, .htotal = 2199, 855 .hblank_start = 2155, .htotal = 2199,
807 856
808 .progressive = false, .trilevel_sync = true, 857 .progressive = false, .trilevel_sync = true,
809 858
810 .vsync_start_f1 = 4, .vsync_start_f2 = 5, 859 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
811 .vsync_len = 10, 860 .vsync_len = 10,
812 861
813 .veq_ena = true, .veq_start_f1 = 4, 862 .veq_ena = true, .veq_start_f1 = 4,
814 .veq_start_f2 = 4, .veq_len = 10, 863 .veq_start_f2 = 4, .veq_len = 10,
815 864
816 865
@@ -821,6 +870,32 @@ static const struct tv_mode tv_modes[] = {
821 870
822 .filter_table = filter_table, 871 .filter_table = filter_table,
823 }, 872 },
873 {
874 .name = "1080i@59.94Hz",
875 .clock = 148800,
876 .refresh = 29970,
877 .oversample = TV_OVERSAMPLE_2X,
878 .component_only = 1,
879
880 .hsync_end = 88, .hblank_end = 235,
881 .hblank_start = 2155, .htotal = 2201,
882
883 .progressive = false, .trilevel_sync = true,
884
885 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
886 .vsync_len = 10,
887
888 .veq_ena = true, .veq_start_f1 = 4,
889 .veq_start_f2 = 4, .veq_len = 10,
890
891
892 .vi_end_f1 = 21, .vi_end_f2 = 22,
893 .nbr_end = 539,
894
895 .burst_ena = false,
896
897 .filter_table = filter_table,
898 },
824}; 899};
825 900
826static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) 901static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
@@ -835,37 +910,22 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
835 base); 910 base);
836} 911}
837 912
838static bool
839intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
840{
841 struct drm_device *dev = encoder->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private;
843 u32 tmp = I915_READ(TV_CTL);
844
845 if (!(tmp & TV_ENC_ENABLE))
846 return false;
847
848 *pipe = PORT_TO_PIPE(tmp);
849
850 return true;
851}
852
853static void 913static void
854intel_enable_tv(struct intel_encoder *encoder) 914intel_tv_dpms(struct drm_encoder *encoder, int mode)
855{ 915{
856 struct drm_device *dev = encoder->base.dev; 916 struct drm_device *dev = encoder->dev;
857 struct drm_i915_private *dev_priv = dev->dev_private;
858
859 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
860}
861
862static void
863intel_disable_tv(struct intel_encoder *encoder)
864{
865 struct drm_device *dev = encoder->base.dev;
866 struct drm_i915_private *dev_priv = dev->dev_private; 917 struct drm_i915_private *dev_priv = dev->dev_private;
867 918
868 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); 919 switch(mode) {
920 case DRM_MODE_DPMS_ON:
921 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
922 break;
923 case DRM_MODE_DPMS_STANDBY:
924 case DRM_MODE_DPMS_SUSPEND:
925 case DRM_MODE_DPMS_OFF:
926 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
927 break;
928 }
869} 929}
870 930
871static const struct tv_mode * 931static const struct tv_mode *
@@ -873,7 +933,7 @@ intel_tv_mode_lookup(const char *tv_format)
873{ 933{
874 int i; 934 int i;
875 935
876 for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { 936 for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) {
877 const struct tv_mode *tv_mode = &tv_modes[i]; 937 const struct tv_mode *tv_mode = &tv_modes[i];
878 938
879 if (!strcmp(tv_format, tv_mode->name)) 939 if (!strcmp(tv_format, tv_mode->name))
@@ -905,18 +965,24 @@ intel_tv_mode_valid(struct drm_connector *connector,
905 965
906 966
907static bool 967static bool
908intel_tv_mode_fixup(struct drm_encoder *encoder, 968intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
909 const struct drm_display_mode *mode,
910 struct drm_display_mode *adjusted_mode) 969 struct drm_display_mode *adjusted_mode)
911{ 970{
971 struct drm_device *dev = encoder->dev;
972 struct drm_mode_config *drm_config = &dev->mode_config;
912 struct intel_tv *intel_tv = enc_to_intel_tv(encoder); 973 struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
913 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 974 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
975 struct drm_encoder *other_encoder;
914 976
915 if (!tv_mode) 977 if (!tv_mode)
916 return false; 978 return false;
917 979
918 if (intel_encoder_check_is_cloned(&intel_tv->base)) 980 /* FIXME: lock encoder list */
919 return false; 981 list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
982 if (other_encoder != encoder &&
983 other_encoder->crtc == encoder->crtc)
984 return false;
985 }
920 986
921 adjusted_mode->clock = tv_mode->clock; 987 adjusted_mode->clock = tv_mode->clock;
922 return true; 988 return true;
@@ -1062,7 +1128,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1062 if (color_conversion) { 1128 if (color_conversion) {
1063 I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | 1129 I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
1064 color_conversion->gy); 1130 color_conversion->gy);
1065 I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) | 1131 I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
1066 color_conversion->ay); 1132 color_conversion->ay);
1067 I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | 1133 I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
1068 color_conversion->gu); 1134 color_conversion->gu);
@@ -1088,11 +1154,13 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1088 int dspcntr_reg = DSPCNTR(intel_crtc->plane); 1154 int dspcntr_reg = DSPCNTR(intel_crtc->plane);
1089 int pipeconf = I915_READ(pipeconf_reg); 1155 int pipeconf = I915_READ(pipeconf_reg);
1090 int dspcntr = I915_READ(dspcntr_reg); 1156 int dspcntr = I915_READ(dspcntr_reg);
1157 int dspbase_reg = DSPADDR(intel_crtc->plane);
1091 int xpos = 0x0, ypos = 0x0; 1158 int xpos = 0x0, ypos = 0x0;
1092 unsigned int xsize, ysize; 1159 unsigned int xsize, ysize;
1093 /* Pipe must be off here */ 1160 /* Pipe must be off here */
1094 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1161 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1095 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1162 /* Flush the plane changes */
1163 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1096 1164
1097 /* Wait for vblank for the disable to take effect */ 1165 /* Wait for vblank for the disable to take effect */
1098 if (IS_GEN2(dev)) 1166 if (IS_GEN2(dev))
@@ -1121,7 +1189,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1121 1189
1122 I915_WRITE(pipeconf_reg, pipeconf); 1190 I915_WRITE(pipeconf_reg, pipeconf);
1123 I915_WRITE(dspcntr_reg, dspcntr); 1191 I915_WRITE(dspcntr_reg, dspcntr);
1124 intel_flush_display_plane(dev_priv, intel_crtc->plane); 1192 /* Flush the plane changes */
1193 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1125 } 1194 }
1126 1195
1127 j = 0; 1196 j = 0;
@@ -1163,7 +1232,7 @@ static const struct drm_display_mode reported_modes[] = {
1163 * \return false if TV is disconnected. 1232 * \return false if TV is disconnected.
1164 */ 1233 */
1165static int 1234static int
1166intel_tv_detect_type(struct intel_tv *intel_tv, 1235intel_tv_detect_type (struct intel_tv *intel_tv,
1167 struct drm_connector *connector) 1236 struct drm_connector *connector)
1168{ 1237{
1169 struct drm_encoder *encoder = &intel_tv->base.base; 1238 struct drm_encoder *encoder = &intel_tv->base.base;
@@ -1206,15 +1275,6 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1206 DAC_B_0_7_V | 1275 DAC_B_0_7_V |
1207 DAC_C_0_7_V); 1276 DAC_C_0_7_V);
1208 1277
1209
1210 /*
1211 * The TV sense state should be cleared to zero on cantiga platform. Otherwise
1212 * the TV is misdetected. This is hardware requirement.
1213 */
1214 if (IS_GM45(dev))
1215 tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
1216 TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
1217
1218 I915_WRITE(TV_CTL, tv_ctl); 1278 I915_WRITE(TV_CTL, tv_ctl);
1219 I915_WRITE(TV_DAC, tv_dac); 1279 I915_WRITE(TV_DAC, tv_dac);
1220 POSTING_READ(TV_DAC); 1280 POSTING_READ(TV_DAC);
@@ -1247,11 +1307,6 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1247 1307
1248 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); 1308 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
1249 I915_WRITE(TV_CTL, save_tv_ctl); 1309 I915_WRITE(TV_CTL, save_tv_ctl);
1250 POSTING_READ(TV_CTL);
1251
1252 /* For unknown reasons the hw barfs if we don't do this vblank wait. */
1253 intel_wait_for_vblank(intel_tv->base.base.dev,
1254 to_intel_crtc(intel_tv->base.base.crtc)->pipe);
1255 1310
1256 /* Restore interrupt config */ 1311 /* Restore interrupt config */
1257 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1312 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1289,7 +1344,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1289 } 1344 }
1290 1345
1291 intel_tv->tv_format = tv_mode->name; 1346 intel_tv->tv_format = tv_mode->name;
1292 drm_object_property_set_value(&connector->base, 1347 drm_connector_property_set_value(connector,
1293 connector->dev->mode_config.tv_mode_property, i); 1348 connector->dev->mode_config.tv_mode_property, i);
1294} 1349}
1295 1350
@@ -1307,13 +1362,19 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1307 int type; 1362 int type;
1308 1363
1309 mode = reported_modes[0]; 1364 mode = reported_modes[0];
1365 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1310 1366
1311 if (force) { 1367 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
1368 type = intel_tv_detect_type(intel_tv, connector);
1369 } else if (force) {
1312 struct intel_load_detect_pipe tmp; 1370 struct intel_load_detect_pipe tmp;
1313 1371
1314 if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { 1372 if (intel_get_load_detect_pipe(&intel_tv->base, connector,
1373 &mode, &tmp)) {
1315 type = intel_tv_detect_type(intel_tv, connector); 1374 type = intel_tv_detect_type(intel_tv, connector);
1316 intel_release_load_detect_pipe(connector, &tmp); 1375 intel_release_load_detect_pipe(&intel_tv->base,
1376 connector,
1377 &tmp);
1317 } else 1378 } else
1318 return connector_status_unknown; 1379 return connector_status_unknown;
1319 } else 1380 } else
@@ -1425,7 +1486,7 @@ intel_tv_get_modes(struct drm_connector *connector)
1425} 1486}
1426 1487
1427static void 1488static void
1428intel_tv_destroy(struct drm_connector *connector) 1489intel_tv_destroy (struct drm_connector *connector)
1429{ 1490{
1430 drm_sysfs_connector_remove(connector); 1491 drm_sysfs_connector_remove(connector);
1431 drm_connector_cleanup(connector); 1492 drm_connector_cleanup(connector);
@@ -1443,7 +1504,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1443 int ret = 0; 1504 int ret = 0;
1444 bool changed = false; 1505 bool changed = false;
1445 1506
1446 ret = drm_object_property_set_value(&connector->base, property, val); 1507 ret = drm_connector_property_set_value(connector, property, val);
1447 if (ret < 0) 1508 if (ret < 0)
1448 goto out; 1509 goto out;
1449 1510
@@ -1479,20 +1540,22 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1479 } 1540 }
1480 1541
1481 if (changed && crtc) 1542 if (changed && crtc)
1482 intel_set_mode(crtc, &crtc->mode, 1543 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1483 crtc->x, crtc->y, crtc->fb); 1544 crtc->y, crtc->fb);
1484out: 1545out:
1485 return ret; 1546 return ret;
1486} 1547}
1487 1548
1488static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { 1549static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1550 .dpms = intel_tv_dpms,
1489 .mode_fixup = intel_tv_mode_fixup, 1551 .mode_fixup = intel_tv_mode_fixup,
1552 .prepare = intel_encoder_prepare,
1490 .mode_set = intel_tv_mode_set, 1553 .mode_set = intel_tv_mode_set,
1491 .disable = intel_encoder_noop, 1554 .commit = intel_encoder_commit,
1492}; 1555};
1493 1556
1494static const struct drm_connector_funcs intel_tv_connector_funcs = { 1557static const struct drm_connector_funcs intel_tv_connector_funcs = {
1495 .dpms = intel_connector_dpms, 1558 .dpms = drm_helper_connector_dpms,
1496 .detect = intel_tv_detect, 1559 .detect = intel_tv_detect,
1497 .destroy = intel_tv_destroy, 1560 .destroy = intel_tv_destroy,
1498 .set_property = intel_tv_set_property, 1561 .set_property = intel_tv_set_property,
@@ -1622,15 +1685,10 @@ intel_tv_init(struct drm_device *dev)
1622 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1685 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1623 DRM_MODE_ENCODER_TVDAC); 1686 DRM_MODE_ENCODER_TVDAC);
1624 1687
1625 intel_encoder->enable = intel_enable_tv;
1626 intel_encoder->disable = intel_disable_tv;
1627 intel_encoder->get_hw_state = intel_tv_get_hw_state;
1628 intel_connector->get_hw_state = intel_connector_get_hw_state;
1629
1630 intel_connector_attach_encoder(intel_connector, intel_encoder); 1688 intel_connector_attach_encoder(intel_connector, intel_encoder);
1631 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1689 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1632 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1690 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1633 intel_encoder->cloneable = false; 1691 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1634 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); 1692 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
1635 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1693 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1636 intel_tv->type = DRM_MODE_CONNECTOR_Unknown; 1694 intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
@@ -1655,18 +1713,18 @@ intel_tv_init(struct drm_device *dev)
1655 ARRAY_SIZE(tv_modes), 1713 ARRAY_SIZE(tv_modes),
1656 tv_format_names); 1714 tv_format_names);
1657 1715
1658 drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, 1716 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
1659 initial_mode); 1717 initial_mode);
1660 drm_object_attach_property(&connector->base, 1718 drm_connector_attach_property(connector,
1661 dev->mode_config.tv_left_margin_property, 1719 dev->mode_config.tv_left_margin_property,
1662 intel_tv->margin[TV_MARGIN_LEFT]); 1720 intel_tv->margin[TV_MARGIN_LEFT]);
1663 drm_object_attach_property(&connector->base, 1721 drm_connector_attach_property(connector,
1664 dev->mode_config.tv_top_margin_property, 1722 dev->mode_config.tv_top_margin_property,
1665 intel_tv->margin[TV_MARGIN_TOP]); 1723 intel_tv->margin[TV_MARGIN_TOP]);
1666 drm_object_attach_property(&connector->base, 1724 drm_connector_attach_property(connector,
1667 dev->mode_config.tv_right_margin_property, 1725 dev->mode_config.tv_right_margin_property,
1668 intel_tv->margin[TV_MARGIN_RIGHT]); 1726 intel_tv->margin[TV_MARGIN_RIGHT]);
1669 drm_object_attach_property(&connector->base, 1727 drm_connector_attach_property(connector,
1670 dev->mode_config.tv_bottom_margin_property, 1728 dev->mode_config.tv_bottom_margin_property,
1671 intel_tv->margin[TV_MARGIN_BOTTOM]); 1729 intel_tv->margin[TV_MARGIN_BOTTOM]);
1672 drm_sysfs_connector_add(connector); 1730 drm_sysfs_connector_add(connector);