Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile               |    1
-rw-r--r--  drivers/gpu/drm/i915/dvo.h                  |   16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c           |   21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c           |   17
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c             |   23
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c           |  588
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c           |   20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c           |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  251
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |   73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |   61
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  239
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             | 1514
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     |   65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c      |  174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c       |   44
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  391
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |  144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c      |   16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |  194
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  328
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |  220
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h           |   25
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |  165
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c            |  144
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 2129
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |  401
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |  147
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c            |  115
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           |  221
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   99
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c          |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |   60
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             |  420
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  152
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |   20
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |  210
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c             |   70
39 files changed, 5729 insertions(+), 3091 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e..0f2c5493242b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o \
+	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 573de82c9f5a..33a62ad80100 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -57,13 +57,12 @@ struct intel_dvo_dev_ops {
 	void (*create_resources)(struct intel_dvo_device *dvo);
 
 	/*
-	 * Turn on/off output or set intermediate power levels if available.
+	 * Turn on/off output.
 	 *
-	 * Unsupported intermediate modes drop to the lower power setting.
-	 * If the mode is DPMSModeOff, the output must be disabled,
-	 * as the DPLL may be disabled afterwards.
+	 * Because none of our dvo drivers support an intermediate power levels,
+	 * we don't expose this in the interfac.
 	 */
-	void (*dpms)(struct intel_dvo_device *dvo, int mode);
+	void (*dpms)(struct intel_dvo_device *dvo, bool enable);
 
 	/*
 	 * Callback for testing a video mode for a given output.
@@ -114,6 +113,12 @@ struct intel_dvo_dev_ops {
 	 */
 	enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
 
+	/*
+	 * Probe the current hw status, returning true if the connected output
+	 * is active.
+	 */
+	bool (*get_hw_state)(struct intel_dvo_device *dev);
+
 	/**
 	 * Query the device for the modes it provides.
 	 *
@@ -139,5 +144,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
 
 #endif /* _INTEL_DVO_H */
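
For illustration only, not part of the commit: a minimal sketch of how a dvo driver fills in the reworked bool-taking dpms hook together with the new get_hw_state callback declared above. EXAMPLE_REG8, EXAMPLE_8_PD and example_readb()/example_writeb() are hypothetical stand-ins for a chip's power-down register and its i2c accessors; the real drivers patched below (ch7017, ch7xxx, ivch, sil164, tfp410, ns2501) follow the same pattern with their own registers.

/* Hypothetical example driver -- register names and i2c helpers are made up. */
static void example_dpms(struct intel_dvo_device *dvo, bool enable)
{
	uint8_t ch;

	if (!example_readb(dvo, EXAMPLE_REG8, &ch))
		return;

	if (enable)
		ch |= EXAMPLE_8_PD;	/* on this imaginary chip, PD set == powered up */
	else
		ch &= ~EXAMPLE_8_PD;

	example_writeb(dvo, EXAMPLE_REG8, ch);
}

static bool example_get_hw_state(struct intel_dvo_device *dvo)
{
	uint8_t ch;

	/* Read back the same power bit so modeset state checking can cross-check it. */
	if (!example_readb(dvo, EXAMPLE_REG8, &ch))
		return false;

	return (ch & EXAMPLE_8_PD) != 0;
}

struct intel_dvo_dev_ops example_ops = {
	.dpms = example_dpms,
	.get_hw_state = example_get_hw_state,
	/* .init, .mode_valid, .mode_set, .detect, ... as in the existing drivers */
};
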
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..86b27d1d90c2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
163}; 163};
164 164
165static void ch7017_dump_regs(struct intel_dvo_device *dvo); 165static void ch7017_dump_regs(struct intel_dvo_device *dvo);
166static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); 166static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
167 167
168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) 168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
169{ 169{
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | 309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
310 (mode->hdisplay & 0x0700) >> 8; 310 (mode->hdisplay & 0x0700) >> 8;
311 311
312 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); 312 ch7017_dpms(dvo, false);
313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, 313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
314 horizontal_active_pixel_input); 314 horizontal_active_pixel_input);
315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, 315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
331} 331}
332 332
333/* set the CH7017 power state */ 333/* set the CH7017 power state */
334static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) 334static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
335{ 335{
336 uint8_t val; 336 uint8_t val;
337 337
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
345 CH7017_DAC3_POWER_DOWN | 345 CH7017_DAC3_POWER_DOWN |
346 CH7017_TV_POWER_DOWN_EN); 346 CH7017_TV_POWER_DOWN_EN);
347 347
348 if (mode == DRM_MODE_DPMS_ON) { 348 if (enable) {
349 /* Turn on the LVDS */ 349 /* Turn on the LVDS */
350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, 350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
351 val & ~CH7017_LVDS_POWER_DOWN_EN); 351 val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
359 msleep(20); 359 msleep(20);
360} 360}
361 361
362static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
363{
364 uint8_t val;
365
366 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
367
368 if (val & CH7017_LVDS_POWER_DOWN_EN)
369 return false;
370 else
371 return true;
372}
373
362static void ch7017_dump_regs(struct intel_dvo_device *dvo) 374static void ch7017_dump_regs(struct intel_dvo_device *dvo)
363{ 375{
364 uint8_t val; 376 uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
396 .mode_valid = ch7017_mode_valid, 408 .mode_valid = ch7017_mode_valid,
397 .mode_set = ch7017_mode_set, 409 .mode_set = ch7017_mode_set,
398 .dpms = ch7017_dpms, 410 .dpms = ch7017_dpms,
411 .get_hw_state = ch7017_get_hw_state,
399 .dump_regs = ch7017_dump_regs, 412 .dump_regs = ch7017_dump_regs,
400 .destroy = ch7017_destroy, 413 .destroy = ch7017_destroy,
401}; 414};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..38f3a6cb8c7d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
289} 289}
290 290
291/* set the CH7xxx power state */ 291/* set the CH7xxx power state */
292static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) 292static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
293{ 293{
294 if (mode == DRM_MODE_DPMS_ON) 294 if (enable)
295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); 295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
296 else 296 else
297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); 297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
298} 298}
299 299
300static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
301{
302 u8 val;
303
304 ch7xxx_readb(dvo, CH7xxx_PM, &val);
305
306 if (val & CH7xxx_PM_FPD)
307 return false;
308 else
309 return true;
310}
311
300static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 312static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
301{ 313{
302 int i; 314 int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
326 .mode_valid = ch7xxx_mode_valid, 338 .mode_valid = ch7xxx_mode_valid,
327 .mode_set = ch7xxx_mode_set, 339 .mode_set = ch7xxx_mode_set,
328 .dpms = ch7xxx_dpms, 340 .dpms = ch7xxx_dpms,
341 .get_hw_state = ch7xxx_get_hw_state,
329 .dump_regs = ch7xxx_dump_regs, 342 .dump_regs = ch7xxx_dump_regs,
330 .destroy = ch7xxx_destroy, 343 .destroy = ch7xxx_destroy,
331}; 344};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..baaf65bf0bdd 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
288} 288}
289 289
290/** Sets the power state of the panel connected to the ivch */ 290/** Sets the power state of the panel connected to the ivch */
291static void ivch_dpms(struct intel_dvo_device *dvo, int mode) 291static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
292{ 292{
293 int i; 293 int i;
294 uint16_t vr01, vr30, backlight; 294 uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
297 if (!ivch_read(dvo, VR01, &vr01)) 297 if (!ivch_read(dvo, VR01, &vr01))
298 return; 298 return;
299 299
300 if (mode == DRM_MODE_DPMS_ON) 300 if (enable)
301 backlight = 1; 301 backlight = 1;
302 else 302 else
303 backlight = 0; 303 backlight = 0;
304 ivch_write(dvo, VR80, backlight); 304 ivch_write(dvo, VR80, backlight);
305 305
306 if (mode == DRM_MODE_DPMS_ON) 306 if (enable)
307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; 307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
308 else 308 else
309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); 309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
315 if (!ivch_read(dvo, VR30, &vr30)) 315 if (!ivch_read(dvo, VR30, &vr30))
316 break; 316 break;
317 317
318 if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) 318 if (((vr30 & VR30_PANEL_ON) != 0) == enable)
319 break; 319 break;
320 udelay(1000); 320 udelay(1000);
321 } 321 }
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
323 udelay(16 * 1000); 323 udelay(16 * 1000);
324} 324}
325 325
326static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
327{
328 uint16_t vr01;
329
330 /* Set the new power state of the panel. */
331 if (!ivch_read(dvo, VR01, &vr01))
332 return false;
333
334 if (vr01 & VR01_LCD_ENABLE)
335 return true;
336 else
337 return false;
338}
339
326static void ivch_mode_set(struct intel_dvo_device *dvo, 340static void ivch_mode_set(struct intel_dvo_device *dvo,
327 struct drm_display_mode *mode, 341 struct drm_display_mode *mode,
328 struct drm_display_mode *adjusted_mode) 342 struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
413struct intel_dvo_dev_ops ivch_ops = { 427struct intel_dvo_dev_ops ivch_ops = {
414 .init = ivch_init, 428 .init = ivch_init,
415 .dpms = ivch_dpms, 429 .dpms = ivch_dpms,
430 .get_hw_state = ivch_get_hw_state,
416 .mode_valid = ivch_mode_valid, 431 .mode_valid = ivch_mode_valid,
417 .mode_set = ivch_mode_set, 432 .mode_set = ivch_mode_set,
418 .detect = ivch_detect, 433 .detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 000000000000..c4a255be6979
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
1/*
2 *
3 * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "dvo.h"
30#include "i915_reg.h"
31#include "i915_drv.h"
32
33#define NS2501_VID 0x1305
34#define NS2501_DID 0x6726
35
36#define NS2501_VID_LO 0x00
37#define NS2501_VID_HI 0x01
38#define NS2501_DID_LO 0x02
39#define NS2501_DID_HI 0x03
40#define NS2501_REV 0x04
41#define NS2501_RSVD 0x05
42#define NS2501_FREQ_LO 0x06
43#define NS2501_FREQ_HI 0x07
44
45#define NS2501_REG8 0x08
46#define NS2501_8_VEN (1<<5)
47#define NS2501_8_HEN (1<<4)
48#define NS2501_8_DSEL (1<<3)
49#define NS2501_8_BPAS (1<<2)
50#define NS2501_8_RSVD (1<<1)
51#define NS2501_8_PD (1<<0)
52
53#define NS2501_REG9 0x09
54#define NS2501_9_VLOW (1<<7)
55#define NS2501_9_MSEL_MASK (0x7<<4)
56#define NS2501_9_TSEL (1<<3)
57#define NS2501_9_RSEN (1<<2)
58#define NS2501_9_RSVD (1<<1)
59#define NS2501_9_MDI (1<<0)
60
61#define NS2501_REGC 0x0c
62
63struct ns2501_priv {
64 //I2CDevRec d;
65 bool quiet;
66 int reg_8_shadow;
67 int reg_8_set;
68 // Shadow registers for i915
69 int dvoc;
70 int pll_a;
71 int srcdim;
72 int fw_blc;
73};
74
75#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
76
77/*
78 * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
79 * laptops does not react on the i2c bus unless
80 * both the PLL is running and the display is configured in its native
81 * resolution.
82 * This function forces the DVO on, and stores the registers it touches.
83 * Afterwards, registers are restored to regular values.
84 *
85 * This is pretty much a hack, though it works.
86 * Without that, ns2501_readb and ns2501_writeb fail
87 * when switching the resolution.
88 */
89
90static void enable_dvo(struct intel_dvo_device *dvo)
91{
92 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
93 struct i2c_adapter *adapter = dvo->i2c_bus;
94 struct intel_gmbus *bus = container_of(adapter,
95 struct intel_gmbus,
96 adapter);
97 struct drm_i915_private *dev_priv = bus->dev_priv;
98
99 DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
100
101 ns->dvoc = I915_READ(DVO_C);
102 ns->pll_a = I915_READ(_DPLL_A);
103 ns->srcdim = I915_READ(DVOC_SRCDIM);
104 ns->fw_blc = I915_READ(FW_BLC);
105
106 I915_WRITE(DVOC, 0x10004084);
107 I915_WRITE(_DPLL_A, 0xd0820000);
108 I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
109 I915_WRITE(FW_BLC, 0x1080304);
110
111 I915_WRITE(DVOC, 0x90004084);
112}
113
114/*
115 * Restore the I915 registers modified by the above
116 * trigger function.
117 */
118static void restore_dvo(struct intel_dvo_device *dvo)
119{
120 struct i2c_adapter *adapter = dvo->i2c_bus;
121 struct intel_gmbus *bus = container_of(adapter,
122 struct intel_gmbus,
123 adapter);
124 struct drm_i915_private *dev_priv = bus->dev_priv;
125 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
126
127 I915_WRITE(DVOC, ns->dvoc);
128 I915_WRITE(_DPLL_A, ns->pll_a);
129 I915_WRITE(DVOC_SRCDIM, ns->srcdim);
130 I915_WRITE(FW_BLC, ns->fw_blc);
131}
132
133/*
134** Read a register from the ns2501.
135** Returns true if successful, false otherwise.
136** If it returns false, it might be wise to enable the
137** DVO with the above function.
138*/
139static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
140{
141 struct ns2501_priv *ns = dvo->dev_priv;
142 struct i2c_adapter *adapter = dvo->i2c_bus;
143 u8 out_buf[2];
144 u8 in_buf[2];
145
146 struct i2c_msg msgs[] = {
147 {
148 .addr = dvo->slave_addr,
149 .flags = 0,
150 .len = 1,
151 .buf = out_buf,
152 },
153 {
154 .addr = dvo->slave_addr,
155 .flags = I2C_M_RD,
156 .len = 1,
157 .buf = in_buf,
158 }
159 };
160
161 out_buf[0] = addr;
162 out_buf[1] = 0;
163
164 if (i2c_transfer(adapter, msgs, 2) == 2) {
165 *ch = in_buf[0];
166 return true;
167 };
168
169 if (!ns->quiet) {
170 DRM_DEBUG_KMS
171 ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
172 adapter->name, dvo->slave_addr);
173 }
174
175 return false;
176}
177
178/*
179** Write a register to the ns2501.
180** Returns true if successful, false otherwise.
181** If it returns false, it might be wise to enable the
182** DVO with the above function.
183*/
184static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
185{
186 struct ns2501_priv *ns = dvo->dev_priv;
187 struct i2c_adapter *adapter = dvo->i2c_bus;
188 uint8_t out_buf[2];
189
190 struct i2c_msg msg = {
191 .addr = dvo->slave_addr,
192 .flags = 0,
193 .len = 2,
194 .buf = out_buf,
195 };
196
197 out_buf[0] = addr;
198 out_buf[1] = ch;
199
200 if (i2c_transfer(adapter, &msg, 1) == 1) {
201 return true;
202 }
203
204 if (!ns->quiet) {
205 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
206 addr, adapter->name, dvo->slave_addr);
207 }
208
209 return false;
210}
211
212/* National Semiconductor 2501 driver for chip on i2c bus
213 * scan for the chip on the bus.
214 * Hope the VBIOS initialized the PLL correctly so we can
215 * talk to it. If not, it will not be seen and not detected.
216 * Bummer!
217 */
218static bool ns2501_init(struct intel_dvo_device *dvo,
219 struct i2c_adapter *adapter)
220{
221 /* this will detect the NS2501 chip on the specified i2c bus */
222 struct ns2501_priv *ns;
223 unsigned char ch;
224
225 ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
226 if (ns == NULL)
227 return false;
228
229 dvo->i2c_bus = adapter;
230 dvo->dev_priv = ns;
231 ns->quiet = true;
232
233 if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
234 goto out;
235
236 if (ch != (NS2501_VID & 0xff)) {
237 DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
238 ch, adapter->name, dvo->slave_addr);
239 goto out;
240 }
241
242 if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
243 goto out;
244
245 if (ch != (NS2501_DID & 0xff)) {
246 DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
247 ch, adapter->name, dvo->slave_addr);
248 goto out;
249 }
250 ns->quiet = false;
251 ns->reg_8_set = 0;
252 ns->reg_8_shadow =
253 NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
254
255 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
256 return true;
257
258out:
259 kfree(ns);
260 return false;
261}
262
263static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
264{
265 /*
266 * This is a Laptop display, it doesn't have hotplugging.
267 * Even if not, the detection bit of the 2501 is unreliable as
268 * it only works for some display types.
269 * It is even more unreliable as the PLL must be active for
270 * allowing reading from the chiop.
271 */
272 return connector_status_connected;
273}
274
275static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
276 struct drm_display_mode *mode)
277{
278 DRM_DEBUG_KMS
279 ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
280 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
281 mode->vtotal);
282
283 /*
284 * Currently, these are all the modes I have data from.
285 * More might exist. Unclear how to find the native resolution
286 * of the panel in here so we could always accept it
287 * by disabling the scaler.
288 */
289 if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
290 (mode->hdisplay == 640 && mode->vdisplay == 480) ||
291 (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
292 return MODE_OK;
293 } else {
294 return MODE_ONE_SIZE; /* Is this a reasonable error? */
295 }
296}
297
298static void ns2501_mode_set(struct intel_dvo_device *dvo,
299 struct drm_display_mode *mode,
300 struct drm_display_mode *adjusted_mode)
301{
302 bool ok;
303 bool restore = false;
304 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
305
306 DRM_DEBUG_KMS
307 ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
308 __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
309 mode->vtotal);
310
311 /*
312 * Where do I find the native resolution for which scaling is not required???
313 *
314 * First trigger the DVO on as otherwise the chip does not appear on the i2c
315 * bus.
316 */
317 do {
318 ok = true;
319
320 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
321 /* mode 277 */
322 ns->reg_8_shadow &= ~NS2501_8_BPAS;
323 DRM_DEBUG_KMS("%s: switching to 800x600\n",
324 __FUNCTION__);
325
326 /*
327 * No, I do not know where this data comes from.
328 * It is just what the video bios left in the DVO, so
329 * I'm just copying it here over.
330 * This also means that I cannot support any other modes
331 * except the ones supported by the bios.
332 */
333 ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
334 ok &= ns2501_writeb(dvo, 0x1b, 0x19);
335 ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
336 ok &= ns2501_writeb(dvo, 0x1d, 0x02);
337
338 ok &= ns2501_writeb(dvo, 0x34, 0x03);
339 ok &= ns2501_writeb(dvo, 0x35, 0xff);
340
341 ok &= ns2501_writeb(dvo, 0x80, 0x27);
342 ok &= ns2501_writeb(dvo, 0x81, 0x03);
343 ok &= ns2501_writeb(dvo, 0x82, 0x41);
344 ok &= ns2501_writeb(dvo, 0x83, 0x05);
345
346 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
347 ok &= ns2501_writeb(dvo, 0x8e, 0x04);
348 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
349
350 ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
351 ok &= ns2501_writeb(dvo, 0x91, 0x07);
352 ok &= ns2501_writeb(dvo, 0x94, 0x00);
353 ok &= ns2501_writeb(dvo, 0x95, 0x00);
354
355 ok &= ns2501_writeb(dvo, 0x96, 0x00);
356
357 ok &= ns2501_writeb(dvo, 0x99, 0x00);
358 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
359
360 ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
361 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
362 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
363 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
364
365 ok &= ns2501_writeb(dvo, 0xa4, 0x80);
366
367 ok &= ns2501_writeb(dvo, 0xb6, 0x00);
368
369 ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
370 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
371
372 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
373 ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
374
375 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
376 ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
377
378 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
379 ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
380
381 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
382 ok &= ns2501_writeb(dvo, 0xc7, 0x73);
383 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
384
385 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
386 /* mode 274 */
387 DRM_DEBUG_KMS("%s: switching to 640x480\n",
388 __FUNCTION__);
389 /*
390 * No, I do not know where this data comes from.
391 * It is just what the video bios left in the DVO, so
392 * I'm just copying it here over.
393 * This also means that I cannot support any other modes
394 * except the ones supported by the bios.
395 */
396 ns->reg_8_shadow &= ~NS2501_8_BPAS;
397
398 ok &= ns2501_writeb(dvo, 0x11, 0xa0);
399 ok &= ns2501_writeb(dvo, 0x1b, 0x11);
400 ok &= ns2501_writeb(dvo, 0x1c, 0x54);
401 ok &= ns2501_writeb(dvo, 0x1d, 0x03);
402
403 ok &= ns2501_writeb(dvo, 0x34, 0x03);
404 ok &= ns2501_writeb(dvo, 0x35, 0xff);
405
406 ok &= ns2501_writeb(dvo, 0x80, 0xff);
407 ok &= ns2501_writeb(dvo, 0x81, 0x07);
408 ok &= ns2501_writeb(dvo, 0x82, 0x3d);
409 ok &= ns2501_writeb(dvo, 0x83, 0x05);
410
411 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
412 ok &= ns2501_writeb(dvo, 0x8e, 0x10);
413 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
414
415 ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
416 ok &= ns2501_writeb(dvo, 0x91, 0x07);
417 ok &= ns2501_writeb(dvo, 0x94, 0x00);
418 ok &= ns2501_writeb(dvo, 0x95, 0x00);
419
420 ok &= ns2501_writeb(dvo, 0x96, 0x05);
421
422 ok &= ns2501_writeb(dvo, 0x99, 0x00);
423 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
424
425 ok &= ns2501_writeb(dvo, 0x9c, 0x24);
426 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
427 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
428 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
429
430 ok &= ns2501_writeb(dvo, 0xa4, 0x84);
431
432 ok &= ns2501_writeb(dvo, 0xb6, 0x09);
433
434 ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
435 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
436
437 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
438 ok &= ns2501_writeb(dvo, 0xc1, 0x90);
439
440 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
441 ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
442
443 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
444 ok &= ns2501_writeb(dvo, 0xc5, 0x16);
445
446 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
447 ok &= ns2501_writeb(dvo, 0xc7, 0x02);
448 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
449
450 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
451 /* mode 280 */
452 DRM_DEBUG_KMS("%s: switching to 1024x768\n",
453 __FUNCTION__);
454 /*
455 * This might or might not work, actually. I'm silently
456 * assuming here that the native panel resolution is
457 * 1024x768. If not, then this leaves the scaler disabled
458 * generating a picture that is likely not the expected.
459 *
460 * Problem is that I do not know where to take the panel
461 * dimensions from.
462 *
463 * Enable the bypass, scaling not required.
464 *
465 * The scaler registers are irrelevant here....
466 *
467 */
468 ns->reg_8_shadow |= NS2501_8_BPAS;
469 ok &= ns2501_writeb(dvo, 0x37, 0x44);
470 } else {
471 /*
472 * Data not known. Bummer!
473 * Hopefully, the code should not go here
474 * as mode_OK delivered no other modes.
475 */
476 ns->reg_8_shadow |= NS2501_8_BPAS;
477 }
478 ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
479
480 if (!ok) {
481 if (restore)
482 restore_dvo(dvo);
483 enable_dvo(dvo);
484 restore = true;
485 }
486 } while (!ok);
487 /*
488 * Restore the old i915 registers before
489 * forcing the ns2501 on.
490 */
491 if (restore)
492 restore_dvo(dvo);
493}
494
495/* set the NS2501 power state */
496static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
497{
498 unsigned char ch;
499
500 if (!ns2501_readb(dvo, NS2501_REG8, &ch))
501 return false;
502
503 if (ch & NS2501_8_PD)
504 return true;
505 else
506 return false;
507}
508
509/* set the NS2501 power state */
510static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
511{
512 bool ok;
513 bool restore = false;
514 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
515 unsigned char ch;
516
517 DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
518 __FUNCTION__, enable);
519
520 ch = ns->reg_8_shadow;
521
522 if (enable)
523 ch |= NS2501_8_PD;
524 else
525 ch &= ~NS2501_8_PD;
526
527 if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
528 ns->reg_8_set = 1;
529 ns->reg_8_shadow = ch;
530
531 do {
532 ok = true;
533 ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
534 ok &=
535 ns2501_writeb(dvo, 0x34,
536 enable ? 0x03 : 0x00);
537 ok &=
538 ns2501_writeb(dvo, 0x35,
539 enable ? 0xff : 0x00);
540 if (!ok) {
541 if (restore)
542 restore_dvo(dvo);
543 enable_dvo(dvo);
544 restore = true;
545 }
546 } while (!ok);
547
548 if (restore)
549 restore_dvo(dvo);
550 }
551}
552
553static void ns2501_dump_regs(struct intel_dvo_device *dvo)
554{
555 uint8_t val;
556
557 ns2501_readb(dvo, NS2501_FREQ_LO, &val);
558 DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
559 ns2501_readb(dvo, NS2501_FREQ_HI, &val);
560 DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
561 ns2501_readb(dvo, NS2501_REG8, &val);
562 DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
563 ns2501_readb(dvo, NS2501_REG9, &val);
564 DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
565 ns2501_readb(dvo, NS2501_REGC, &val);
566 DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
567}
568
569static void ns2501_destroy(struct intel_dvo_device *dvo)
570{
571 struct ns2501_priv *ns = dvo->dev_priv;
572
573 if (ns) {
574 kfree(ns);
575 dvo->dev_priv = NULL;
576 }
577}
578
579struct intel_dvo_dev_ops ns2501_ops = {
580 .init = ns2501_init,
581 .detect = ns2501_detect,
582 .mode_valid = ns2501_mode_valid,
583 .mode_set = ns2501_mode_set,
584 .dpms = ns2501_dpms,
585 .get_hw_state = ns2501_get_hw_state,
586 .dump_regs = ns2501_dump_regs,
587 .destroy = ns2501_destroy,
588};
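
Not part of the commit: a condensed sketch of the retry pattern that ns2501_mode_set() and ns2501_dpms() above both use. When an i2c write fails (the chip only answers while the DVO PLL is running), the DVO is forced on with enable_dvo(), the writes are retried, and the saved i915 registers are put back with restore_dvo(). The helper name and its array parameters are invented for the illustration.

static void ns2501_write_retry(struct intel_dvo_device *dvo,
			       const uint8_t *regs, const uint8_t *vals,
			       int count)
{
	bool ok, restore = false;
	int i;

	do {
		ok = true;
		for (i = 0; i < count; i++)
			ok &= ns2501_writeb(dvo, regs[i], vals[i]);

		if (!ok) {
			if (restore)
				restore_dvo(dvo);
			enable_dvo(dvo);	/* force the DVO on so the chip responds */
			restore = true;
		}
	} while (!ok);

	if (restore)
		restore_dvo(dvo);	/* restore the i915 registers we clobbered */
}
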
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..4debd32e3e4c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
208} 208}
209 209
210/* set the SIL164 power state */ 210/* set the SIL164 power state */
211static void sil164_dpms(struct intel_dvo_device *dvo, int mode) 211static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
212{ 212{
213 int ret; 213 int ret;
214 unsigned char ch; 214 unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
217 if (ret == false) 217 if (ret == false)
218 return; 218 return;
219 219
220 if (mode == DRM_MODE_DPMS_ON) 220 if (enable)
221 ch |= SIL164_8_PD; 221 ch |= SIL164_8_PD;
222 else 222 else
223 ch &= ~SIL164_8_PD; 223 ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
226 return; 226 return;
227} 227}
228 228
229static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
230{
231 int ret;
232 unsigned char ch;
233
234 ret = sil164_readb(dvo, SIL164_REG8, &ch);
235 if (ret == false)
236 return false;
237
238 if (ch & SIL164_8_PD)
239 return true;
240 else
241 return false;
242}
243
229static void sil164_dump_regs(struct intel_dvo_device *dvo) 244static void sil164_dump_regs(struct intel_dvo_device *dvo)
230{ 245{
231 uint8_t val; 246 uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
258 .mode_valid = sil164_mode_valid, 273 .mode_valid = sil164_mode_valid,
259 .mode_set = sil164_mode_set, 274 .mode_set = sil164_mode_set,
260 .dpms = sil164_dpms, 275 .dpms = sil164_dpms,
276 .get_hw_state = sil164_get_hw_state,
261 .dump_regs = sil164_dump_regs, 277 .dump_regs = sil164_dump_regs,
262 .destroy = sil164_destroy, 278 .destroy = sil164_destroy,
263}; 279};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..e17f1b07e915 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
234} 234}
235 235
236/* set the tfp410 power state */ 236/* set the tfp410 power state */
237static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) 237static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
238{ 238{
239 uint8_t ctl1; 239 uint8_t ctl1;
240 240
241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) 241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
242 return; 242 return;
243 243
244 if (mode == DRM_MODE_DPMS_ON) 244 if (enable)
245 ctl1 |= TFP410_CTL_1_PD; 245 ctl1 |= TFP410_CTL_1_PD;
246 else 246 else
247 ctl1 &= ~TFP410_CTL_1_PD; 247 ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1); 249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
250} 250}
251 251
252static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
253{
254 uint8_t ctl1;
255
256 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
257 return false;
258
259 if (ctl1 & TFP410_CTL_1_PD)
260 return true;
261 else
262 return false;
263}
264
252static void tfp410_dump_regs(struct intel_dvo_device *dvo) 265static void tfp410_dump_regs(struct intel_dvo_device *dvo)
253{ 266{
254 uint8_t val, val2; 267 uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
299 .mode_valid = tfp410_mode_valid, 312 .mode_valid = tfp410_mode_valid,
300 .mode_set = tfp410_mode_set, 313 .mode_set = tfp410_mode_set,
301 .dpms = tfp410_dpms, 314 .dpms = tfp410_dpms,
315 .get_hw_state = tfp410_get_hw_state,
302 .dump_regs = tfp410_dump_regs, 316 .dump_regs = tfp410_dump_regs,
303 .destroy = tfp410_destroy, 317 .destroy = tfp410_destroy,
304}; 318};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 63f01e29c1fa..dde8b505bf7f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -43,7 +43,6 @@
43 43
44enum { 44enum {
45 ACTIVE_LIST, 45 ACTIVE_LIST,
46 FLUSHING_LIST,
47 INACTIVE_LIST, 46 INACTIVE_LIST,
48 PINNED_LIST, 47 PINNED_LIST,
49}; 48};
@@ -61,28 +60,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
61 60
62 seq_printf(m, "gen: %d\n", info->gen); 61 seq_printf(m, "gen: %d\n", info->gen);
63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
64#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 63#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
65 B(is_mobile); 64#define DEV_INFO_SEP ;
66 B(is_i85x); 65 DEV_INFO_FLAGS;
67 B(is_i915g); 66#undef DEV_INFO_FLAG
68 B(is_i945gm); 67#undef DEV_INFO_SEP
69 B(is_g33);
70 B(need_gfx_hws);
71 B(is_g4x);
72 B(is_pineview);
73 B(is_broadwater);
74 B(is_crestline);
75 B(has_fbc);
76 B(has_pipe_cxsr);
77 B(has_hotplug);
78 B(cursor_needs_physical);
79 B(has_overlay);
80 B(overlay_needs_physical);
81 B(supports_tv);
82 B(has_bsd_ring);
83 B(has_blt_ring);
84 B(has_llc);
85#undef B
86 68
87 return 0; 69 return 0;
88} 70}
@@ -120,20 +102,23 @@ static const char *cache_level_str(int type)
120static void 102static void
121describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 103describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
122{ 104{
123 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", 105 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
124 &obj->base, 106 &obj->base,
125 get_pin_flag(obj), 107 get_pin_flag(obj),
126 get_tiling_flag(obj), 108 get_tiling_flag(obj),
127 obj->base.size / 1024, 109 obj->base.size / 1024,
128 obj->base.read_domains, 110 obj->base.read_domains,
129 obj->base.write_domain, 111 obj->base.write_domain,
130 obj->last_rendering_seqno, 112 obj->last_read_seqno,
113 obj->last_write_seqno,
131 obj->last_fenced_seqno, 114 obj->last_fenced_seqno,
132 cache_level_str(obj->cache_level), 115 cache_level_str(obj->cache_level),
133 obj->dirty ? " dirty" : "", 116 obj->dirty ? " dirty" : "",
134 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 117 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
135 if (obj->base.name) 118 if (obj->base.name)
136 seq_printf(m, " (name: %d)", obj->base.name); 119 seq_printf(m, " (name: %d)", obj->base.name);
120 if (obj->pin_count)
121 seq_printf(m, " (pinned x %d)", obj->pin_count);
137 if (obj->fence_reg != I915_FENCE_REG_NONE) 122 if (obj->fence_reg != I915_FENCE_REG_NONE)
138 seq_printf(m, " (fence: %d)", obj->fence_reg); 123 seq_printf(m, " (fence: %d)", obj->fence_reg);
139 if (obj->gtt_space != NULL) 124 if (obj->gtt_space != NULL)
@@ -176,10 +161,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
176 seq_printf(m, "Inactive:\n"); 161 seq_printf(m, "Inactive:\n");
177 head = &dev_priv->mm.inactive_list; 162 head = &dev_priv->mm.inactive_list;
178 break; 163 break;
179 case FLUSHING_LIST:
180 seq_printf(m, "Flushing:\n");
181 head = &dev_priv->mm.flushing_list;
182 break;
183 default: 164 default:
184 mutex_unlock(&dev->struct_mutex); 165 mutex_unlock(&dev->struct_mutex);
185 return -EINVAL; 166 return -EINVAL;
@@ -217,8 +198,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
217 struct drm_info_node *node = (struct drm_info_node *) m->private; 198 struct drm_info_node *node = (struct drm_info_node *) m->private;
218 struct drm_device *dev = node->minor->dev; 199 struct drm_device *dev = node->minor->dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
220 u32 count, mappable_count; 201 u32 count, mappable_count, purgeable_count;
221 size_t size, mappable_size; 202 size_t size, mappable_size, purgeable_size;
222 struct drm_i915_gem_object *obj; 203 struct drm_i915_gem_object *obj;
223 int ret; 204 int ret;
224 205
@@ -231,13 +212,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
231 dev_priv->mm.object_memory); 212 dev_priv->mm.object_memory);
232 213
233 size = count = mappable_size = mappable_count = 0; 214 size = count = mappable_size = mappable_count = 0;
234 count_objects(&dev_priv->mm.gtt_list, gtt_list); 215 count_objects(&dev_priv->mm.bound_list, gtt_list);
235 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 216 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
236 count, mappable_count, size, mappable_size); 217 count, mappable_count, size, mappable_size);
237 218
238 size = count = mappable_size = mappable_count = 0; 219 size = count = mappable_size = mappable_count = 0;
239 count_objects(&dev_priv->mm.active_list, mm_list); 220 count_objects(&dev_priv->mm.active_list, mm_list);
240 count_objects(&dev_priv->mm.flushing_list, mm_list);
241 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 221 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
242 count, mappable_count, size, mappable_size); 222 count, mappable_count, size, mappable_size);
243 223
@@ -246,8 +226,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
246 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 226 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
247 count, mappable_count, size, mappable_size); 227 count, mappable_count, size, mappable_size);
248 228
229 size = count = purgeable_size = purgeable_count = 0;
230 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
231 size += obj->base.size, ++count;
232 if (obj->madv == I915_MADV_DONTNEED)
233 purgeable_size += obj->base.size, ++purgeable_count;
234 }
235 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
236
249 size = count = mappable_size = mappable_count = 0; 237 size = count = mappable_size = mappable_count = 0;
250 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 238 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
251 if (obj->fault_mappable) { 239 if (obj->fault_mappable) {
252 size += obj->gtt_space->size; 240 size += obj->gtt_space->size;
253 ++count; 241 ++count;
@@ -256,7 +244,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
256 mappable_size += obj->gtt_space->size; 244 mappable_size += obj->gtt_space->size;
257 ++mappable_count; 245 ++mappable_count;
258 } 246 }
247 if (obj->madv == I915_MADV_DONTNEED) {
248 purgeable_size += obj->base.size;
249 ++purgeable_count;
250 }
259 } 251 }
252 seq_printf(m, "%u purgeable objects, %zu bytes\n",
253 purgeable_count, purgeable_size);
260 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 254 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
261 mappable_count, mappable_size); 255 mappable_count, mappable_size);
262 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 256 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -285,7 +279,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
285 return ret; 279 return ret;
286 280
287 total_obj_size = total_gtt_size = count = 0; 281 total_obj_size = total_gtt_size = count = 0;
288 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 282 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
289 if (list == PINNED_LIST && obj->pin_count == 0) 283 if (list == PINNED_LIST && obj->pin_count == 0)
290 continue; 284 continue;
291 285
@@ -358,40 +352,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
358 struct drm_info_node *node = (struct drm_info_node *) m->private; 352 struct drm_info_node *node = (struct drm_info_node *) m->private;
359 struct drm_device *dev = node->minor->dev; 353 struct drm_device *dev = node->minor->dev;
360 drm_i915_private_t *dev_priv = dev->dev_private; 354 drm_i915_private_t *dev_priv = dev->dev_private;
355 struct intel_ring_buffer *ring;
361 struct drm_i915_gem_request *gem_request; 356 struct drm_i915_gem_request *gem_request;
362 int ret, count; 357 int ret, count, i;
363 358
364 ret = mutex_lock_interruptible(&dev->struct_mutex); 359 ret = mutex_lock_interruptible(&dev->struct_mutex);
365 if (ret) 360 if (ret)
366 return ret; 361 return ret;
367 362
368 count = 0; 363 count = 0;
369 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 364 for_each_ring(ring, dev_priv, i) {
370 seq_printf(m, "Render requests:\n"); 365 if (list_empty(&ring->request_list))
371 list_for_each_entry(gem_request, 366 continue;
372 &dev_priv->ring[RCS].request_list, 367
373 list) { 368 seq_printf(m, "%s requests:\n", ring->name);
374 seq_printf(m, " %d @ %d\n",
375 gem_request->seqno,
376 (int) (jiffies - gem_request->emitted_jiffies));
377 }
378 count++;
379 }
380 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
381 seq_printf(m, "BSD requests:\n");
382 list_for_each_entry(gem_request,
383 &dev_priv->ring[VCS].request_list,
384 list) {
385 seq_printf(m, " %d @ %d\n",
386 gem_request->seqno,
387 (int) (jiffies - gem_request->emitted_jiffies));
388 }
389 count++;
390 }
391 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
392 seq_printf(m, "BLT requests:\n");
393 list_for_each_entry(gem_request, 369 list_for_each_entry(gem_request,
394 &dev_priv->ring[BCS].request_list, 370 &ring->request_list,
395 list) { 371 list) {
396 seq_printf(m, " %d @ %d\n", 372 seq_printf(m, " %d @ %d\n",
397 gem_request->seqno, 373 gem_request->seqno,
@@ -412,7 +388,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
412{ 388{
413 if (ring->get_seqno) { 389 if (ring->get_seqno) {
414 seq_printf(m, "Current sequence (%s): %d\n", 390 seq_printf(m, "Current sequence (%s): %d\n",
415 ring->name, ring->get_seqno(ring)); 391 ring->name, ring->get_seqno(ring, false));
416 } 392 }
417} 393}
418 394
@@ -421,14 +397,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
421 struct drm_info_node *node = (struct drm_info_node *) m->private; 397 struct drm_info_node *node = (struct drm_info_node *) m->private;
422 struct drm_device *dev = node->minor->dev; 398 struct drm_device *dev = node->minor->dev;
423 drm_i915_private_t *dev_priv = dev->dev_private; 399 drm_i915_private_t *dev_priv = dev->dev_private;
400 struct intel_ring_buffer *ring;
424 int ret, i; 401 int ret, i;
425 402
426 ret = mutex_lock_interruptible(&dev->struct_mutex); 403 ret = mutex_lock_interruptible(&dev->struct_mutex);
427 if (ret) 404 if (ret)
428 return ret; 405 return ret;
429 406
430 for (i = 0; i < I915_NUM_RINGS; i++) 407 for_each_ring(ring, dev_priv, i)
431 i915_ring_seqno_info(m, &dev_priv->ring[i]); 408 i915_ring_seqno_info(m, ring);
432 409
433 mutex_unlock(&dev->struct_mutex); 410 mutex_unlock(&dev->struct_mutex);
434 411
@@ -441,6 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
441 struct drm_info_node *node = (struct drm_info_node *) m->private; 418 struct drm_info_node *node = (struct drm_info_node *) m->private;
442 struct drm_device *dev = node->minor->dev; 419 struct drm_device *dev = node->minor->dev;
443 drm_i915_private_t *dev_priv = dev->dev_private; 420 drm_i915_private_t *dev_priv = dev->dev_private;
421 struct intel_ring_buffer *ring;
444 int ret, i, pipe; 422 int ret, i, pipe;
445 423
446 ret = mutex_lock_interruptible(&dev->struct_mutex); 424 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -518,13 +496,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
518 } 496 }
519 seq_printf(m, "Interrupts received: %d\n", 497 seq_printf(m, "Interrupts received: %d\n",
520 atomic_read(&dev_priv->irq_received)); 498 atomic_read(&dev_priv->irq_received));
521 for (i = 0; i < I915_NUM_RINGS; i++) { 499 for_each_ring(ring, dev_priv, i) {
522 if (IS_GEN6(dev) || IS_GEN7(dev)) { 500 if (IS_GEN6(dev) || IS_GEN7(dev)) {
523 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 501 seq_printf(m,
524 dev_priv->ring[i].name, 502 "Graphics Interrupt mask (%s): %08x\n",
525 I915_READ_IMR(&dev_priv->ring[i])); 503 ring->name, I915_READ_IMR(ring));
526 } 504 }
527 i915_ring_seqno_info(m, &dev_priv->ring[i]); 505 i915_ring_seqno_info(m, ring);
528 } 506 }
529 mutex_unlock(&dev->struct_mutex); 507 mutex_unlock(&dev->struct_mutex);
530 508
@@ -547,7 +525,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
547 for (i = 0; i < dev_priv->num_fence_regs; i++) { 525 for (i = 0; i < dev_priv->num_fence_regs; i++) {
548 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 526 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
549 527
550 seq_printf(m, "Fenced object[%2d] = ", i); 528 seq_printf(m, "Fence %d, pin count = %d, object = ",
529 i, dev_priv->fence_regs[i].pin_count);
551 if (obj == NULL) 530 if (obj == NULL)
552 seq_printf(m, "unused"); 531 seq_printf(m, "unused");
553 else 532 else
@@ -629,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
629 seq_printf(m, "%s [%d]:\n", name, count); 608 seq_printf(m, "%s [%d]:\n", name, count);
630 609
631 while (count--) { 610 while (count--) {
632 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", 611 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
633 err->gtt_offset, 612 err->gtt_offset,
634 err->size, 613 err->size,
635 err->read_domains, 614 err->read_domains,
636 err->write_domain, 615 err->write_domain,
637 err->seqno, 616 err->rseqno, err->wseqno,
638 pin_flag(err->pinned), 617 pin_flag(err->pinned),
639 tiling_flag(err->tiling), 618 tiling_flag(err->tiling),
640 dirty_flag(err->dirty), 619 dirty_flag(err->dirty),
@@ -666,10 +645,9 @@ static void i915_ring_error_state(struct seq_file *m,
666 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 645 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
667 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 646 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
668 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 647 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
669 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { 648 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
670 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
671 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 649 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
672 } 650
673 if (INTEL_INFO(dev)->gen >= 4) 651 if (INTEL_INFO(dev)->gen >= 4)
674 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 652 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
675 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 653 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -718,11 +696,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
718 for (i = 0; i < dev_priv->num_fence_regs; i++) 696 for (i = 0; i < dev_priv->num_fence_regs; i++)
719 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 697 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
720 698
699 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
700 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
701
721 if (INTEL_INFO(dev)->gen >= 6) { 702 if (INTEL_INFO(dev)->gen >= 6) {
722 seq_printf(m, "ERROR: 0x%08x\n", error->error); 703 seq_printf(m, "ERROR: 0x%08x\n", error->error);
723 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 704 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
724 } 705 }
725 706
707 if (INTEL_INFO(dev)->gen == 7)
708 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
709
726 for_each_ring(ring, dev_priv, i) 710 for_each_ring(ring, dev_priv, i)
727 i915_ring_error_state(m, dev, error, i); 711 i915_ring_error_state(m, dev, error, i);
728 712
@@ -798,10 +782,14 @@ i915_error_state_write(struct file *filp,
798 struct seq_file *m = filp->private_data; 782 struct seq_file *m = filp->private_data;
799 struct i915_error_state_file_priv *error_priv = m->private; 783 struct i915_error_state_file_priv *error_priv = m->private;
800 struct drm_device *dev = error_priv->dev; 784 struct drm_device *dev = error_priv->dev;
785 int ret;
801 786
802 DRM_DEBUG_DRIVER("Resetting error state\n"); 787 DRM_DEBUG_DRIVER("Resetting error state\n");
803 788
804 mutex_lock(&dev->struct_mutex); 789 ret = mutex_lock_interruptible(&dev->struct_mutex);
790 if (ret)
791 return ret;
792
805 i915_destroy_error_state(dev); 793 i915_destroy_error_state(dev);
806 mutex_unlock(&dev->struct_mutex); 794 mutex_unlock(&dev->struct_mutex);
807 795
@@ -925,7 +913,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
925 seq_printf(m, "Render p-state limit: %d\n", 913 seq_printf(m, "Render p-state limit: %d\n",
926 rp_state_limits & 0xff); 914 rp_state_limits & 0xff);
927 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 915 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
928 GEN6_CAGF_SHIFT) * 50); 916 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
929 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 917 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
930 GEN6_CURICONT_MASK); 918 GEN6_CURICONT_MASK);
931 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 919 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -941,15 +929,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
941 929
942 max_freq = (rp_state_cap & 0xff0000) >> 16; 930 max_freq = (rp_state_cap & 0xff0000) >> 16;
943 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 931 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
944 max_freq * 50); 932 max_freq * GT_FREQUENCY_MULTIPLIER);
945 933
946 max_freq = (rp_state_cap & 0xff00) >> 8; 934 max_freq = (rp_state_cap & 0xff00) >> 8;
947 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 935 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
948 max_freq * 50); 936 max_freq * GT_FREQUENCY_MULTIPLIER);
949 937
950 max_freq = rp_state_cap & 0xff; 938 max_freq = rp_state_cap & 0xff;
951 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 939 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
952 max_freq * 50); 940 max_freq * GT_FREQUENCY_MULTIPLIER);
953 } else { 941 } else {
954 seq_printf(m, "no P-state info available\n"); 942 seq_printf(m, "no P-state info available\n");
955 } 943 }
@@ -1291,7 +1279,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1291 1279
1292 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1280 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1293 1281
1294 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1282 for (gpu_freq = dev_priv->rps.min_delay;
1283 gpu_freq <= dev_priv->rps.max_delay;
1295 gpu_freq++) { 1284 gpu_freq++) {
1296 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1285 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1297 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1286 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1302,7 +1291,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1302 continue; 1291 continue;
1303 } 1292 }
1304 ia_freq = I915_READ(GEN6_PCODE_DATA); 1293 ia_freq = I915_READ(GEN6_PCODE_DATA);
1305 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1294 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
1306 } 1295 }
1307 1296
1308 mutex_unlock(&dev->struct_mutex); 1297 mutex_unlock(&dev->struct_mutex);
@@ -1471,8 +1460,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1471 struct drm_info_node *node = (struct drm_info_node *) m->private; 1460 struct drm_info_node *node = (struct drm_info_node *) m->private;
1472 struct drm_device *dev = node->minor->dev; 1461 struct drm_device *dev = node->minor->dev;
1473 struct drm_i915_private *dev_priv = dev->dev_private; 1462 struct drm_i915_private *dev_priv = dev->dev_private;
1463 int ret;
1464
1465 ret = mutex_lock_interruptible(&dev->struct_mutex);
1466 if (ret)
1467 return ret;
1474 1468
1475 mutex_lock(&dev->struct_mutex);
1476 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1469 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1477 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1470 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1478 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1471 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1519,9 +1512,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1519 if (INTEL_INFO(dev)->gen == 6) 1512 if (INTEL_INFO(dev)->gen == 6)
1520 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1513 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1521 1514
1522 for (i = 0; i < I915_NUM_RINGS; i++) { 1515 for_each_ring(ring, dev_priv, i) {
1523 ring = &dev_priv->ring[i];
1524
1525 seq_printf(m, "%s\n", ring->name); 1516 seq_printf(m, "%s\n", ring->name);
1526 if (INTEL_INFO(dev)->gen == 7) 1517 if (INTEL_INFO(dev)->gen == 7)
1527 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1518 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1673,7 +1664,7 @@ i915_ring_stop_write(struct file *filp,
1673 struct drm_device *dev = filp->private_data; 1664 struct drm_device *dev = filp->private_data;
1674 struct drm_i915_private *dev_priv = dev->dev_private; 1665 struct drm_i915_private *dev_priv = dev->dev_private;
1675 char buf[20]; 1666 char buf[20];
1676 int val = 0; 1667 int val = 0, ret;
1677 1668
1678 if (cnt > 0) { 1669 if (cnt > 0) {
1679 if (cnt > sizeof(buf) - 1) 1670 if (cnt > sizeof(buf) - 1)
@@ -1688,7 +1679,10 @@ i915_ring_stop_write(struct file *filp,
1688 1679
1689 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1680 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1690 1681
1691 mutex_lock(&dev->struct_mutex); 1682 ret = mutex_lock_interruptible(&dev->struct_mutex);
1683 if (ret)
1684 return ret;
1685
1692 dev_priv->stop_rings = val; 1686 dev_priv->stop_rings = val;
1693 mutex_unlock(&dev->struct_mutex); 1687 mutex_unlock(&dev->struct_mutex);
1694 1688
@@ -1712,10 +1706,18 @@ i915_max_freq_read(struct file *filp,
1712 struct drm_device *dev = filp->private_data; 1706 struct drm_device *dev = filp->private_data;
1713 drm_i915_private_t *dev_priv = dev->dev_private; 1707 drm_i915_private_t *dev_priv = dev->dev_private;
1714 char buf[80]; 1708 char buf[80];
1715 int len; 1709 int len, ret;
1710
1711 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1712 return -ENODEV;
1713
1714 ret = mutex_lock_interruptible(&dev->struct_mutex);
1715 if (ret)
1716 return ret;
1716 1717
1717 len = snprintf(buf, sizeof(buf), 1718 len = snprintf(buf, sizeof(buf),
1718 "max freq: %d\n", dev_priv->max_delay * 50); 1719 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1720 mutex_unlock(&dev->struct_mutex);
1719 1721
1720 if (len > sizeof(buf)) 1722 if (len > sizeof(buf))
1721 len = sizeof(buf); 1723 len = sizeof(buf);
@@ -1732,7 +1734,10 @@ i915_max_freq_write(struct file *filp,
1732 struct drm_device *dev = filp->private_data; 1734 struct drm_device *dev = filp->private_data;
1733 struct drm_i915_private *dev_priv = dev->dev_private; 1735 struct drm_i915_private *dev_priv = dev->dev_private;
1734 char buf[20]; 1736 char buf[20];
1735 int val = 1; 1737 int val = 1, ret;
1738
1739 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1740 return -ENODEV;
1736 1741
1737 if (cnt > 0) { 1742 if (cnt > 0) {
1738 if (cnt > sizeof(buf) - 1) 1743 if (cnt > sizeof(buf) - 1)
@@ -1747,12 +1752,17 @@ i915_max_freq_write(struct file *filp,
1747 1752
1748 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1753 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1749 1754
1755 ret = mutex_lock_interruptible(&dev->struct_mutex);
1756 if (ret)
1757 return ret;
1758
1750 /* 1759 /*
1751 * Turbo will still be enabled, but won't go above the set value. 1760 * Turbo will still be enabled, but won't go above the set value.
1752 */ 1761 */
1753 dev_priv->max_delay = val / 50; 1762 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
1754 1763
1755 gen6_set_rps(dev, val / 50); 1764 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1765 mutex_unlock(&dev->struct_mutex);
1756 1766
1757 return cnt; 1767 return cnt;
1758} 1768}
@@ -1772,10 +1782,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1772 struct drm_device *dev = filp->private_data; 1782 struct drm_device *dev = filp->private_data;
1773 drm_i915_private_t *dev_priv = dev->dev_private; 1783 drm_i915_private_t *dev_priv = dev->dev_private;
1774 char buf[80]; 1784 char buf[80];
1775 int len; 1785 int len, ret;
1786
1787 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1788 return -ENODEV;
1789
1790 ret = mutex_lock_interruptible(&dev->struct_mutex);
1791 if (ret)
1792 return ret;
1776 1793
1777 len = snprintf(buf, sizeof(buf), 1794 len = snprintf(buf, sizeof(buf),
1778 "min freq: %d\n", dev_priv->min_delay * 50); 1795 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1796 mutex_unlock(&dev->struct_mutex);
1779 1797
1780 if (len > sizeof(buf)) 1798 if (len > sizeof(buf))
1781 len = sizeof(buf); 1799 len = sizeof(buf);
@@ -1790,7 +1808,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1790 struct drm_device *dev = filp->private_data; 1808 struct drm_device *dev = filp->private_data;
1791 struct drm_i915_private *dev_priv = dev->dev_private; 1809 struct drm_i915_private *dev_priv = dev->dev_private;
1792 char buf[20]; 1810 char buf[20];
1793 int val = 1; 1811 int val = 1, ret;
1812
1813 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1814 return -ENODEV;
1794 1815
1795 if (cnt > 0) { 1816 if (cnt > 0) {
1796 if (cnt > sizeof(buf) - 1) 1817 if (cnt > sizeof(buf) - 1)
@@ -1805,12 +1826,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1805 1826
1806 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1827 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1807 1828
1829 ret = mutex_lock_interruptible(&dev->struct_mutex);
1830 if (ret)
1831 return ret;
1832
1808 /* 1833 /*
1809 * Turbo will still be enabled, but won't go below the set value. 1834 * Turbo will still be enabled, but won't go below the set value.
1810 */ 1835 */
1811 dev_priv->min_delay = val / 50; 1836 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
1812 1837
1813 gen6_set_rps(dev, val / 50); 1838 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1839 mutex_unlock(&dev->struct_mutex);
1814 1840
1815 return cnt; 1841 return cnt;
1816} 1842}
@@ -1833,9 +1859,15 @@ i915_cache_sharing_read(struct file *filp,
1833 drm_i915_private_t *dev_priv = dev->dev_private; 1859 drm_i915_private_t *dev_priv = dev->dev_private;
1834 char buf[80]; 1860 char buf[80];
1835 u32 snpcr; 1861 u32 snpcr;
1836 int len; 1862 int len, ret;
1863
1864 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1865 return -ENODEV;
1866
1867 ret = mutex_lock_interruptible(&dev->struct_mutex);
1868 if (ret)
1869 return ret;
1837 1870
1838 mutex_lock(&dev_priv->dev->struct_mutex);
1839 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1871 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1840 mutex_unlock(&dev_priv->dev->struct_mutex); 1872 mutex_unlock(&dev_priv->dev->struct_mutex);
1841 1873
@@ -1861,6 +1893,9 @@ i915_cache_sharing_write(struct file *filp,
1861 u32 snpcr; 1893 u32 snpcr;
1862 int val = 1; 1894 int val = 1;
1863 1895
1896 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1897 return -ENODEV;
1898
1864 if (cnt > 0) { 1899 if (cnt > 0) {
1865 if (cnt > sizeof(buf) - 1) 1900 if (cnt > sizeof(buf) - 1)
1866 return -EINVAL; 1901 return -EINVAL;
@@ -1924,16 +1959,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
1924{ 1959{
1925 struct drm_device *dev = inode->i_private; 1960 struct drm_device *dev = inode->i_private;
1926 struct drm_i915_private *dev_priv = dev->dev_private; 1961 struct drm_i915_private *dev_priv = dev->dev_private;
1927 int ret;
1928 1962
1929 if (INTEL_INFO(dev)->gen < 6) 1963 if (INTEL_INFO(dev)->gen < 6)
1930 return 0; 1964 return 0;
1931 1965
1932 ret = mutex_lock_interruptible(&dev->struct_mutex);
1933 if (ret)
1934 return ret;
1935 gen6_gt_force_wake_get(dev_priv); 1966 gen6_gt_force_wake_get(dev_priv);
1936 mutex_unlock(&dev->struct_mutex);
1937 1967
1938 return 0; 1968 return 0;
1939} 1969}
@@ -1946,16 +1976,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
1946 if (INTEL_INFO(dev)->gen < 6) 1976 if (INTEL_INFO(dev)->gen < 6)
1947 return 0; 1977 return 0;
1948 1978
1949 /*
1950 * It's bad that we can potentially hang userspace if struct_mutex gets
1951 * forever stuck. However, if we cannot acquire this lock it means that
1952 * almost certainly the driver has hung, is not unload-able. Therefore
1953 * hanging here is probably a minor inconvenience not to be seen my
1954 * almost every user.
1955 */
1956 mutex_lock(&dev->struct_mutex);
1957 gen6_gt_force_wake_put(dev_priv); 1979 gen6_gt_force_wake_put(dev_priv);
1958 mutex_unlock(&dev->struct_mutex);
1959 1980
1960 return 0; 1981 return 0;
1961} 1982}
@@ -2005,7 +2026,6 @@ static struct drm_info_list i915_debugfs_list[] = {
2005 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 2026 {"i915_gem_gtt", i915_gem_gtt_info, 0},
2006 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2027 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2007 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2028 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2008 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
2009 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2029 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2010 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2030 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2011 {"i915_gem_request", i915_gem_request_info, 0}, 2031 {"i915_gem_request", i915_gem_request_info, 0},
@@ -2066,6 +2086,7 @@ int i915_debugfs_init(struct drm_minor *minor)
2066 &i915_cache_sharing_fops); 2086 &i915_cache_sharing_fops);
2067 if (ret) 2087 if (ret)
2068 return ret; 2088 return ret;
2089
2069 ret = i915_debugfs_create(minor->debugfs_root, minor, 2090 ret = i915_debugfs_create(minor->debugfs_root, minor,
2070 "i915_ring_stop", 2091 "i915_ring_stop",
2071 &i915_ring_stop_fops); 2092 &i915_ring_stop_fops);
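The debugfs hunks above all converge on the same handler shape: bail out early on pre-gen6 hardware, take struct_mutex with mutex_lock_interruptible() so a wedged GPU cannot leave the reading process unkillable, and convert between the 50 MHz hardware delay units and MHz through GT_FREQUENCY_MULTIPLIER. A minimal sketch of that shape follows; the function name and the input parsing are invented for illustration, not taken from the driver.

static ssize_t
example_freq_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        char buf[20];
        int val, ret;

        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;                 /* rps only exists on gen6+ */

        if (cnt >= sizeof(buf))
                return -EINVAL;
        if (copy_from_user(buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = '\0';
        if (kstrtoint(buf, 0, &val))
                return -EINVAL;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)                                /* -EINTR/-ERESTARTSYS, not a hang */
                return ret;

        /* MHz from userspace, 50 MHz units towards the hardware */
        dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
        gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
        mutex_unlock(&dev->struct_mutex);

        return cnt;
}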
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 804f1c98e279..c9bfd83dde64 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -234,10 +234,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
234 } 234 }
235 } 235 }
236 236
237 dev_priv->cpp = init->cpp; 237 dev_priv->dri1.cpp = init->cpp;
238 dev_priv->back_offset = init->back_offset; 238 dev_priv->dri1.back_offset = init->back_offset;
239 dev_priv->front_offset = init->front_offset; 239 dev_priv->dri1.front_offset = init->front_offset;
240 dev_priv->current_page = 0; 240 dev_priv->dri1.current_page = 0;
241 if (master_priv->sarea_priv) 241 if (master_priv->sarea_priv)
242 master_priv->sarea_priv->pf_current_page = 0; 242 master_priv->sarea_priv->pf_current_page = 0;
243 243
@@ -574,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
574 574
575 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 575 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
576 __func__, 576 __func__,
577 dev_priv->current_page, 577 dev_priv->dri1.current_page,
578 master_priv->sarea_priv->pf_current_page); 578 master_priv->sarea_priv->pf_current_page);
579 579
580 i915_kernel_lost_context(dev); 580 i915_kernel_lost_context(dev);
@@ -588,12 +588,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
588 588
589 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 589 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
590 OUT_RING(0); 590 OUT_RING(0);
591 if (dev_priv->current_page == 0) { 591 if (dev_priv->dri1.current_page == 0) {
592 OUT_RING(dev_priv->back_offset); 592 OUT_RING(dev_priv->dri1.back_offset);
593 dev_priv->current_page = 1; 593 dev_priv->dri1.current_page = 1;
594 } else { 594 } else {
595 OUT_RING(dev_priv->front_offset); 595 OUT_RING(dev_priv->dri1.front_offset);
596 dev_priv->current_page = 0; 596 dev_priv->dri1.current_page = 0;
597 } 597 }
598 OUT_RING(0); 598 OUT_RING(0);
599 599
@@ -612,7 +612,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
612 ADVANCE_LP_RING(); 612 ADVANCE_LP_RING();
613 } 613 }
614 614
615 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 615 master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
616 return 0; 616 return 0;
617} 617}
618 618
@@ -1008,6 +1008,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
1008 case I915_PARAM_HAS_WAIT_TIMEOUT: 1008 case I915_PARAM_HAS_WAIT_TIMEOUT:
1009 value = 1; 1009 value = 1;
1010 break; 1010 break;
1011 case I915_PARAM_HAS_SEMAPHORES:
1012 value = i915_semaphore_is_enabled(dev);
1013 break;
1014 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
1015 value = 1;
1016 break;
1011 default: 1017 default:
1012 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1018 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1013 param->param); 1019 param->param);
@@ -1424,6 +1430,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1424 kfree(ap); 1430 kfree(ap);
1425} 1431}
1426 1432
1433static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1434{
1435 const struct intel_device_info *info = dev_priv->info;
1436
1437#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
1438#define DEV_INFO_SEP ,
1439 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
1440 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
1441 info->gen,
1442 dev_priv->dev->pdev->device,
1443 DEV_INFO_FLAGS);
1444#undef DEV_INFO_FLAG
1445#undef DEV_INFO_SEP
1446}
1447
1427/** 1448/**
1428 * i915_driver_load - setup chip and create an initial config 1449 * i915_driver_load - setup chip and create an initial config
1429 * @dev: DRM device 1450 * @dev: DRM device
@@ -1439,7 +1460,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1439{ 1460{
1440 struct drm_i915_private *dev_priv; 1461 struct drm_i915_private *dev_priv;
1441 struct intel_device_info *info; 1462 struct intel_device_info *info;
1442 int ret = 0, mmio_bar; 1463 int ret = 0, mmio_bar, mmio_size;
1443 uint32_t aperture_size; 1464 uint32_t aperture_size;
1444 1465
1445 info = (struct intel_device_info *) flags; 1466 info = (struct intel_device_info *) flags;
@@ -1448,7 +1469,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1448 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1469 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1449 return -ENODEV; 1470 return -ENODEV;
1450 1471
1451
1452 /* i915 has 4 more counters */ 1472 /* i915 has 4 more counters */
1453 dev->counters += 4; 1473 dev->counters += 4;
1454 dev->types[6] = _DRM_STAT_IRQ; 1474 dev->types[6] = _DRM_STAT_IRQ;
@@ -1464,6 +1484,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1464 dev_priv->dev = dev; 1484 dev_priv->dev = dev;
1465 dev_priv->info = info; 1485 dev_priv->info = info;
1466 1486
1487 i915_dump_device_info(dev_priv);
1488
1467 if (i915_get_bridge_dev(dev)) { 1489 if (i915_get_bridge_dev(dev)) {
1468 ret = -EIO; 1490 ret = -EIO;
1469 goto free_priv; 1491 goto free_priv;
@@ -1503,7 +1525,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1503 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1525 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1504 1526
1505 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1527 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1506 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1528 /* Before gen4, the registers and the GTT are behind different BARs.
1529 * However, from gen4 onwards, the registers and the GTT are shared
1530 * in the same BAR, so we want to restrict this ioremap from
1531 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1532 * the register BAR remains the same size for all the earlier
1533 * generations up to Ironlake.
1534 */
1535 if (info->gen < 5)
1536 mmio_size = 512*1024;
1537 else
1538 mmio_size = 2*1024*1024;
1539
1540 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1507 if (!dev_priv->regs) { 1541 if (!dev_priv->regs) {
1508 DRM_ERROR("failed to map registers\n"); 1542 DRM_ERROR("failed to map registers\n");
1509 ret = -EIO; 1543 ret = -EIO;
@@ -1535,11 +1569,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1535 * 1569 *
1536 * All tasks on the workqueue are expected to acquire the dev mutex 1570 * All tasks on the workqueue are expected to acquire the dev mutex
1537 * so there is no point in running more than one instance of the 1571 * so there is no point in running more than one instance of the
1538 * workqueue at any time: max_active = 1 and NON_REENTRANT. 1572 * workqueue at any time. Use an ordered one.
1539 */ 1573 */
1540 dev_priv->wq = alloc_workqueue("i915", 1574 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1541 WQ_UNBOUND | WQ_NON_REENTRANT,
1542 1);
1543 if (dev_priv->wq == NULL) { 1575 if (dev_priv->wq == NULL) {
1544 DRM_ERROR("Failed to create our workqueue.\n"); 1576 DRM_ERROR("Failed to create our workqueue.\n");
1545 ret = -ENOMEM; 1577 ret = -ENOMEM;
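For reference, alloc_ordered_workqueue() is shorthand for an unbound workqueue with max_active limited to one, so queued items execute strictly one at a time in submission order, which is all the driver needs when every work item takes struct_mutex anyway. A self-contained sketch with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
        /* ordered: one item at a time, in the order it was queued */
        example_wq = alloc_ordered_workqueue("example", 0);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}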
@@ -1585,7 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1585 1617
1586 spin_lock_init(&dev_priv->irq_lock); 1618 spin_lock_init(&dev_priv->irq_lock);
1587 spin_lock_init(&dev_priv->error_lock); 1619 spin_lock_init(&dev_priv->error_lock);
1588 spin_lock_init(&dev_priv->rps_lock); 1620 spin_lock_init(&dev_priv->rps.lock);
1589 spin_lock_init(&dev_priv->dpio_lock); 1621 spin_lock_init(&dev_priv->dpio_lock);
1590 1622
1591 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1623 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1835,6 +1867,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
1835 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1867 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1836 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1868 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1837 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 1869 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1870 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
1871 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
1838 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 1872 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1839 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1873 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1840 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1874 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1891,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
1857 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), 1891 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
1858 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), 1892 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1859 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), 1893 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1894 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
1860}; 1895};
1861 1896
1862int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1897int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6825324e72d..aac4e5e1a5b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,9 @@ static int i915_drm_freeze(struct drm_device *dev)
469 "GEM idle failed, resume might fail\n"); 469 "GEM idle failed, resume might fail\n");
470 return error; 470 return error;
471 } 471 }
472
473 intel_modeset_disable(dev);
474
472 drm_irq_uninstall(dev); 475 drm_irq_uninstall(dev);
473 } 476 }
474 477
@@ -542,13 +545,9 @@ static int i915_drm_thaw(struct drm_device *dev)
542 mutex_unlock(&dev->struct_mutex); 545 mutex_unlock(&dev->struct_mutex);
543 546
544 intel_modeset_init_hw(dev); 547 intel_modeset_init_hw(dev);
548 intel_modeset_setup_hw_state(dev);
545 drm_mode_config_reset(dev); 549 drm_mode_config_reset(dev);
546 drm_irq_install(dev); 550 drm_irq_install(dev);
547
548 /* Resume the modeset for every activated CRTC */
549 mutex_lock(&dev->mode_config.mutex);
550 drm_helper_resume_force_mode(dev);
551 mutex_unlock(&dev->mode_config.mutex);
552 } 551 }
553 552
554 intel_opregion_init(dev); 553 intel_opregion_init(dev);
@@ -1059,7 +1058,7 @@ static bool IS_DISPLAYREG(u32 reg)
1059 * This should make it easier to transition modules over to the 1058 * This should make it easier to transition modules over to the
1060 * new register block scheme, since we can do it incrementally. 1059 * new register block scheme, since we can do it incrementally.
1061 */ 1060 */
1062 if (reg >= 0x180000) 1061 if (reg >= VLV_DISPLAY_BASE)
1063 return false; 1062 return false;
1064 1063
1065 if (reg >= RENDER_RING_BASE && 1064 if (reg >= RENDER_RING_BASE &&
@@ -1173,9 +1172,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1173 if (unlikely(__fifo_ret)) { \ 1172 if (unlikely(__fifo_ret)) { \
1174 gen6_gt_check_fifodbg(dev_priv); \ 1173 gen6_gt_check_fifodbg(dev_priv); \
1175 } \ 1174 } \
1175 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
1176 DRM_ERROR("Unclaimed write to %x\n", reg); \
1177 writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
1178 } \
1176} 1179}
1177__i915_write(8, b) 1180__i915_write(8, b)
1178__i915_write(16, w) 1181__i915_write(16, w)
1179__i915_write(32, l) 1182__i915_write(32, l)
1180__i915_write(64, q) 1183__i915_write(64, q)
1181#undef __i915_write 1184#undef __i915_write
1185
1186static const struct register_whitelist {
1187 uint64_t offset;
1188 uint32_t size;
 1189 uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1190} whitelist[] = {
1191 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
1192};
1193
1194int i915_reg_read_ioctl(struct drm_device *dev,
1195 void *data, struct drm_file *file)
1196{
1197 struct drm_i915_private *dev_priv = dev->dev_private;
1198 struct drm_i915_reg_read *reg = data;
1199 struct register_whitelist const *entry = whitelist;
1200 int i;
1201
1202 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1203 if (entry->offset == reg->offset &&
1204 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1205 break;
1206 }
1207
1208 if (i == ARRAY_SIZE(whitelist))
1209 return -EINVAL;
1210
1211 switch (entry->size) {
1212 case 8:
1213 reg->val = I915_READ64(reg->offset);
1214 break;
1215 case 4:
1216 reg->val = I915_READ(reg->offset);
1217 break;
1218 case 2:
1219 reg->val = I915_READ16(reg->offset);
1220 break;
1221 case 1:
1222 reg->val = I915_READ8(reg->offset);
1223 break;
1224 default:
1225 WARN_ON(1);
1226 return -EINVAL;
1227 }
1228
1229 return 0;
1230}
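The gen_bitmask convention in the whitelist above encodes one bit per hardware generation, so the whitelist check reduces to a single mask test. A small illustrative helper (not part of the patch) makes the encoding explicit:

/* bit N set == supported on gen N; 0xF0 therefore covers gens 4 through 7 */
static bool example_gen_in_mask(int gen, u32 gen_bitmask)
{
        return (1u << gen) & gen_bitmask;       /* e.g. gen 6 -> 0x40 */
}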
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4..4f2831aa5fed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
109 109
110#define WATCH_COHERENCY 0 110#define WATCH_COHERENCY 0
111#define WATCH_LISTS 0 111#define WATCH_LISTS 0
112#define WATCH_GTT 0
112 113
113#define I915_GEM_PHYS_CURSOR_0 1 114#define I915_GEM_PHYS_CURSOR_0 1
114#define I915_GEM_PHYS_CURSOR_1 2 115#define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
195 u32 cpu_ring_head[I915_NUM_RINGS]; 196 u32 cpu_ring_head[I915_NUM_RINGS];
196 u32 cpu_ring_tail[I915_NUM_RINGS]; 197 u32 cpu_ring_tail[I915_NUM_RINGS];
197 u32 error; /* gen6+ */ 198 u32 error; /* gen6+ */
199 u32 err_int; /* gen7 */
198 u32 instpm[I915_NUM_RINGS]; 200 u32 instpm[I915_NUM_RINGS];
199 u32 instps[I915_NUM_RINGS]; 201 u32 instps[I915_NUM_RINGS];
200 u32 instdone1; 202 u32 extra_instdone[I915_NUM_INSTDONE_REG];
201 u32 seqno[I915_NUM_RINGS]; 203 u32 seqno[I915_NUM_RINGS];
202 u64 bbaddr; 204 u64 bbaddr;
203 u32 fault_reg[I915_NUM_RINGS]; 205 u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
221 struct drm_i915_error_buffer { 223 struct drm_i915_error_buffer {
222 u32 size; 224 u32 size;
223 u32 name; 225 u32 name;
224 u32 seqno; 226 u32 rseqno, wseqno;
225 u32 gtt_offset; 227 u32 gtt_offset;
226 u32 read_domains; 228 u32 read_domains;
227 u32 write_domain; 229 u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
239}; 241};
240 242
241struct drm_i915_display_funcs { 243struct drm_i915_display_funcs {
242 void (*dpms)(struct drm_crtc *crtc, int mode);
243 bool (*fbc_enabled)(struct drm_device *dev); 244 bool (*fbc_enabled)(struct drm_device *dev);
244 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 245 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
245 void (*disable_fbc)(struct drm_device *dev); 246 void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
248 void (*update_wm)(struct drm_device *dev); 249 void (*update_wm)(struct drm_device *dev);
249 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 250 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
250 uint32_t sprite_width, int pixel_size); 251 uint32_t sprite_width, int pixel_size);
251 void (*sanitize_pm)(struct drm_device *dev);
252 void (*update_linetime_wm)(struct drm_device *dev, int pipe, 252 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
253 struct drm_display_mode *mode); 253 struct drm_display_mode *mode);
254 int (*crtc_mode_set)(struct drm_crtc *crtc, 254 int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
256 struct drm_display_mode *adjusted_mode, 256 struct drm_display_mode *adjusted_mode,
257 int x, int y, 257 int x, int y,
258 struct drm_framebuffer *old_fb); 258 struct drm_framebuffer *old_fb);
259 void (*crtc_enable)(struct drm_crtc *crtc);
260 void (*crtc_disable)(struct drm_crtc *crtc);
259 void (*off)(struct drm_crtc *crtc); 261 void (*off)(struct drm_crtc *crtc);
260 void (*write_eld)(struct drm_connector *connector, 262 void (*write_eld)(struct drm_connector *connector,
261 struct drm_crtc *crtc); 263 struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
279 void (*force_wake_put)(struct drm_i915_private *dev_priv); 281 void (*force_wake_put)(struct drm_i915_private *dev_priv);
280}; 282};
281 283
284#define DEV_INFO_FLAGS \
285 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
286 DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
287 DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
288 DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
289 DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
290 DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
291 DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
292 DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
293 DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
294 DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
295 DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
296 DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
297 DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
298 DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
299 DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
300 DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
301 DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
302 DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
303 DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
304 DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
305 DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
306 DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
307 DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
308 DEV_INFO_FLAG(has_llc)
309
282struct intel_device_info { 310struct intel_device_info {
283 u8 gen; 311 u8 gen;
284 u8 is_mobile:1; 312 u8 is_mobile:1;
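DEV_INFO_FLAGS is an X-macro list: i915_dump_device_info() in the i915_dma.c hunk above redefines DEV_INFO_FLAG and DEV_INFO_SEP immediately before expanding it, turning the same list into a row of ternary string arguments for the debug print. A stripped-down sketch of the same trick, with made-up names and only two flags:

#include <linux/kernel.h>

struct example_info {
        unsigned is_mobile:1;
        unsigned has_llc:1;
};

#define EXAMPLE_FLAGS \
        EX_FLAG(is_mobile) EX_SEP \
        EX_FLAG(has_llc)

static void example_dump(const struct example_info *info)
{
/* each list entry becomes either "name," or "" depending on the bitfield */
#define EX_FLAG(name) info->name ? #name "," : ""
#define EX_SEP ,
        printk(KERN_DEBUG "flags=%s%s\n", EXAMPLE_FLAGS);
#undef EX_FLAG
#undef EX_SEP
}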
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
402 430
403 struct resource mch_res; 431 struct resource mch_res;
404 432
405 unsigned int cpp;
406 int back_offset;
407 int front_offset;
408 int current_page;
409 int page_flipping;
410
411 atomic_t irq_received; 433 atomic_t irq_received;
412 434
413 /* protects the irq masks */ 435 /* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
425 u32 hotplug_supported_mask; 447 u32 hotplug_supported_mask;
426 struct work_struct hotplug_work; 448 struct work_struct hotplug_work;
427 449
428 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
429 int num_pipe; 450 int num_pipe;
430 int num_pch_pll; 451 int num_pch_pll;
431 452
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
434 struct timer_list hangcheck_timer; 455 struct timer_list hangcheck_timer;
435 int hangcheck_count; 456 int hangcheck_count;
436 uint32_t last_acthd[I915_NUM_RINGS]; 457 uint32_t last_acthd[I915_NUM_RINGS];
437 uint32_t last_instdone; 458 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
438 uint32_t last_instdone1;
439 459
440 unsigned int stop_rings; 460 unsigned int stop_rings;
441 461
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
666 struct drm_mm gtt_space; 686 struct drm_mm gtt_space;
667 /** List of all objects in gtt_space. Used to restore gtt 687 /** List of all objects in gtt_space. Used to restore gtt
668 * mappings on resume */ 688 * mappings on resume */
669 struct list_head gtt_list; 689 struct list_head bound_list;
690 /**
691 * List of objects which are not bound to the GTT (thus
692 * are idle and not used by the GPU) but still have
 693 * (presumably uncached) pages attached.
694 */
695 struct list_head unbound_list;
670 696
671 /** Usable portion of the GTT for GEM */ 697 /** Usable portion of the GTT for GEM */
672 unsigned long gtt_start; 698 unsigned long gtt_start;
@@ -696,17 +722,6 @@ typedef struct drm_i915_private {
696 struct list_head active_list; 722 struct list_head active_list;
697 723
698 /** 724 /**
699 * List of objects which are not in the ringbuffer but which
700 * still have a write_domain which needs to be flushed before
701 * unbinding.
702 *
703 * last_rendering_seqno is 0 while an object is in this list.
704 *
705 * A reference is held on the buffer while on this list.
706 */
707 struct list_head flushing_list;
708
709 /**
710 * LRU list of objects which are not in the ringbuffer and 725 * LRU list of objects which are not in the ringbuffer and
711 * are ready to unbind, but are still in the GTT. 726 * are ready to unbind, but are still in the GTT.
712 * 727 *
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
775 struct { 790 struct {
776 unsigned allow_batchbuffer : 1; 791 unsigned allow_batchbuffer : 1;
777 u32 __iomem *gfx_hws_cpu_addr; 792 u32 __iomem *gfx_hws_cpu_addr;
793
794 unsigned int cpp;
795 int back_offset;
796 int front_offset;
797 int current_page;
798 int page_flipping;
778 } dri1; 799 } dri1;
779 800
780 /* Kernel Modesetting */ 801 /* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
796 bool lvds_downclock_avail; 817 bool lvds_downclock_avail;
797 /* indicates the reduced downclock for LVDS*/ 818 /* indicates the reduced downclock for LVDS*/
798 int lvds_downclock; 819 int lvds_downclock;
799 struct work_struct idle_work;
800 struct timer_list idle_timer;
801 bool busy;
802 u16 orig_clock; 820 u16 orig_clock;
803 int child_dev_num; 821 int child_dev_num;
804 struct child_device_config *child_dev; 822 struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
807 825
808 bool mchbar_need_disable; 826 bool mchbar_need_disable;
809 827
810 struct work_struct rps_work; 828 /* gen6+ rps state */
811 spinlock_t rps_lock; 829 struct {
812 u32 pm_iir; 830 struct work_struct work;
813 831 u32 pm_iir;
 814 u8 cur_delay; 832 /* lock - irqsave spinlock that protects the work_struct and
815 u8 min_delay; 833 * pm_iir. */
816 u8 max_delay; 834 spinlock_t lock;
817 u8 fmax; 835
 818 u8 fstart; 836 /* The variables below and all the rps hw state are protected by
 819 837 * dev->struct_mutex. */
820 u64 last_count1; 838 u8 cur_delay;
821 unsigned long last_time1; 839 u8 min_delay;
822 unsigned long chipset_power; 840 u8 max_delay;
823 u64 last_count2; 841 } rps;
824 struct timespec last_time2; 842
825 unsigned long gfx_power; 843 /* ilk-only ips/rps state. Everything in here is protected by the global
826 int c_m; 844 * mchdev_lock in intel_pm.c */
827 int r_t; 845 struct {
828 u8 corr; 846 u8 cur_delay;
829 spinlock_t *mchdev_lock; 847 u8 min_delay;
848 u8 max_delay;
849 u8 fmax;
850 u8 fstart;
851
852 u64 last_count1;
853 unsigned long last_time1;
854 unsigned long chipset_power;
855 u64 last_count2;
856 struct timespec last_time2;
857 unsigned long gfx_power;
858 u8 corr;
859
860 int c_m;
861 int r_t;
862 } ips;
830 863
831 enum no_fbc_reason no_fbc_reason; 864 enum no_fbc_reason no_fbc_reason;
832 865
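The comments in the new rps struct spell out a two-tier locking scheme: rps.lock is an irqsave spinlock because pm_iir is updated from the interrupt handler, while the delay limits are only ever touched from process context under dev->struct_mutex. A hedged sketch of the interrupt-side half; the function name is illustrative, the fields are the ones added above:

static void example_queue_rps_work(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir;         /* consumed later by rps.work */
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}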
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
861}; 894};
862 895
863enum i915_cache_level { 896enum i915_cache_level {
864 I915_CACHE_NONE, 897 I915_CACHE_NONE = 0,
865 I915_CACHE_LLC, 898 I915_CACHE_LLC,
866 I915_CACHE_LLC_MLC, /* gen6+ */ 899 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
900};
901
902struct drm_i915_gem_object_ops {
903 /* Interface between the GEM object and its backing storage.
904 * get_pages() is called once prior to the use of the associated set
 905 * of pages before binding them into the GTT, and put_pages() is
906 * called after we no longer need them. As we expect there to be
907 * associated cost with migrating pages between the backing storage
908 * and making them available for the GPU (e.g. clflush), we may hold
909 * onto the pages after they are no longer referenced by the GPU
910 * in case they may be used again shortly (for example migrating the
911 * pages to a different memory domain within the GTT). put_pages()
912 * will therefore most likely be called when the object itself is
913 * being released or under memory pressure (where we attempt to
914 * reap pages for the shrinker).
915 */
916 int (*get_pages)(struct drm_i915_gem_object *);
917 void (*put_pages)(struct drm_i915_gem_object *);
867}; 918};
868 919
869struct drm_i915_gem_object { 920struct drm_i915_gem_object {
870 struct drm_gem_object base; 921 struct drm_gem_object base;
871 922
923 const struct drm_i915_gem_object_ops *ops;
924
872 /** Current space allocated to this object in the GTT, if any. */ 925 /** Current space allocated to this object in the GTT, if any. */
873 struct drm_mm_node *gtt_space; 926 struct drm_mm_node *gtt_space;
874 struct list_head gtt_list; 927 struct list_head gtt_list;
875 928
876 /** This object's place on the active/flushing/inactive lists */ 929 /** This object's place on the active/inactive lists */
877 struct list_head ring_list; 930 struct list_head ring_list;
878 struct list_head mm_list; 931 struct list_head mm_list;
879 /** This object's place on GPU write list */
880 struct list_head gpu_write_list;
881 /** This object's place in the batchbuffer or on the eviction list */ 932 /** This object's place in the batchbuffer or on the eviction list */
882 struct list_head exec_list; 933 struct list_head exec_list;
883 934
884 /** 935 /**
 885 * This is set if the object is on the active or flushing lists 936 * This is set if the object is on the active lists (has pending
 886 * (has pending rendering), and is not set if it's on inactive (ready 937 * rendering and so a non-zero seqno), and is not set if it is on
 887 * to be unbound). 938 * the inactive (ready to be unbound) list.
888 */ 939 */
889 unsigned int active:1; 940 unsigned int active:1;
890 941
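The new drm_i915_gem_object_ops contract is small: get_pages() must leave a populated sg_table in obj->pages, put_pages() tears it down again. The shmem-backed implementation in i915_gem.c is the real user; the skeleton below only sketches the shape of a provider, with the actual page filling elided:

static int example_get_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *st = kmalloc(sizeof(*st), GFP_KERNEL);

        if (st == NULL)
                return -ENOMEM;
        if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }
        /* ...populate the table with the object's backing pages... */
        obj->pages = st;
        return 0;
}

static void example_put_pages(struct drm_i915_gem_object *obj)
{
        sg_free_table(obj->pages);
        kfree(obj->pages);
        obj->pages = NULL;
}

static const struct drm_i915_gem_object_ops example_ops = {
        .get_pages = example_get_pages,
        .put_pages = example_put_pages,
};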
@@ -895,12 +946,6 @@ struct drm_i915_gem_object {
895 unsigned int dirty:1; 946 unsigned int dirty:1;
896 947
897 /** 948 /**
898 * This is set if the object has been written to since the last
899 * GPU flush.
900 */
901 unsigned int pending_gpu_write:1;
902
903 /**
904 * Fence register bits (if any) for this object. Will be set 949 * Fence register bits (if any) for this object. Will be set
905 * as needed when mapped into the GTT. 950 * as needed when mapped into the GTT.
906 * Protected by dev->struct_mutex. 951 * Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
961 1006
962 unsigned int has_aliasing_ppgtt_mapping:1; 1007 unsigned int has_aliasing_ppgtt_mapping:1;
963 unsigned int has_global_gtt_mapping:1; 1008 unsigned int has_global_gtt_mapping:1;
1009 unsigned int has_dma_mapping:1;
964 1010
965 struct page **pages; 1011 struct sg_table *pages;
966 1012 int pages_pin_count;
967 /**
968 * DMAR support
969 */
970 struct scatterlist *sg_list;
971 int num_sg;
972 1013
973 /* prime dma-buf support */ 1014 /* prime dma-buf support */
974 struct sg_table *sg_table;
975 void *dma_buf_vmapping; 1015 void *dma_buf_vmapping;
976 int vmapping_count; 1016 int vmapping_count;
977 1017
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
992 struct intel_ring_buffer *ring; 1032 struct intel_ring_buffer *ring;
993 1033
994 /** Breadcrumb of last rendering to the buffer. */ 1034 /** Breadcrumb of last rendering to the buffer. */
995 uint32_t last_rendering_seqno; 1035 uint32_t last_read_seqno;
1036 uint32_t last_write_seqno;
996 /** Breadcrumb of last fenced GPU access to the buffer. */ 1037 /** Breadcrumb of last fenced GPU access to the buffer. */
997 uint32_t last_fenced_seqno; 1038 uint32_t last_fenced_seqno;
998 1039
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
1135 1176
1136#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1177#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1137 1178
1179#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1180
1181#define GT_FREQUENCY_MULTIPLIER 50
1182
1138#include "i915_trace.h" 1183#include "i915_trace.h"
1139 1184
1140/** 1185/**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1256 struct drm_file *file_priv); 1301 struct drm_file *file_priv);
1257int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1302int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1258 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1304int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1305 struct drm_file *file);
1306int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1307 struct drm_file *file);
1259int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1308int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1260 struct drm_file *file_priv); 1309 struct drm_file *file_priv);
1261int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1310int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv); 1323 struct drm_file *file_priv);
1275void i915_gem_load(struct drm_device *dev); 1324void i915_gem_load(struct drm_device *dev);
1276int i915_gem_init_object(struct drm_gem_object *obj); 1325int i915_gem_init_object(struct drm_gem_object *obj);
1277int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, 1326void i915_gem_object_init(struct drm_i915_gem_object *obj,
1278 uint32_t invalidate_domains, 1327 const struct drm_i915_gem_object_ops *ops);
1279 uint32_t flush_domains);
1280struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1328struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1281 size_t size); 1329 size_t size);
1282void i915_gem_free_object(struct drm_gem_object *obj); 1330void i915_gem_free_object(struct drm_gem_object *obj);
1283int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1331int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1284 uint32_t alignment, 1332 uint32_t alignment,
1285 bool map_and_fenceable); 1333 bool map_and_fenceable,
1334 bool nonblocking);
1286void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1335void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1287int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1336int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1288void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1337void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1289void i915_gem_lastclose(struct drm_device *dev); 1338void i915_gem_lastclose(struct drm_device *dev);
1290 1339
1291int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1340int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
1292 gfp_t gfpmask); 1341static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1342{
1343 struct scatterlist *sg = obj->pages->sgl;
1344 while (n >= SG_MAX_SINGLE_ALLOC) {
1345 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
1346 n -= SG_MAX_SINGLE_ALLOC - 1;
1347 }
1348 return sg_page(sg+n);
1349}
1350static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1351{
1352 BUG_ON(obj->pages == NULL);
1353 obj->pages_pin_count++;
1354}
1355static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1356{
1357 BUG_ON(obj->pages_pin_count == 0);
1358 obj->pages_pin_count--;
1359}
1360
1293int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1361int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1294int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1295int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1362int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1296 struct intel_ring_buffer *to); 1363 struct intel_ring_buffer *to);
1297void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1364void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
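The pin_pages/unpin_pages pair added above is reference counting against the shrinker: any path about to dereference obj->pages must pin first so put_pages() cannot run underneath it. An illustrative caller (not driver code), assuming struct_mutex is already held:

static int example_peek_first_byte(struct drm_i915_gem_object *obj, u8 *out)
{
        struct page *page;
        void *vaddr;
        int ret;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;
        i915_gem_object_pin_pages(obj);         /* keep the shrinker away */

        page = i915_gem_object_get_page(obj, 0);
        vaddr = kmap_atomic(page);
        *out = *(u8 *)vaddr;
        kunmap_atomic(vaddr);

        i915_gem_object_unpin_pages(obj);
        return 0;
}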
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
1358void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1425void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1359int __must_check i915_gpu_idle(struct drm_device *dev); 1426int __must_check i915_gpu_idle(struct drm_device *dev);
1360int __must_check i915_gem_idle(struct drm_device *dev); 1427int __must_check i915_gem_idle(struct drm_device *dev);
1361int __must_check i915_add_request(struct intel_ring_buffer *ring, 1428int i915_add_request(struct intel_ring_buffer *ring,
1362 struct drm_file *file, 1429 struct drm_file *file,
1363 struct drm_i915_gem_request *request); 1430 struct drm_i915_gem_request *request);
1364int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1431int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1365 uint32_t seqno); 1432 uint32_t seqno);
1366int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1433int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
1429 1496
1430/* i915_gem_evict.c */ 1497/* i915_gem_evict.c */
1431int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1498int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1432 unsigned alignment, bool mappable); 1499 unsigned alignment,
1433int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); 1500 unsigned cache_level,
1501 bool mappable,
1502 bool nonblock);
1503int i915_gem_evict_everything(struct drm_device *dev);
1434 1504
1435/* i915_gem_stolen.c */ 1505/* i915_gem_stolen.c */
1436int i915_gem_init_stolen(struct drm_device *dev); 1506int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
1519extern void intel_modeset_gem_init(struct drm_device *dev); 1589extern void intel_modeset_gem_init(struct drm_device *dev);
1520extern void intel_modeset_cleanup(struct drm_device *dev); 1590extern void intel_modeset_cleanup(struct drm_device *dev);
1521extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1591extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1592extern void intel_modeset_setup_hw_state(struct drm_device *dev);
1522extern bool intel_fbc_enabled(struct drm_device *dev); 1593extern bool intel_fbc_enabled(struct drm_device *dev);
1523extern void intel_disable_fbc(struct drm_device *dev); 1594extern void intel_disable_fbc(struct drm_device *dev);
1524extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1595extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1529extern int intel_enable_rc6(const struct drm_device *dev); 1600extern int intel_enable_rc6(const struct drm_device *dev);
1530 1601
1531extern bool i915_semaphore_is_enabled(struct drm_device *dev); 1602extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1603int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1604 struct drm_file *file);
1532 1605
1533/* overlay */ 1606/* overlay */
1534#ifdef CONFIG_DEBUG_FS 1607#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2c93f7be8ed..e957f3740f68 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -36,12 +36,12 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/dma-buf.h> 37#include <linux/dma-buf.h>
38 38
39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43 unsigned alignment, 42 unsigned alignment,
44 bool map_and_fenceable); 43 bool map_and_fenceable,
44 bool nonblocking);
45static int i915_gem_phys_pwrite(struct drm_device *dev, 45static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 46 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 47 struct drm_i915_gem_pwrite *args,
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
55 55
56static int i915_gem_inactive_shrink(struct shrinker *shrinker, 56static int i915_gem_inactive_shrink(struct shrinker *shrinker,
57 struct shrink_control *sc); 57 struct shrink_control *sc);
58static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
58static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
59 61
60static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) 62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
140static inline bool 142static inline bool
141i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 143i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
142{ 144{
143 return !obj->active; 145 return obj->gtt_space && !obj->active;
144} 146}
145 147
146int 148int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
179 181
180 pinned = 0; 182 pinned = 0;
181 mutex_lock(&dev->struct_mutex); 183 mutex_lock(&dev->struct_mutex);
182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) 184 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
183 if (obj->pin_count) 185 if (obj->pin_count)
184 pinned += obj->gtt_space->size; 186 pinned += obj->gtt_space->size;
185 mutex_unlock(&dev->struct_mutex); 187 mutex_unlock(&dev->struct_mutex);
@@ -340,7 +342,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
340 page_length); 342 page_length);
341 kunmap_atomic(vaddr); 343 kunmap_atomic(vaddr);
342 344
343 return ret; 345 return ret ? -EFAULT : 0;
344} 346}
345 347
346static void 348static void
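The ret ? -EFAULT : 0 conversions in these helpers exist because the copy_to_user()/copy_from_user() family returns the number of bytes left uncopied rather than an errno; translating any shortfall at the leaf keeps callers from mistaking a positive byte count for success. The same idiom in a self-contained, illustrative form:

#include <linux/uaccess.h>

static int example_copy_out(void __user *dst, const void *src, unsigned long n)
{
        unsigned long not_copied = copy_to_user(dst, src, n);

        return not_copied ? -EFAULT : 0;        /* any partial copy == fault */
}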
@@ -391,7 +393,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
391 page_length); 393 page_length);
392 kunmap(page); 394 kunmap(page);
393 395
 394 return ret; 396 return ret ? -EFAULT : 0;
395} 397}
396 398
397static int 399static int
@@ -400,7 +402,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
400 struct drm_i915_gem_pread *args, 402 struct drm_i915_gem_pread *args,
401 struct drm_file *file) 403 struct drm_file *file)
402{ 404{
403 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
404 char __user *user_data; 405 char __user *user_data;
405 ssize_t remain; 406 ssize_t remain;
406 loff_t offset; 407 loff_t offset;
@@ -409,7 +410,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
409 int hit_slowpath = 0; 410 int hit_slowpath = 0;
410 int prefaulted = 0; 411 int prefaulted = 0;
411 int needs_clflush = 0; 412 int needs_clflush = 0;
412 int release_page; 413 struct scatterlist *sg;
414 int i;
413 415
414 user_data = (char __user *) (uintptr_t) args->data_ptr; 416 user_data = (char __user *) (uintptr_t) args->data_ptr;
415 remain = args->size; 417 remain = args->size;
@@ -423,16 +425,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
423 * anyway again before the next pread happens. */ 425 * anyway again before the next pread happens. */
424 if (obj->cache_level == I915_CACHE_NONE) 426 if (obj->cache_level == I915_CACHE_NONE)
425 needs_clflush = 1; 427 needs_clflush = 1;
426 ret = i915_gem_object_set_to_gtt_domain(obj, false); 428 if (obj->gtt_space) {
427 if (ret) 429 ret = i915_gem_object_set_to_gtt_domain(obj, false);
428 return ret; 430 if (ret)
431 return ret;
432 }
429 } 433 }
430 434
435 ret = i915_gem_object_get_pages(obj);
436 if (ret)
437 return ret;
438
439 i915_gem_object_pin_pages(obj);
440
431 offset = args->offset; 441 offset = args->offset;
432 442
433 while (remain > 0) { 443 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
434 struct page *page; 444 struct page *page;
435 445
446 if (i < offset >> PAGE_SHIFT)
447 continue;
448
449 if (remain <= 0)
450 break;
451
436 /* Operation in this page 452 /* Operation in this page
437 * 453 *
438 * shmem_page_offset = offset within page in shmem file 454 * shmem_page_offset = offset within page in shmem file
@@ -443,18 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
443 if ((shmem_page_offset + page_length) > PAGE_SIZE) 459 if ((shmem_page_offset + page_length) > PAGE_SIZE)
444 page_length = PAGE_SIZE - shmem_page_offset; 460 page_length = PAGE_SIZE - shmem_page_offset;
445 461
446 if (obj->pages) { 462 page = sg_page(sg);
447 page = obj->pages[offset >> PAGE_SHIFT];
448 release_page = 0;
449 } else {
450 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
451 if (IS_ERR(page)) {
452 ret = PTR_ERR(page);
453 goto out;
454 }
455 release_page = 1;
456 }
457
458 page_do_bit17_swizzling = obj_do_bit17_swizzling && 463 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
459 (page_to_phys(page) & (1 << 17)) != 0; 464 (page_to_phys(page) & (1 << 17)) != 0;
460 465
@@ -465,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
465 goto next_page; 470 goto next_page;
466 471
467 hit_slowpath = 1; 472 hit_slowpath = 1;
468 page_cache_get(page);
469 mutex_unlock(&dev->struct_mutex); 473 mutex_unlock(&dev->struct_mutex);
470 474
471 if (!prefaulted) { 475 if (!prefaulted) {
@@ -483,16 +487,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
483 needs_clflush); 487 needs_clflush);
484 488
485 mutex_lock(&dev->struct_mutex); 489 mutex_lock(&dev->struct_mutex);
486 page_cache_release(page); 490
487next_page: 491next_page:
488 mark_page_accessed(page); 492 mark_page_accessed(page);
489 if (release_page)
490 page_cache_release(page);
491 493
492 if (ret) { 494 if (ret)
493 ret = -EFAULT;
494 goto out; 495 goto out;
495 }
496 496
497 remain -= page_length; 497 remain -= page_length;
498 user_data += page_length; 498 user_data += page_length;
@@ -500,6 +500,8 @@ next_page:
500 } 500 }
501 501
502out: 502out:
503 i915_gem_object_unpin_pages(obj);
504
503 if (hit_slowpath) { 505 if (hit_slowpath) {
504 /* Fixup: Kill any reinstated backing storage pages */ 506 /* Fixup: Kill any reinstated backing storage pages */
505 if (obj->madv == __I915_MADV_PURGED) 507 if (obj->madv == __I915_MADV_PURGED)
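With obj->pages now an sg_table, the pread loop above iterates scatterlist entries instead of a page array: skip whole entries that precede the starting offset, stop once the byte budget is spent, and take each struct page from sg_page(). A condensed sketch of that walk; the helper name and the per-page copy are placeholders:

static void example_walk(struct sg_table *pages, loff_t offset, ssize_t remain)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(pages->sgl, sg, pages->nents, i) {
                struct page *page;

                if (i < offset >> PAGE_SHIFT)
                        continue;               /* before the requested range */
                if (remain <= 0)
                        break;                  /* everything copied */

                page = sg_page(sg);
                /* ...copy up to PAGE_SIZE bytes to/from this page... */
                remain -= min_t(ssize_t, remain, PAGE_SIZE);
        }
}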
@@ -605,7 +607,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
605 char __user *user_data; 607 char __user *user_data;
606 int page_offset, page_length, ret; 608 int page_offset, page_length, ret;
607 609
608 ret = i915_gem_object_pin(obj, 0, true); 610 ret = i915_gem_object_pin(obj, 0, true, true);
609 if (ret) 611 if (ret)
610 goto out; 612 goto out;
611 613
@@ -685,7 +687,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
685 page_length); 687 page_length);
686 kunmap_atomic(vaddr); 688 kunmap_atomic(vaddr);
687 689
688 return ret; 690 return ret ? -EFAULT : 0;
689} 691}
690 692
691/* Only difference to the fast-path function is that this can handle bit17 693/* Only difference to the fast-path function is that this can handle bit17
@@ -719,7 +721,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
719 page_do_bit17_swizzling); 721 page_do_bit17_swizzling);
720 kunmap(page); 722 kunmap(page);
721 723
722 return ret; 724 return ret ? -EFAULT : 0;
723} 725}
724 726
725static int 727static int
@@ -728,7 +730,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
728 struct drm_i915_gem_pwrite *args, 730 struct drm_i915_gem_pwrite *args,
729 struct drm_file *file) 731 struct drm_file *file)
730{ 732{
731 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
732 ssize_t remain; 733 ssize_t remain;
733 loff_t offset; 734 loff_t offset;
734 char __user *user_data; 735 char __user *user_data;
@@ -737,7 +738,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
737 int hit_slowpath = 0; 738 int hit_slowpath = 0;
738 int needs_clflush_after = 0; 739 int needs_clflush_after = 0;
739 int needs_clflush_before = 0; 740 int needs_clflush_before = 0;
740 int release_page; 741 int i;
742 struct scatterlist *sg;
741 743
742 user_data = (char __user *) (uintptr_t) args->data_ptr; 744 user_data = (char __user *) (uintptr_t) args->data_ptr;
743 remain = args->size; 745 remain = args->size;
@@ -751,9 +753,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
751 * right away and we therefore have to clflush anyway. */ 753 * right away and we therefore have to clflush anyway. */
752 if (obj->cache_level == I915_CACHE_NONE) 754 if (obj->cache_level == I915_CACHE_NONE)
753 needs_clflush_after = 1; 755 needs_clflush_after = 1;
754 ret = i915_gem_object_set_to_gtt_domain(obj, true); 756 if (obj->gtt_space) {
755 if (ret) 757 ret = i915_gem_object_set_to_gtt_domain(obj, true);
756 return ret; 758 if (ret)
759 return ret;
760 }
757 } 761 }
758 /* Same trick applies for invalidate partially written cachelines before 762 /* Same trick applies for invalidate partially written cachelines before
759 * writing. */ 763 * writing. */
@@ -761,13 +765,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
761 && obj->cache_level == I915_CACHE_NONE) 765 && obj->cache_level == I915_CACHE_NONE)
762 needs_clflush_before = 1; 766 needs_clflush_before = 1;
763 767
768 ret = i915_gem_object_get_pages(obj);
769 if (ret)
770 return ret;
771
772 i915_gem_object_pin_pages(obj);
773
764 offset = args->offset; 774 offset = args->offset;
765 obj->dirty = 1; 775 obj->dirty = 1;
766 776
767 while (remain > 0) { 777 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
768 struct page *page; 778 struct page *page;
769 int partial_cacheline_write; 779 int partial_cacheline_write;
770 780
781 if (i < offset >> PAGE_SHIFT)
782 continue;
783
784 if (remain <= 0)
785 break;
786
771 /* Operation in this page 787 /* Operation in this page
772 * 788 *
773 * shmem_page_offset = offset within page in shmem file 789 * shmem_page_offset = offset within page in shmem file
@@ -786,18 +802,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
786 ((shmem_page_offset | page_length) 802 ((shmem_page_offset | page_length)
787 & (boot_cpu_data.x86_clflush_size - 1)); 803 & (boot_cpu_data.x86_clflush_size - 1));
788 804
789 if (obj->pages) { 805 page = sg_page(sg);
790 page = obj->pages[offset >> PAGE_SHIFT];
791 release_page = 0;
792 } else {
793 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
794 if (IS_ERR(page)) {
795 ret = PTR_ERR(page);
796 goto out;
797 }
798 release_page = 1;
799 }
800
801 page_do_bit17_swizzling = obj_do_bit17_swizzling && 806 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
802 (page_to_phys(page) & (1 << 17)) != 0; 807 (page_to_phys(page) & (1 << 17)) != 0;
803 808
@@ -809,26 +814,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
809 goto next_page; 814 goto next_page;
810 815
811 hit_slowpath = 1; 816 hit_slowpath = 1;
812 page_cache_get(page);
813 mutex_unlock(&dev->struct_mutex); 817 mutex_unlock(&dev->struct_mutex);
814
815 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, 818 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling, 819 user_data, page_do_bit17_swizzling,
817 partial_cacheline_write, 820 partial_cacheline_write,
818 needs_clflush_after); 821 needs_clflush_after);
819 822
820 mutex_lock(&dev->struct_mutex); 823 mutex_lock(&dev->struct_mutex);
821 page_cache_release(page); 824
822next_page: 825next_page:
823 set_page_dirty(page); 826 set_page_dirty(page);
824 mark_page_accessed(page); 827 mark_page_accessed(page);
825 if (release_page)
826 page_cache_release(page);
827 828
828 if (ret) { 829 if (ret)
829 ret = -EFAULT;
830 goto out; 830 goto out;
831 }
832 831
833 remain -= page_length; 832 remain -= page_length;
834 user_data += page_length; 833 user_data += page_length;
@@ -836,6 +835,8 @@ next_page:
836 } 835 }
837 836
838out: 837out:
838 i915_gem_object_unpin_pages(obj);
839
839 if (hit_slowpath) { 840 if (hit_slowpath) {
840 /* Fixup: Kill any reinstated backing storage pages */ 841 /* Fixup: Kill any reinstated backing storage pages */
841 if (obj->madv == __I915_MADV_PURGED) 842 if (obj->madv == __I915_MADV_PURGED)
@@ -919,10 +920,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
919 goto out; 920 goto out;
920 } 921 }
921 922
922 if (obj->gtt_space && 923 if (obj->cache_level == I915_CACHE_NONE &&
923 obj->cache_level == I915_CACHE_NONE &&
924 obj->tiling_mode == I915_TILING_NONE && 924 obj->tiling_mode == I915_TILING_NONE &&
925 obj->map_and_fenceable &&
926 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 925 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
927 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 926 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
928 /* Note that the gtt paths might fail with non-page-backed user 927 /* Note that the gtt paths might fail with non-page-backed user
@@ -930,7 +929,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
930 * textures). Fallback to the shmem path in that case. */ 929 * textures). Fallback to the shmem path in that case. */
931 } 930 }
932 931
933 if (ret == -EFAULT) 932 if (ret == -EFAULT || ret == -ENOSPC)
934 ret = i915_gem_shmem_pwrite(dev, obj, args, file); 933 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
935 934
936out: 935out:
@@ -940,6 +939,240 @@ unlock:
940 return ret; 939 return ret;
941} 940}
942 941
942int
943i915_gem_check_wedge(struct drm_i915_private *dev_priv,
944 bool interruptible)
945{
946 if (atomic_read(&dev_priv->mm.wedged)) {
947 struct completion *x = &dev_priv->error_completion;
948 bool recovery_complete;
949 unsigned long flags;
950
951 /* Give the error handler a chance to run. */
952 spin_lock_irqsave(&x->wait.lock, flags);
953 recovery_complete = x->done > 0;
954 spin_unlock_irqrestore(&x->wait.lock, flags);
955
956 /* Non-interruptible callers can't handle -EAGAIN, hence return
957 * -EIO unconditionally for these. */
958 if (!interruptible)
959 return -EIO;
960
961 /* Recovery complete, but still wedged means reset failure. */
962 if (recovery_complete)
963 return -EIO;
964
965 return -EAGAIN;
966 }
967
968 return 0;
969}
970
971/*
972 * Compare seqno against outstanding lazy request. Emit a request if they are
973 * equal.
974 */
975static int
976i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
977{
978 int ret;
979
980 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
981
982 ret = 0;
983 if (seqno == ring->outstanding_lazy_request)
984 ret = i915_add_request(ring, NULL, NULL);
985
986 return ret;
987}
988
989/**
990 * __wait_seqno - wait until execution of seqno has finished
991 * @ring: the ring expected to report seqno
 992 * @seqno: the sequence number we are waiting on
993 * @interruptible: do an interruptible wait (normally yes)
994 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
995 *
 996 * Returns 0 if the seqno was found within the allotted time. Else returns the
997 * errno with remaining time filled in timeout argument.
998 */
999static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1000 bool interruptible, struct timespec *timeout)
1001{
1002 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1003 struct timespec before, now, wait_time={1,0};
1004 unsigned long timeout_jiffies;
1005 long end;
1006 bool wait_forever = true;
1007 int ret;
1008
1009 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1010 return 0;
1011
1012 trace_i915_gem_request_wait_begin(ring, seqno);
1013
1014 if (timeout != NULL) {
1015 wait_time = *timeout;
1016 wait_forever = false;
1017 }
1018
1019 timeout_jiffies = timespec_to_jiffies(&wait_time);
1020
1021 if (WARN_ON(!ring->irq_get(ring)))
1022 return -ENODEV;
1023
 1024	/* Record current time in case interrupted by signal, or wedged */
1025 getrawmonotonic(&before);
1026
1027#define EXIT_COND \
1028 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1029 atomic_read(&dev_priv->mm.wedged))
1030 do {
1031 if (interruptible)
1032 end = wait_event_interruptible_timeout(ring->irq_queue,
1033 EXIT_COND,
1034 timeout_jiffies);
1035 else
1036 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1037 timeout_jiffies);
1038
1039 ret = i915_gem_check_wedge(dev_priv, interruptible);
1040 if (ret)
1041 end = ret;
1042 } while (end == 0 && wait_forever);
1043
1044 getrawmonotonic(&now);
1045
1046 ring->irq_put(ring);
1047 trace_i915_gem_request_wait_end(ring, seqno);
1048#undef EXIT_COND
1049
1050 if (timeout) {
1051 struct timespec sleep_time = timespec_sub(now, before);
1052 *timeout = timespec_sub(*timeout, sleep_time);
1053 }
1054
1055 switch (end) {
1056 case -EIO:
1057 case -EAGAIN: /* Wedged */
1058 case -ERESTARTSYS: /* Signal */
1059 return (int)end;
1060 case 0: /* Timeout */
1061 if (timeout)
1062 set_normalized_timespec(timeout, 0, 0);
1063 return -ETIME;
1064 default: /* Completed */
1065 WARN_ON(end < 0); /* We're not aware of other errors */
1066 return 0;
1067 }
1068}
1069
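
__wait_seqno() charges the time actually slept against the caller's budget by sampling a raw monotonic clock before and after the wait and handing back whatever remains through *timeout. A minimal user-space sketch of that bookkeeping, using clock_gettime() and a hand-rolled timespec subtraction in place of the kernel helpers; the quarter-second nanosleep() merely stands in for the real wait:

#include <stdio.h>
#include <time.h>

/* Subtract b from a, normalizing the nanosecond field (like timespec_sub()). */
static struct timespec ts_sub(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (r.tv_nsec < 0) {
		r.tv_sec--;
		r.tv_nsec += 1000000000L;
	}
	return r;
}

int main(void)
{
	struct timespec before, now, budget = { 1, 0 };	/* 1s allowed */
	struct timespec nap = { 0, 250000000L };	/* stand-in for the wait */

	clock_gettime(CLOCK_MONOTONIC_RAW, &before);	/* ~ getrawmonotonic() */
	nanosleep(&nap, NULL);
	clock_gettime(CLOCK_MONOTONIC_RAW, &now);

	/* Report how much of the budget is left, as __wait_seqno() does. */
	budget = ts_sub(budget, ts_sub(now, before));
	printf("remaining: %ld.%09lds\n", (long)budget.tv_sec, budget.tv_nsec);
	return 0;
}
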
1070/**
1071 * Waits for a sequence number to be signaled, and cleans up the
1072 * request and object lists appropriately for that event.
1073 */
1074int
1075i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1076{
1077 struct drm_device *dev = ring->dev;
1078 struct drm_i915_private *dev_priv = dev->dev_private;
1079 bool interruptible = dev_priv->mm.interruptible;
1080 int ret;
1081
1082 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1083 BUG_ON(seqno == 0);
1084
1085 ret = i915_gem_check_wedge(dev_priv, interruptible);
1086 if (ret)
1087 return ret;
1088
1089 ret = i915_gem_check_olr(ring, seqno);
1090 if (ret)
1091 return ret;
1092
1093 return __wait_seqno(ring, seqno, interruptible, NULL);
1094}
1095
1096/**
1097 * Ensures that all rendering to the object has completed and the object is
1098 * safe to unbind from the GTT or access from the CPU.
1099 */
1100static __must_check int
1101i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1102 bool readonly)
1103{
1104 struct intel_ring_buffer *ring = obj->ring;
1105 u32 seqno;
1106 int ret;
1107
1108 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1109 if (seqno == 0)
1110 return 0;
1111
1112 ret = i915_wait_seqno(ring, seqno);
1113 if (ret)
1114 return ret;
1115
1116 i915_gem_retire_requests_ring(ring);
1117
1118 /* Manually manage the write flush as we may have not yet
1119 * retired the buffer.
1120 */
1121 if (obj->last_write_seqno &&
1122 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1123 obj->last_write_seqno = 0;
1124 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1125 }
1126
1127 return 0;
1128}
1129
1130/* A nonblocking variant of the above wait. This is a highly dangerous routine
1131 * as the object state may change during this call.
1132 */
1133static __must_check int
1134i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1135 bool readonly)
1136{
1137 struct drm_device *dev = obj->base.dev;
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139 struct intel_ring_buffer *ring = obj->ring;
1140 u32 seqno;
1141 int ret;
1142
1143 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1144 BUG_ON(!dev_priv->mm.interruptible);
1145
1146 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1147 if (seqno == 0)
1148 return 0;
1149
1150 ret = i915_gem_check_wedge(dev_priv, true);
1151 if (ret)
1152 return ret;
1153
1154 ret = i915_gem_check_olr(ring, seqno);
1155 if (ret)
1156 return ret;
1157
1158 mutex_unlock(&dev->struct_mutex);
1159 ret = __wait_seqno(ring, seqno, true, NULL);
1160 mutex_lock(&dev->struct_mutex);
1161
1162 i915_gem_retire_requests_ring(ring);
1163
1164 /* Manually manage the write flush as we may have not yet
1165 * retired the buffer.
1166 */
1167 if (obj->last_write_seqno &&
1168 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1169 obj->last_write_seqno = 0;
1170 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1171 }
1172
1173 return ret;
1174}
1175
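
The nonblocking variant drops struct_mutex for the duration of the wait, so everything sampled beforehand has to be treated as stale once the lock is retaken. A rough POSIX-threads sketch of that drop-the-lock-around-a-slow-wait shape; the names and the dummy wait are illustrative only, not driver code:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;		/* stands in for the object's GPU state */

static void slow_wait(void)
{
	struct timespec nap = { 0, 100000000L };
	nanosleep(&nap, NULL);		/* stands in for __wait_seqno() */
}

static void update_under_lock(void)
{
	pthread_mutex_lock(&lock);
	int snapshot = shared_state;	/* sample state while locked */
	pthread_mutex_unlock(&lock);	/* never sleep while holding the lock */

	slow_wait();			/* others may change shared_state here */

	pthread_mutex_lock(&lock);
	if (shared_state != snapshot)
		printf("state changed while we slept; re-validate before use\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	update_under_lock();
	return 0;
}
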
943/** 1176/**
944 * Called when user space prepares to use an object with the CPU, either 1177 * Called when user space prepares to use an object with the CPU, either
945 * through the mmap ioctl's mapping or a GTT mapping. 1178 * through the mmap ioctl's mapping or a GTT mapping.
@@ -977,6 +1210,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
977 goto unlock; 1210 goto unlock;
978 } 1211 }
979 1212
1213 /* Try to flush the object off the GPU without holding the lock.
1214 * We will repeat the flush holding the lock in the normal manner
1215 * to catch cases where we are gazumped.
1216 */
1217 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1218 if (ret)
1219 goto unref;
1220
980 if (read_domains & I915_GEM_DOMAIN_GTT) { 1221 if (read_domains & I915_GEM_DOMAIN_GTT) {
981 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1222 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
982 1223
@@ -990,6 +1231,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
990 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1231 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
991 } 1232 }
992 1233
1234unref:
993 drm_gem_object_unreference(&obj->base); 1235 drm_gem_object_unreference(&obj->base);
994unlock: 1236unlock:
995 mutex_unlock(&dev->struct_mutex); 1237 mutex_unlock(&dev->struct_mutex);
@@ -1109,7 +1351,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1109 goto unlock; 1351 goto unlock;
1110 } 1352 }
1111 if (!obj->gtt_space) { 1353 if (!obj->gtt_space) {
1112 ret = i915_gem_object_bind_to_gtt(obj, 0, true); 1354 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1113 if (ret) 1355 if (ret)
1114 goto unlock; 1356 goto unlock;
1115 1357
@@ -1270,6 +1512,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1270 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1512 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1271} 1513}
1272 1514
1515static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1516{
1517 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1518 int ret;
1519
1520 if (obj->base.map_list.map)
1521 return 0;
1522
1523 ret = drm_gem_create_mmap_offset(&obj->base);
1524 if (ret != -ENOSPC)
1525 return ret;
1526
1527 /* Badly fragmented mmap space? The only way we can recover
1528 * space is by destroying unwanted objects. We can't randomly release
1529 * mmap_offsets as userspace expects them to be persistent for the
 1530	 * lifetime of the objects. The closest we can do is to release the
 1531	 * offsets on purgeable objects by truncating them and marking them purged,
 1532	 * which prevents userspace from ever using those objects again.
1533 */
1534 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1535 ret = drm_gem_create_mmap_offset(&obj->base);
1536 if (ret != -ENOSPC)
1537 return ret;
1538
1539 i915_gem_shrink_all(dev_priv);
1540 return drm_gem_create_mmap_offset(&obj->base);
1541}
1542
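
i915_gem_object_create_mmap_offset() retries the allocation with progressively more aggressive reclaim: first purge roughly as many pages as the object needs, then shrink everything before giving up. A small sketch of that escalation strategy with placeholder hooks; try_alloc(), purge_some() and shrink_all() are illustrative stand-ins for drm_gem_create_mmap_offset(), i915_gem_purge() and i915_gem_shrink_all(), not kernel APIs:

#include <errno.h>
#include <stdio.h>

static int  try_alloc(void)      { return -ENOSPC; }	/* pretend space is fragmented */
static void purge_some(long n)   { printf("purged ~%ld pages\n", n); }
static void shrink_all(void)     { printf("shrank everything\n"); }

static int create_offset(long pages_needed)
{
	int ret = try_alloc();
	if (ret != -ENOSPC)
		return ret;		/* success, or a real error: don't reclaim */

	purge_some(pages_needed);	/* cheapest remedy first */
	ret = try_alloc();
	if (ret != -ENOSPC)
		return ret;

	shrink_all();			/* last resort before failing */
	return try_alloc();
}

int main(void)
{
	printf("result: %d\n", create_offset(1024));
	return 0;
}
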
1543static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1544{
1545 if (!obj->base.map_list.map)
1546 return;
1547
1548 drm_gem_free_mmap_offset(&obj->base);
1549}
1550
1273int 1551int
1274i915_gem_mmap_gtt(struct drm_file *file, 1552i915_gem_mmap_gtt(struct drm_file *file,
1275 struct drm_device *dev, 1553 struct drm_device *dev,
@@ -1301,11 +1579,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
1301 goto out; 1579 goto out;
1302 } 1580 }
1303 1581
1304 if (!obj->base.map_list.map) { 1582 ret = i915_gem_object_create_mmap_offset(obj);
1305 ret = drm_gem_create_mmap_offset(&obj->base); 1583 if (ret)
1306 if (ret) 1584 goto out;
1307 goto out;
1308 }
1309 1585
1310 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1586 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1311 1587
@@ -1340,83 +1616,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1340 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1616 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1341} 1617}
1342 1618
1343int 1619/* Immediately discard the backing storage */
1344i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1620static void
1345 gfp_t gfpmask) 1621i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1346{ 1622{
1347 int page_count, i;
1348 struct address_space *mapping;
1349 struct inode *inode; 1623 struct inode *inode;
1350 struct page *page;
1351 1624
1352 if (obj->pages || obj->sg_table) 1625 i915_gem_object_free_mmap_offset(obj);
1353 return 0;
1354 1626
1355 /* Get the list of pages out of our struct file. They'll be pinned 1627 if (obj->base.filp == NULL)
1356 * at this point until we release them. 1628 return;
1357 */
1358 page_count = obj->base.size / PAGE_SIZE;
1359 BUG_ON(obj->pages != NULL);
1360 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1361 if (obj->pages == NULL)
1362 return -ENOMEM;
1363 1629
1630 /* Our goal here is to return as much of the memory as
1631 * is possible back to the system as we are called from OOM.
1632 * To do this we must instruct the shmfs to drop all of its
1633 * backing pages, *now*.
1634 */
1364 inode = obj->base.filp->f_path.dentry->d_inode; 1635 inode = obj->base.filp->f_path.dentry->d_inode;
1365 mapping = inode->i_mapping; 1636 shmem_truncate_range(inode, 0, (loff_t)-1);
1366 gfpmask |= mapping_gfp_mask(mapping);
1367
1368 for (i = 0; i < page_count; i++) {
1369 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1370 if (IS_ERR(page))
1371 goto err_pages;
1372
1373 obj->pages[i] = page;
1374 }
1375
1376 if (i915_gem_object_needs_bit17_swizzle(obj))
1377 i915_gem_object_do_bit_17_swizzle(obj);
1378
1379 return 0;
1380 1637
1381err_pages: 1638 obj->madv = __I915_MADV_PURGED;
1382 while (i--) 1639}
1383 page_cache_release(obj->pages[i]);
1384 1640
1385 drm_free_large(obj->pages); 1641static inline int
1386 obj->pages = NULL; 1642i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1387 return PTR_ERR(page); 1643{
1644 return obj->madv == I915_MADV_DONTNEED;
1388} 1645}
1389 1646
1390static void 1647static void
1391i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1648i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1392{ 1649{
1393 int page_count = obj->base.size / PAGE_SIZE; 1650 int page_count = obj->base.size / PAGE_SIZE;
1394 int i; 1651 struct scatterlist *sg;
1395 1652 int ret, i;
1396 if (!obj->pages)
1397 return;
1398 1653
1399 BUG_ON(obj->madv == __I915_MADV_PURGED); 1654 BUG_ON(obj->madv == __I915_MADV_PURGED);
1400 1655
1656 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1657 if (ret) {
1658 /* In the event of a disaster, abandon all caches and
1659 * hope for the best.
1660 */
1661 WARN_ON(ret != -EIO);
1662 i915_gem_clflush_object(obj);
1663 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1664 }
1665
1401 if (i915_gem_object_needs_bit17_swizzle(obj)) 1666 if (i915_gem_object_needs_bit17_swizzle(obj))
1402 i915_gem_object_save_bit_17_swizzle(obj); 1667 i915_gem_object_save_bit_17_swizzle(obj);
1403 1668
1404 if (obj->madv == I915_MADV_DONTNEED) 1669 if (obj->madv == I915_MADV_DONTNEED)
1405 obj->dirty = 0; 1670 obj->dirty = 0;
1406 1671
1407 for (i = 0; i < page_count; i++) { 1672 for_each_sg(obj->pages->sgl, sg, page_count, i) {
1673 struct page *page = sg_page(sg);
1674
1408 if (obj->dirty) 1675 if (obj->dirty)
1409 set_page_dirty(obj->pages[i]); 1676 set_page_dirty(page);
1410 1677
1411 if (obj->madv == I915_MADV_WILLNEED) 1678 if (obj->madv == I915_MADV_WILLNEED)
1412 mark_page_accessed(obj->pages[i]); 1679 mark_page_accessed(page);
1413 1680
1414 page_cache_release(obj->pages[i]); 1681 page_cache_release(page);
1415 } 1682 }
1416 obj->dirty = 0; 1683 obj->dirty = 0;
1417 1684
1418 drm_free_large(obj->pages); 1685 sg_free_table(obj->pages);
1686 kfree(obj->pages);
1687}
1688
1689static int
1690i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1691{
1692 const struct drm_i915_gem_object_ops *ops = obj->ops;
1693
1694 if (obj->pages == NULL)
1695 return 0;
1696
1697 BUG_ON(obj->gtt_space);
1698
1699 if (obj->pages_pin_count)
1700 return -EBUSY;
1701
1702 ops->put_pages(obj);
1419 obj->pages = NULL; 1703 obj->pages = NULL;
1704
1705 list_del(&obj->gtt_list);
1706 if (i915_gem_object_is_purgeable(obj))
1707 i915_gem_object_truncate(obj);
1708
1709 return 0;
1710}
1711
1712static long
1713i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1714{
1715 struct drm_i915_gem_object *obj, *next;
1716 long count = 0;
1717
1718 list_for_each_entry_safe(obj, next,
1719 &dev_priv->mm.unbound_list,
1720 gtt_list) {
1721 if (i915_gem_object_is_purgeable(obj) &&
1722 i915_gem_object_put_pages(obj) == 0) {
1723 count += obj->base.size >> PAGE_SHIFT;
1724 if (count >= target)
1725 return count;
1726 }
1727 }
1728
1729 list_for_each_entry_safe(obj, next,
1730 &dev_priv->mm.inactive_list,
1731 mm_list) {
1732 if (i915_gem_object_is_purgeable(obj) &&
1733 i915_gem_object_unbind(obj) == 0 &&
1734 i915_gem_object_put_pages(obj) == 0) {
1735 count += obj->base.size >> PAGE_SHIFT;
1736 if (count >= target)
1737 return count;
1738 }
1739 }
1740
1741 return count;
1742}
1743
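
i915_gem_purge() walks the cheap candidates first (objects with no GTT binding) and only then starts unbinding inactive objects, stopping as soon as the requested number of pages has been reclaimed. The ordering is the point; a compact model of it, where the object lists and fields are stand-ins rather than driver structures:

#include <stdio.h>

struct victim {
	long pages;
	int  purgeable;
	int  bound;	/* bound objects must be unbound first: more expensive */
};

/* Reclaim purgeable victims until at least `target` pages are freed,
 * taking unbound ones before touching anything that is still bound. */
static long purge(struct victim *v, int n, long target)
{
	long count = 0;
	int pass, i;

	for (pass = 0; pass < 2 && count < target; pass++) {
		for (i = 0; i < n && count < target; i++) {
			if (!v[i].purgeable || v[i].pages == 0)
				continue;
			if (pass == 0 && v[i].bound)
				continue;	/* first pass: unbound objects only */
			count += v[i].pages;
			v[i].pages = 0;		/* "truncate" the backing store */
		}
	}
	return count;
}

int main(void)
{
	struct victim v[] = {
		{  64, 1, 1 },	/* purgeable but bound: second choice */
		{  32, 1, 0 },	/* purgeable and unbound: taken first */
		{ 128, 0, 0 },	/* not purgeable: never touched */
	};
	printf("reclaimed %ld pages\n", purge(v, 3, 80));
	return 0;
}
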
1744static void
1745i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1746{
1747 struct drm_i915_gem_object *obj, *next;
1748
1749 i915_gem_evict_everything(dev_priv->dev);
1750
1751 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1752 i915_gem_object_put_pages(obj);
1753}
1754
1755static int
1756i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1757{
1758 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1759 int page_count, i;
1760 struct address_space *mapping;
1761 struct sg_table *st;
1762 struct scatterlist *sg;
1763 struct page *page;
1764 gfp_t gfp;
1765
1766 /* Assert that the object is not currently in any GPU domain. As it
1767 * wasn't in the GTT, there shouldn't be any way it could have been in
1768 * a GPU cache
1769 */
1770 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1771 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1772
1773 st = kmalloc(sizeof(*st), GFP_KERNEL);
1774 if (st == NULL)
1775 return -ENOMEM;
1776
1777 page_count = obj->base.size / PAGE_SIZE;
1778 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1779 sg_free_table(st);
1780 kfree(st);
1781 return -ENOMEM;
1782 }
1783
1784 /* Get the list of pages out of our struct file. They'll be pinned
1785 * at this point until we release them.
1786 *
1787 * Fail silently without starting the shrinker
1788 */
1789 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1790 gfp = mapping_gfp_mask(mapping);
1791 gfp |= __GFP_NORETRY | __GFP_NOWARN;
1792 gfp &= ~(__GFP_IO | __GFP_WAIT);
1793 for_each_sg(st->sgl, sg, page_count, i) {
1794 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1795 if (IS_ERR(page)) {
1796 i915_gem_purge(dev_priv, page_count);
1797 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1798 }
1799 if (IS_ERR(page)) {
1800 /* We've tried hard to allocate the memory by reaping
1801 * our own buffer, now let the real VM do its job and
1802 * go down in flames if truly OOM.
1803 */
1804 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
1805 gfp |= __GFP_IO | __GFP_WAIT;
1806
1807 i915_gem_shrink_all(dev_priv);
1808 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1809 if (IS_ERR(page))
1810 goto err_pages;
1811
1812 gfp |= __GFP_NORETRY | __GFP_NOWARN;
1813 gfp &= ~(__GFP_IO | __GFP_WAIT);
1814 }
1815
1816 sg_set_page(sg, page, PAGE_SIZE, 0);
1817 }
1818
1819 if (i915_gem_object_needs_bit17_swizzle(obj))
1820 i915_gem_object_do_bit_17_swizzle(obj);
1821
1822 obj->pages = st;
1823 return 0;
1824
1825err_pages:
1826 for_each_sg(st->sgl, sg, i, page_count)
1827 page_cache_release(sg_page(sg));
1828 sg_free_table(st);
1829 kfree(st);
1830 return PTR_ERR(page);
1831}
1832
1833/* Ensure that the associated pages are gathered from the backing storage
1834 * and pinned into our object. i915_gem_object_get_pages() may be called
1835 * multiple times before they are released by a single call to
1836 * i915_gem_object_put_pages() - once the pages are no longer referenced
1837 * either as a result of memory pressure (reaping pages under the shrinker)
1838 * or as the object is itself released.
1839 */
1840int
1841i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1842{
1843 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1844 const struct drm_i915_gem_object_ops *ops = obj->ops;
1845 int ret;
1846
1847 if (obj->pages)
1848 return 0;
1849
1850 BUG_ON(obj->pages_pin_count);
1851
1852 ret = ops->get_pages(obj);
1853 if (ret)
1854 return ret;
1855
1856 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1857 return 0;
1420} 1858}
1421 1859
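
i915_gem_object_get_pages()/put_pages() now pair up around pages_pin_count: the backing pages are gathered lazily, but cannot be released while anyone still holds a pin. A toy model of that contract; the struct and field names are made up for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_object {
	void *pages;		/* lazily populated backing store */
	int   pages_pin_count;	/* holders that forbid releasing the pages */
};

static int get_pages(struct toy_object *obj)
{
	if (obj->pages)
		return 0;			/* already populated */
	obj->pages = malloc(4096);
	return obj->pages ? 0 : -ENOMEM;
}

static int put_pages(struct toy_object *obj)
{
	if (!obj->pages)
		return 0;
	if (obj->pages_pin_count)
		return -EBUSY;			/* still pinned: refuse to release */
	free(obj->pages);
	obj->pages = NULL;
	return 0;
}

int main(void)
{
	struct toy_object obj = { 0, 0 };

	get_pages(&obj);
	obj.pages_pin_count++;			/* e.g. bound into the GTT */
	printf("put while pinned: %d\n", put_pages(&obj));	/* -EBUSY */
	obj.pages_pin_count--;
	printf("put when idle:    %d\n", put_pages(&obj));	/* 0 */
	return 0;
}
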
1422void 1860void
@@ -1440,7 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1440 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); 1878 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1441 list_move_tail(&obj->ring_list, &ring->active_list); 1879 list_move_tail(&obj->ring_list, &ring->active_list);
1442 1880
1443 obj->last_rendering_seqno = seqno; 1881 obj->last_read_seqno = seqno;
1444 1882
1445 if (obj->fenced_gpu_access) { 1883 if (obj->fenced_gpu_access) {
1446 obj->last_fenced_seqno = seqno; 1884 obj->last_fenced_seqno = seqno;
@@ -1457,97 +1895,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1457} 1895}
1458 1896
1459static void 1897static void
1460i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) 1898i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1461{
1462 list_del_init(&obj->ring_list);
1463 obj->last_rendering_seqno = 0;
1464 obj->last_fenced_seqno = 0;
1465}
1466
1467static void
1468i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1469{ 1899{
1470 struct drm_device *dev = obj->base.dev; 1900 struct drm_device *dev = obj->base.dev;
1471 drm_i915_private_t *dev_priv = dev->dev_private; 1901 struct drm_i915_private *dev_priv = dev->dev_private;
1472 1902
1903 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1473 BUG_ON(!obj->active); 1904 BUG_ON(!obj->active);
1474 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1475
1476 i915_gem_object_move_off_active(obj);
1477}
1478 1905
1479static void 1906 if (obj->pin_count) /* are we a framebuffer? */
1480i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1907 intel_mark_fb_idle(obj);
1481{
1482 struct drm_device *dev = obj->base.dev;
1483 struct drm_i915_private *dev_priv = dev->dev_private;
1484 1908
1485 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1909 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1486 1910
1487 BUG_ON(!list_empty(&obj->gpu_write_list)); 1911 list_del_init(&obj->ring_list);
1488 BUG_ON(!obj->active);
1489 obj->ring = NULL; 1912 obj->ring = NULL;
1490 1913
1491 i915_gem_object_move_off_active(obj); 1914 obj->last_read_seqno = 0;
1915 obj->last_write_seqno = 0;
1916 obj->base.write_domain = 0;
1917
1918 obj->last_fenced_seqno = 0;
1492 obj->fenced_gpu_access = false; 1919 obj->fenced_gpu_access = false;
1493 1920
1494 obj->active = 0; 1921 obj->active = 0;
1495 obj->pending_gpu_write = false;
1496 drm_gem_object_unreference(&obj->base); 1922 drm_gem_object_unreference(&obj->base);
1497 1923
1498 WARN_ON(i915_verify_lists(dev)); 1924 WARN_ON(i915_verify_lists(dev));
1499} 1925}
1500 1926
1501/* Immediately discard the backing storage */
1502static void
1503i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1504{
1505 struct inode *inode;
1506
1507 /* Our goal here is to return as much of the memory as
1508 * is possible back to the system as we are called from OOM.
1509 * To do this we must instruct the shmfs to drop all of its
1510 * backing pages, *now*.
1511 */
1512 inode = obj->base.filp->f_path.dentry->d_inode;
1513 shmem_truncate_range(inode, 0, (loff_t)-1);
1514
1515 if (obj->base.map_list.map)
1516 drm_gem_free_mmap_offset(&obj->base);
1517
1518 obj->madv = __I915_MADV_PURGED;
1519}
1520
1521static inline int
1522i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1523{
1524 return obj->madv == I915_MADV_DONTNEED;
1525}
1526
1527static void
1528i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1529 uint32_t flush_domains)
1530{
1531 struct drm_i915_gem_object *obj, *next;
1532
1533 list_for_each_entry_safe(obj, next,
1534 &ring->gpu_write_list,
1535 gpu_write_list) {
1536 if (obj->base.write_domain & flush_domains) {
1537 uint32_t old_write_domain = obj->base.write_domain;
1538
1539 obj->base.write_domain = 0;
1540 list_del_init(&obj->gpu_write_list);
1541 i915_gem_object_move_to_active(obj, ring,
1542 i915_gem_next_request_seqno(ring));
1543
1544 trace_i915_gem_object_change_domain(obj,
1545 obj->base.read_domains,
1546 old_write_domain);
1547 }
1548 }
1549}
1550
1551static u32 1927static u32
1552i915_gem_get_seqno(struct drm_device *dev) 1928i915_gem_get_seqno(struct drm_device *dev)
1553{ 1929{
@@ -1588,15 +1964,16 @@ i915_add_request(struct intel_ring_buffer *ring,
1588 * is that the flush _must_ happen before the next request, no matter 1964 * is that the flush _must_ happen before the next request, no matter
1589 * what. 1965 * what.
1590 */ 1966 */
1591 if (ring->gpu_caches_dirty) { 1967 ret = intel_ring_flush_all_caches(ring);
1592 ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); 1968 if (ret)
1593 if (ret) 1969 return ret;
1594 return ret;
1595 1970
1596 ring->gpu_caches_dirty = false; 1971 if (request == NULL) {
1972 request = kmalloc(sizeof(*request), GFP_KERNEL);
1973 if (request == NULL)
1974 return -ENOMEM;
1597 } 1975 }
1598 1976
1599 BUG_ON(request == NULL);
1600 seqno = i915_gem_next_request_seqno(ring); 1977 seqno = i915_gem_next_request_seqno(ring);
1601 1978
1602 /* Record the position of the start of the request so that 1979 /* Record the position of the start of the request so that
@@ -1607,8 +1984,10 @@ i915_add_request(struct intel_ring_buffer *ring,
1607 request_ring_position = intel_ring_get_tail(ring); 1984 request_ring_position = intel_ring_get_tail(ring);
1608 1985
1609 ret = ring->add_request(ring, &seqno); 1986 ret = ring->add_request(ring, &seqno);
1610 if (ret) 1987 if (ret) {
1611 return ret; 1988 kfree(request);
1989 return ret;
1990 }
1612 1991
1613 trace_i915_gem_request_add(ring, seqno); 1992 trace_i915_gem_request_add(ring, seqno);
1614 1993
@@ -1618,6 +1997,7 @@ i915_add_request(struct intel_ring_buffer *ring,
1618 request->emitted_jiffies = jiffies; 1997 request->emitted_jiffies = jiffies;
1619 was_empty = list_empty(&ring->request_list); 1998 was_empty = list_empty(&ring->request_list);
1620 list_add_tail(&request->list, &ring->request_list); 1999 list_add_tail(&request->list, &ring->request_list);
2000 request->file_priv = NULL;
1621 2001
1622 if (file) { 2002 if (file) {
1623 struct drm_i915_file_private *file_priv = file->driver_priv; 2003 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1637,13 +2017,13 @@ i915_add_request(struct intel_ring_buffer *ring,
1637 jiffies + 2017 jiffies +
1638 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 2018 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1639 } 2019 }
1640 if (was_empty) 2020 if (was_empty) {
1641 queue_delayed_work(dev_priv->wq, 2021 queue_delayed_work(dev_priv->wq,
1642 &dev_priv->mm.retire_work, HZ); 2022 &dev_priv->mm.retire_work, HZ);
2023 intel_mark_busy(dev_priv->dev);
2024 }
1643 } 2025 }
1644 2026
1645 WARN_ON(!list_empty(&ring->gpu_write_list));
1646
1647 return 0; 2027 return 0;
1648} 2028}
1649 2029
@@ -1685,8 +2065,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1685 struct drm_i915_gem_object, 2065 struct drm_i915_gem_object,
1686 ring_list); 2066 ring_list);
1687 2067
1688 obj->base.write_domain = 0;
1689 list_del_init(&obj->gpu_write_list);
1690 i915_gem_object_move_to_inactive(obj); 2068 i915_gem_object_move_to_inactive(obj);
1691 } 2069 }
1692} 2070}
@@ -1722,20 +2100,6 @@ void i915_gem_reset(struct drm_device *dev)
1722 for_each_ring(ring, dev_priv, i) 2100 for_each_ring(ring, dev_priv, i)
1723 i915_gem_reset_ring_lists(dev_priv, ring); 2101 i915_gem_reset_ring_lists(dev_priv, ring);
1724 2102
1725 /* Remove anything from the flushing lists. The GPU cache is likely
1726 * to be lost on reset along with the data, so simply move the
1727 * lost bo to the inactive list.
1728 */
1729 while (!list_empty(&dev_priv->mm.flushing_list)) {
1730 obj = list_first_entry(&dev_priv->mm.flushing_list,
1731 struct drm_i915_gem_object,
1732 mm_list);
1733
1734 obj->base.write_domain = 0;
1735 list_del_init(&obj->gpu_write_list);
1736 i915_gem_object_move_to_inactive(obj);
1737 }
1738
1739 /* Move everything out of the GPU domains to ensure we do any 2103 /* Move everything out of the GPU domains to ensure we do any
1740 * necessary invalidation upon reuse. 2104 * necessary invalidation upon reuse.
1741 */ 2105 */
@@ -1764,7 +2128,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1764 2128
1765 WARN_ON(i915_verify_lists(ring->dev)); 2129 WARN_ON(i915_verify_lists(ring->dev));
1766 2130
1767 seqno = ring->get_seqno(ring); 2131 seqno = ring->get_seqno(ring, true);
1768 2132
1769 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) 2133 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1770 if (seqno >= ring->sync_seqno[i]) 2134 if (seqno >= ring->sync_seqno[i])
@@ -1803,13 +2167,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1803 struct drm_i915_gem_object, 2167 struct drm_i915_gem_object,
1804 ring_list); 2168 ring_list);
1805 2169
1806 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) 2170 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1807 break; 2171 break;
1808 2172
1809 if (obj->base.write_domain != 0) 2173 i915_gem_object_move_to_inactive(obj);
1810 i915_gem_object_move_to_flushing(obj);
1811 else
1812 i915_gem_object_move_to_inactive(obj);
1813 } 2174 }
1814 2175
1815 if (unlikely(ring->trace_irq_seqno && 2176 if (unlikely(ring->trace_irq_seqno &&
@@ -1858,216 +2219,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
1858 */ 2219 */
1859 idle = true; 2220 idle = true;
1860 for_each_ring(ring, dev_priv, i) { 2221 for_each_ring(ring, dev_priv, i) {
1861 if (ring->gpu_caches_dirty) { 2222 if (ring->gpu_caches_dirty)
1862 struct drm_i915_gem_request *request; 2223 i915_add_request(ring, NULL, NULL);
1863
1864 request = kzalloc(sizeof(*request), GFP_KERNEL);
1865 if (request == NULL ||
1866 i915_add_request(ring, NULL, request))
1867 kfree(request);
1868 }
1869 2224
1870 idle &= list_empty(&ring->request_list); 2225 idle &= list_empty(&ring->request_list);
1871 } 2226 }
1872 2227
1873 if (!dev_priv->mm.suspended && !idle) 2228 if (!dev_priv->mm.suspended && !idle)
1874 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 2229 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2230 if (idle)
2231 intel_mark_idle(dev);
1875 2232
1876 mutex_unlock(&dev->struct_mutex); 2233 mutex_unlock(&dev->struct_mutex);
1877} 2234}
1878 2235
1879int
1880i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1881 bool interruptible)
1882{
1883 if (atomic_read(&dev_priv->mm.wedged)) {
1884 struct completion *x = &dev_priv->error_completion;
1885 bool recovery_complete;
1886 unsigned long flags;
1887
1888 /* Give the error handler a chance to run. */
1889 spin_lock_irqsave(&x->wait.lock, flags);
1890 recovery_complete = x->done > 0;
1891 spin_unlock_irqrestore(&x->wait.lock, flags);
1892
1893 /* Non-interruptible callers can't handle -EAGAIN, hence return
1894 * -EIO unconditionally for these. */
1895 if (!interruptible)
1896 return -EIO;
1897
1898 /* Recovery complete, but still wedged means reset failure. */
1899 if (recovery_complete)
1900 return -EIO;
1901
1902 return -EAGAIN;
1903 }
1904
1905 return 0;
1906}
1907
1908/*
1909 * Compare seqno against outstanding lazy request. Emit a request if they are
1910 * equal.
1911 */
1912static int
1913i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1914{
1915 int ret = 0;
1916
1917 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1918
1919 if (seqno == ring->outstanding_lazy_request) {
1920 struct drm_i915_gem_request *request;
1921
1922 request = kzalloc(sizeof(*request), GFP_KERNEL);
1923 if (request == NULL)
1924 return -ENOMEM;
1925
1926 ret = i915_add_request(ring, NULL, request);
1927 if (ret) {
1928 kfree(request);
1929 return ret;
1930 }
1931
1932 BUG_ON(seqno != request->seqno);
1933 }
1934
1935 return ret;
1936}
1937
1938/**
1939 * __wait_seqno - wait until execution of seqno has finished
1940 * @ring: the ring expected to report seqno
1941 * @seqno: duh!
1942 * @interruptible: do an interruptible wait (normally yes)
1943 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1944 *
1945 * Returns 0 if the seqno was found within the alloted time. Else returns the
1946 * errno with remaining time filled in timeout argument.
1947 */
1948static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1949 bool interruptible, struct timespec *timeout)
1950{
1951 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1952 struct timespec before, now, wait_time={1,0};
1953 unsigned long timeout_jiffies;
1954 long end;
1955 bool wait_forever = true;
1956 int ret;
1957
1958 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1959 return 0;
1960
1961 trace_i915_gem_request_wait_begin(ring, seqno);
1962
1963 if (timeout != NULL) {
1964 wait_time = *timeout;
1965 wait_forever = false;
1966 }
1967
1968 timeout_jiffies = timespec_to_jiffies(&wait_time);
1969
1970 if (WARN_ON(!ring->irq_get(ring)))
1971 return -ENODEV;
1972
1973 /* Record current time in case interrupted by signal, or wedged * */
1974 getrawmonotonic(&before);
1975
1976#define EXIT_COND \
1977 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1978 atomic_read(&dev_priv->mm.wedged))
1979 do {
1980 if (interruptible)
1981 end = wait_event_interruptible_timeout(ring->irq_queue,
1982 EXIT_COND,
1983 timeout_jiffies);
1984 else
1985 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1986 timeout_jiffies);
1987
1988 ret = i915_gem_check_wedge(dev_priv, interruptible);
1989 if (ret)
1990 end = ret;
1991 } while (end == 0 && wait_forever);
1992
1993 getrawmonotonic(&now);
1994
1995 ring->irq_put(ring);
1996 trace_i915_gem_request_wait_end(ring, seqno);
1997#undef EXIT_COND
1998
1999 if (timeout) {
2000 struct timespec sleep_time = timespec_sub(now, before);
2001 *timeout = timespec_sub(*timeout, sleep_time);
2002 }
2003
2004 switch (end) {
2005 case -EIO:
2006 case -EAGAIN: /* Wedged */
2007 case -ERESTARTSYS: /* Signal */
2008 return (int)end;
2009 case 0: /* Timeout */
2010 if (timeout)
2011 set_normalized_timespec(timeout, 0, 0);
2012 return -ETIME;
2013 default: /* Completed */
2014 WARN_ON(end < 0); /* We're not aware of other errors */
2015 return 0;
2016 }
2017}
2018
2019/**
2020 * Waits for a sequence number to be signaled, and cleans up the
2021 * request and object lists appropriately for that event.
2022 */
2023int
2024i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
2025{
2026 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2027 int ret = 0;
2028
2029 BUG_ON(seqno == 0);
2030
2031 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
2032 if (ret)
2033 return ret;
2034
2035 ret = i915_gem_check_olr(ring, seqno);
2036 if (ret)
2037 return ret;
2038
2039 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
2040
2041 return ret;
2042}
2043
2044/**
2045 * Ensures that all rendering to the object has completed and the object is
2046 * safe to unbind from the GTT or access from the CPU.
2047 */
2048int
2049i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2050{
2051 int ret;
2052
2053 /* This function only exists to support waiting for existing rendering,
2054 * not for emitting required flushes.
2055 */
2056 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2057
2058 /* If there is rendering queued on the buffer being evicted, wait for
2059 * it.
2060 */
2061 if (obj->active) {
2062 ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
2063 if (ret)
2064 return ret;
2065 i915_gem_retire_requests_ring(obj->ring);
2066 }
2067
2068 return 0;
2069}
2070
2071/** 2236/**
2072 * Ensures that an object will eventually get non-busy by flushing any required 2237 * Ensures that an object will eventually get non-busy by flushing any required
2073 * write domains, emitting any outstanding lazy request and retiring and 2238 * write domains, emitting any outstanding lazy request and retiring and
@@ -2079,14 +2244,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2079 int ret; 2244 int ret;
2080 2245
2081 if (obj->active) { 2246 if (obj->active) {
2082 ret = i915_gem_object_flush_gpu_write_domain(obj); 2247 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2083 if (ret) 2248 if (ret)
2084 return ret; 2249 return ret;
2085 2250
2086 ret = i915_gem_check_olr(obj->ring,
2087 obj->last_rendering_seqno);
2088 if (ret)
2089 return ret;
2090 i915_gem_retire_requests_ring(obj->ring); 2251 i915_gem_retire_requests_ring(obj->ring);
2091 } 2252 }
2092 2253
@@ -2146,7 +2307,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2146 goto out; 2307 goto out;
2147 2308
2148 if (obj->active) { 2309 if (obj->active) {
2149 seqno = obj->last_rendering_seqno; 2310 seqno = obj->last_read_seqno;
2150 ring = obj->ring; 2311 ring = obj->ring;
2151 } 2312 }
2152 2313
@@ -2201,11 +2362,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2201 return 0; 2362 return 0;
2202 2363
2203 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) 2364 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2204 return i915_gem_object_wait_rendering(obj); 2365 return i915_gem_object_wait_rendering(obj, false);
2205 2366
2206 idx = intel_ring_sync_index(from, to); 2367 idx = intel_ring_sync_index(from, to);
2207 2368
2208 seqno = obj->last_rendering_seqno; 2369 seqno = obj->last_read_seqno;
2209 if (seqno <= from->sync_seqno[idx]) 2370 if (seqno <= from->sync_seqno[idx])
2210 return 0; 2371 return 0;
2211 2372
@@ -2259,6 +2420,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2259 if (obj->pin_count) 2420 if (obj->pin_count)
2260 return -EBUSY; 2421 return -EBUSY;
2261 2422
2423 BUG_ON(obj->pages == NULL);
2424
2262 ret = i915_gem_object_finish_gpu(obj); 2425 ret = i915_gem_object_finish_gpu(obj);
2263 if (ret) 2426 if (ret)
2264 return ret; 2427 return ret;
@@ -2269,22 +2432,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2269 2432
2270 i915_gem_object_finish_gtt(obj); 2433 i915_gem_object_finish_gtt(obj);
2271 2434
2272 /* Move the object to the CPU domain to ensure that
2273 * any possible CPU writes while it's not in the GTT
2274 * are flushed when we go to remap it.
2275 */
2276 if (ret == 0)
2277 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2278 if (ret == -ERESTARTSYS)
2279 return ret;
2280 if (ret) {
2281 /* In the event of a disaster, abandon all caches and
2282 * hope for the best.
2283 */
2284 i915_gem_clflush_object(obj);
2285 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2286 }
2287
2288 /* release the fence reg _after_ flushing */ 2435 /* release the fence reg _after_ flushing */
2289 ret = i915_gem_object_put_fence(obj); 2436 ret = i915_gem_object_put_fence(obj);
2290 if (ret) 2437 if (ret)
@@ -2300,10 +2447,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2300 } 2447 }
2301 i915_gem_gtt_finish_object(obj); 2448 i915_gem_gtt_finish_object(obj);
2302 2449
2303 i915_gem_object_put_pages_gtt(obj); 2450 list_del(&obj->mm_list);
2304 2451 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2305 list_del_init(&obj->gtt_list);
2306 list_del_init(&obj->mm_list);
2307 /* Avoid an unnecessary call to unbind on rebind. */ 2452 /* Avoid an unnecessary call to unbind on rebind. */
2308 obj->map_and_fenceable = true; 2453 obj->map_and_fenceable = true;
2309 2454
@@ -2311,48 +2456,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2311 obj->gtt_space = NULL; 2456 obj->gtt_space = NULL;
2312 obj->gtt_offset = 0; 2457 obj->gtt_offset = 0;
2313 2458
2314 if (i915_gem_object_is_purgeable(obj))
2315 i915_gem_object_truncate(obj);
2316
2317 return ret;
2318}
2319
2320int
2321i915_gem_flush_ring(struct intel_ring_buffer *ring,
2322 uint32_t invalidate_domains,
2323 uint32_t flush_domains)
2324{
2325 int ret;
2326
2327 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2328 return 0;
2329
2330 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2331
2332 ret = ring->flush(ring, invalidate_domains, flush_domains);
2333 if (ret)
2334 return ret;
2335
2336 if (flush_domains & I915_GEM_GPU_DOMAINS)
2337 i915_gem_process_flushing_list(ring, flush_domains);
2338
2339 return 0; 2459 return 0;
2340} 2460}
2341 2461
2342static int i915_ring_idle(struct intel_ring_buffer *ring) 2462static int i915_ring_idle(struct intel_ring_buffer *ring)
2343{ 2463{
2344 int ret; 2464 if (list_empty(&ring->active_list))
2345
2346 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2347 return 0; 2465 return 0;
2348 2466
2349 if (!list_empty(&ring->gpu_write_list)) {
2350 ret = i915_gem_flush_ring(ring,
2351 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2352 if (ret)
2353 return ret;
2354 }
2355
2356 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); 2467 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
2357} 2468}
2358 2469
@@ -2371,10 +2482,6 @@ int i915_gpu_idle(struct drm_device *dev)
2371 ret = i915_ring_idle(ring); 2482 ret = i915_ring_idle(ring);
2372 if (ret) 2483 if (ret)
2373 return ret; 2484 return ret;
2374
2375 /* Is the device fubar? */
2376 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2377 return -EBUSY;
2378 } 2485 }
2379 2486
2380 return 0; 2487 return 0;
@@ -2547,21 +2654,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2547static int 2654static int
2548i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) 2655i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2549{ 2656{
2550 int ret;
2551
2552 if (obj->fenced_gpu_access) {
2553 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2554 ret = i915_gem_flush_ring(obj->ring,
2555 0, obj->base.write_domain);
2556 if (ret)
2557 return ret;
2558 }
2559
2560 obj->fenced_gpu_access = false;
2561 }
2562
2563 if (obj->last_fenced_seqno) { 2657 if (obj->last_fenced_seqno) {
2564 ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); 2658 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2565 if (ret) 2659 if (ret)
2566 return ret; 2660 return ret;
2567 2661
@@ -2574,6 +2668,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2574 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) 2668 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2575 mb(); 2669 mb();
2576 2670
2671 obj->fenced_gpu_access = false;
2577 return 0; 2672 return 0;
2578} 2673}
2579 2674
@@ -2693,18 +2788,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2693 return 0; 2788 return 0;
2694} 2789}
2695 2790
2791static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2792 struct drm_mm_node *gtt_space,
2793 unsigned long cache_level)
2794{
2795 struct drm_mm_node *other;
2796
2797 /* On non-LLC machines we have to be careful when putting differing
2798 * types of snoopable memory together to avoid the prefetcher
 2799	 * crossing memory domains and dying.
2800 */
2801 if (HAS_LLC(dev))
2802 return true;
2803
2804 if (gtt_space == NULL)
2805 return true;
2806
2807 if (list_empty(&gtt_space->node_list))
2808 return true;
2809
2810 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2811 if (other->allocated && !other->hole_follows && other->color != cache_level)
2812 return false;
2813
2814 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2815 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2816 return false;
2817
2818 return true;
2819}
2820
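
i915_gem_valid_gtt_space() treats the cache level as a drm_mm "color" and only has to look at the two neighbouring nodes: a placement is rejected when it would sit flush against a node of a different color with no hole in between. A reduced model of that check, using a plain array instead of drm_mm and illustrative field names:

#include <stdbool.h>
#include <stdio.h>

struct node {
	bool allocated;
	bool hole_follows;	/* gap after this node */
	int  color;		/* stand-in for the cache level */
};

/* Valid iff no occupied neighbour of nodes[idx] abuts it with a different color. */
static bool valid_placement(const struct node *nodes, int count, int idx)
{
	if (idx > 0) {
		const struct node *prev = &nodes[idx - 1];
		if (prev->allocated && !prev->hole_follows &&
		    prev->color != nodes[idx].color)
			return false;
	}
	if (idx + 1 < count) {
		const struct node *next = &nodes[idx + 1];
		if (next->allocated && !nodes[idx].hole_follows &&
		    next->color != nodes[idx].color)
			return false;
	}
	return true;
}

int main(void)
{
	struct node nodes[] = {
		{ true,  false, 0 },	/* uncached neighbour, no hole */
		{ true,  false, 1 },	/* snooped candidate */
		{ false, false, 0 },
	};
	printf("placement valid: %d\n", valid_placement(nodes, 3, 1));	/* 0 */
	return 0;
}
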
2821static void i915_gem_verify_gtt(struct drm_device *dev)
2822{
2823#if WATCH_GTT
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2825 struct drm_i915_gem_object *obj;
2826 int err = 0;
2827
2828 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2829 if (obj->gtt_space == NULL) {
2830 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2831 err++;
2832 continue;
2833 }
2834
2835 if (obj->cache_level != obj->gtt_space->color) {
2836 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2837 obj->gtt_space->start,
2838 obj->gtt_space->start + obj->gtt_space->size,
2839 obj->cache_level,
2840 obj->gtt_space->color);
2841 err++;
2842 continue;
2843 }
2844
2845 if (!i915_gem_valid_gtt_space(dev,
2846 obj->gtt_space,
2847 obj->cache_level)) {
2848 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2849 obj->gtt_space->start,
2850 obj->gtt_space->start + obj->gtt_space->size,
2851 obj->cache_level);
2852 err++;
2853 continue;
2854 }
2855 }
2856
2857 WARN_ON(err);
2858#endif
2859}
2860
2696/** 2861/**
2697 * Finds free space in the GTT aperture and binds the object there. 2862 * Finds free space in the GTT aperture and binds the object there.
2698 */ 2863 */
2699static int 2864static int
2700i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 2865i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2701 unsigned alignment, 2866 unsigned alignment,
2702 bool map_and_fenceable) 2867 bool map_and_fenceable,
2868 bool nonblocking)
2703{ 2869{
2704 struct drm_device *dev = obj->base.dev; 2870 struct drm_device *dev = obj->base.dev;
2705 drm_i915_private_t *dev_priv = dev->dev_private; 2871 drm_i915_private_t *dev_priv = dev->dev_private;
2706 struct drm_mm_node *free_space; 2872 struct drm_mm_node *free_space;
2707 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2708 u32 size, fence_size, fence_alignment, unfenced_alignment; 2873 u32 size, fence_size, fence_alignment, unfenced_alignment;
2709 bool mappable, fenceable; 2874 bool mappable, fenceable;
2710 int ret; 2875 int ret;
@@ -2744,89 +2909,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2744 return -E2BIG; 2909 return -E2BIG;
2745 } 2910 }
2746 2911
2912 ret = i915_gem_object_get_pages(obj);
2913 if (ret)
2914 return ret;
2915
2747 search_free: 2916 search_free:
2748 if (map_and_fenceable) 2917 if (map_and_fenceable)
2749 free_space = 2918 free_space =
2750 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, 2919 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2751 size, alignment, 2920 size, alignment, obj->cache_level,
2752 0, dev_priv->mm.gtt_mappable_end, 2921 0, dev_priv->mm.gtt_mappable_end,
2753 0); 2922 false);
2754 else 2923 else
2755 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2924 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2756 size, alignment, 0); 2925 size, alignment, obj->cache_level,
2926 false);
2757 2927
2758 if (free_space != NULL) { 2928 if (free_space != NULL) {
2759 if (map_and_fenceable) 2929 if (map_and_fenceable)
2760 obj->gtt_space = 2930 obj->gtt_space =
2761 drm_mm_get_block_range_generic(free_space, 2931 drm_mm_get_block_range_generic(free_space,
2762 size, alignment, 0, 2932 size, alignment, obj->cache_level,
2763 0, dev_priv->mm.gtt_mappable_end, 2933 0, dev_priv->mm.gtt_mappable_end,
2764 0); 2934 false);
2765 else 2935 else
2766 obj->gtt_space = 2936 obj->gtt_space =
2767 drm_mm_get_block(free_space, size, alignment); 2937 drm_mm_get_block_generic(free_space,
2938 size, alignment, obj->cache_level,
2939 false);
2768 } 2940 }
2769 if (obj->gtt_space == NULL) { 2941 if (obj->gtt_space == NULL) {
2770 /* If the gtt is empty and we're still having trouble
2771 * fitting our object in, we're out of memory.
2772 */
2773 ret = i915_gem_evict_something(dev, size, alignment, 2942 ret = i915_gem_evict_something(dev, size, alignment,
2774 map_and_fenceable); 2943 obj->cache_level,
2944 map_and_fenceable,
2945 nonblocking);
2775 if (ret) 2946 if (ret)
2776 return ret; 2947 return ret;
2777 2948
2778 goto search_free; 2949 goto search_free;
2779 } 2950 }
2780 2951 if (WARN_ON(!i915_gem_valid_gtt_space(dev,
2781 ret = i915_gem_object_get_pages_gtt(obj, gfpmask); 2952 obj->gtt_space,
2782 if (ret) { 2953 obj->cache_level))) {
2783 drm_mm_put_block(obj->gtt_space); 2954 drm_mm_put_block(obj->gtt_space);
2784 obj->gtt_space = NULL; 2955 obj->gtt_space = NULL;
2785 2956 return -EINVAL;
2786 if (ret == -ENOMEM) {
2787 /* first try to reclaim some memory by clearing the GTT */
2788 ret = i915_gem_evict_everything(dev, false);
2789 if (ret) {
2790 /* now try to shrink everyone else */
2791 if (gfpmask) {
2792 gfpmask = 0;
2793 goto search_free;
2794 }
2795
2796 return -ENOMEM;
2797 }
2798
2799 goto search_free;
2800 }
2801
2802 return ret;
2803 } 2957 }
2804 2958
2959
2805 ret = i915_gem_gtt_prepare_object(obj); 2960 ret = i915_gem_gtt_prepare_object(obj);
2806 if (ret) { 2961 if (ret) {
2807 i915_gem_object_put_pages_gtt(obj);
2808 drm_mm_put_block(obj->gtt_space); 2962 drm_mm_put_block(obj->gtt_space);
2809 obj->gtt_space = NULL; 2963 obj->gtt_space = NULL;
2810 2964 return ret;
2811 if (i915_gem_evict_everything(dev, false))
2812 return ret;
2813
2814 goto search_free;
2815 } 2965 }
2816 2966
2817 if (!dev_priv->mm.aliasing_ppgtt) 2967 if (!dev_priv->mm.aliasing_ppgtt)
2818 i915_gem_gtt_bind_object(obj, obj->cache_level); 2968 i915_gem_gtt_bind_object(obj, obj->cache_level);
2819 2969
2820 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); 2970 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2821 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2971 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2822 2972
2823 /* Assert that the object is not currently in any GPU domain. As it
2824 * wasn't in the GTT, there shouldn't be any way it could have been in
2825 * a GPU cache
2826 */
2827 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2828 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2829
2830 obj->gtt_offset = obj->gtt_space->start; 2973 obj->gtt_offset = obj->gtt_space->start;
2831 2974
2832 fenceable = 2975 fenceable =
@@ -2839,6 +2982,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2839 obj->map_and_fenceable = mappable && fenceable; 2982 obj->map_and_fenceable = mappable && fenceable;
2840 2983
2841 trace_i915_gem_object_bind(obj, map_and_fenceable); 2984 trace_i915_gem_object_bind(obj, map_and_fenceable);
2985 i915_gem_verify_gtt(dev);
2842 return 0; 2986 return 0;
2843} 2987}
2844 2988
@@ -2865,18 +3009,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2865 3009
2866 trace_i915_gem_object_clflush(obj); 3010 trace_i915_gem_object_clflush(obj);
2867 3011
2868 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); 3012 drm_clflush_sg(obj->pages);
2869}
2870
2871/** Flushes any GPU write domain for the object if it's dirty. */
2872static int
2873i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2874{
2875 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2876 return 0;
2877
2878 /* Queue the GPU write cache flushing we need. */
2879 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2880} 3013}
2881 3014
2882/** Flushes the GTT write domain for the object if it's dirty. */ 3015/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2945,16 +3078,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2945 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3078 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2946 return 0; 3079 return 0;
2947 3080
2948 ret = i915_gem_object_flush_gpu_write_domain(obj); 3081 ret = i915_gem_object_wait_rendering(obj, !write);
2949 if (ret) 3082 if (ret)
2950 return ret; 3083 return ret;
2951 3084
2952 if (obj->pending_gpu_write || write) {
2953 ret = i915_gem_object_wait_rendering(obj);
2954 if (ret)
2955 return ret;
2956 }
2957
2958 i915_gem_object_flush_cpu_write_domain(obj); 3085 i915_gem_object_flush_cpu_write_domain(obj);
2959 3086
2960 old_write_domain = obj->base.write_domain; 3087 old_write_domain = obj->base.write_domain;
@@ -2997,6 +3124,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2997 return -EBUSY; 3124 return -EBUSY;
2998 } 3125 }
2999 3126
3127 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3128 ret = i915_gem_object_unbind(obj);
3129 if (ret)
3130 return ret;
3131 }
3132
3000 if (obj->gtt_space) { 3133 if (obj->gtt_space) {
3001 ret = i915_gem_object_finish_gpu(obj); 3134 ret = i915_gem_object_finish_gpu(obj);
3002 if (ret) 3135 if (ret)
@@ -3008,7 +3141,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3008 * registers with snooped memory, so relinquish any fences 3141 * registers with snooped memory, so relinquish any fences
3009 * currently pointing to our region in the aperture. 3142 * currently pointing to our region in the aperture.
3010 */ 3143 */
3011 if (INTEL_INFO(obj->base.dev)->gen < 6) { 3144 if (INTEL_INFO(dev)->gen < 6) {
3012 ret = i915_gem_object_put_fence(obj); 3145 ret = i915_gem_object_put_fence(obj);
3013 if (ret) 3146 if (ret)
3014 return ret; 3147 return ret;
@@ -3019,6 +3152,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3019 if (obj->has_aliasing_ppgtt_mapping) 3152 if (obj->has_aliasing_ppgtt_mapping)
3020 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3153 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3021 obj, cache_level); 3154 obj, cache_level);
3155
3156 obj->gtt_space->color = cache_level;
3022 } 3157 }
3023 3158
3024 if (cache_level == I915_CACHE_NONE) { 3159 if (cache_level == I915_CACHE_NONE) {
@@ -3045,9 +3180,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3045 } 3180 }
3046 3181
3047 obj->cache_level = cache_level; 3182 obj->cache_level = cache_level;
3183 i915_gem_verify_gtt(dev);
3048 return 0; 3184 return 0;
3049} 3185}
3050 3186
3187int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3188 struct drm_file *file)
3189{
3190 struct drm_i915_gem_caching *args = data;
3191 struct drm_i915_gem_object *obj;
3192 int ret;
3193
3194 ret = i915_mutex_lock_interruptible(dev);
3195 if (ret)
3196 return ret;
3197
3198 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3199 if (&obj->base == NULL) {
3200 ret = -ENOENT;
3201 goto unlock;
3202 }
3203
3204 args->caching = obj->cache_level != I915_CACHE_NONE;
3205
3206 drm_gem_object_unreference(&obj->base);
3207unlock:
3208 mutex_unlock(&dev->struct_mutex);
3209 return ret;
3210}
3211
3212int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3213 struct drm_file *file)
3214{
3215 struct drm_i915_gem_caching *args = data;
3216 struct drm_i915_gem_object *obj;
3217 enum i915_cache_level level;
3218 int ret;
3219
3220 ret = i915_mutex_lock_interruptible(dev);
3221 if (ret)
3222 return ret;
3223
3224 switch (args->caching) {
3225 case I915_CACHING_NONE:
3226 level = I915_CACHE_NONE;
3227 break;
3228 case I915_CACHING_CACHED:
3229 level = I915_CACHE_LLC;
3230 break;
3231 default:
3232 return -EINVAL;
3233 }
3234
3235 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3236 if (&obj->base == NULL) {
3237 ret = -ENOENT;
3238 goto unlock;
3239 }
3240
3241 ret = i915_gem_object_set_cache_level(obj, level);
3242
3243 drm_gem_object_unreference(&obj->base);
3244unlock:
3245 mutex_unlock(&dev->struct_mutex);
3246 return ret;
3247}
3248
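
The two new ioctls let user space flip an object between uncached and LLC-cached. A sketch of how a client might drive them, assuming the matching uapi additions to i915_drm.h (DRM_IOCTL_I915_GEM_SET_CACHING, DRM_IOCTL_I915_GEM_GET_CACHING, struct drm_i915_gem_caching and the I915_CACHING_* values) land together with this series, and that the header is available as <drm/i915_drm.h>:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>	/* assumed to carry the new caching uapi */

static int set_caching(int fd, unsigned int handle, unsigned int mode)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.caching = mode;	/* I915_CACHING_NONE or I915_CACHING_CACHED */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}

static int get_caching(int fd, unsigned int handle)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
		return -1;
	return (int)arg.caching;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	unsigned int handle = 1;	/* a real client would use a handle from GEM_CREATE */

	if (fd < 0)
		return 1;
	if (set_caching(fd, handle, I915_CACHING_CACHED) == 0)
		printf("caching now: %d\n", get_caching(fd, handle));
	close(fd);
	return 0;
}
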
3051/* 3249/*
3052 * Prepare buffer for display plane (scanout, cursors, etc). 3250 * Prepare buffer for display plane (scanout, cursors, etc).
3053 * Can be called from an uninterruptible phase (modesetting) and allows 3251 * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3061,10 +3259,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3061 u32 old_read_domains, old_write_domain; 3259 u32 old_read_domains, old_write_domain;
3062 int ret; 3260 int ret;
3063 3261
3064 ret = i915_gem_object_flush_gpu_write_domain(obj);
3065 if (ret)
3066 return ret;
3067
3068 if (pipelined != obj->ring) { 3262 if (pipelined != obj->ring) {
3069 ret = i915_gem_object_sync(obj, pipelined); 3263 ret = i915_gem_object_sync(obj, pipelined);
3070 if (ret) 3264 if (ret)
@@ -3088,7 +3282,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3088 * (e.g. libkms for the bootup splash), we have to ensure that we 3282 * (e.g. libkms for the bootup splash), we have to ensure that we
3089 * always use map_and_fenceable for all scanout buffers. 3283 * always use map_and_fenceable for all scanout buffers.
3090 */ 3284 */
3091 ret = i915_gem_object_pin(obj, alignment, true); 3285 ret = i915_gem_object_pin(obj, alignment, true, false);
3092 if (ret) 3286 if (ret)
3093 return ret; 3287 return ret;
3094 3288
@@ -3100,7 +3294,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3100 /* It should now be out of any other write domains, and we can update 3294 /* It should now be out of any other write domains, and we can update
3101 * the domain values for our changes. 3295 * the domain values for our changes.
3102 */ 3296 */
3103 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3297 obj->base.write_domain = 0;
3104 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3298 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3105 3299
3106 trace_i915_gem_object_change_domain(obj, 3300 trace_i915_gem_object_change_domain(obj,
@@ -3118,13 +3312,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3118 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 3312 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3119 return 0; 3313 return 0;
3120 3314
3121 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3315 ret = i915_gem_object_wait_rendering(obj, false);
3122 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3123 if (ret)
3124 return ret;
3125 }
3126
3127 ret = i915_gem_object_wait_rendering(obj);
3128 if (ret) 3316 if (ret)
3129 return ret; 3317 return ret;
3130 3318
@@ -3148,16 +3336,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3148 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3336 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3149 return 0; 3337 return 0;
3150 3338
3151 ret = i915_gem_object_flush_gpu_write_domain(obj); 3339 ret = i915_gem_object_wait_rendering(obj, !write);
3152 if (ret) 3340 if (ret)
3153 return ret; 3341 return ret;
3154 3342
3155 if (write || obj->pending_gpu_write) {
3156 ret = i915_gem_object_wait_rendering(obj);
3157 if (ret)
3158 return ret;
3159 }
3160
3161 i915_gem_object_flush_gtt_write_domain(obj); 3343 i915_gem_object_flush_gtt_write_domain(obj);
3162 3344
3163 old_write_domain = obj->base.write_domain; 3345 old_write_domain = obj->base.write_domain;
@@ -3237,7 +3419,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3237int 3419int
3238i915_gem_object_pin(struct drm_i915_gem_object *obj, 3420i915_gem_object_pin(struct drm_i915_gem_object *obj,
3239 uint32_t alignment, 3421 uint32_t alignment,
3240 bool map_and_fenceable) 3422 bool map_and_fenceable,
3423 bool nonblocking)
3241{ 3424{
3242 int ret; 3425 int ret;
3243 3426
@@ -3262,7 +3445,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3262 3445
3263 if (obj->gtt_space == NULL) { 3446 if (obj->gtt_space == NULL) {
3264 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3447 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3265 map_and_fenceable); 3448 map_and_fenceable,
3449 nonblocking);
3266 if (ret) 3450 if (ret)
3267 return ret; 3451 return ret;
3268 } 3452 }
@@ -3320,7 +3504,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3320 obj->user_pin_count++; 3504 obj->user_pin_count++;
3321 obj->pin_filp = file; 3505 obj->pin_filp = file;
3322 if (obj->user_pin_count == 1) { 3506 if (obj->user_pin_count == 1) {
3323 ret = i915_gem_object_pin(obj, args->alignment, true); 3507 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3324 if (ret) 3508 if (ret)
3325 goto out; 3509 goto out;
3326 } 3510 }
@@ -3400,6 +3584,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3400 ret = i915_gem_object_flush_active(obj); 3584 ret = i915_gem_object_flush_active(obj);
3401 3585
3402 args->busy = obj->active; 3586 args->busy = obj->active;
3587 if (obj->ring) {
3588 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3589 args->busy |= intel_ring_flag(obj->ring) << 16;
3590 }
3403 3591
3404 drm_gem_object_unreference(&obj->base); 3592 drm_gem_object_unreference(&obj->base);
3405unlock: 3593unlock:
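The busy-ioctl hunk above keeps obj->active in the low half of args->busy and packs intel_ring_flag(obj->ring) into bits 16 and up, with the BUILD_BUG_ON guaranteeing the ring flags fit in 16 bits. A tiny sketch of how a caller could split the two fields back apart; the example value is made up, only the bit layout comes from the hunk.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t busy = 0x00020001;      /* hypothetical ioctl result            */
	int active = busy & 0xffff;      /* low half: is the object still active */
	unsigned int ring = busy >> 16;  /* high half: flag of the ring using it */

	printf("active=%d ring flag=0x%x\n", active, ring);
	return 0;
}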
@@ -3448,9 +3636,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3448 if (obj->madv != __I915_MADV_PURGED) 3636 if (obj->madv != __I915_MADV_PURGED)
3449 obj->madv = args->madv; 3637 obj->madv = args->madv;
3450 3638
3451 /* if the object is no longer bound, discard its backing storage */ 3639 /* if the object is no longer attached, discard its backing storage */
3452 if (i915_gem_object_is_purgeable(obj) && 3640 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3453 obj->gtt_space == NULL)
3454 i915_gem_object_truncate(obj); 3641 i915_gem_object_truncate(obj);
3455 3642
3456 args->retained = obj->madv != __I915_MADV_PURGED; 3643 args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3462,10 +3649,32 @@ unlock:
3462 return ret; 3649 return ret;
3463} 3650}
3464 3651
3652void i915_gem_object_init(struct drm_i915_gem_object *obj,
3653 const struct drm_i915_gem_object_ops *ops)
3654{
3655 INIT_LIST_HEAD(&obj->mm_list);
3656 INIT_LIST_HEAD(&obj->gtt_list);
3657 INIT_LIST_HEAD(&obj->ring_list);
3658 INIT_LIST_HEAD(&obj->exec_list);
3659
3660 obj->ops = ops;
3661
3662 obj->fence_reg = I915_FENCE_REG_NONE;
3663 obj->madv = I915_MADV_WILLNEED;
3664 /* Avoid an unnecessary call to unbind on the first bind. */
3665 obj->map_and_fenceable = true;
3666
3667 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3668}
3669
3670static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3671 .get_pages = i915_gem_object_get_pages_gtt,
3672 .put_pages = i915_gem_object_put_pages_gtt,
3673};
3674
3465struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3675struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3466 size_t size) 3676 size_t size)
3467{ 3677{
3468 struct drm_i915_private *dev_priv = dev->dev_private;
3469 struct drm_i915_gem_object *obj; 3678 struct drm_i915_gem_object *obj;
3470 struct address_space *mapping; 3679 struct address_space *mapping;
3471 u32 mask; 3680 u32 mask;
@@ -3489,7 +3698,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3489 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 3698 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3490 mapping_set_gfp_mask(mapping, mask); 3699 mapping_set_gfp_mask(mapping, mask);
3491 3700
3492 i915_gem_info_add_obj(dev_priv, size); 3701 i915_gem_object_init(obj, &i915_gem_object_ops);
3493 3702
3494 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3703 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3495 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3704 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3511,17 +3720,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3511 } else 3720 } else
3512 obj->cache_level = I915_CACHE_NONE; 3721 obj->cache_level = I915_CACHE_NONE;
3513 3722
3514 obj->base.driver_private = NULL;
3515 obj->fence_reg = I915_FENCE_REG_NONE;
3516 INIT_LIST_HEAD(&obj->mm_list);
3517 INIT_LIST_HEAD(&obj->gtt_list);
3518 INIT_LIST_HEAD(&obj->ring_list);
3519 INIT_LIST_HEAD(&obj->exec_list);
3520 INIT_LIST_HEAD(&obj->gpu_write_list);
3521 obj->madv = I915_MADV_WILLNEED;
3522 /* Avoid an unnecessary call to unbind on the first bind. */
3523 obj->map_and_fenceable = true;
3524
3525 return obj; 3723 return obj;
3526} 3724}
3527 3725
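The i915_gem_object_init()/i915_gem_object_ops hunk above consolidates per-object setup into one helper and routes page allocation through a small ops table, which lets other backends (such as the dma-buf import path further down) plug in their own get_pages/put_pages. A stand-alone sketch of that vtable pattern with stub backends; the names below are illustrative, not the driver's structures.

#include <stdio.h>

struct obj;

struct obj_ops {
	int  (*get_pages)(struct obj *obj);
	void (*put_pages)(struct obj *obj);
};

struct obj {
	const struct obj_ops *ops;
	int has_pages;
};

static int shmem_get_pages(struct obj *obj)
{
	obj->has_pages = 1;
	printf("shmem get_pages\n");
	return 0;
}

static void shmem_put_pages(struct obj *obj)
{
	obj->has_pages = 0;
	printf("shmem put_pages\n");
}

static const struct obj_ops shmem_ops = {
	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
};

/* Models i915_gem_object_init(): shared setup plus the backend ops pointer. */
static void obj_init(struct obj *obj, const struct obj_ops *ops)
{
	obj->ops = ops;
	obj->has_pages = 0;
}

int main(void)
{
	struct obj o;

	obj_init(&o, &shmem_ops);
	o.ops->get_pages(&o);
	o.ops->put_pages(&o);
	return 0;
}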
@@ -3540,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3540 3738
3541 trace_i915_gem_object_destroy(obj); 3739 trace_i915_gem_object_destroy(obj);
3542 3740
3543 if (gem_obj->import_attach)
3544 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3545
3546 if (obj->phys_obj) 3741 if (obj->phys_obj)
3547 i915_gem_detach_phys_object(dev, obj); 3742 i915_gem_detach_phys_object(dev, obj);
3548 3743
@@ -3558,8 +3753,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3558 dev_priv->mm.interruptible = was_interruptible; 3753 dev_priv->mm.interruptible = was_interruptible;
3559 } 3754 }
3560 3755
3561 if (obj->base.map_list.map) 3756 obj->pages_pin_count = 0;
3562 drm_gem_free_mmap_offset(&obj->base); 3757 i915_gem_object_put_pages(obj);
3758 i915_gem_object_free_mmap_offset(obj);
3759
3760 BUG_ON(obj->pages);
3761
3762 if (obj->base.import_attach)
3763 drm_prime_gem_destroy(&obj->base, NULL);
3563 3764
3564 drm_gem_object_release(&obj->base); 3765 drm_gem_object_release(&obj->base);
3565 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3766 i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3590,7 +3791,7 @@ i915_gem_idle(struct drm_device *dev)
3590 3791
3591 /* Under UMS, be paranoid and evict. */ 3792 /* Under UMS, be paranoid and evict. */
3592 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3793 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3593 i915_gem_evict_everything(dev, false); 3794 i915_gem_evict_everything(dev);
3594 3795
3595 i915_gem_reset_fences(dev); 3796 i915_gem_reset_fences(dev);
3596 3797
@@ -3891,7 +4092,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3891 } 4092 }
3892 4093
3893 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4094 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3894 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3895 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 4095 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3896 mutex_unlock(&dev->struct_mutex); 4096 mutex_unlock(&dev->struct_mutex);
3897 4097
@@ -3939,7 +4139,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
3939{ 4139{
3940 INIT_LIST_HEAD(&ring->active_list); 4140 INIT_LIST_HEAD(&ring->active_list);
3941 INIT_LIST_HEAD(&ring->request_list); 4141 INIT_LIST_HEAD(&ring->request_list);
3942 INIT_LIST_HEAD(&ring->gpu_write_list);
3943} 4142}
3944 4143
3945void 4144void
@@ -3949,10 +4148,10 @@ i915_gem_load(struct drm_device *dev)
3949 drm_i915_private_t *dev_priv = dev->dev_private; 4148 drm_i915_private_t *dev_priv = dev->dev_private;
3950 4149
3951 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4150 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3952 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3953 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4151 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4152 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4153 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
3954 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4154 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3955 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3956 for (i = 0; i < I915_NUM_RINGS; i++) 4155 for (i = 0; i < I915_NUM_RINGS; i++)
3957 init_ring_lists(&dev_priv->ring[i]); 4156 init_ring_lists(&dev_priv->ring[i]);
3958 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 4157 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4197,18 +4396,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4197} 4396}
4198 4397
4199static int 4398static int
4200i915_gpu_is_active(struct drm_device *dev)
4201{
4202 drm_i915_private_t *dev_priv = dev->dev_private;
4203 int lists_empty;
4204
4205 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4206 list_empty(&dev_priv->mm.active_list);
4207
4208 return !lists_empty;
4209}
4210
4211static int
4212i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) 4399i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4213{ 4400{
4214 struct drm_i915_private *dev_priv = 4401 struct drm_i915_private *dev_priv =
@@ -4216,60 +4403,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4216 struct drm_i915_private, 4403 struct drm_i915_private,
4217 mm.inactive_shrinker); 4404 mm.inactive_shrinker);
4218 struct drm_device *dev = dev_priv->dev; 4405 struct drm_device *dev = dev_priv->dev;
4219 struct drm_i915_gem_object *obj, *next; 4406 struct drm_i915_gem_object *obj;
4220 int nr_to_scan = sc->nr_to_scan; 4407 int nr_to_scan = sc->nr_to_scan;
4221 int cnt; 4408 int cnt;
4222 4409
4223 if (!mutex_trylock(&dev->struct_mutex)) 4410 if (!mutex_trylock(&dev->struct_mutex))
4224 return 0; 4411 return 0;
4225 4412
4226 /* "fast-path" to count number of available objects */ 4413 if (nr_to_scan) {
4227 if (nr_to_scan == 0) { 4414 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4228 cnt = 0; 4415 if (nr_to_scan > 0)
4229 list_for_each_entry(obj, 4416 i915_gem_shrink_all(dev_priv);
4230 &dev_priv->mm.inactive_list,
4231 mm_list)
4232 cnt++;
4233 mutex_unlock(&dev->struct_mutex);
4234 return cnt / 100 * sysctl_vfs_cache_pressure;
4235 }
4236
4237rescan:
4238 /* first scan for clean buffers */
4239 i915_gem_retire_requests(dev);
4240
4241 list_for_each_entry_safe(obj, next,
4242 &dev_priv->mm.inactive_list,
4243 mm_list) {
4244 if (i915_gem_object_is_purgeable(obj)) {
4245 if (i915_gem_object_unbind(obj) == 0 &&
4246 --nr_to_scan == 0)
4247 break;
4248 }
4249 } 4417 }
4250 4418
4251 /* second pass, evict/count anything still on the inactive list */
4252 cnt = 0; 4419 cnt = 0;
4253 list_for_each_entry_safe(obj, next, 4420 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4254 &dev_priv->mm.inactive_list, 4421 if (obj->pages_pin_count == 0)
4255 mm_list) { 4422 cnt += obj->base.size >> PAGE_SHIFT;
4256 if (nr_to_scan && 4423 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
4257 i915_gem_object_unbind(obj) == 0) 4424 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4258 nr_to_scan--; 4425 cnt += obj->base.size >> PAGE_SHIFT;
4259 else
4260 cnt++;
4261 }
4262 4426
4263 if (nr_to_scan && i915_gpu_is_active(dev)) {
4264 /*
4265 * We are desperate for pages, so as a last resort, wait
4266 * for the GPU to finish and discard whatever we can.
4267 * This has a dramatic impact to reduce the number of
4268 * OOM-killer events whilst running the GPU aggressively.
4269 */
4270 if (i915_gpu_idle(dev) == 0)
4271 goto rescan;
4272 }
4273 mutex_unlock(&dev->struct_mutex); 4427 mutex_unlock(&dev->struct_mutex);
4274 return cnt / 100 * sysctl_vfs_cache_pressure; 4428 return cnt;
4275} 4429}
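The shrinker rewrite at the end of the i915_gem.c diff above stops walking only the inactive list and instead reports how many backing pages could be reclaimed: every unbound object whose pages are not pinned, plus every bound object that is neither pinned in the GTT nor has pinned pages. A stand-alone model of that accounting; the struct and list layout here are simplified stand-ins for the driver's bound/unbound lists.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct obj { size_t size; int pin_count; int pages_pin_count; };

/* Models the new count: pages of unbound objects with unpinned backing store,
 * plus bound-but-unpinned objects, returned directly to the shrinker core. */
static unsigned long count_reclaimable(const struct obj *unbound, int nu,
				       const struct obj *bound, int nb)
{
	unsigned long cnt = 0;
	int i;

	for (i = 0; i < nu; i++)
		if (unbound[i].pages_pin_count == 0)
			cnt += unbound[i].size >> PAGE_SHIFT;

	for (i = 0; i < nb; i++)
		if (bound[i].pin_count == 0 && bound[i].pages_pin_count == 0)
			cnt += bound[i].size >> PAGE_SHIFT;

	return cnt;
}

int main(void)
{
	struct obj unbound[] = { { 1 << 20, 0, 0 } };
	struct obj bound[]   = { { 2 << 20, 1, 0 }, { 4 << 20, 0, 0 } };

	printf("%lu reclaimable pages\n", count_reclaimable(unbound, 1, bound, 2));
	return 0;
}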
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a21c3dccf436..1eb48faf741b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
97 97
98static struct i915_hw_context * 98static struct i915_hw_context *
99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
100static int do_switch(struct drm_i915_gem_object *from_obj, 100static int do_switch(struct i915_hw_context *to);
101 struct i915_hw_context *to, u32 seqno);
102 101
103static int get_context_size(struct drm_device *dev) 102static int get_context_size(struct drm_device *dev)
104{ 103{
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
113 break; 112 break;
114 case 7: 113 case 7:
115 reg = I915_READ(GEN7_CXT_SIZE); 114 reg = I915_READ(GEN7_CXT_SIZE);
116 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 115 if (IS_HASWELL(dev))
116 ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
117 else
118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
117 break; 119 break;
118 default: 120 default:
119 BUG(); 121 BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
219 * default context. 221 * default context.
220 */ 222 */
221 dev_priv->ring[RCS].default_context = ctx; 223 dev_priv->ring[RCS].default_context = ctx;
222 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); 224 ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
223 if (ret) { 225 if (ret)
224 do_destroy(ctx); 226 goto err_destroy;
225 return ret;
226 }
227 227
228 ret = do_switch(NULL, ctx, 0); 228 ret = do_switch(ctx);
229 if (ret) { 229 if (ret)
230 i915_gem_object_unpin(ctx->obj); 230 goto err_unpin;
231 do_destroy(ctx);
232 } else {
233 DRM_DEBUG_DRIVER("Default HW context loaded\n");
234 }
235 231
232 DRM_DEBUG_DRIVER("Default HW context loaded\n");
233 return 0;
234
235err_unpin:
236 i915_gem_object_unpin(ctx->obj);
237err_destroy:
238 do_destroy(ctx);
236 return ret; 239 return ret;
237} 240}
238 241
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
359 return ret; 362 return ret;
360} 363}
361 364
362static int do_switch(struct drm_i915_gem_object *from_obj, 365static int do_switch(struct i915_hw_context *to)
363 struct i915_hw_context *to,
364 u32 seqno)
365{ 366{
366 struct intel_ring_buffer *ring = NULL; 367 struct intel_ring_buffer *ring = to->ring;
368 struct drm_i915_gem_object *from_obj = ring->last_context_obj;
367 u32 hw_flags = 0; 369 u32 hw_flags = 0;
368 int ret; 370 int ret;
369 371
370 BUG_ON(to == NULL);
371 BUG_ON(from_obj != NULL && from_obj->pin_count == 0); 372 BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
372 373
373 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); 374 if (from_obj == to->obj)
375 return 0;
376
377 ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
374 if (ret) 378 if (ret)
375 return ret; 379 return ret;
376 380
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
393 else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ 397 else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
394 hw_flags |= MI_FORCE_RESTORE; 398 hw_flags |= MI_FORCE_RESTORE;
395 399
396 ring = to->ring;
397 ret = mi_set_context(ring, to, hw_flags); 400 ret = mi_set_context(ring, to, hw_flags);
398 if (ret) { 401 if (ret) {
399 i915_gem_object_unpin(to->obj); 402 i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
407 * MI_SET_CONTEXT instead of when the next seqno has completed. 410 * MI_SET_CONTEXT instead of when the next seqno has completed.
408 */ 411 */
409 if (from_obj != NULL) { 412 if (from_obj != NULL) {
413 u32 seqno = i915_gem_next_request_seqno(ring);
410 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 414 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
411 i915_gem_object_move_to_active(from_obj, ring, seqno); 415 i915_gem_object_move_to_active(from_obj, ring, seqno);
412 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 416 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
417 * swapped, but there is no way to do that yet. 421 * swapped, but there is no way to do that yet.
418 */ 422 */
419 from_obj->dirty = 1; 423 from_obj->dirty = 1;
420 BUG_ON(from_obj->ring != to->ring); 424 BUG_ON(from_obj->ring != ring);
421 i915_gem_object_unpin(from_obj); 425 i915_gem_object_unpin(from_obj);
422 426
423 drm_gem_object_unreference(&from_obj->base); 427 drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
448 int to_id) 452 int to_id)
449{ 453{
450 struct drm_i915_private *dev_priv = ring->dev->dev_private; 454 struct drm_i915_private *dev_priv = ring->dev->dev_private;
451 struct drm_i915_file_private *file_priv = NULL;
452 struct i915_hw_context *to; 455 struct i915_hw_context *to;
453 struct drm_i915_gem_object *from_obj = ring->last_context_obj;
454 456
455 if (dev_priv->hw_contexts_disabled) 457 if (dev_priv->hw_contexts_disabled)
456 return 0; 458 return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
458 if (ring != &dev_priv->ring[RCS]) 460 if (ring != &dev_priv->ring[RCS])
459 return 0; 461 return 0;
460 462
461 if (file)
462 file_priv = file->driver_priv;
463
464 if (to_id == DEFAULT_CONTEXT_ID) { 463 if (to_id == DEFAULT_CONTEXT_ID) {
465 to = ring->default_context; 464 to = ring->default_context;
466 } else { 465 } else {
467 to = i915_gem_context_get(file_priv, to_id); 466 if (file == NULL)
467 return -EINVAL;
468
469 to = i915_gem_context_get(file->driver_priv, to_id);
468 if (to == NULL) 470 if (to == NULL)
469 return -ENOENT; 471 return -ENOENT;
470 } 472 }
471 473
472 if (from_obj == to->obj) 474 return do_switch(to);
473 return 0;
474
475 return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
476} 475}
477 476
478int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 477int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
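The i915_gem_context.c changes above fold the ring and the previously loaded context into do_switch() itself, return early when the target context is already current, and unwind failures through labels instead of duplicated cleanup. A stand-alone sketch of that control flow using stub pin/load helpers; it models the shape of the switch, not the actual MI_SET_CONTEXT sequence.

#include <stdio.h>

struct ctx { int id; };

static struct ctx *last_ctx;                    /* models ring->last_context_obj */

static int  pin(struct ctx *c)    { printf("pin %d\n", c->id); return 0; }
static void unpin(struct ctx *c)  { printf("unpin %d\n", c->id); }
static int  load(struct ctx *c)   { printf("switch to %d\n", c->id); return 0; }

static int do_switch(struct ctx *to)
{
	int ret;

	if (last_ctx == to)                     /* already current: nothing to do */
		return 0;

	ret = pin(to);
	if (ret)
		return ret;

	ret = load(to);
	if (ret)
		goto err_unpin;                 /* single unwind path on failure */

	if (last_ctx)
		unpin(last_ctx);
	last_ctx = to;
	return 0;

err_unpin:
	unpin(to);
	return ret;
}

int main(void)
{
	struct ctx a = { 1 }, b = { 2 };

	do_switch(&a);
	do_switch(&a);                          /* no-op: context already loaded */
	do_switch(&b);
	return 0;
}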
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index af199596e792..773ef77b6c22 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
28#include <linux/dma-buf.h> 28#include <linux/dma-buf.h>
29 29
30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, 30static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
31 enum dma_data_direction dir) 31 enum dma_data_direction dir)
32{ 32{
33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv; 33 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
34 struct drm_device *dev = obj->base.dev; 34 struct sg_table *st;
35 int npages = obj->base.size / PAGE_SIZE; 35 struct scatterlist *src, *dst;
36 struct sg_table *sg = NULL; 36 int ret, i;
37 int ret;
38 int nents;
39 37
40 ret = i915_mutex_lock_interruptible(dev); 38 ret = i915_mutex_lock_interruptible(obj->base.dev);
41 if (ret) 39 if (ret)
42 return ERR_PTR(ret); 40 return ERR_PTR(ret);
43 41
44 if (!obj->pages) { 42 ret = i915_gem_object_get_pages(obj);
45 ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); 43 if (ret) {
46 if (ret) 44 st = ERR_PTR(ret);
47 goto out; 45 goto out;
46 }
47
48 /* Copy sg so that we make an independent mapping */
49 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
50 if (st == NULL) {
51 st = ERR_PTR(-ENOMEM);
52 goto out;
48 } 53 }
49 54
50 /* link the pages into an SG then map the sg */ 55 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
51 sg = drm_prime_pages_to_sg(obj->pages, npages); 56 if (ret) {
52 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir); 57 kfree(st);
58 st = ERR_PTR(ret);
59 goto out;
60 }
61
62 src = obj->pages->sgl;
63 dst = st->sgl;
64 for (i = 0; i < obj->pages->nents; i++) {
65 sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
66 dst = sg_next(dst);
67 src = sg_next(src);
68 }
69
70 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
71 sg_free_table(st);
72 kfree(st);
73 st = ERR_PTR(-ENOMEM);
74 goto out;
75 }
76
77 i915_gem_object_pin_pages(obj);
78
53out: 79out:
54 mutex_unlock(&dev->struct_mutex); 80 mutex_unlock(&obj->base.dev->struct_mutex);
55 return sg; 81 return st;
56} 82}
57 83
58static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
59 struct sg_table *sg, enum dma_data_direction dir) 85 struct sg_table *sg,
86 enum dma_data_direction dir)
60{ 87{
61 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
62 sg_free_table(sg); 89 sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
78{ 105{
79 struct drm_i915_gem_object *obj = dma_buf->priv; 106 struct drm_i915_gem_object *obj = dma_buf->priv;
80 struct drm_device *dev = obj->base.dev; 107 struct drm_device *dev = obj->base.dev;
81 int ret; 108 struct scatterlist *sg;
109 struct page **pages;
110 int ret, i;
82 111
83 ret = i915_mutex_lock_interruptible(dev); 112 ret = i915_mutex_lock_interruptible(dev);
84 if (ret) 113 if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
89 goto out_unlock; 118 goto out_unlock;
90 } 119 }
91 120
92 if (!obj->pages) { 121 ret = i915_gem_object_get_pages(obj);
93 ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); 122 if (ret)
94 if (ret) { 123 goto error;
95 mutex_unlock(&dev->struct_mutex);
96 return ERR_PTR(ret);
97 }
98 }
99 124
100 obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); 125 ret = -ENOMEM;
101 if (!obj->dma_buf_vmapping) { 126
102 DRM_ERROR("failed to vmap object\n"); 127 pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
103 goto out_unlock; 128 if (pages == NULL)
104 } 129 goto error;
130
131 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
132 pages[i] = sg_page(sg);
133
134 obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
135 drm_free_large(pages);
136
137 if (!obj->dma_buf_vmapping)
138 goto error;
105 139
106 obj->vmapping_count = 1; 140 obj->vmapping_count = 1;
141 i915_gem_object_pin_pages(obj);
107out_unlock: 142out_unlock:
108 mutex_unlock(&dev->struct_mutex); 143 mutex_unlock(&dev->struct_mutex);
109 return obj->dma_buf_vmapping; 144 return obj->dma_buf_vmapping;
145
146error:
147 mutex_unlock(&dev->struct_mutex);
148 return ERR_PTR(ret);
110} 149}
111 150
112static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 151static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
119 if (ret) 158 if (ret)
120 return; 159 return;
121 160
122 --obj->vmapping_count; 161 if (--obj->vmapping_count == 0) {
123 if (obj->vmapping_count == 0) {
124 vunmap(obj->dma_buf_vmapping); 162 vunmap(obj->dma_buf_vmapping);
125 obj->dma_buf_vmapping = NULL; 163 obj->dma_buf_vmapping = NULL;
164
165 i915_gem_object_unpin_pages(obj);
126 } 166 }
127 mutex_unlock(&dev->struct_mutex); 167 mutex_unlock(&dev->struct_mutex);
128} 168}
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
151 return -EINVAL; 191 return -EINVAL;
152} 192}
153 193
194static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
195{
196 struct drm_i915_gem_object *obj = dma_buf->priv;
197 struct drm_device *dev = obj->base.dev;
198 int ret;
199 bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
200
201 ret = i915_mutex_lock_interruptible(dev);
202 if (ret)
203 return ret;
204
205 ret = i915_gem_object_set_to_cpu_domain(obj, write);
206 mutex_unlock(&dev->struct_mutex);
207 return ret;
208}
209
154static const struct dma_buf_ops i915_dmabuf_ops = { 210static const struct dma_buf_ops i915_dmabuf_ops = {
155 .map_dma_buf = i915_gem_map_dma_buf, 211 .map_dma_buf = i915_gem_map_dma_buf,
156 .unmap_dma_buf = i915_gem_unmap_dma_buf, 212 .unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
162 .mmap = i915_gem_dmabuf_mmap, 218 .mmap = i915_gem_dmabuf_mmap,
163 .vmap = i915_gem_dmabuf_vmap, 219 .vmap = i915_gem_dmabuf_vmap,
164 .vunmap = i915_gem_dmabuf_vunmap, 220 .vunmap = i915_gem_dmabuf_vunmap,
221 .begin_cpu_access = i915_gem_begin_cpu_access,
165}; 222};
166 223
167struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 224struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
168 struct drm_gem_object *gem_obj, int flags) 225 struct drm_gem_object *gem_obj, int flags)
169{ 226{
170 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 227 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
171 228
172 return dma_buf_export(obj, &i915_dmabuf_ops, 229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
173 obj->base.size, 0600); 230}
231
232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
233{
234 struct sg_table *sg;
235
236 sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
237 if (IS_ERR(sg))
238 return PTR_ERR(sg);
239
240 obj->pages = sg;
241 obj->has_dma_mapping = true;
242 return 0;
174} 243}
175 244
245static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
246{
247 dma_buf_unmap_attachment(obj->base.import_attach,
248 obj->pages, DMA_BIDIRECTIONAL);
249 obj->has_dma_mapping = false;
250}
251
252static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
253 .get_pages = i915_gem_object_get_pages_dmabuf,
254 .put_pages = i915_gem_object_put_pages_dmabuf,
255};
256
176struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 257struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
177 struct dma_buf *dma_buf) 258 struct dma_buf *dma_buf)
178{ 259{
179 struct dma_buf_attachment *attach; 260 struct dma_buf_attachment *attach;
180 struct sg_table *sg;
181 struct drm_i915_gem_object *obj; 261 struct drm_i915_gem_object *obj;
182 int npages;
183 int size;
184 int ret; 262 int ret;
185 263
186 /* is this one of own objects? */ 264 /* is this one of own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
198 if (IS_ERR(attach)) 276 if (IS_ERR(attach))
199 return ERR_CAST(attach); 277 return ERR_CAST(attach);
200 278
201 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
202 if (IS_ERR(sg)) {
203 ret = PTR_ERR(sg);
204 goto fail_detach;
205 }
206
207 size = dma_buf->size;
208 npages = size / PAGE_SIZE;
209 279
210 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 280 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
211 if (obj == NULL) { 281 if (obj == NULL) {
212 ret = -ENOMEM; 282 ret = -ENOMEM;
213 goto fail_unmap; 283 goto fail_detach;
214 } 284 }
215 285
216 ret = drm_gem_private_object_init(dev, &obj->base, size); 286 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
217 if (ret) { 287 if (ret) {
218 kfree(obj); 288 kfree(obj);
219 goto fail_unmap; 289 goto fail_detach;
220 } 290 }
221 291
222 obj->sg_table = sg; 292 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
223 obj->base.import_attach = attach; 293 obj->base.import_attach = attach;
224 294
225 return &obj->base; 295 return &obj->base;
226 296
227fail_unmap:
228 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
229fail_detach: 297fail_detach:
230 dma_buf_detach(dma_buf, attach); 298 dma_buf_detach(dma_buf, attach);
231 return ERR_PTR(ret); 299 return ERR_PTR(ret);
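In the dma-buf changes above, map_dma_buf no longer hands out the object's own page list: it builds a fresh sg_table, copies each page entry across, DMA-maps the copy and pins the backing pages, so every attachment gets an independent mapping. A stand-alone model of that copy step over a plain array standing in for the scatterlist; the entry type and helpers are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for one scatterlist entry: just a page "address" here. */
struct entry { unsigned long page; };

/* Models the copy loop in the new map_dma_buf: allocate a fresh table and
 * duplicate the page references so the importer never shares obj->pages. */
static struct entry *copy_table(const struct entry *src, int nents)
{
	struct entry *dst = malloc(nents * sizeof(*dst));
	int i;

	if (!dst)
		return NULL;
	for (i = 0; i < nents; i++)
		dst[i].page = src[i].page;      /* sg_set_page(dst, sg_page(src), ...) */
	return dst;
}

int main(void)
{
	struct entry pages[] = { { 0x1000 }, { 0x2000 }, { 0x3000 } };
	struct entry *copy = copy_table(pages, 3);

	if (!copy)
		return 1;
	printf("copied 3 entries, first page %#lx\n", copy[0].page);
	free(copy);
	return 0;
}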
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index fd408995a783..776a3225184c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,7 +43,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
43 43
44int 44int
45i915_gem_evict_something(struct drm_device *dev, int min_size, 45i915_gem_evict_something(struct drm_device *dev, int min_size,
46 unsigned alignment, bool mappable) 46 unsigned alignment, unsigned cache_level,
47 bool mappable, bool nonblocking)
47{ 48{
48 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
49 struct list_head eviction_list, unwind_list; 50 struct list_head eviction_list, unwind_list;
@@ -78,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
78 INIT_LIST_HEAD(&unwind_list); 79 INIT_LIST_HEAD(&unwind_list);
79 if (mappable) 80 if (mappable)
80 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
81 min_size, alignment, 0, 82 min_size, alignment, cache_level,
82 0, dev_priv->mm.gtt_mappable_end); 83 0, dev_priv->mm.gtt_mappable_end);
83 else 84 else
84 drm_mm_init_scan(&dev_priv->mm.gtt_space, 85 drm_mm_init_scan(&dev_priv->mm.gtt_space,
85 min_size, alignment, 0); 86 min_size, alignment, cache_level);
86 87
87 /* First see if there is a large enough contiguous idle region... */ 88 /* First see if there is a large enough contiguous idle region... */
88 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -90,29 +91,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
90 goto found; 91 goto found;
91 } 92 }
92 93
93 /* Now merge in the soon-to-be-expired objects... */ 94 if (nonblocking)
94 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 95 goto none;
95 /* Does the object require an outstanding flush? */
96 if (obj->base.write_domain)
97 continue;
98
99 if (mark_free(obj, &unwind_list))
100 goto found;
101 }
102 96
103 /* Finally add anything with a pending flush (in order of retirement) */ 97 /* Now merge in the soon-to-be-expired objects... */
104 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
105 if (mark_free(obj, &unwind_list))
106 goto found;
107 }
108 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 98 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
109 if (!obj->base.write_domain)
110 continue;
111
112 if (mark_free(obj, &unwind_list)) 99 if (mark_free(obj, &unwind_list))
113 goto found; 100 goto found;
114 } 101 }
115 102
103none:
116 /* Nothing found, clean up and bail out! */ 104 /* Nothing found, clean up and bail out! */
117 while (!list_empty(&unwind_list)) { 105 while (!list_empty(&unwind_list)) {
118 obj = list_first_entry(&unwind_list, 106 obj = list_first_entry(&unwind_list,
@@ -163,7 +151,7 @@ found:
163} 151}
164 152
165int 153int
166i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) 154i915_gem_evict_everything(struct drm_device *dev)
167{ 155{
168 drm_i915_private_t *dev_priv = dev->dev_private; 156 drm_i915_private_t *dev_priv = dev->dev_private;
169 struct drm_i915_gem_object *obj, *next; 157 struct drm_i915_gem_object *obj, *next;
@@ -171,12 +159,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
171 int ret; 159 int ret;
172 160
173 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 161 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
174 list_empty(&dev_priv->mm.flushing_list) &&
175 list_empty(&dev_priv->mm.active_list)); 162 list_empty(&dev_priv->mm.active_list));
176 if (lists_empty) 163 if (lists_empty)
177 return -ENOSPC; 164 return -ENOSPC;
178 165
179 trace_i915_gem_evict_everything(dev, purgeable_only); 166 trace_i915_gem_evict_everything(dev);
180 167
181 /* The gpu_idle will flush everything in the write domain to the 168 /* The gpu_idle will flush everything in the write domain to the
182 * active list. Then we must move everything off the active list 169 * active list. Then we must move everything off the active list
@@ -188,16 +175,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
188 175
189 i915_gem_retire_requests(dev); 176 i915_gem_retire_requests(dev);
190 177
191 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
192
193 /* Having flushed everything, unbind() should never raise an error */ 178 /* Having flushed everything, unbind() should never raise an error */
194 list_for_each_entry_safe(obj, next, 179 list_for_each_entry_safe(obj, next,
195 &dev_priv->mm.inactive_list, mm_list) { 180 &dev_priv->mm.inactive_list, mm_list)
196 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { 181 if (obj->pin_count == 0)
197 if (obj->pin_count == 0) 182 WARN_ON(i915_gem_object_unbind(obj));
198 WARN_ON(i915_gem_object_unbind(obj));
199 }
200 }
201 183
202 return 0; 184 return 0;
203} 185}
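The eviction changes above drop the flushing list and add a nonblocking mode: the scan considers inactive objects first and only falls through to still-active objects when the caller is allowed to block. A stand-alone model of that two-phase victim search; the node type and sizes are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

struct node { int size; };

static bool fits(const struct node *n, int min_size)
{
	return n->size >= min_size;
}

/* Models the scan order in i915_gem_evict_something(): inactive objects first,
 * and only when nonblocking is false do we also consider active objects. */
static const struct node *find_victim(const struct node *inactive, int ni,
				      const struct node *active, int na,
				      int min_size, bool nonblocking)
{
	int i;

	for (i = 0; i < ni; i++)
		if (fits(&inactive[i], min_size))
			return &inactive[i];

	if (nonblocking)
		return NULL;                    /* don't wait on the GPU */

	for (i = 0; i < na; i++)
		if (fits(&active[i], min_size))
			return &active[i];

	return NULL;
}

int main(void)
{
	struct node inactive[] = { { 4096 } };
	struct node active[]   = { { 65536 } };
	const struct node *n = find_victim(inactive, 1, active, 1, 16384, true);

	printf("%s\n", n ? "found a victim" : "nothing evictable without blocking");
	return 0;
}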
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8dd9a6f47db8..3eea143749f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,180 +33,6 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36struct change_domains {
37 uint32_t invalidate_domains;
38 uint32_t flush_domains;
39 uint32_t flush_rings;
40 uint32_t flips;
41};
42
43/*
44 * Set the next domain for the specified object. This
45 * may not actually perform the necessary flushing/invaliding though,
46 * as that may want to be batched with other set_domain operations
47 *
48 * This is (we hope) the only really tricky part of gem. The goal
49 * is fairly simple -- track which caches hold bits of the object
50 * and make sure they remain coherent. A few concrete examples may
51 * help to explain how it works. For shorthand, we use the notation
52 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
53 * a pair of read and write domain masks.
54 *
55 * Case 1: the batch buffer
56 *
57 * 1. Allocated
58 * 2. Written by CPU
59 * 3. Mapped to GTT
60 * 4. Read by GPU
61 * 5. Unmapped from GTT
62 * 6. Freed
63 *
64 * Let's take these a step at a time
65 *
66 * 1. Allocated
67 * Pages allocated from the kernel may still have
68 * cache contents, so we set them to (CPU, CPU) always.
69 * 2. Written by CPU (using pwrite)
70 * The pwrite function calls set_domain (CPU, CPU) and
71 * this function does nothing (as nothing changes)
72 * 3. Mapped by GTT
73 * This function asserts that the object is not
74 * currently in any GPU-based read or write domains
75 * 4. Read by GPU
76 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
77 * As write_domain is zero, this function adds in the
78 * current read domains (CPU+COMMAND, 0).
79 * flush_domains is set to CPU.
80 * invalidate_domains is set to COMMAND
81 * clflush is run to get data out of the CPU caches
82 * then i915_dev_set_domain calls i915_gem_flush to
83 * emit an MI_FLUSH and drm_agp_chipset_flush
84 * 5. Unmapped from GTT
85 * i915_gem_object_unbind calls set_domain (CPU, CPU)
86 * flush_domains and invalidate_domains end up both zero
87 * so no flushing/invalidating happens
88 * 6. Freed
89 * yay, done
90 *
91 * Case 2: The shared render buffer
92 *
93 * 1. Allocated
94 * 2. Mapped to GTT
95 * 3. Read/written by GPU
96 * 4. set_domain to (CPU,CPU)
97 * 5. Read/written by CPU
98 * 6. Read/written by GPU
99 *
100 * 1. Allocated
101 * Same as last example, (CPU, CPU)
102 * 2. Mapped to GTT
103 * Nothing changes (assertions find that it is not in the GPU)
104 * 3. Read/written by GPU
105 * execbuffer calls set_domain (RENDER, RENDER)
106 * flush_domains gets CPU
107 * invalidate_domains gets GPU
108 * clflush (obj)
109 * MI_FLUSH and drm_agp_chipset_flush
110 * 4. set_domain (CPU, CPU)
111 * flush_domains gets GPU
112 * invalidate_domains gets CPU
113 * wait_rendering (obj) to make sure all drawing is complete.
114 * This will include an MI_FLUSH to get the data from GPU
115 * to memory
116 * clflush (obj) to invalidate the CPU cache
117 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
118 * 5. Read/written by CPU
119 * cache lines are loaded and dirtied
120 * 6. Read written by GPU
121 * Same as last GPU access
122 *
123 * Case 3: The constant buffer
124 *
125 * 1. Allocated
126 * 2. Written by CPU
127 * 3. Read by GPU
128 * 4. Updated (written) by CPU again
129 * 5. Read by GPU
130 *
131 * 1. Allocated
132 * (CPU, CPU)
133 * 2. Written by CPU
134 * (CPU, CPU)
135 * 3. Read by GPU
136 * (CPU+RENDER, 0)
137 * flush_domains = CPU
138 * invalidate_domains = RENDER
139 * clflush (obj)
140 * MI_FLUSH
141 * drm_agp_chipset_flush
142 * 4. Updated (written) by CPU again
143 * (CPU, CPU)
144 * flush_domains = 0 (no previous write domain)
145 * invalidate_domains = 0 (no new read domains)
146 * 5. Read by GPU
147 * (CPU+RENDER, 0)
148 * flush_domains = CPU
149 * invalidate_domains = RENDER
150 * clflush (obj)
151 * MI_FLUSH
152 * drm_agp_chipset_flush
153 */
154static void
155i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
156 struct intel_ring_buffer *ring,
157 struct change_domains *cd)
158{
159 uint32_t invalidate_domains = 0, flush_domains = 0;
160
161 /*
162 * If the object isn't moving to a new write domain,
163 * let the object stay in multiple read domains
164 */
165 if (obj->base.pending_write_domain == 0)
166 obj->base.pending_read_domains |= obj->base.read_domains;
167
168 /*
169 * Flush the current write domain if
170 * the new read domains don't match. Invalidate
171 * any read domains which differ from the old
172 * write domain
173 */
174 if (obj->base.write_domain &&
175 (((obj->base.write_domain != obj->base.pending_read_domains ||
176 obj->ring != ring)) ||
177 (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
178 flush_domains |= obj->base.write_domain;
179 invalidate_domains |=
180 obj->base.pending_read_domains & ~obj->base.write_domain;
181 }
182 /*
183 * Invalidate any read caches which may have
184 * stale data. That is, any new read domains.
185 */
186 invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
187 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
188 i915_gem_clflush_object(obj);
189
190 if (obj->base.pending_write_domain)
191 cd->flips |= atomic_read(&obj->pending_flip);
192
193 /* The actual obj->write_domain will be updated with
194 * pending_write_domain after we emit the accumulated flush for all
195 * of our domain changes in execbuffers (which clears objects'
196 * write_domains). So if we have a current write domain that we
197 * aren't changing, set pending_write_domain to that.
198 */
199 if (flush_domains == 0 && obj->base.pending_write_domain == 0)
200 obj->base.pending_write_domain = obj->base.write_domain;
201
202 cd->invalidate_domains |= invalidate_domains;
203 cd->flush_domains |= flush_domains;
204 if (flush_domains & I915_GEM_GPU_DOMAINS)
205 cd->flush_rings |= intel_ring_flag(obj->ring);
206 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
207 cd->flush_rings |= intel_ring_flag(ring);
208}
209
210struct eb_objects { 36struct eb_objects {
211 int and; 37 int and;
212 struct hlist_head buckets[0]; 38 struct hlist_head buckets[0];
@@ -217,6 +43,7 @@ eb_create(int size)
217{ 43{
218 struct eb_objects *eb; 44 struct eb_objects *eb;
219 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 45 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
46 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
220 while (count > size) 47 while (count > size)
221 count >>= 1; 48 count >>= 1;
222 eb = kzalloc(count*sizeof(struct hlist_head) + 49 eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -268,6 +95,7 @@ eb_destroy(struct eb_objects *eb)
268static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) 95static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
269{ 96{
270 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || 97 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
98 !obj->map_and_fenceable ||
271 obj->cache_level != I915_CACHE_NONE); 99 obj->cache_level != I915_CACHE_NONE);
272} 100}
273 101
@@ -382,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
382 if (ret) 210 if (ret)
383 return ret; 211 return ret;
384 212
385 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); 213 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
214 reloc->offset >> PAGE_SHIFT));
386 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 215 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
387 kunmap_atomic(vaddr); 216 kunmap_atomic(vaddr);
388 } else { 217 } else {
@@ -503,7 +332,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
503 return ret; 332 return ret;
504} 333}
505 334
506#define __EXEC_OBJECT_HAS_FENCE (1<<31) 335#define __EXEC_OBJECT_HAS_PIN (1<<31)
336#define __EXEC_OBJECT_HAS_FENCE (1<<30)
507 337
508static int 338static int
509need_reloc_mappable(struct drm_i915_gem_object *obj) 339need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -513,9 +343,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
513} 343}
514 344
515static int 345static int
516pin_and_fence_object(struct drm_i915_gem_object *obj, 346i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
517 struct intel_ring_buffer *ring) 347 struct intel_ring_buffer *ring)
518{ 348{
349 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
519 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 350 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
520 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 351 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
521 bool need_fence, need_mappable; 352 bool need_fence, need_mappable;
@@ -527,15 +358,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
527 obj->tiling_mode != I915_TILING_NONE; 358 obj->tiling_mode != I915_TILING_NONE;
528 need_mappable = need_fence || need_reloc_mappable(obj); 359 need_mappable = need_fence || need_reloc_mappable(obj);
529 360
530 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); 361 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
531 if (ret) 362 if (ret)
532 return ret; 363 return ret;
533 364
365 entry->flags |= __EXEC_OBJECT_HAS_PIN;
366
534 if (has_fenced_gpu_access) { 367 if (has_fenced_gpu_access) {
535 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 368 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
536 ret = i915_gem_object_get_fence(obj); 369 ret = i915_gem_object_get_fence(obj);
537 if (ret) 370 if (ret)
538 goto err_unpin; 371 return ret;
539 372
540 if (i915_gem_object_pin_fence(obj)) 373 if (i915_gem_object_pin_fence(obj))
541 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 374 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -544,12 +377,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
544 } 377 }
545 } 378 }
546 379
380 /* Ensure ppgtt mapping exists if needed */
381 if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
382 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
383 obj, obj->cache_level);
384
385 obj->has_aliasing_ppgtt_mapping = 1;
386 }
387
547 entry->offset = obj->gtt_offset; 388 entry->offset = obj->gtt_offset;
548 return 0; 389 return 0;
390}
549 391
550err_unpin: 392static void
551 i915_gem_object_unpin(obj); 393i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
552 return ret; 394{
395 struct drm_i915_gem_exec_object2 *entry;
396
397 if (!obj->gtt_space)
398 return;
399
400 entry = obj->exec_entry;
401
402 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
403 i915_gem_object_unpin_fence(obj);
404
405 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
406 i915_gem_object_unpin(obj);
407
408 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
553} 409}
554 410
555static int 411static int
@@ -557,11 +413,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
557 struct drm_file *file, 413 struct drm_file *file,
558 struct list_head *objects) 414 struct list_head *objects)
559{ 415{
560 drm_i915_private_t *dev_priv = ring->dev->dev_private;
561 struct drm_i915_gem_object *obj; 416 struct drm_i915_gem_object *obj;
562 int ret, retry;
563 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
564 struct list_head ordered_objects; 417 struct list_head ordered_objects;
418 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
419 int retry;
565 420
566 INIT_LIST_HEAD(&ordered_objects); 421 INIT_LIST_HEAD(&ordered_objects);
567 while (!list_empty(objects)) { 422 while (!list_empty(objects)) {
@@ -586,6 +441,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
586 441
587 obj->base.pending_read_domains = 0; 442 obj->base.pending_read_domains = 0;
588 obj->base.pending_write_domain = 0; 443 obj->base.pending_write_domain = 0;
444 obj->pending_fenced_gpu_access = false;
589 } 445 }
590 list_splice(&ordered_objects, objects); 446 list_splice(&ordered_objects, objects);
591 447
@@ -598,12 +454,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
598 * 2. Bind new objects. 454 * 2. Bind new objects.
599 * 3. Decrement pin count. 455 * 3. Decrement pin count.
600 * 456 *
601 * This avoid unnecessary unbinding of later objects in order to makr 457 * This avoid unnecessary unbinding of later objects in order to make
602 * room for the earlier objects *unless* we need to defragment. 458 * room for the earlier objects *unless* we need to defragment.
603 */ 459 */
604 retry = 0; 460 retry = 0;
605 do { 461 do {
606 ret = 0; 462 int ret = 0;
607 463
608 /* Unbind any ill-fitting objects or pin. */ 464 /* Unbind any ill-fitting objects or pin. */
609 list_for_each_entry(obj, objects, exec_list) { 465 list_for_each_entry(obj, objects, exec_list) {
@@ -623,7 +479,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
623 (need_mappable && !obj->map_and_fenceable)) 479 (need_mappable && !obj->map_and_fenceable))
624 ret = i915_gem_object_unbind(obj); 480 ret = i915_gem_object_unbind(obj);
625 else 481 else
626 ret = pin_and_fence_object(obj, ring); 482 ret = i915_gem_execbuffer_reserve_object(obj, ring);
627 if (ret) 483 if (ret)
628 goto err; 484 goto err;
629 } 485 }
@@ -633,77 +489,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
633 if (obj->gtt_space) 489 if (obj->gtt_space)
634 continue; 490 continue;
635 491
636 ret = pin_and_fence_object(obj, ring); 492 ret = i915_gem_execbuffer_reserve_object(obj, ring);
637 if (ret) { 493 if (ret)
638 int ret_ignore; 494 goto err;
639
640 /* This can potentially raise a harmless
641 * -EINVAL if we failed to bind in the above
642 * call. It cannot raise -EINTR since we know
643 * that the bo is freshly bound and so will
644 * not need to be flushed or waited upon.
645 */
646 ret_ignore = i915_gem_object_unbind(obj);
647 (void)ret_ignore;
648 WARN_ON(obj->gtt_space);
649 break;
650 }
651 } 495 }
652 496
653 /* Decrement pin count for bound objects */ 497err: /* Decrement pin count for bound objects */
654 list_for_each_entry(obj, objects, exec_list) { 498 list_for_each_entry(obj, objects, exec_list)
655 struct drm_i915_gem_exec_object2 *entry; 499 i915_gem_execbuffer_unreserve_object(obj);
656
657 if (!obj->gtt_space)
658 continue;
659
660 entry = obj->exec_entry;
661 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
662 i915_gem_object_unpin_fence(obj);
663 entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
664 }
665
666 i915_gem_object_unpin(obj);
667
668 /* ... and ensure ppgtt mapping exist if needed. */
669 if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
670 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
671 obj, obj->cache_level);
672 500
673 obj->has_aliasing_ppgtt_mapping = 1; 501 if (ret != -ENOSPC || retry++)
674 }
675 }
676
677 if (ret != -ENOSPC || retry > 1)
678 return ret; 502 return ret;
679 503
680 /* First attempt, just clear anything that is purgeable. 504 ret = i915_gem_evict_everything(ring->dev);
681 * Second attempt, clear the entire GTT.
682 */
683 ret = i915_gem_evict_everything(ring->dev, retry == 0);
684 if (ret) 505 if (ret)
685 return ret; 506 return ret;
686
687 retry++;
688 } while (1); 507 } while (1);
689
690err:
691 list_for_each_entry_continue_reverse(obj, objects, exec_list) {
692 struct drm_i915_gem_exec_object2 *entry;
693
694 if (!obj->gtt_space)
695 continue;
696
697 entry = obj->exec_entry;
698 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
699 i915_gem_object_unpin_fence(obj);
700 entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
701 }
702
703 i915_gem_object_unpin(obj);
704 }
705
706 return ret;
707} 508}
708 509
709static int 510static int
@@ -809,18 +610,6 @@ err:
809 return ret; 610 return ret;
810} 611}
811 612
812static void
813i915_gem_execbuffer_flush(struct drm_device *dev,
814 uint32_t invalidate_domains,
815 uint32_t flush_domains)
816{
817 if (flush_domains & I915_GEM_DOMAIN_CPU)
818 intel_gtt_chipset_flush();
819
820 if (flush_domains & I915_GEM_DOMAIN_GTT)
821 wmb();
822}
823
824static int 613static int
825i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) 614i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
826{ 615{
@@ -853,48 +642,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
853 return 0; 642 return 0;
854} 643}
855 644
856
857static int 645static int
858i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 646i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
859 struct list_head *objects) 647 struct list_head *objects)
860{ 648{
861 struct drm_i915_gem_object *obj; 649 struct drm_i915_gem_object *obj;
862 struct change_domains cd; 650 uint32_t flush_domains = 0;
651 uint32_t flips = 0;
863 int ret; 652 int ret;
864 653
865 memset(&cd, 0, sizeof(cd)); 654 list_for_each_entry(obj, objects, exec_list) {
866 list_for_each_entry(obj, objects, exec_list) 655 ret = i915_gem_object_sync(obj, ring);
867 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
868
869 if (cd.invalidate_domains | cd.flush_domains) {
870 i915_gem_execbuffer_flush(ring->dev,
871 cd.invalidate_domains,
872 cd.flush_domains);
873 }
874
875 if (cd.flips) {
876 ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
877 if (ret) 656 if (ret)
878 return ret; 657 return ret;
658
659 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
660 i915_gem_clflush_object(obj);
661
662 if (obj->base.pending_write_domain)
663 flips |= atomic_read(&obj->pending_flip);
664
665 flush_domains |= obj->base.write_domain;
879 } 666 }
880 667
881 list_for_each_entry(obj, objects, exec_list) { 668 if (flips) {
882 ret = i915_gem_object_sync(obj, ring); 669 ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
883 if (ret) 670 if (ret)
884 return ret; 671 return ret;
885 } 672 }
886 673
674 if (flush_domains & I915_GEM_DOMAIN_CPU)
675 intel_gtt_chipset_flush();
676
677 if (flush_domains & I915_GEM_DOMAIN_GTT)
678 wmb();
679
887 /* Unconditionally invalidate gpu caches and ensure that we do flush 680 /* Unconditionally invalidate gpu caches and ensure that we do flush
888 * any residual writes from the previous batch. 681 * any residual writes from the previous batch.
889 */ 682 */
890 ret = i915_gem_flush_ring(ring, 683 return intel_ring_invalidate_all_caches(ring);
891 I915_GEM_GPU_DOMAINS,
892 ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
893 if (ret)
894 return ret;
895
896 ring->gpu_caches_dirty = false;
897 return 0;
898} 684}
899 685
900static bool 686static bool
@@ -942,9 +728,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
942 struct drm_i915_gem_object *obj; 728 struct drm_i915_gem_object *obj;
943 729
944 list_for_each_entry(obj, objects, exec_list) { 730 list_for_each_entry(obj, objects, exec_list) {
945 u32 old_read = obj->base.read_domains; 731 u32 old_read = obj->base.read_domains;
946 u32 old_write = obj->base.write_domain; 732 u32 old_write = obj->base.write_domain;
947
948 733
949 obj->base.read_domains = obj->base.pending_read_domains; 734 obj->base.read_domains = obj->base.pending_read_domains;
950 obj->base.write_domain = obj->base.pending_write_domain; 735 obj->base.write_domain = obj->base.pending_write_domain;
@@ -953,17 +738,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
953 i915_gem_object_move_to_active(obj, ring, seqno); 738 i915_gem_object_move_to_active(obj, ring, seqno);
954 if (obj->base.write_domain) { 739 if (obj->base.write_domain) {
955 obj->dirty = 1; 740 obj->dirty = 1;
956 obj->pending_gpu_write = true; 741 obj->last_write_seqno = seqno;
957 list_move_tail(&obj->gpu_write_list,
958 &ring->gpu_write_list);
959 if (obj->pin_count) /* check for potential scanout */ 742 if (obj->pin_count) /* check for potential scanout */
960 intel_mark_busy(ring->dev, obj); 743 intel_mark_fb_busy(obj);
961 } 744 }
962 745
963 trace_i915_gem_object_change_domain(obj, old_read, old_write); 746 trace_i915_gem_object_change_domain(obj, old_read, old_write);
964 } 747 }
965
966 intel_mark_busy(ring->dev, NULL);
967} 748}
968 749
969static void 750static void
@@ -971,16 +752,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
971 struct drm_file *file, 752 struct drm_file *file,
972 struct intel_ring_buffer *ring) 753 struct intel_ring_buffer *ring)
973{ 754{
974 struct drm_i915_gem_request *request;
975
976 /* Unconditionally force add_request to emit a full flush. */ 755 /* Unconditionally force add_request to emit a full flush. */
977 ring->gpu_caches_dirty = true; 756 ring->gpu_caches_dirty = true;
978 757
979 /* Add a breadcrumb for the completion of the batch buffer */ 758 /* Add a breadcrumb for the completion of the batch buffer */
980 request = kzalloc(sizeof(*request), GFP_KERNEL); 759 (void)i915_add_request(ring, file, NULL);
981 if (request == NULL || i915_add_request(ring, file, request)) {
982 kfree(request);
983 }
984} 760}
985 761
986static int 762static int
@@ -1326,8 +1102,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1326 return -ENOMEM; 1102 return -ENOMEM;
1327 } 1103 }
1328 ret = copy_from_user(exec_list, 1104 ret = copy_from_user(exec_list,
1329 (struct drm_i915_relocation_entry __user *) 1105 (void __user *)(uintptr_t)args->buffers_ptr,
1330 (uintptr_t) args->buffers_ptr,
1331 sizeof(*exec_list) * args->buffer_count); 1106 sizeof(*exec_list) * args->buffer_count);
1332 if (ret != 0) { 1107 if (ret != 0) {
1333 DRM_DEBUG("copy %d exec entries failed %d\n", 1108 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1366,8 +1141,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1366 for (i = 0; i < args->buffer_count; i++) 1141 for (i = 0; i < args->buffer_count; i++)
1367 exec_list[i].offset = exec2_list[i].offset; 1142 exec_list[i].offset = exec2_list[i].offset;
1368 /* ... and back out to userspace */ 1143 /* ... and back out to userspace */
1369 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1144 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1370 (uintptr_t) args->buffers_ptr,
1371 exec_list, 1145 exec_list,
1372 sizeof(*exec_list) * args->buffer_count); 1146 sizeof(*exec_list) * args->buffer_count);
1373 if (ret) { 1147 if (ret) {
@@ -1421,8 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1421 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1195 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1422 if (!ret) { 1196 if (!ret) {
1423 /* Copy the new buffer offsets back to the user's exec list. */ 1197 /* Copy the new buffer offsets back to the user's exec list. */
1424 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1198 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1425 (uintptr_t) args->buffers_ptr,
1426 exec2_list, 1199 exec2_list,
1427 sizeof(*exec2_list) * args->buffer_count); 1200 sizeof(*exec2_list) * args->buffer_count);
1428 if (ret) { 1201 if (ret) {
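The execbuffer rework above replaces the hand-rolled pin/fence cleanup with a single unreserve helper and simplifies the retry policy: on -ENOSPC the reservation loop evicts everything once and tries again, otherwise it gives up. A stand-alone model of that retry loop; the reserve/evict helpers and the fake GTT size are stand-ins, only the loop shape mirrors the hunk.

#include <errno.h>
#include <stdio.h>

static int space_left = 1;                      /* pretend GTT with room for one object */

static int reserve_all(int nobjs)
{
	if (nobjs > space_left)
		return -ENOSPC;
	printf("reserved %d object(s)\n", nobjs);
	return 0;
}

static void evict_everything(void)
{
	space_left = 4;                         /* models i915_gem_evict_everything() */
	printf("evicted everything\n");
}

int main(void)
{
	int retry = 0, ret;

	do {
		ret = reserve_all(3);
		if (ret != -ENOSPC || retry++)  /* only one eviction pass, as in the hunk */
			break;
		evict_everything();
	} while (1);

	return ret ? 1 : 0;
}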
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 69261acb94b3..df470b5e8d36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -166,8 +166,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
166} 166}
167 167
168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
169 struct scatterlist *sg_list, 169 const struct sg_table *pages,
170 unsigned sg_len,
171 unsigned first_entry, 170 unsigned first_entry,
172 uint32_t pte_flags) 171 uint32_t pte_flags)
173{ 172{
@@ -179,12 +178,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
179 struct scatterlist *sg; 178 struct scatterlist *sg;
180 179
181 /* init sg walking */ 180 /* init sg walking */
182 sg = sg_list; 181 sg = pages->sgl;
183 i = 0; 182 i = 0;
184 segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 183 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
185 m = 0; 184 m = 0;
186 185
187 while (i < sg_len) { 186 while (i < pages->nents) {
188 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); 187 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
189 188
190 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 189 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -193,13 +192,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
193 pt_vaddr[j] = pte | pte_flags; 192 pt_vaddr[j] = pte | pte_flags;
194 193
195 /* grab the next page */ 194 /* grab the next page */
196 m++; 195 if (++m == segment_len) {
197 if (m == segment_len) { 196 if (++i == pages->nents)
198 sg = sg_next(sg);
199 i++;
200 if (i == sg_len)
201 break; 197 break;
202 198
199 sg = sg_next(sg);
203 segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 200 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
204 m = 0; 201 m = 0;
205 } 202 }
@@ -212,44 +209,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
212 } 209 }
213} 210}
214 211
215static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
216 unsigned first_entry, unsigned num_entries,
217 struct page **pages, uint32_t pte_flags)
218{
219 uint32_t *pt_vaddr, pte;
220 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
221 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
222 unsigned last_pte, i;
223 dma_addr_t page_addr;
224
225 while (num_entries) {
226 last_pte = first_pte + num_entries;
227 last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
228
229 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
230
231 for (i = first_pte; i < last_pte; i++) {
232 page_addr = page_to_phys(*pages);
233 pte = GEN6_PTE_ADDR_ENCODE(page_addr);
234 pt_vaddr[i] = pte | pte_flags;
235
236 pages++;
237 }
238
239 kunmap_atomic(pt_vaddr);
240
241 num_entries -= last_pte - first_pte;
242 first_pte = 0;
243 act_pd++;
244 }
245}
246
247void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 212void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
248 struct drm_i915_gem_object *obj, 213 struct drm_i915_gem_object *obj,
249 enum i915_cache_level cache_level) 214 enum i915_cache_level cache_level)
250{ 215{
251 struct drm_device *dev = obj->base.dev;
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t pte_flags = GEN6_PTE_VALID; 216 uint32_t pte_flags = GEN6_PTE_VALID;
254 217
255 switch (cache_level) { 218 switch (cache_level) {
@@ -260,7 +223,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
260 pte_flags |= GEN6_PTE_CACHE_LLC; 223 pte_flags |= GEN6_PTE_CACHE_LLC;
261 break; 224 break;
262 case I915_CACHE_NONE: 225 case I915_CACHE_NONE:
263 if (IS_HASWELL(dev)) 226 if (IS_HASWELL(obj->base.dev))
264 pte_flags |= HSW_PTE_UNCACHED; 227 pte_flags |= HSW_PTE_UNCACHED;
265 else 228 else
266 pte_flags |= GEN6_PTE_UNCACHED; 229 pte_flags |= GEN6_PTE_UNCACHED;
@@ -269,26 +232,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
269 BUG(); 232 BUG();
270 } 233 }
271 234
272 if (obj->sg_table) { 235 i915_ppgtt_insert_sg_entries(ppgtt,
273 i915_ppgtt_insert_sg_entries(ppgtt, 236 obj->pages,
274 obj->sg_table->sgl, 237 obj->gtt_space->start >> PAGE_SHIFT,
275 obj->sg_table->nents, 238 pte_flags);
276 obj->gtt_space->start >> PAGE_SHIFT,
277 pte_flags);
278 } else if (dev_priv->mm.gtt->needs_dmar) {
279 BUG_ON(!obj->sg_list);
280
281 i915_ppgtt_insert_sg_entries(ppgtt,
282 obj->sg_list,
283 obj->num_sg,
284 obj->gtt_space->start >> PAGE_SHIFT,
285 pte_flags);
286 } else
287 i915_ppgtt_insert_pages(ppgtt,
288 obj->gtt_space->start >> PAGE_SHIFT,
289 obj->base.size >> PAGE_SHIFT,
290 obj->pages,
291 pte_flags);
292} 239}
293 240
294void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 241void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -350,7 +297,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
350 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, 297 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
351 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 298 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
352 299
353 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 300 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
354 i915_gem_clflush_object(obj); 301 i915_gem_clflush_object(obj);
355 i915_gem_gtt_bind_object(obj, obj->cache_level); 302 i915_gem_gtt_bind_object(obj, obj->cache_level);
356 } 303 }
@@ -360,44 +307,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
360 307
361int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 308int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
362{ 309{
363 struct drm_device *dev = obj->base.dev; 310 if (obj->has_dma_mapping)
364 struct drm_i915_private *dev_priv = dev->dev_private;
365
366 /* don't map imported dma buf objects */
367 if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
368 return intel_gtt_map_memory(obj->pages,
369 obj->base.size >> PAGE_SHIFT,
370 &obj->sg_list,
371 &obj->num_sg);
372 else
373 return 0; 311 return 0;
312
313 if (!dma_map_sg(&obj->base.dev->pdev->dev,
314 obj->pages->sgl, obj->pages->nents,
315 PCI_DMA_BIDIRECTIONAL))
316 return -ENOSPC;
317
318 return 0;
374} 319}
375 320
376void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 321void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
377 enum i915_cache_level cache_level) 322 enum i915_cache_level cache_level)
378{ 323{
379 struct drm_device *dev = obj->base.dev; 324 struct drm_device *dev = obj->base.dev;
380 struct drm_i915_private *dev_priv = dev->dev_private;
381 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 325 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
382 326
383 if (obj->sg_table) { 327 intel_gtt_insert_sg_entries(obj->pages,
384 intel_gtt_insert_sg_entries(obj->sg_table->sgl, 328 obj->gtt_space->start >> PAGE_SHIFT,
385 obj->sg_table->nents, 329 agp_type);
386 obj->gtt_space->start >> PAGE_SHIFT,
387 agp_type);
388 } else if (dev_priv->mm.gtt->needs_dmar) {
389 BUG_ON(!obj->sg_list);
390
391 intel_gtt_insert_sg_entries(obj->sg_list,
392 obj->num_sg,
393 obj->gtt_space->start >> PAGE_SHIFT,
394 agp_type);
395 } else
396 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
397 obj->base.size >> PAGE_SHIFT,
398 obj->pages,
399 agp_type);
400
401 obj->has_global_gtt_mapping = 1; 330 obj->has_global_gtt_mapping = 1;
402} 331}
403 332
@@ -417,14 +346,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
417 346
418 interruptible = do_idling(dev_priv); 347 interruptible = do_idling(dev_priv);
419 348
420 if (obj->sg_list) { 349 if (!obj->has_dma_mapping)
421 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 350 dma_unmap_sg(&dev->pdev->dev,
422 obj->sg_list = NULL; 351 obj->pages->sgl, obj->pages->nents,
423 } 352 PCI_DMA_BIDIRECTIONAL);
424 353
425 undo_idling(dev_priv, interruptible); 354 undo_idling(dev_priv, interruptible);
426} 355}
427 356
357static void i915_gtt_color_adjust(struct drm_mm_node *node,
358 unsigned long color,
359 unsigned long *start,
360 unsigned long *end)
361{
362 if (node->color != color)
363 *start += 4096;
364
365 if (!list_empty(&node->node_list)) {
366 node = list_entry(node->node_list.next,
367 struct drm_mm_node,
368 node_list);
369 if (node->allocated && node->color != color)
370 *end -= 4096;
371 }
372}
373
428void i915_gem_init_global_gtt(struct drm_device *dev, 374void i915_gem_init_global_gtt(struct drm_device *dev,
429 unsigned long start, 375 unsigned long start,
430 unsigned long mappable_end, 376 unsigned long mappable_end,
@@ -434,6 +380,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
434 380
435 /* Subtract the guard page ... */ 381 /* Subtract the guard page ... */
436 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 382 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
383 if (!HAS_LLC(dev))
384 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
437 385
438 dev_priv->mm.gtt_start = start; 386 dev_priv->mm.gtt_start = start;
439 dev_priv->mm.gtt_mappable_end = mappable_end; 387 dev_priv->mm.gtt_mappable_end = mappable_end;
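
The prepare/finish hunks above replace the intel_gtt_map_memory()/intel_gtt_unmap_memory() calls with a direct dma_map_sg()/dma_unmap_sg() pair over the object's sg_table, skipped when the object already carries a DMA mapping (e.g. an imported dma-buf). A sketch under those assumptions, with illustrative helper names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>

static int example_map_pages(struct device *dev, struct sg_table *pages,
			     bool has_dma_mapping)
{
	if (has_dma_mapping)	/* e.g. imported dma-buf: already mapped */
		return 0;

	/* dma_map_sg() returns 0 on failure, mapped entry count otherwise */
	if (!dma_map_sg(dev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

static void example_unmap_pages(struct device *dev, struct sg_table *pages,
				bool has_dma_mapping)
{
	/* unmap only what we mapped ourselves */
	if (!has_dma_mapping)
		dma_unmap_sg(dev, pages->sgl, pages->nents,
			     PCI_DMA_BIDIRECTIONAL);
}
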
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c2b7b67e410d..3208650a235c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -469,18 +469,20 @@ i915_gem_swizzle_page(struct page *page)
469void 469void
470i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 470i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
471{ 471{
472 struct scatterlist *sg;
472 int page_count = obj->base.size >> PAGE_SHIFT; 473 int page_count = obj->base.size >> PAGE_SHIFT;
473 int i; 474 int i;
474 475
475 if (obj->bit_17 == NULL) 476 if (obj->bit_17 == NULL)
476 return; 477 return;
477 478
478 for (i = 0; i < page_count; i++) { 479 for_each_sg(obj->pages->sgl, sg, page_count, i) {
479 char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; 480 struct page *page = sg_page(sg);
481 char new_bit_17 = page_to_phys(page) >> 17;
480 if ((new_bit_17 & 0x1) != 482 if ((new_bit_17 & 0x1) !=
481 (test_bit(i, obj->bit_17) != 0)) { 483 (test_bit(i, obj->bit_17) != 0)) {
482 i915_gem_swizzle_page(obj->pages[i]); 484 i915_gem_swizzle_page(page);
483 set_page_dirty(obj->pages[i]); 485 set_page_dirty(page);
484 } 486 }
485 } 487 }
486} 488}
@@ -488,6 +490,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
488void 490void
489i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 491i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
490{ 492{
493 struct scatterlist *sg;
491 int page_count = obj->base.size >> PAGE_SHIFT; 494 int page_count = obj->base.size >> PAGE_SHIFT;
492 int i; 495 int i;
493 496
@@ -501,8 +504,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
501 } 504 }
502 } 505 }
503 506
504 for (i = 0; i < page_count; i++) { 507 for_each_sg(obj->pages->sgl, sg, page_count, i) {
505 if (page_to_phys(obj->pages[i]) & (1 << 17)) 508 struct page *page = sg_page(sg);
509 if (page_to_phys(page) & (1 << 17))
506 __set_bit(i, obj->bit_17); 510 __set_bit(i, obj->bit_17);
507 else 511 else
508 __clear_bit(i, obj->bit_17); 512 __clear_bit(i, obj->bit_17);
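
With obj->pages now an sg_table, the swizzle loops above walk pages via for_each_sg()/sg_page() instead of indexing a struct page ** array. A sketch of that walk, assuming one page per scatterlist entry (as the shmem-backed objects above provide) and an illustrative helper name:

#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static void example_touch_each_page(struct sg_table *pages, int page_count)
{
	struct scatterlist *sg;
	int i;

	/* assumes each sg entry covers exactly one page, as above */
	for_each_sg(pages->sgl, sg, page_count, i) {
		struct page *page = sg_page(sg);
		void *vaddr = kmap_atomic(page);

		/* ... operate on one page worth of data here ... */

		kunmap_atomic(vaddr);
		set_page_dirty(page);
	}
}
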
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 505357886bbb..4e9888388c0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -295,11 +295,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
295 drm_helper_hpd_irq_event(dev); 295 drm_helper_hpd_irq_event(dev);
296} 296}
297 297
 298static void i915_handle_rps_change(struct drm_device *dev) 298/* defined in intel_pm.c */
299extern spinlock_t mchdev_lock;
300
301static void ironlake_handle_rps_change(struct drm_device *dev)
299{ 302{
300 drm_i915_private_t *dev_priv = dev->dev_private; 303 drm_i915_private_t *dev_priv = dev->dev_private;
301 u32 busy_up, busy_down, max_avg, min_avg; 304 u32 busy_up, busy_down, max_avg, min_avg;
302 u8 new_delay = dev_priv->cur_delay; 305 u8 new_delay;
306 unsigned long flags;
307
308 spin_lock_irqsave(&mchdev_lock, flags);
309
310 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
311
312 new_delay = dev_priv->ips.cur_delay;
303 313
304 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 314 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
305 busy_up = I915_READ(RCPREVBSYTUPAVG); 315 busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -309,19 +319,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
309 319
310 /* Handle RCS change request from hw */ 320 /* Handle RCS change request from hw */
311 if (busy_up > max_avg) { 321 if (busy_up > max_avg) {
312 if (dev_priv->cur_delay != dev_priv->max_delay) 322 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
313 new_delay = dev_priv->cur_delay - 1; 323 new_delay = dev_priv->ips.cur_delay - 1;
314 if (new_delay < dev_priv->max_delay) 324 if (new_delay < dev_priv->ips.max_delay)
315 new_delay = dev_priv->max_delay; 325 new_delay = dev_priv->ips.max_delay;
316 } else if (busy_down < min_avg) { 326 } else if (busy_down < min_avg) {
317 if (dev_priv->cur_delay != dev_priv->min_delay) 327 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
318 new_delay = dev_priv->cur_delay + 1; 328 new_delay = dev_priv->ips.cur_delay + 1;
319 if (new_delay > dev_priv->min_delay) 329 if (new_delay > dev_priv->ips.min_delay)
320 new_delay = dev_priv->min_delay; 330 new_delay = dev_priv->ips.min_delay;
321 } 331 }
322 332
323 if (ironlake_set_drps(dev, new_delay)) 333 if (ironlake_set_drps(dev, new_delay))
324 dev_priv->cur_delay = new_delay; 334 dev_priv->ips.cur_delay = new_delay;
335
336 spin_unlock_irqrestore(&mchdev_lock, flags);
325 337
326 return; 338 return;
327} 339}
@@ -334,7 +346,7 @@ static void notify_ring(struct drm_device *dev,
334 if (ring->obj == NULL) 346 if (ring->obj == NULL)
335 return; 347 return;
336 348
337 trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); 349 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
338 350
339 wake_up_all(&ring->irq_queue); 351 wake_up_all(&ring->irq_queue);
340 if (i915_enable_hangcheck) { 352 if (i915_enable_hangcheck) {
@@ -348,16 +360,16 @@ static void notify_ring(struct drm_device *dev,
348static void gen6_pm_rps_work(struct work_struct *work) 360static void gen6_pm_rps_work(struct work_struct *work)
349{ 361{
350 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 362 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
351 rps_work); 363 rps.work);
352 u32 pm_iir, pm_imr; 364 u32 pm_iir, pm_imr;
353 u8 new_delay; 365 u8 new_delay;
354 366
355 spin_lock_irq(&dev_priv->rps_lock); 367 spin_lock_irq(&dev_priv->rps.lock);
356 pm_iir = dev_priv->pm_iir; 368 pm_iir = dev_priv->rps.pm_iir;
357 dev_priv->pm_iir = 0; 369 dev_priv->rps.pm_iir = 0;
358 pm_imr = I915_READ(GEN6_PMIMR); 370 pm_imr = I915_READ(GEN6_PMIMR);
359 I915_WRITE(GEN6_PMIMR, 0); 371 I915_WRITE(GEN6_PMIMR, 0);
360 spin_unlock_irq(&dev_priv->rps_lock); 372 spin_unlock_irq(&dev_priv->rps.lock);
361 373
362 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 374 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
363 return; 375 return;
@@ -365,11 +377,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
365 mutex_lock(&dev_priv->dev->struct_mutex); 377 mutex_lock(&dev_priv->dev->struct_mutex);
366 378
367 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 379 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
368 new_delay = dev_priv->cur_delay + 1; 380 new_delay = dev_priv->rps.cur_delay + 1;
369 else 381 else
370 new_delay = dev_priv->cur_delay - 1; 382 new_delay = dev_priv->rps.cur_delay - 1;
371 383
372 gen6_set_rps(dev_priv->dev, new_delay); 384 /* sysfs frequency interfaces may have snuck in while servicing the
385 * interrupt
386 */
387 if (!(new_delay > dev_priv->rps.max_delay ||
388 new_delay < dev_priv->rps.min_delay)) {
389 gen6_set_rps(dev_priv->dev, new_delay);
390 }
373 391
374 mutex_unlock(&dev_priv->dev->struct_mutex); 392 mutex_unlock(&dev_priv->dev->struct_mutex);
375} 393}
@@ -443,7 +461,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
443 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 461 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
444 unsigned long flags; 462 unsigned long flags;
445 463
446 if (!IS_IVYBRIDGE(dev)) 464 if (!HAS_L3_GPU_CACHE(dev))
447 return; 465 return;
448 466
449 spin_lock_irqsave(&dev_priv->irq_lock, flags); 467 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -487,19 +505,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
487 * IIR bits should never already be set because IMR should 505 * IIR bits should never already be set because IMR should
488 * prevent an interrupt from being shown in IIR. The warning 506 * prevent an interrupt from being shown in IIR. The warning
489 * displays a case where we've unsafely cleared 507 * displays a case where we've unsafely cleared
490 * dev_priv->pm_iir. Although missing an interrupt of the same 508 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
491 * type is not a problem, it displays a problem in the logic. 509 * type is not a problem, it displays a problem in the logic.
492 * 510 *
493 * The mask bit in IMR is cleared by rps_work. 511 * The mask bit in IMR is cleared by dev_priv->rps.work.
494 */ 512 */
495 513
496 spin_lock_irqsave(&dev_priv->rps_lock, flags); 514 spin_lock_irqsave(&dev_priv->rps.lock, flags);
497 dev_priv->pm_iir |= pm_iir; 515 dev_priv->rps.pm_iir |= pm_iir;
498 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); 516 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
499 POSTING_READ(GEN6_PMIMR); 517 POSTING_READ(GEN6_PMIMR);
500 spin_unlock_irqrestore(&dev_priv->rps_lock, flags); 518 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
501 519
502 queue_work(dev_priv->wq, &dev_priv->rps_work); 520 queue_work(dev_priv->wq, &dev_priv->rps.work);
503} 521}
504 522
505static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) 523static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -792,10 +810,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
792 ibx_irq_handler(dev, pch_iir); 810 ibx_irq_handler(dev, pch_iir);
793 } 811 }
794 812
795 if (de_iir & DE_PCU_EVENT) { 813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
796 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 814 ironlake_handle_rps_change(dev);
797 i915_handle_rps_change(dev);
798 }
799 815
800 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
801 gen6_queue_rps_work(dev_priv, pm_iir); 817 gen6_queue_rps_work(dev_priv, pm_iir);
@@ -842,26 +858,55 @@ static void i915_error_work_func(struct work_struct *work)
842 } 858 }
843} 859}
844 860
861/* NB: please notice the memset */
862static void i915_get_extra_instdone(struct drm_device *dev,
863 uint32_t *instdone)
864{
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
867
868 switch(INTEL_INFO(dev)->gen) {
869 case 2:
870 case 3:
871 instdone[0] = I915_READ(INSTDONE);
872 break;
873 case 4:
874 case 5:
875 case 6:
876 instdone[0] = I915_READ(INSTDONE_I965);
877 instdone[1] = I915_READ(INSTDONE1);
878 break;
879 default:
880 WARN_ONCE(1, "Unsupported platform\n");
881 case 7:
882 instdone[0] = I915_READ(GEN7_INSTDONE_1);
883 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
884 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
885 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
886 break;
887 }
888}
889
845#ifdef CONFIG_DEBUG_FS 890#ifdef CONFIG_DEBUG_FS
846static struct drm_i915_error_object * 891static struct drm_i915_error_object *
847i915_error_object_create(struct drm_i915_private *dev_priv, 892i915_error_object_create(struct drm_i915_private *dev_priv,
848 struct drm_i915_gem_object *src) 893 struct drm_i915_gem_object *src)
849{ 894{
850 struct drm_i915_error_object *dst; 895 struct drm_i915_error_object *dst;
851 int page, page_count; 896 int i, count;
852 u32 reloc_offset; 897 u32 reloc_offset;
853 898
854 if (src == NULL || src->pages == NULL) 899 if (src == NULL || src->pages == NULL)
855 return NULL; 900 return NULL;
856 901
857 page_count = src->base.size / PAGE_SIZE; 902 count = src->base.size / PAGE_SIZE;
858 903
859 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); 904 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
860 if (dst == NULL) 905 if (dst == NULL)
861 return NULL; 906 return NULL;
862 907
863 reloc_offset = src->gtt_offset; 908 reloc_offset = src->gtt_offset;
864 for (page = 0; page < page_count; page++) { 909 for (i = 0; i < count; i++) {
865 unsigned long flags; 910 unsigned long flags;
866 void *d; 911 void *d;
867 912
@@ -884,30 +929,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
884 memcpy_fromio(d, s, PAGE_SIZE); 929 memcpy_fromio(d, s, PAGE_SIZE);
885 io_mapping_unmap_atomic(s); 930 io_mapping_unmap_atomic(s);
886 } else { 931 } else {
932 struct page *page;
887 void *s; 933 void *s;
888 934
889 drm_clflush_pages(&src->pages[page], 1); 935 page = i915_gem_object_get_page(src, i);
936
937 drm_clflush_pages(&page, 1);
890 938
891 s = kmap_atomic(src->pages[page]); 939 s = kmap_atomic(page);
892 memcpy(d, s, PAGE_SIZE); 940 memcpy(d, s, PAGE_SIZE);
893 kunmap_atomic(s); 941 kunmap_atomic(s);
894 942
895 drm_clflush_pages(&src->pages[page], 1); 943 drm_clflush_pages(&page, 1);
896 } 944 }
897 local_irq_restore(flags); 945 local_irq_restore(flags);
898 946
899 dst->pages[page] = d; 947 dst->pages[i] = d;
900 948
901 reloc_offset += PAGE_SIZE; 949 reloc_offset += PAGE_SIZE;
902 } 950 }
903 dst->page_count = page_count; 951 dst->page_count = count;
904 dst->gtt_offset = src->gtt_offset; 952 dst->gtt_offset = src->gtt_offset;
905 953
906 return dst; 954 return dst;
907 955
908unwind: 956unwind:
909 while (page--) 957 while (i--)
910 kfree(dst->pages[page]); 958 kfree(dst->pages[i]);
911 kfree(dst); 959 kfree(dst);
912 return NULL; 960 return NULL;
913} 961}
@@ -948,7 +996,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
948{ 996{
949 err->size = obj->base.size; 997 err->size = obj->base.size;
950 err->name = obj->base.name; 998 err->name = obj->base.name;
951 err->seqno = obj->last_rendering_seqno; 999 err->rseqno = obj->last_read_seqno;
1000 err->wseqno = obj->last_write_seqno;
952 err->gtt_offset = obj->gtt_offset; 1001 err->gtt_offset = obj->gtt_offset;
953 err->read_domains = obj->base.read_domains; 1002 err->read_domains = obj->base.read_domains;
954 err->write_domain = obj->base.write_domain; 1003 err->write_domain = obj->base.write_domain;
@@ -1038,12 +1087,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1038 if (!ring->get_seqno) 1087 if (!ring->get_seqno)
1039 return NULL; 1088 return NULL;
1040 1089
1041 seqno = ring->get_seqno(ring); 1090 seqno = ring->get_seqno(ring, false);
1042 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1091 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1043 if (obj->ring != ring) 1092 if (obj->ring != ring)
1044 continue; 1093 continue;
1045 1094
1046 if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) 1095 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1047 continue; 1096 continue;
1048 1097
1049 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1098 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1079,10 +1128,8 @@ static void i915_record_ring_state(struct drm_device *dev,
1079 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1128 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1080 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1129 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1081 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1130 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1082 if (ring->id == RCS) { 1131 if (ring->id == RCS)
1083 error->instdone1 = I915_READ(INSTDONE1);
1084 error->bbaddr = I915_READ64(BB_ADDR); 1132 error->bbaddr = I915_READ64(BB_ADDR);
1085 }
1086 } else { 1133 } else {
1087 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1134 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1088 error->ipeir[ring->id] = I915_READ(IPEIR); 1135 error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1092,7 +1139,7 @@ static void i915_record_ring_state(struct drm_device *dev,
1092 1139
1093 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1140 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1094 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1141 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1095 error->seqno[ring->id] = ring->get_seqno(ring); 1142 error->seqno[ring->id] = ring->get_seqno(ring, false);
1096 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1143 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1097 error->head[ring->id] = I915_READ_HEAD(ring); 1144 error->head[ring->id] = I915_READ_HEAD(ring);
1098 error->tail[ring->id] = I915_READ_TAIL(ring); 1145 error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1198,6 +1245,11 @@ static void i915_capture_error_state(struct drm_device *dev)
1198 error->done_reg = I915_READ(DONE_REG); 1245 error->done_reg = I915_READ(DONE_REG);
1199 } 1246 }
1200 1247
1248 if (INTEL_INFO(dev)->gen == 7)
1249 error->err_int = I915_READ(GEN7_ERR_INT);
1250
1251 i915_get_extra_instdone(dev, error->extra_instdone);
1252
1201 i915_gem_record_fences(dev, error); 1253 i915_gem_record_fences(dev, error);
1202 i915_gem_record_rings(dev, error); 1254 i915_gem_record_rings(dev, error);
1203 1255
@@ -1209,7 +1261,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1209 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1261 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1210 i++; 1262 i++;
1211 error->active_bo_count = i; 1263 error->active_bo_count = i;
1212 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) 1264 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1213 if (obj->pin_count) 1265 if (obj->pin_count)
1214 i++; 1266 i++;
1215 error->pinned_bo_count = i - error->active_bo_count; 1267 error->pinned_bo_count = i - error->active_bo_count;
@@ -1234,7 +1286,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1234 error->pinned_bo_count = 1286 error->pinned_bo_count =
1235 capture_pinned_bo(error->pinned_bo, 1287 capture_pinned_bo(error->pinned_bo,
1236 error->pinned_bo_count, 1288 error->pinned_bo_count,
1237 &dev_priv->mm.gtt_list); 1289 &dev_priv->mm.bound_list);
1238 1290
1239 do_gettimeofday(&error->time); 1291 do_gettimeofday(&error->time);
1240 1292
@@ -1273,24 +1325,26 @@ void i915_destroy_error_state(struct drm_device *dev)
1273static void i915_report_and_clear_eir(struct drm_device *dev) 1325static void i915_report_and_clear_eir(struct drm_device *dev)
1274{ 1326{
1275 struct drm_i915_private *dev_priv = dev->dev_private; 1327 struct drm_i915_private *dev_priv = dev->dev_private;
1328 uint32_t instdone[I915_NUM_INSTDONE_REG];
1276 u32 eir = I915_READ(EIR); 1329 u32 eir = I915_READ(EIR);
1277 int pipe; 1330 int pipe, i;
1278 1331
1279 if (!eir) 1332 if (!eir)
1280 return; 1333 return;
1281 1334
1282 pr_err("render error detected, EIR: 0x%08x\n", eir); 1335 pr_err("render error detected, EIR: 0x%08x\n", eir);
1283 1336
1337 i915_get_extra_instdone(dev, instdone);
1338
1284 if (IS_G4X(dev)) { 1339 if (IS_G4X(dev)) {
1285 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1340 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1286 u32 ipeir = I915_READ(IPEIR_I965); 1341 u32 ipeir = I915_READ(IPEIR_I965);
1287 1342
1288 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1343 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1289 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1344 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1290 pr_err(" INSTDONE: 0x%08x\n", 1345 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1291 I915_READ(INSTDONE_I965)); 1346 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1292 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1347 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1293 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1294 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1348 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1295 I915_WRITE(IPEIR_I965, ipeir); 1349 I915_WRITE(IPEIR_I965, ipeir);
1296 POSTING_READ(IPEIR_I965); 1350 POSTING_READ(IPEIR_I965);
@@ -1324,12 +1378,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1324 if (eir & I915_ERROR_INSTRUCTION) { 1378 if (eir & I915_ERROR_INSTRUCTION) {
1325 pr_err("instruction error\n"); 1379 pr_err("instruction error\n");
1326 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1380 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1381 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1382 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1327 if (INTEL_INFO(dev)->gen < 4) { 1383 if (INTEL_INFO(dev)->gen < 4) {
1328 u32 ipeir = I915_READ(IPEIR); 1384 u32 ipeir = I915_READ(IPEIR);
1329 1385
1330 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1386 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1331 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1387 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1332 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1333 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1388 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1334 I915_WRITE(IPEIR, ipeir); 1389 I915_WRITE(IPEIR, ipeir);
1335 POSTING_READ(IPEIR); 1390 POSTING_READ(IPEIR);
@@ -1338,10 +1393,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1338 1393
1339 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1394 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1340 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1395 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1341 pr_err(" INSTDONE: 0x%08x\n",
1342 I915_READ(INSTDONE_I965));
1343 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1396 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1344 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1345 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1397 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1346 I915_WRITE(IPEIR_I965, ipeir); 1398 I915_WRITE(IPEIR_I965, ipeir);
1347 POSTING_READ(IPEIR_I965); 1399 POSTING_READ(IPEIR_I965);
@@ -1589,7 +1641,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1589static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1641static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1590{ 1642{
1591 if (list_empty(&ring->request_list) || 1643 if (list_empty(&ring->request_list) ||
1592 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1644 i915_seqno_passed(ring->get_seqno(ring, false),
1645 ring_last_seqno(ring))) {
1593 /* Issue a wake-up to catch stuck h/w. */ 1646 /* Issue a wake-up to catch stuck h/w. */
1594 if (waitqueue_active(&ring->irq_queue)) { 1647 if (waitqueue_active(&ring->irq_queue)) {
1595 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1648 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1655,7 +1708,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1655{ 1708{
1656 struct drm_device *dev = (struct drm_device *)data; 1709 struct drm_device *dev = (struct drm_device *)data;
1657 drm_i915_private_t *dev_priv = dev->dev_private; 1710 drm_i915_private_t *dev_priv = dev->dev_private;
1658 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; 1711 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1659 struct intel_ring_buffer *ring; 1712 struct intel_ring_buffer *ring;
1660 bool err = false, idle; 1713 bool err = false, idle;
1661 int i; 1714 int i;
@@ -1683,25 +1736,16 @@ void i915_hangcheck_elapsed(unsigned long data)
1683 return; 1736 return;
1684 } 1737 }
1685 1738
1686 if (INTEL_INFO(dev)->gen < 4) { 1739 i915_get_extra_instdone(dev, instdone);
1687 instdone = I915_READ(INSTDONE);
1688 instdone1 = 0;
1689 } else {
1690 instdone = I915_READ(INSTDONE_I965);
1691 instdone1 = I915_READ(INSTDONE1);
1692 }
1693
1694 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1740 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1695 dev_priv->last_instdone == instdone && 1741 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1696 dev_priv->last_instdone1 == instdone1) {
1697 if (i915_hangcheck_hung(dev)) 1742 if (i915_hangcheck_hung(dev))
1698 return; 1743 return;
1699 } else { 1744 } else {
1700 dev_priv->hangcheck_count = 0; 1745 dev_priv->hangcheck_count = 0;
1701 1746
1702 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1747 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1703 dev_priv->last_instdone = instdone; 1748 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1704 dev_priv->last_instdone1 = instdone1;
1705 } 1749 }
1706 1750
1707repeat: 1751repeat:
@@ -2646,7 +2690,7 @@ void intel_irq_init(struct drm_device *dev)
2646 2690
2647 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2691 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2648 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2692 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2649 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 2693 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2650 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); 2694 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
2651 2695
2652 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2696 dev->driver->get_vblank_counter = i915_get_vblank_counter;
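
The RPS changes above keep the same interrupt-to-worker split while moving the state into dev_priv->rps: the IRQ handler only accumulates pm_iir bits under rps.lock and queues rps.work, and the worker drains those bits under the same lock before doing anything that can sleep. A condensed sketch of that handoff with illustrative names (not the driver code itself):

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_rps {
	spinlock_t lock;		/* protects pm_iir */
	u32 pm_iir;			/* bits waiting to be serviced */
	struct work_struct work;	/* INIT_WORK()'d at init time */
};

static void example_rps_irq(struct example_rps *rps, u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&rps->lock, flags);
	rps->pm_iir |= pm_iir;		/* accumulate, never overwrite */
	spin_unlock_irqrestore(&rps->lock, flags);

	schedule_work(&rps->work);
}

static void example_rps_work(struct work_struct *work)
{
	struct example_rps *rps = container_of(work, struct example_rps, work);
	u32 pm_iir;

	spin_lock_irq(&rps->lock);
	pm_iir = rps->pm_iir;		/* claim everything queued so far */
	rps->pm_iir = 0;
	spin_unlock_irq(&rps->lock);

	if (!pm_iir)
		return;

	/* ... sleepable frequency update based on pm_iir goes here ... */
}
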
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82c..7637824c6a7d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
450#define RING_ACTHD(base) ((base)+0x74) 450#define RING_ACTHD(base) ((base)+0x74)
451#define RING_NOPID(base) ((base)+0x94) 451#define RING_NOPID(base) ((base)+0x94)
452#define RING_IMR(base) ((base)+0xa8) 452#define RING_IMR(base) ((base)+0xa8)
453#define RING_TIMESTAMP(base) ((base)+0x358)
453#define TAIL_ADDR 0x001FFFF8 454#define TAIL_ADDR 0x001FFFF8
454#define HEAD_WRAP_COUNT 0xFFE00000 455#define HEAD_WRAP_COUNT 0xFFE00000
455#define HEAD_WRAP_ONE 0x00200000 456#define HEAD_WRAP_ONE 0x00200000
@@ -478,6 +479,11 @@
478#define IPEIR_I965 0x02064 479#define IPEIR_I965 0x02064
479#define IPEHR_I965 0x02068 480#define IPEHR_I965 0x02068
480#define INSTDONE_I965 0x0206c 481#define INSTDONE_I965 0x0206c
482#define GEN7_INSTDONE_1 0x0206c
483#define GEN7_SC_INSTDONE 0x07100
484#define GEN7_SAMPLER_INSTDONE 0x0e160
485#define GEN7_ROW_INSTDONE 0x0e164
486#define I915_NUM_INSTDONE_REG 4
481#define RING_IPEIR(base) ((base)+0x64) 487#define RING_IPEIR(base) ((base)+0x64)
482#define RING_IPEHR(base) ((base)+0x68) 488#define RING_IPEHR(base) ((base)+0x68)
483#define RING_INSTDONE(base) ((base)+0x6c) 489#define RING_INSTDONE(base) ((base)+0x6c)
@@ -500,6 +506,8 @@
500#define DMA_FADD_I8XX 0x020d0 506#define DMA_FADD_I8XX 0x020d0
501 507
502#define ERROR_GEN6 0x040a0 508#define ERROR_GEN6 0x040a0
509#define GEN7_ERR_INT 0x44040
510#define ERR_INT_MMIO_UNCLAIMED (1<<13)
503 511
504/* GM45+ chicken bits -- debug workaround bits that may be required 512/* GM45+ chicken bits -- debug workaround bits that may be required
505 * for various sorts of correct behavior. The top 16 bits of each are 513 * for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
529#define GFX_PSMI_GRANULARITY (1<<10) 537#define GFX_PSMI_GRANULARITY (1<<10)
530#define GFX_PPGTT_ENABLE (1<<9) 538#define GFX_PPGTT_ENABLE (1<<9)
531 539
540#define VLV_DISPLAY_BASE 0x180000
541
532#define SCPD0 0x0209c /* 915+ only */ 542#define SCPD0 0x0209c /* 915+ only */
533#define IER 0x020a0 543#define IER 0x020a0
534#define IIR 0x020a4 544#define IIR 0x020a4
@@ -1496,6 +1506,14 @@
1496 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 1506 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1497 GEN7_CXT_GT1_SIZE(ctx_reg) + \ 1507 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1498 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1508 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1509#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
1510#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
1511#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
1512#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
1513 HSW_CXT_RING_SIZE(ctx_reg) + \
1514 HSW_CXT_RENDER_SIZE(ctx_reg) + \
1515 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1516
1499 1517
1500/* 1518/*
1501 * Overlay regs 1519 * Overlay regs
@@ -1549,12 +1567,35 @@
1549 1567
1550/* VGA port control */ 1568/* VGA port control */
1551#define ADPA 0x61100 1569#define ADPA 0x61100
1570#define PCH_ADPA 0xe1100
1571#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
1572
1552#define ADPA_DAC_ENABLE (1<<31) 1573#define ADPA_DAC_ENABLE (1<<31)
1553#define ADPA_DAC_DISABLE 0 1574#define ADPA_DAC_DISABLE 0
1554#define ADPA_PIPE_SELECT_MASK (1<<30) 1575#define ADPA_PIPE_SELECT_MASK (1<<30)
1555#define ADPA_PIPE_A_SELECT 0 1576#define ADPA_PIPE_A_SELECT 0
1556#define ADPA_PIPE_B_SELECT (1<<30) 1577#define ADPA_PIPE_B_SELECT (1<<30)
1557#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) 1578#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1579/* CPT uses bits 29:30 for pch transcoder select */
1580#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
1581#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
1582#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
1583#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
1584#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
1585#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
1586#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
1587#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
1588#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
1589#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
1590#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
1591#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
1592#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
1593#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
1594#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
1595#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
1596#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
1597#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
1598#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
1558#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1599#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1559#define ADPA_SETS_HVPOLARITY 0 1600#define ADPA_SETS_HVPOLARITY 0
1560#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1601#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
1753 1794
1754/* Video Data Island Packet control */ 1795/* Video Data Island Packet control */
1755#define VIDEO_DIP_DATA 0x61178 1796#define VIDEO_DIP_DATA 0x61178
 1797/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
1798 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
1799 * of the infoframe structure specified by CEA-861. */
1800#define VIDEO_DIP_DATA_SIZE 32
1756#define VIDEO_DIP_CTL 0x61170 1801#define VIDEO_DIP_CTL 0x61170
1757/* Pre HSW: */ 1802/* Pre HSW: */
1758#define VIDEO_DIP_ENABLE (1 << 31) 1803#define VIDEO_DIP_ENABLE (1 << 31)
@@ -3889,31 +3934,6 @@
3889#define FDI_PLL_CTL_1 0xfe000 3934#define FDI_PLL_CTL_1 0xfe000
3890#define FDI_PLL_CTL_2 0xfe004 3935#define FDI_PLL_CTL_2 0xfe004
3891 3936
3892/* CRT */
3893#define PCH_ADPA 0xe1100
3894#define ADPA_TRANS_SELECT_MASK (1<<30)
3895#define ADPA_TRANS_A_SELECT 0
3896#define ADPA_TRANS_B_SELECT (1<<30)
3897#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
3898#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
3899#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
3900#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
3901#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
3902#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
3903#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
3904#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
3905#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
3906#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
3907#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
3908#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
3909#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
3910#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
3911#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
3912#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
3913#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
3914#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3915#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3916
3917/* or SDVOB */ 3937/* or SDVOB */
3918#define HDMIB 0xe1140 3938#define HDMIB 0xe1140
3919#define PORT_ENABLE (1 << 31) 3939#define PORT_ENABLE (1 << 31)
@@ -4021,6 +4041,8 @@
4021#define PORT_TRANS_C_SEL_CPT (2<<29) 4041#define PORT_TRANS_C_SEL_CPT (2<<29)
4022#define PORT_TRANS_SEL_MASK (3<<29) 4042#define PORT_TRANS_SEL_MASK (3<<29)
4023#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) 4043#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
4044#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
4045#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
4024 4046
4025#define TRANS_DP_CTL_A 0xe0300 4047#define TRANS_DP_CTL_A 0xe0300
4026#define TRANS_DP_CTL_B 0xe1300 4048#define TRANS_DP_CTL_B 0xe1300
@@ -4239,7 +4261,15 @@
4239#define G4X_HDMIW_HDMIEDID 0x6210C 4261#define G4X_HDMIW_HDMIEDID 0x6210C
4240 4262
4241#define IBX_HDMIW_HDMIEDID_A 0xE2050 4263#define IBX_HDMIW_HDMIEDID_A 0xE2050
4264#define IBX_HDMIW_HDMIEDID_B 0xE2150
4265#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4266 IBX_HDMIW_HDMIEDID_A, \
4267 IBX_HDMIW_HDMIEDID_B)
4242#define IBX_AUD_CNTL_ST_A 0xE20B4 4268#define IBX_AUD_CNTL_ST_A 0xE20B4
4269#define IBX_AUD_CNTL_ST_B 0xE21B4
4270#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4271 IBX_AUD_CNTL_ST_A, \
4272 IBX_AUD_CNTL_ST_B)
4243#define IBX_ELD_BUFFER_SIZE (0x1f << 10) 4273#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
4244#define IBX_ELD_ADDRESS (0x1f << 5) 4274#define IBX_ELD_ADDRESS (0x1f << 5)
4245#define IBX_ELD_ACK (1 << 4) 4275#define IBX_ELD_ACK (1 << 4)
@@ -4248,7 +4278,15 @@
4248#define IBX_CP_READYB (1 << 1) 4278#define IBX_CP_READYB (1 << 1)
4249 4279
4250#define CPT_HDMIW_HDMIEDID_A 0xE5050 4280#define CPT_HDMIW_HDMIEDID_A 0xE5050
4281#define CPT_HDMIW_HDMIEDID_B 0xE5150
4282#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4283 CPT_HDMIW_HDMIEDID_A, \
4284 CPT_HDMIW_HDMIEDID_B)
4251#define CPT_AUD_CNTL_ST_A 0xE50B4 4285#define CPT_AUD_CNTL_ST_A 0xE50B4
4286#define CPT_AUD_CNTL_ST_B 0xE51B4
4287#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4288 CPT_AUD_CNTL_ST_A, \
4289 CPT_AUD_CNTL_ST_B)
4252#define CPT_AUD_CNTRL_ST2 0xE50C0 4290#define CPT_AUD_CNTRL_ST2 0xE50C0
4253 4291
4254/* These are the 4 32-bit write offset registers for each stream 4292/* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
4258#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) 4296#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
4259 4297
4260#define IBX_AUD_CONFIG_A 0xe2000 4298#define IBX_AUD_CONFIG_A 0xe2000
4299#define IBX_AUD_CONFIG_B 0xe2100
4300#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
4301 IBX_AUD_CONFIG_A, \
4302 IBX_AUD_CONFIG_B)
4261#define CPT_AUD_CONFIG_A 0xe5000 4303#define CPT_AUD_CONFIG_A 0xe5000
4304#define CPT_AUD_CONFIG_B 0xe5100
4305#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4306 CPT_AUD_CONFIG_A, \
4307 CPT_AUD_CONFIG_B)
4262#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 4308#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4263#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 4309#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4264#define AUD_CONFIG_UPPER_N_SHIFT 20 4310#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4269,195 +4315,233 @@
4269#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 4315#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
4270#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 4316#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
4271 4317
4318/* HSW Audio */
4319#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
4320#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
4321#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
4322 HSW_AUD_CONFIG_A, \
4323 HSW_AUD_CONFIG_B)
4324
4325#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
4326#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
4327#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
4328 HSW_AUD_MISC_CTRL_A, \
4329 HSW_AUD_MISC_CTRL_B)
4330
4331#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
4332#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
4333#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
4334 HSW_AUD_DIP_ELD_CTRL_ST_A, \
4335 HSW_AUD_DIP_ELD_CTRL_ST_B)
4336
4337/* Audio Digital Converter */
4338#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
 4339#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
4340#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
4341 HSW_AUD_DIG_CNVT_1, \
4342 HSW_AUD_DIG_CNVT_2)
4343#define DIP_PORT_SEL_MASK 0x3
4344
4345#define HSW_AUD_EDID_DATA_A 0x65050
4346#define HSW_AUD_EDID_DATA_B 0x65150
4347#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
4348 HSW_AUD_EDID_DATA_A, \
4349 HSW_AUD_EDID_DATA_B)
4350
4351#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
4352#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
4353#define AUDIO_INACTIVE_C (1<<11)
4354#define AUDIO_INACTIVE_B (1<<7)
4355#define AUDIO_INACTIVE_A (1<<3)
4356#define AUDIO_OUTPUT_ENABLE_A (1<<2)
4357#define AUDIO_OUTPUT_ENABLE_B (1<<6)
4358#define AUDIO_OUTPUT_ENABLE_C (1<<10)
4359#define AUDIO_ELD_VALID_A (1<<0)
4360#define AUDIO_ELD_VALID_B (1<<4)
4361#define AUDIO_ELD_VALID_C (1<<8)
4362#define AUDIO_CP_READY_A (1<<1)
4363#define AUDIO_CP_READY_B (1<<5)
4364#define AUDIO_CP_READY_C (1<<9)
4365
4272/* HSW Power Wells */ 4366/* HSW Power Wells */
4273#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ 4367#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
4274#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ 4368#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
4275#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ 4369#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
4276#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ 4370#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
4277#define HSW_PWR_WELL_ENABLE (1<<31) 4371#define HSW_PWR_WELL_ENABLE (1<<31)
4278#define HSW_PWR_WELL_STATE (1<<30) 4372#define HSW_PWR_WELL_STATE (1<<30)
4279#define HSW_PWR_WELL_CTL5 0x45410 4373#define HSW_PWR_WELL_CTL5 0x45410
4280#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) 4374#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4281#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) 4375#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4282#define HSW_PWR_WELL_FORCE_ON (1<<19) 4376#define HSW_PWR_WELL_FORCE_ON (1<<19)
4283#define HSW_PWR_WELL_CTL6 0x45414 4377#define HSW_PWR_WELL_CTL6 0x45414
4284 4378
4285/* Per-pipe DDI Function Control */ 4379/* Per-pipe DDI Function Control */
4286#define PIPE_DDI_FUNC_CTL_A 0x60400 4380#define PIPE_DDI_FUNC_CTL_A 0x60400
4287#define PIPE_DDI_FUNC_CTL_B 0x61400 4381#define PIPE_DDI_FUNC_CTL_B 0x61400
4288#define PIPE_DDI_FUNC_CTL_C 0x62400 4382#define PIPE_DDI_FUNC_CTL_C 0x62400
4289#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 4383#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
4290#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ 4384#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
4291 PIPE_DDI_FUNC_CTL_A, \ 4385 PIPE_DDI_FUNC_CTL_B)
4292 PIPE_DDI_FUNC_CTL_B)
4293#define PIPE_DDI_FUNC_ENABLE (1<<31) 4386#define PIPE_DDI_FUNC_ENABLE (1<<31)
4294/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 4387/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4295#define PIPE_DDI_PORT_MASK (7<<28) 4388#define PIPE_DDI_PORT_MASK (7<<28)
4296#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) 4389#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
4297#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) 4390#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
4298#define PIPE_DDI_MODE_SELECT_DVI (1<<24) 4391#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
4392#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
4299#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) 4393#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
4300#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) 4394#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
4301#define PIPE_DDI_MODE_SELECT_FDI (4<<24) 4395#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
4302#define PIPE_DDI_BPC_8 (0<<20) 4396#define PIPE_DDI_BPC_MASK (7<<20)
4303#define PIPE_DDI_BPC_10 (1<<20) 4397#define PIPE_DDI_BPC_8 (0<<20)
4304#define PIPE_DDI_BPC_6 (2<<20) 4398#define PIPE_DDI_BPC_10 (1<<20)
4305#define PIPE_DDI_BPC_12 (3<<20) 4399#define PIPE_DDI_BPC_6 (2<<20)
4306#define PIPE_DDI_BFI_ENABLE (1<<4) 4400#define PIPE_DDI_BPC_12 (3<<20)
4307#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) 4401#define PIPE_DDI_PVSYNC (1<<17)
4308#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) 4402#define PIPE_DDI_PHSYNC (1<<16)
4309#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) 4403#define PIPE_DDI_BFI_ENABLE (1<<4)
4404#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
4405#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
4406#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
4310 4407
4311/* DisplayPort Transport Control */ 4408/* DisplayPort Transport Control */
4312#define DP_TP_CTL_A 0x64040 4409#define DP_TP_CTL_A 0x64040
4313#define DP_TP_CTL_B 0x64140 4410#define DP_TP_CTL_B 0x64140
4314#define DP_TP_CTL(port) _PORT(port, \ 4411#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
4315 DP_TP_CTL_A, \ 4412#define DP_TP_CTL_ENABLE (1<<31)
4316 DP_TP_CTL_B) 4413#define DP_TP_CTL_MODE_SST (0<<27)
4317#define DP_TP_CTL_ENABLE (1<<31) 4414#define DP_TP_CTL_MODE_MST (1<<27)
4318#define DP_TP_CTL_MODE_SST (0<<27)
4319#define DP_TP_CTL_MODE_MST (1<<27)
4320#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) 4415#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
4321#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) 4416#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
4322#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) 4417#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4323#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) 4418#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4324#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) 4419#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4325#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) 4420#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4326 4421
4327/* DisplayPort Transport Status */ 4422/* DisplayPort Transport Status */
4328#define DP_TP_STATUS_A 0x64044 4423#define DP_TP_STATUS_A 0x64044
4329#define DP_TP_STATUS_B 0x64144 4424#define DP_TP_STATUS_B 0x64144
4330#define DP_TP_STATUS(port) _PORT(port, \ 4425#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
4331 DP_TP_STATUS_A, \
4332 DP_TP_STATUS_B)
4333#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) 4426#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4334 4427
4335/* DDI Buffer Control */ 4428/* DDI Buffer Control */
4336#define DDI_BUF_CTL_A 0x64000 4429#define DDI_BUF_CTL_A 0x64000
4337#define DDI_BUF_CTL_B 0x64100 4430#define DDI_BUF_CTL_B 0x64100
4338#define DDI_BUF_CTL(port) _PORT(port, \ 4431#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
4339 DDI_BUF_CTL_A, \ 4432#define DDI_BUF_CTL_ENABLE (1<<31)
4340 DDI_BUF_CTL_B)
4341#define DDI_BUF_CTL_ENABLE (1<<31)
4342#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 4433#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4343#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ 4434#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4344#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ 4435#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4345#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ 4436#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
4346#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ 4437#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
4347#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ 4438#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
4348#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ 4439#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4349#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ 4440#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4350#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 4441#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4351#define DDI_BUF_EMP_MASK (0xf<<24) 4442#define DDI_BUF_EMP_MASK (0xf<<24)
4352#define DDI_BUF_IS_IDLE (1<<7) 4443#define DDI_BUF_IS_IDLE (1<<7)
4353#define DDI_PORT_WIDTH_X1 (0<<1) 4444#define DDI_PORT_WIDTH_X1 (0<<1)
4354#define DDI_PORT_WIDTH_X2 (1<<1) 4445#define DDI_PORT_WIDTH_X2 (1<<1)
4355#define DDI_PORT_WIDTH_X4 (3<<1) 4446#define DDI_PORT_WIDTH_X4 (3<<1)
4356#define DDI_INIT_DISPLAY_DETECTED (1<<0) 4447#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4357 4448
4358/* DDI Buffer Translations */ 4449/* DDI Buffer Translations */
4359#define DDI_BUF_TRANS_A 0x64E00 4450#define DDI_BUF_TRANS_A 0x64E00
4360#define DDI_BUF_TRANS_B 0x64E60 4451#define DDI_BUF_TRANS_B 0x64E60
4361#define DDI_BUF_TRANS(port) _PORT(port, \ 4452#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
4362 DDI_BUF_TRANS_A, \
4363 DDI_BUF_TRANS_B)
4364 4453
4365/* Sideband Interface (SBI) is programmed indirectly, via 4454/* Sideband Interface (SBI) is programmed indirectly, via
4366 * SBI_ADDR, which contains the register offset; and SBI_DATA, 4455 * SBI_ADDR, which contains the register offset; and SBI_DATA,
4367 * which contains the payload */ 4456 * which contains the payload */
4368#define SBI_ADDR 0xC6000 4457#define SBI_ADDR 0xC6000
4369#define SBI_DATA 0xC6004 4458#define SBI_DATA 0xC6004
4370#define SBI_CTL_STAT 0xC6008 4459#define SBI_CTL_STAT 0xC6008
4371#define SBI_CTL_OP_CRRD (0x6<<8) 4460#define SBI_CTL_OP_CRRD (0x6<<8)
4372#define SBI_CTL_OP_CRWR (0x7<<8) 4461#define SBI_CTL_OP_CRWR (0x7<<8)
4373#define SBI_RESPONSE_FAIL (0x1<<1) 4462#define SBI_RESPONSE_FAIL (0x1<<1)
4374#define SBI_RESPONSE_SUCCESS (0x0<<1) 4463#define SBI_RESPONSE_SUCCESS (0x0<<1)
4375#define SBI_BUSY (0x1<<0) 4464#define SBI_BUSY (0x1<<0)
4376#define SBI_READY (0x0<<0) 4465#define SBI_READY (0x0<<0)
4377 4466
4378/* SBI offsets */ 4467/* SBI offsets */
4379#define SBI_SSCDIVINTPHASE6 0x0600 4468#define SBI_SSCDIVINTPHASE6 0x0600
4380#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) 4469#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
4381#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) 4470#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
4382#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) 4471#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
4383#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) 4472#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
4384#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) 4473#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
4385#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) 4474#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4386#define SBI_SSCCTL 0x020c 4475#define SBI_SSCCTL 0x020c
4387#define SBI_SSCCTL6 0x060C 4476#define SBI_SSCCTL6 0x060C
4388#define SBI_SSCCTL_DISABLE (1<<0) 4477#define SBI_SSCCTL_DISABLE (1<<0)
4389#define SBI_SSCAUXDIV6 0x0610 4478#define SBI_SSCAUXDIV6 0x0610
4390#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) 4479#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4391#define SBI_DBUFF0 0x2a00 4480#define SBI_DBUFF0 0x2a00
4392 4481
4393/* LPT PIXCLK_GATE */ 4482/* LPT PIXCLK_GATE */
4394#define PIXCLK_GATE 0xC6020 4483#define PIXCLK_GATE 0xC6020
4395#define PIXCLK_GATE_UNGATE 1<<0 4484#define PIXCLK_GATE_UNGATE (1<<0)
4396#define PIXCLK_GATE_GATE 0<<0 4485#define PIXCLK_GATE_GATE (0<<0)
4397 4486
4398/* SPLL */ 4487/* SPLL */
4399#define SPLL_CTL 0x46020 4488#define SPLL_CTL 0x46020
4400#define SPLL_PLL_ENABLE (1<<31) 4489#define SPLL_PLL_ENABLE (1<<31)
4401#define SPLL_PLL_SCC (1<<28) 4490#define SPLL_PLL_SCC (1<<28)
4402#define SPLL_PLL_NON_SCC (2<<28) 4491#define SPLL_PLL_NON_SCC (2<<28)
4403#define SPLL_PLL_FREQ_810MHz (0<<26) 4492#define SPLL_PLL_FREQ_810MHz (0<<26)
4404#define SPLL_PLL_FREQ_1350MHz (1<<26) 4493#define SPLL_PLL_FREQ_1350MHz (1<<26)
4405 4494
4406/* WRPLL */ 4495/* WRPLL */
4407#define WRPLL_CTL1 0x46040 4496#define WRPLL_CTL1 0x46040
4408#define WRPLL_CTL2 0x46060 4497#define WRPLL_CTL2 0x46060
4409#define WRPLL_PLL_ENABLE (1<<31) 4498#define WRPLL_PLL_ENABLE (1<<31)
4410#define WRPLL_PLL_SELECT_SSC (0x01<<28) 4499#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4411#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) 4500#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
4412#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 4501#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4413/* WRPLL divider programming */ 4502/* WRPLL divider programming */
4414#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 4503#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
4415#define WRPLL_DIVIDER_POST(x) ((x)<<8) 4504#define WRPLL_DIVIDER_POST(x) ((x)<<8)
4416#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) 4505#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
4417 4506
4418/* Port clock selection */ 4507/* Port clock selection */
4419#define PORT_CLK_SEL_A 0x46100 4508#define PORT_CLK_SEL_A 0x46100
4420#define PORT_CLK_SEL_B 0x46104 4509#define PORT_CLK_SEL_B 0x46104
4421#define PORT_CLK_SEL(port) _PORT(port, \ 4510#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
4422 PORT_CLK_SEL_A, \
4423 PORT_CLK_SEL_B)
4424#define PORT_CLK_SEL_LCPLL_2700 (0<<29) 4511#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
4425#define PORT_CLK_SEL_LCPLL_1350 (1<<29) 4512#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
4426#define PORT_CLK_SEL_LCPLL_810 (2<<29) 4513#define PORT_CLK_SEL_LCPLL_810 (2<<29)
4427#define PORT_CLK_SEL_SPLL (3<<29) 4514#define PORT_CLK_SEL_SPLL (3<<29)
4428#define PORT_CLK_SEL_WRPLL1 (4<<29) 4515#define PORT_CLK_SEL_WRPLL1 (4<<29)
4429#define PORT_CLK_SEL_WRPLL2 (5<<29) 4516#define PORT_CLK_SEL_WRPLL2 (5<<29)
4430 4517
4431/* Pipe clock selection */ 4518/* Pipe clock selection */
4432#define PIPE_CLK_SEL_A 0x46140 4519#define PIPE_CLK_SEL_A 0x46140
4433#define PIPE_CLK_SEL_B 0x46144 4520#define PIPE_CLK_SEL_B 0x46144
4434#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ 4521#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
4435 PIPE_CLK_SEL_A, \
4436 PIPE_CLK_SEL_B)
4437/* For each pipe, we need to select the corresponding port clock */ 4522/* For each pipe, we need to select the corresponding port clock */
4438#define PIPE_CLK_SEL_DISABLED (0x0<<29) 4523#define PIPE_CLK_SEL_DISABLED (0x0<<29)
4439#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) 4524#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
4440 4525
4441/* LCPLL Control */ 4526/* LCPLL Control */
4442#define LCPLL_CTL 0x130040 4527#define LCPLL_CTL 0x130040
4443#define LCPLL_PLL_DISABLE (1<<31) 4528#define LCPLL_PLL_DISABLE (1<<31)
4444#define LCPLL_PLL_LOCK (1<<30) 4529#define LCPLL_PLL_LOCK (1<<30)
4445#define LCPLL_CD_CLOCK_DISABLE (1<<25) 4530#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4446#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 4531#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4447 4532
4448/* Pipe WM_LINETIME - watermark line time */ 4533/* Pipe WM_LINETIME - watermark line time */
4449#define PIPE_WM_LINETIME_A 0x45270 4534#define PIPE_WM_LINETIME_A 0x45270
4450#define PIPE_WM_LINETIME_B 0x45274 4535#define PIPE_WM_LINETIME_B 0x45274
4451#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ 4536#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
4452 PIPE_WM_LINETIME_A, \ 4537 PIPE_WM_LINETIME_B)
4453 PIPE_WM_LINETIME_B) 4538#define PIPE_WM_LINETIME_MASK (0x1ff)
4454#define PIPE_WM_LINETIME_MASK (0x1ff) 4539#define PIPE_WM_LINETIME_TIME(x) ((x))
4455#define PIPE_WM_LINETIME_TIME(x) ((x))
4456#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) 4540#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
4457#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) 4541#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
4458 4542
4459/* SFUSE_STRAP */ 4543/* SFUSE_STRAP */
4460#define SFUSE_STRAP 0xc2014 4544#define SFUSE_STRAP 0xc2014
4461#define SFUSE_STRAP_DDIB_DETECTED (1<<2) 4545#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
4462#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 4546#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4463#define SFUSE_STRAP_DDID_DETECTED (1<<0) 4547#define SFUSE_STRAP_DDID_DETECTED (1<<0)
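[Editor's note] The single-line PORT_CLK_SEL()/PIPE_CLK_SEL() forms introduced above lean on the driver's two-way _PORT()/_PIPE() helpers, which compute a register address from an index. The standalone sketch below illustrates that pattern; the _PORT/_PIPE bodies here are paraphrased assumptions for illustration, not copied from i915_reg.h.

#include <stdio.h>

/* Two-way select helpers, modelled on the i915 _PIPE()/_PORT() idiom:
 * the B register sits at a fixed offset from A, so the address for any
 * index can be derived from the A/B pair. */
#define _PIPE(pipe, a, b)  ((a) + (pipe) * ((b) - (a)))
#define _PORT(port, a, b)  ((a) + (port) * ((b) - (a)))

#define PORT_CLK_SEL_A  0x46100
#define PORT_CLK_SEL_B  0x46104
#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)

int main(void)
{
	int port;

	/* port 0 -> 0x46100, port 1 -> 0x46104, port 2 -> 0x46108, ... */
	for (port = 0; port < 3; port++)
		printf("PORT_CLK_SEL(%d) = 0x%05x\n",
		       port, (unsigned int)PORT_CLK_SEL(port));
	return 0;
}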
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a2788..903eebd2117a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
46} 46}
47 47
48static ssize_t 48static ssize_t
49show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf) 49show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
50{ 50{
51 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
52 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); 52 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
53} 53}
54 54
55static ssize_t 55static ssize_t
56show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf) 56show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
57{ 57{
58 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
60 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); 60 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
61} 61}
62 62
63static ssize_t 63static ssize_t
64show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf) 64show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{ 65{
66 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
68 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); 68 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
69} 69}
70 70
71static ssize_t 71static ssize_t
72show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf) 72show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{ 73{
74 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
76 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); 76 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
77} 77}
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
93 .name = power_group_name, 93 .name = power_group_name,
94 .attrs = rc6_attrs 94 .attrs = rc6_attrs
95}; 95};
96#endif
96 97
97static int l3_access_valid(struct drm_device *dev, loff_t offset) 98static int l3_access_valid(struct drm_device *dev, loff_t offset)
98{ 99{
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
202 .mmap = NULL 203 .mmap = NULL
203}; 204};
204 205
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
207 struct device_attribute *attr, char *buf)
208{
209 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
210 struct drm_device *dev = minor->dev;
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret;
213
214 ret = i915_mutex_lock_interruptible(dev);
215 if (ret)
216 return ret;
217
218 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
219 mutex_unlock(&dev->struct_mutex);
220
221 return snprintf(buf, PAGE_SIZE, "%d", ret);
222}
223
224static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
225{
226 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
227 struct drm_device *dev = minor->dev;
228 struct drm_i915_private *dev_priv = dev->dev_private;
229 int ret;
230
231 ret = i915_mutex_lock_interruptible(dev);
232 if (ret)
233 return ret;
234
235 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
236 mutex_unlock(&dev->struct_mutex);
237
238 return snprintf(buf, PAGE_SIZE, "%d", ret);
239}
240
241static ssize_t gt_max_freq_mhz_store(struct device *kdev,
242 struct device_attribute *attr,
243 const char *buf, size_t count)
244{
245 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
246 struct drm_device *dev = minor->dev;
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 u32 val, rp_state_cap, hw_max, hw_min;
249 ssize_t ret;
250
251 ret = kstrtou32(buf, 0, &val);
252 if (ret)
253 return ret;
254
255 val /= GT_FREQUENCY_MULTIPLIER;
256
257 ret = mutex_lock_interruptible(&dev->struct_mutex);
258 if (ret)
259 return ret;
260
261 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
262 hw_max = (rp_state_cap & 0xff);
263 hw_min = ((rp_state_cap & 0xff0000) >> 16);
264
265 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
266 mutex_unlock(&dev->struct_mutex);
267 return -EINVAL;
268 }
269
270 if (dev_priv->rps.cur_delay > val)
271 gen6_set_rps(dev_priv->dev, val);
272
273 dev_priv->rps.max_delay = val;
274
275 mutex_unlock(&dev->struct_mutex);
276
277 return count;
278}
279
280static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
281{
282 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
283 struct drm_device *dev = minor->dev;
284 struct drm_i915_private *dev_priv = dev->dev_private;
285 int ret;
286
287 ret = i915_mutex_lock_interruptible(dev);
288 if (ret)
289 return ret;
290
291 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
292 mutex_unlock(&dev->struct_mutex);
293
294 return snprintf(buf, PAGE_SIZE, "%d", ret);
295}
296
297static ssize_t gt_min_freq_mhz_store(struct device *kdev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
302 struct drm_device *dev = minor->dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 u32 val, rp_state_cap, hw_max, hw_min;
305 ssize_t ret;
306
307 ret = kstrtou32(buf, 0, &val);
308 if (ret)
309 return ret;
310
311 val /= GT_FREQUENCY_MULTIPLIER;
312
313 ret = mutex_lock_interruptible(&dev->struct_mutex);
314 if (ret)
315 return ret;
316
317 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
318 hw_max = (rp_state_cap & 0xff);
319 hw_min = ((rp_state_cap & 0xff0000) >> 16);
320
321 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
322 mutex_unlock(&dev->struct_mutex);
323 return -EINVAL;
324 }
325
326 if (dev_priv->rps.cur_delay < val)
327 gen6_set_rps(dev_priv->dev, val);
328
329 dev_priv->rps.min_delay = val;
330
331 mutex_unlock(&dev->struct_mutex);
332
333 return count;
334
335}
336
337static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
338static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
339static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
340
341
342static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
343static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
344static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
345static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
346
347/* For now we have a static number of RP states */
348static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
349{
350 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
351 struct drm_device *dev = minor->dev;
352 struct drm_i915_private *dev_priv = dev->dev_private;
353 u32 val, rp_state_cap;
354 ssize_t ret;
355
356 ret = mutex_lock_interruptible(&dev->struct_mutex);
357 if (ret)
358 return ret;
359 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
360 mutex_unlock(&dev->struct_mutex);
361
362 if (attr == &dev_attr_gt_RP0_freq_mhz) {
363 val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
364 } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
365 val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
366 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
367 val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
368 } else {
369 BUG();
370 }
371 return snprintf(buf, PAGE_SIZE, "%d", val);
372}
373
374static const struct attribute *gen6_attrs[] = {
375 &dev_attr_gt_cur_freq_mhz.attr,
376 &dev_attr_gt_max_freq_mhz.attr,
377 &dev_attr_gt_min_freq_mhz.attr,
378 &dev_attr_gt_RP0_freq_mhz.attr,
379 &dev_attr_gt_RP1_freq_mhz.attr,
380 &dev_attr_gt_RPn_freq_mhz.attr,
381 NULL,
382};
383
205void i915_setup_sysfs(struct drm_device *dev) 384void i915_setup_sysfs(struct drm_device *dev)
206{ 385{
207 int ret; 386 int ret;
208 387
388#ifdef CONFIG_PM
209 if (INTEL_INFO(dev)->gen >= 6) { 389 if (INTEL_INFO(dev)->gen >= 6) {
210 ret = sysfs_merge_group(&dev->primary->kdev.kobj, 390 ret = sysfs_merge_group(&dev->primary->kdev.kobj,
211 &rc6_attr_group); 391 &rc6_attr_group);
212 if (ret) 392 if (ret)
213 DRM_ERROR("RC6 residency sysfs setup failed\n"); 393 DRM_ERROR("RC6 residency sysfs setup failed\n");
214 } 394 }
215 395#endif
216 if (IS_IVYBRIDGE(dev)) { 396 if (HAS_L3_GPU_CACHE(dev)) {
217 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 397 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
218 if (ret) 398 if (ret)
219 DRM_ERROR("l3 parity sysfs setup failed\n"); 399 DRM_ERROR("l3 parity sysfs setup failed\n");
220 } 400 }
401
402 if (INTEL_INFO(dev)->gen >= 6) {
403 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
404 if (ret)
405 DRM_ERROR("gen6 sysfs setup failed\n");
406 }
221} 407}
222 408
223void i915_teardown_sysfs(struct drm_device *dev) 409void i915_teardown_sysfs(struct drm_device *dev)
224{ 410{
411 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
225 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 412 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
413#ifdef CONFIG_PM
226 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 414 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
415#endif
227} 416}
228#else
229void i915_setup_sysfs(struct drm_device *dev)
230{
231 return;
232}
233
234void i915_teardown_sysfs(struct drm_device *dev)
235{
236 return;
237}
238#endif /* CONFIG_PM */
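[Editor's note] The new gt_max_freq_mhz/gt_min_freq_mhz stores parse a value in MHz, convert it to a ratio by dividing by GT_FREQUENCY_MULTIPLIER (50 MHz per step), reject it if it falls outside the hardware caps read from GEN6_RP_STATE_CAP or crosses the opposite software limit, and only call gen6_set_rps() when the current frequency has to move. The sketch below restates that clamp logic in a self-contained form; struct and function names outside the hunk above are illustrative.

#include <stdint.h>
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* one RPS step is 50 MHz */

struct rps_state {
	uint32_t cur_delay;		/* current ratio */
	uint32_t min_delay, max_delay;	/* software limits */
	uint32_t hw_min, hw_max;	/* caps from GEN6_RP_STATE_CAP */
};

/* Mirrors the checks in gt_max_freq_mhz_store(): reject ratios outside the
 * hardware range or below the software minimum, then pull the current
 * frequency down if it now exceeds the new cap. */
static int set_max_freq_mhz(struct rps_state *rps, uint32_t mhz)
{
	uint32_t val = mhz / GT_FREQUENCY_MULTIPLIER;

	if (val < rps->hw_min || val > rps->hw_max || val < rps->min_delay)
		return -1;			/* -EINVAL in the driver */

	if (rps->cur_delay > val)
		rps->cur_delay = val;		/* gen6_set_rps() in the driver */

	rps->max_delay = val;
	return 0;
}

int main(void)
{
	struct rps_state rps = {
		.cur_delay = 22, .min_delay = 7, .max_delay = 22,
		.hw_min = 7, .hw_max = 22,	/* 350 MHz .. 1100 MHz */
	};

	if (set_max_freq_mhz(&rps, 900) == 0)
		printf("capped, cur = %u MHz\n",
		       rps.cur_delay * GT_FREQUENCY_MULTIPLIER);
	return 0;
}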
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
214); 214);
215 215
216TRACE_EVENT(i915_gem_evict_everything, 216TRACE_EVENT(i915_gem_evict_everything,
217 TP_PROTO(struct drm_device *dev, bool purgeable), 217 TP_PROTO(struct drm_device *dev),
218 TP_ARGS(dev, purgeable), 218 TP_ARGS(dev),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field(u32, dev) 221 __field(u32, dev)
222 __field(bool, purgeable)
223 ), 222 ),
224 223
225 TP_fast_assign( 224 TP_fast_assign(
226 __entry->dev = dev->primary->index; 225 __entry->dev = dev->primary->index;
227 __entry->purgeable = purgeable;
228 ), 226 ),
229 227
230 TP_printk("dev=%d%s", 228 TP_printk("dev=%d", __entry->dev)
231 __entry->dev,
232 __entry->purgeable ? ", purgeable only" : "")
233); 229);
234 230
235TRACE_EVENT(i915_gem_ring_dispatch, 231TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
434 (u32)(__entry->val >> 32)) 430 (u32)(__entry->val >> 32))
435); 431);
436 432
433TRACE_EVENT(intel_gpu_freq_change,
434 TP_PROTO(u32 freq),
435 TP_ARGS(freq),
436
437 TP_STRUCT__entry(
438 __field(u32, freq)
439 ),
440
441 TP_fast_assign(
442 __entry->freq = freq;
443 ),
444
445 TP_printk("new_freq=%u", __entry->freq)
446);
447
437#endif /* _I915_TRACE_H_ */ 448#endif /* _I915_TRACE_H_ */
438 449
439/* This part must be outside protection */ 450/* This part must be outside protection */
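[Editor's note] The new intel_gpu_freq_change tracepoint only defines the event; the emit site is not visible in this section. Presumably it is fired from the RPS code in intel_pm.c once a new ratio has been committed. The fragment below is a paraphrased sketch of such a call site under that assumption, not the patch's actual implementation.

/* Presumed emit site (intel_pm.c, not shown here): gen6_set_rps() reports
 * the new frequency in MHz, one event per change. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* ... GEN6_RPNSWREQ programming and bookkeeping elided ... */
	dev_priv->rps.cur_delay = val;

	trace_intel_gpu_freq_change(val * 50);
}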
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c8f1c0db446d..893f30164b7e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,6 +46,7 @@
46struct intel_crt { 46struct intel_crt {
47 struct intel_encoder base; 47 struct intel_encoder base;
48 bool force_hotplug_required; 48 bool force_hotplug_required;
49 u32 adpa_reg;
49}; 50};
50 51
51static struct intel_crt *intel_attached_crt(struct drm_connector *connector) 52static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -54,42 +55,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
54 struct intel_crt, base); 55 struct intel_crt, base);
55} 56}
56 57
57static void pch_crt_dpms(struct drm_encoder *encoder, int mode) 58static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
58{ 59{
59 struct drm_device *dev = encoder->dev; 60 return container_of(encoder, struct intel_crt, base);
61}
62
63static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
64 enum pipe *pipe)
65{
66 struct drm_device *dev = encoder->base.dev;
60 struct drm_i915_private *dev_priv = dev->dev_private; 67 struct drm_i915_private *dev_priv = dev->dev_private;
68 struct intel_crt *crt = intel_encoder_to_crt(encoder);
69 u32 tmp;
70
71 tmp = I915_READ(crt->adpa_reg);
72
73 if (!(tmp & ADPA_DAC_ENABLE))
74 return false;
75
76 if (HAS_PCH_CPT(dev))
77 *pipe = PORT_TO_PIPE_CPT(tmp);
78 else
79 *pipe = PORT_TO_PIPE(tmp);
80
81 return true;
82}
83
84static void intel_disable_crt(struct intel_encoder *encoder)
85{
86 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
87 struct intel_crt *crt = intel_encoder_to_crt(encoder);
61 u32 temp; 88 u32 temp;
62 89
63 temp = I915_READ(PCH_ADPA); 90 temp = I915_READ(crt->adpa_reg);
91 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
64 temp &= ~ADPA_DAC_ENABLE; 92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp);
94}
65 95
66 switch (mode) { 96static void intel_enable_crt(struct intel_encoder *encoder)
67 case DRM_MODE_DPMS_ON: 97{
68 temp |= ADPA_DAC_ENABLE; 98 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
69 break; 99 struct intel_crt *crt = intel_encoder_to_crt(encoder);
70 case DRM_MODE_DPMS_STANDBY: 100 u32 temp;
71 case DRM_MODE_DPMS_SUSPEND:
72 case DRM_MODE_DPMS_OFF:
73 /* Just leave port enable cleared */
74 break;
75 }
76 101
77 I915_WRITE(PCH_ADPA, temp); 102 temp = I915_READ(crt->adpa_reg);
103 temp |= ADPA_DAC_ENABLE;
104 I915_WRITE(crt->adpa_reg, temp);
78} 105}
79 106
80static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) 107/* Note: The caller is required to filter out dpms modes not supported by the
108 * platform. */
109static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
81{ 110{
82 struct drm_device *dev = encoder->dev; 111 struct drm_device *dev = encoder->base.dev;
83 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_crt *crt = intel_encoder_to_crt(encoder);
84 u32 temp; 114 u32 temp;
85 115
86 temp = I915_READ(ADPA); 116 temp = I915_READ(crt->adpa_reg);
87 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 117 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
88 temp &= ~ADPA_DAC_ENABLE; 118 temp &= ~ADPA_DAC_ENABLE;
89 119
90 if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
91 mode = DRM_MODE_DPMS_OFF;
92
93 switch (mode) { 120 switch (mode) {
94 case DRM_MODE_DPMS_ON: 121 case DRM_MODE_DPMS_ON:
95 temp |= ADPA_DAC_ENABLE; 122 temp |= ADPA_DAC_ENABLE;
@@ -105,7 +132,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
105 break; 132 break;
106 } 133 }
107 134
108 I915_WRITE(ADPA, temp); 135 I915_WRITE(crt->adpa_reg, temp);
136}
137
138static void intel_crt_dpms(struct drm_connector *connector, int mode)
139{
140 struct drm_device *dev = connector->dev;
141 struct intel_encoder *encoder = intel_attached_encoder(connector);
142 struct drm_crtc *crtc;
143 int old_dpms;
144
145 /* PCH platforms and VLV only support on/off. */
146 if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
147 mode = DRM_MODE_DPMS_OFF;
148
149 if (mode == connector->dpms)
150 return;
151
152 old_dpms = connector->dpms;
153 connector->dpms = mode;
154
155 /* Only need to change hw state when actually enabled */
156 crtc = encoder->base.crtc;
157 if (!crtc) {
158 encoder->connectors_active = false;
159 return;
160 }
161
162 /* We need the pipe to run for anything but OFF. */
163 if (mode == DRM_MODE_DPMS_OFF)
164 encoder->connectors_active = false;
165 else
166 encoder->connectors_active = true;
167
168 if (mode < old_dpms) {
169 /* From off to on, enable the pipe first. */
170 intel_crtc_update_dpms(crtc);
171
172 intel_crt_set_dpms(encoder, mode);
173 } else {
174 intel_crt_set_dpms(encoder, mode);
175
176 intel_crtc_update_dpms(crtc);
177 }
178
179 intel_modeset_check_state(connector->dev);
109} 180}
110 181
111static int intel_crt_mode_valid(struct drm_connector *connector, 182static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -144,19 +215,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
144 215
145 struct drm_device *dev = encoder->dev; 216 struct drm_device *dev = encoder->dev;
146 struct drm_crtc *crtc = encoder->crtc; 217 struct drm_crtc *crtc = encoder->crtc;
218 struct intel_crt *crt =
219 intel_encoder_to_crt(to_intel_encoder(encoder));
147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
148 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
149 int dpll_md_reg; 222 int dpll_md_reg;
150 u32 adpa, dpll_md; 223 u32 adpa, dpll_md;
151 u32 adpa_reg;
152 224
153 dpll_md_reg = DPLL_MD(intel_crtc->pipe); 225 dpll_md_reg = DPLL_MD(intel_crtc->pipe);
154 226
155 if (HAS_PCH_SPLIT(dev))
156 adpa_reg = PCH_ADPA;
157 else
158 adpa_reg = ADPA;
159
160 /* 227 /*
161 * Disable separate mode multiplier used when cloning SDVO to CRT 228 * Disable separate mode multiplier used when cloning SDVO to CRT
162 * XXX this needs to be adjusted when we really are cloning 229 * XXX this needs to be adjusted when we really are cloning
@@ -184,7 +251,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
184 if (!HAS_PCH_SPLIT(dev)) 251 if (!HAS_PCH_SPLIT(dev))
185 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); 252 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
186 253
187 I915_WRITE(adpa_reg, adpa); 254 I915_WRITE(crt->adpa_reg, adpa);
188} 255}
189 256
190static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) 257static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -544,14 +611,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
544 return connector->status; 611 return connector->status;
545 612
546 /* for pre-945g platforms use load detect */ 613 /* for pre-945g platforms use load detect */
547 if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 614 if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
548 &tmp)) {
549 if (intel_crt_detect_ddc(connector)) 615 if (intel_crt_detect_ddc(connector))
550 status = connector_status_connected; 616 status = connector_status_connected;
551 else 617 else
552 status = intel_crt_load_detect(crt); 618 status = intel_crt_load_detect(crt);
553 intel_release_load_detect_pipe(&crt->base, connector, 619 intel_release_load_detect_pipe(connector, &tmp);
554 &tmp);
555 } else 620 } else
556 status = connector_status_unknown; 621 status = connector_status_unknown;
557 622
@@ -602,25 +667,15 @@ static void intel_crt_reset(struct drm_connector *connector)
602 * Routines for controlling stuff on the analog port 667 * Routines for controlling stuff on the analog port
603 */ 668 */
604 669
605static const struct drm_encoder_helper_funcs pch_encoder_funcs = { 670static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
606 .mode_fixup = intel_crt_mode_fixup, 671 .mode_fixup = intel_crt_mode_fixup,
607 .prepare = intel_encoder_prepare,
608 .commit = intel_encoder_commit,
609 .mode_set = intel_crt_mode_set, 672 .mode_set = intel_crt_mode_set,
610 .dpms = pch_crt_dpms, 673 .disable = intel_encoder_noop,
611};
612
613static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
614 .mode_fixup = intel_crt_mode_fixup,
615 .prepare = intel_encoder_prepare,
616 .commit = intel_encoder_commit,
617 .mode_set = intel_crt_mode_set,
618 .dpms = gmch_crt_dpms,
619}; 674};
620 675
621static const struct drm_connector_funcs intel_crt_connector_funcs = { 676static const struct drm_connector_funcs intel_crt_connector_funcs = {
622 .reset = intel_crt_reset, 677 .reset = intel_crt_reset,
623 .dpms = drm_helper_connector_dpms, 678 .dpms = intel_crt_dpms,
624 .detect = intel_crt_detect, 679 .detect = intel_crt_detect,
625 .fill_modes = drm_helper_probe_single_connector_modes, 680 .fill_modes = drm_helper_probe_single_connector_modes,
626 .destroy = intel_crt_destroy, 681 .destroy = intel_crt_destroy,
@@ -661,7 +716,6 @@ void intel_crt_init(struct drm_device *dev)
661 struct intel_crt *crt; 716 struct intel_crt *crt;
662 struct intel_connector *intel_connector; 717 struct intel_connector *intel_connector;
663 struct drm_i915_private *dev_priv = dev->dev_private; 718 struct drm_i915_private *dev_priv = dev->dev_private;
664 const struct drm_encoder_helper_funcs *encoder_helper_funcs;
665 719
666 /* Skip machines without VGA that falsely report hotplug events */ 720 /* Skip machines without VGA that falsely report hotplug events */
667 if (dmi_check_system(intel_no_crt)) 721 if (dmi_check_system(intel_no_crt))
@@ -687,13 +741,11 @@ void intel_crt_init(struct drm_device *dev)
687 intel_connector_attach_encoder(intel_connector, &crt->base); 741 intel_connector_attach_encoder(intel_connector, &crt->base);
688 742
689 crt->base.type = INTEL_OUTPUT_ANALOG; 743 crt->base.type = INTEL_OUTPUT_ANALOG;
690 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | 744 crt->base.cloneable = true;
691 1 << INTEL_ANALOG_CLONE_BIT |
692 1 << INTEL_SDVO_LVDS_CLONE_BIT);
693 if (IS_HASWELL(dev)) 745 if (IS_HASWELL(dev))
694 crt->base.crtc_mask = (1 << 0); 746 crt->base.crtc_mask = (1 << 0);
695 else 747 else
696 crt->base.crtc_mask = (1 << 0) | (1 << 1); 748 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
697 749
698 if (IS_GEN2(dev)) 750 if (IS_GEN2(dev))
699 connector->interlace_allowed = 0; 751 connector->interlace_allowed = 0;
@@ -702,11 +754,18 @@ void intel_crt_init(struct drm_device *dev)
702 connector->doublescan_allowed = 0; 754 connector->doublescan_allowed = 0;
703 755
704 if (HAS_PCH_SPLIT(dev)) 756 if (HAS_PCH_SPLIT(dev))
705 encoder_helper_funcs = &pch_encoder_funcs; 757 crt->adpa_reg = PCH_ADPA;
758 else if (IS_VALLEYVIEW(dev))
759 crt->adpa_reg = VLV_ADPA;
706 else 760 else
707 encoder_helper_funcs = &gmch_encoder_funcs; 761 crt->adpa_reg = ADPA;
762
763 crt->base.disable = intel_disable_crt;
764 crt->base.enable = intel_enable_crt;
765 crt->base.get_hw_state = intel_crt_get_hw_state;
766 intel_connector->get_hw_state = intel_connector_get_hw_state;
708 767
709 drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); 768 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
710 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 769 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
711 770
712 drm_sysfs_connector_add(connector); 771 drm_sysfs_connector_add(connector);
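[Editor's note] The connector-level intel_crt_dpms() above encodes an ordering rule: when moving towards ON (mode < old_dpms) the pipe is brought up via intel_crtc_update_dpms() before the DAC is enabled, and when moving towards OFF the DAC is disabled before the pipe state is recomputed. The toy model below illustrates only that ordering; the names and booleans are illustrative, not the driver's real state tracking.

#include <stdbool.h>
#include <stdio.h>

enum dpms { DPMS_ON = 0, DPMS_STANDBY = 1, DPMS_SUSPEND = 2, DPMS_OFF = 3 };

/* Pipe must run before the DAC powers up; DAC must power down before the
 * pipe may stop.  "mode < old" means moving towards ON, as in the patch. */
static void set_dpms(enum dpms *cur, enum dpms mode,
		     bool *pipe_running, bool *dac_enabled)
{
	enum dpms old = *cur;

	if (mode == old)
		return;
	*cur = mode;

	if (mode < old) {			/* towards ON */
		*pipe_running = true;		/* intel_crtc_update_dpms() */
		*dac_enabled = (mode == DPMS_ON); /* intel_crt_set_dpms() */
	} else {				/* towards OFF */
		*dac_enabled = (mode == DPMS_ON);
		*pipe_running = (mode != DPMS_OFF);
	}
}

int main(void)
{
	enum dpms cur = DPMS_OFF;
	bool pipe = false, dac = false;

	set_dpms(&cur, DPMS_ON, &pipe, &dac);
	printf("on:  pipe=%d dac=%d\n", pipe, dac);
	set_dpms(&cur, DPMS_OFF, &pipe, &dac);
	printf("off: pipe=%d dac=%d\n", pipe, dac);
	return 0;
}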
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c74859172..bfe375466a0e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
250 case PORT_B: 250 case PORT_B:
251 case PORT_C: 251 case PORT_C:
252 case PORT_D: 252 case PORT_D:
253 intel_hdmi_init(dev, DDI_BUF_CTL(port)); 253 intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
254 break; 254 break;
255 default: 255 default:
256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", 256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
267 u16 r2; /* Reference divider */ 267 u16 r2; /* Reference divider */
268}; 268};
269 269
270/* Table of matching values for WRPLL clocks programming for each frequency */ 270/* Table of matching values for WRPLL clocks programming for each frequency.
271 * The code assumes this table is sorted. */
271static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { 272static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
272 {19750, 38, 25, 18}, 273 {19750, 38, 25, 18},
273 {20000, 48, 32, 18}, 274 {20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
277 {23000, 36, 23, 15}, 278 {23000, 36, 23, 15},
278 {23500, 40, 40, 23}, 279 {23500, 40, 40, 23},
279 {23750, 26, 16, 14}, 280 {23750, 26, 16, 14},
280 {23750, 26, 16, 14},
281 {24000, 36, 24, 15}, 281 {24000, 36, 24, 15},
282 {25000, 36, 25, 15}, 282 {25000, 36, 25, 15},
283 {25175, 26, 40, 33}, 283 {25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
437 {108000, 8, 24, 15}, 437 {108000, 8, 24, 15},
438 {108108, 8, 173, 108}, 438 {108108, 8, 173, 108},
439 {109000, 6, 23, 19}, 439 {109000, 6, 23, 19},
440 {109000, 6, 23, 19},
441 {110000, 6, 22, 18}, 440 {110000, 6, 22, 18},
442 {110013, 6, 22, 18}, 441 {110013, 6, 22, 18},
443 {110250, 8, 49, 30}, 442 {110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
614 {218250, 4, 42, 26}, 613 {218250, 4, 42, 26},
615 {218750, 4, 34, 21}, 614 {218750, 4, 34, 21},
616 {219000, 4, 47, 29}, 615 {219000, 4, 47, 29},
617 {219000, 4, 47, 29},
618 {220000, 4, 44, 27}, 616 {220000, 4, 44, 27},
619 {220640, 4, 49, 30}, 617 {220640, 4, 49, 30},
620 {220750, 4, 36, 22}, 618 {220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
658 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 656 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
659 int port = intel_hdmi->ddi_port; 657 int port = intel_hdmi->ddi_port;
660 int pipe = intel_crtc->pipe; 658 int pipe = intel_crtc->pipe;
661 int p, n2, r2, valid=0; 659 int p, n2, r2;
662 u32 temp, i; 660 u32 temp, i;
663 661
664 /* On Haswell, we need to enable the clocks and prepare DDI function to 662 /* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
666 */ 664 */
667 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); 665 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
668 666
669 for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) { 667 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
670 if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { 668 if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
671 p = wrpll_tmds_clock_table[i].p; 669 break;
672 n2 = wrpll_tmds_clock_table[i].n2;
673 r2 = wrpll_tmds_clock_table[i].r2;
674 670
675 DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", 671 if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
676 crtc->mode.clock, 672 i--;
677 p, n2, r2);
678 673
679 valid = 1; 674 p = wrpll_tmds_clock_table[i].p;
680 break; 675 n2 = wrpll_tmds_clock_table[i].n2;
681 } 676 r2 = wrpll_tmds_clock_table[i].r2;
682 }
683 677
684 if (!valid) { 678 if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
685 DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", 679 DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
686 crtc->mode.clock); 680 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
687 return; 681
688 } 682 DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
683 crtc->mode.clock, p, n2, r2);
689 684
690 /* Enable LCPLL if disabled */ 685 /* Enable LCPLL if disabled */
691 temp = I915_READ(LCPLL_CTL); 686 temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
718 /* Proper support for digital audio needs a new logic and a new set 713 /* Proper support for digital audio needs a new logic and a new set
719 * of registers, so we leave it for future patch bombing. 714 * of registers, so we leave it for future patch bombing.
720 */ 715 */
721 DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", 716 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
722 pipe_name(intel_crtc->pipe)); 717 pipe_name(intel_crtc->pipe));
718
719 /* write eld */
720 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
721 intel_write_eld(encoder, adjusted_mode);
723 } 722 }
724 723
725 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ 724 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
726 temp = I915_READ(DDI_FUNC_CTL(pipe)); 725 temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
727 temp &= ~PIPE_DDI_PORT_MASK; 726
728 temp &= ~PIPE_DDI_BPC_12; 727 switch (intel_crtc->bpp) {
729 temp |= PIPE_DDI_SELECT_PORT(port) | 728 case 18:
730 PIPE_DDI_MODE_SELECT_HDMI | 729 temp |= PIPE_DDI_BPC_6;
731 ((intel_crtc->bpp > 24) ? 730 break;
732 PIPE_DDI_BPC_12 : 731 case 24:
733 PIPE_DDI_BPC_8) | 732 temp |= PIPE_DDI_BPC_8;
734 PIPE_DDI_FUNC_ENABLE; 733 break;
734 case 30:
735 temp |= PIPE_DDI_BPC_10;
736 break;
737 case 36:
738 temp |= PIPE_DDI_BPC_12;
739 break;
740 default:
741 WARN(1, "%d bpp unsupported by pipe DDI function\n",
742 intel_crtc->bpp);
743 }
744
745 if (intel_hdmi->has_hdmi_sink)
746 temp |= PIPE_DDI_MODE_SELECT_HDMI;
747 else
748 temp |= PIPE_DDI_MODE_SELECT_DVI;
749
750 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
751 temp |= PIPE_DDI_PVSYNC;
752 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
753 temp |= PIPE_DDI_PHSYNC;
735 754
736 I915_WRITE(DDI_FUNC_CTL(pipe), temp); 755 I915_WRITE(DDI_FUNC_CTL(pipe), temp);
737 756
738 intel_hdmi->set_infoframes(encoder, adjusted_mode); 757 intel_hdmi->set_infoframes(encoder, adjusted_mode);
739} 758}
740 759
741void intel_ddi_dpms(struct drm_encoder *encoder, int mode) 760bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
761 enum pipe *pipe)
742{ 762{
743 struct drm_device *dev = encoder->dev; 763 struct drm_device *dev = encoder->base.dev;
744 struct drm_i915_private *dev_priv = dev->dev_private; 764 struct drm_i915_private *dev_priv = dev->dev_private;
745 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 765 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
766 u32 tmp;
767 int i;
768
769 tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
770
771 if (!(tmp & DDI_BUF_CTL_ENABLE))
772 return false;
773
774 for_each_pipe(i) {
775 tmp = I915_READ(DDI_FUNC_CTL(i));
776
777 if ((tmp & PIPE_DDI_PORT_MASK)
778 == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
779 *pipe = i;
780 return true;
781 }
782 }
783
784 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
785
786 return true;
787}
788
789void intel_enable_ddi(struct intel_encoder *encoder)
790{
791 struct drm_device *dev = encoder->base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
746 int port = intel_hdmi->ddi_port; 794 int port = intel_hdmi->ddi_port;
747 u32 temp; 795 u32 temp;
748 796
749 temp = I915_READ(DDI_BUF_CTL(port)); 797 temp = I915_READ(DDI_BUF_CTL(port));
750 798 temp |= DDI_BUF_CTL_ENABLE;
751 if (mode != DRM_MODE_DPMS_ON) {
752 temp &= ~DDI_BUF_CTL_ENABLE;
753 } else {
754 temp |= DDI_BUF_CTL_ENABLE;
755 }
756 799
757 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, 800 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
758 * and swing/emphasis values are ignored so nothing special needs 801 * and swing/emphasis values are ignored so nothing special needs
759 * to be done besides enabling the port. 802 * to be done besides enabling the port.
760 */ 803 */
761 I915_WRITE(DDI_BUF_CTL(port), 804 I915_WRITE(DDI_BUF_CTL(port), temp);
762 temp); 805}
806
807void intel_disable_ddi(struct intel_encoder *encoder)
808{
809 struct drm_device *dev = encoder->base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
812 int port = intel_hdmi->ddi_port;
813 u32 temp;
814
815 temp = I915_READ(DDI_BUF_CTL(port));
816 temp &= ~DDI_BUF_CTL_ENABLE;
817
818 I915_WRITE(DDI_BUF_CTL(port), temp);
763} 819}
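[Editor's note] The intel_ddi_mode_set() change above replaces the exact-match WRPLL table search with "first entry whose clock is >= the requested clock, else the last entry", which is why the table must now be sorted and why the duplicate rows were dropped. The self-contained sketch below restates that lookup against a cut-down excerpt of the table; only the four rows shown are copied from the hunk, the rest is illustrative.

#include <stddef.h>
#include <stdio.h>

struct wrpll_tmds_clock {
	unsigned int clock;	/* kHz */
	unsigned short p;	/* post divider */
	unsigned short n2;	/* feedback divider */
	unsigned short r2;	/* reference divider */
};

static const struct wrpll_tmds_clock table[] = {
	{ 19750, 38, 25, 18 },
	{ 20000, 48, 32, 18 },
	{ 23750, 26, 16, 14 },
	{ 25175, 26, 40, 33 },
};

/* Same strategy as the patched loop: first entry with clock >= request,
 * falling back to the last entry when the request exceeds the table. */
static const struct wrpll_tmds_clock *pick_wrpll(unsigned int clock)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (clock <= table[i].clock)
			break;
	if (i == sizeof(table) / sizeof(table[0]))
		i--;
	return &table[i];
}

int main(void)
{
	const struct wrpll_tmds_clock *c = pick_wrpll(23000);

	printf("23000 kHz -> %u kHz (p=%u n2=%u r2=%u)\n",
	       c->clock, c->p, c->n2, c->r2);
	return 0;
}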
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ea9a3ceb269..e3c02655d36f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1006 /* Wait for the Pipe State to go off */ 1006 /* Wait for the Pipe State to go off */
1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1008 100)) 1008 100))
1009 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1009 WARN(1, "pipe_off wait timed out\n");
1010 } else { 1010 } else {
1011 u32 last_line, line_mask; 1011 u32 last_line, line_mask;
1012 int reg = PIPEDSL(pipe); 1012 int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1024 } while (((I915_READ(reg) & line_mask) != last_line) && 1024 } while (((I915_READ(reg) & line_mask) != last_line) &&
1025 time_after(timeout, jiffies)); 1025 time_after(timeout, jiffies));
1026 if (time_after(jiffies, timeout)) 1026 if (time_after(jiffies, timeout))
1027 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1027 WARN(1, "pipe_off wait timed out\n");
1028 } 1028 }
1029} 1029}
1030 1030
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1431 * protect mechanism may be enabled. 1431 * protect mechanism may be enabled.
1432 * 1432 *
1433 * Note! This is for pre-ILK only. 1433 * Note! This is for pre-ILK only.
1434 *
1435 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1434 */ 1436 */
1435static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1437static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1436{ 1438{
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
1860 intel_wait_for_vblank(dev_priv->dev, pipe); 1862 intel_wait_for_vblank(dev_priv->dev, pipe);
1861} 1863}
1862 1864
1863static void disable_pch_dp(struct drm_i915_private *dev_priv,
1864 enum pipe pipe, int reg, u32 port_sel)
1865{
1866 u32 val = I915_READ(reg);
1867 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1868 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1869 I915_WRITE(reg, val & ~DP_PORT_EN);
1870 }
1871}
1872
1873static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1874 enum pipe pipe, int reg)
1875{
1876 u32 val = I915_READ(reg);
1877 if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1878 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1879 reg, pipe);
1880 I915_WRITE(reg, val & ~PORT_ENABLE);
1881 }
1882}
1883
1884/* Disable any ports connected to this transcoder */
1885static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1886 enum pipe pipe)
1887{
1888 u32 reg, val;
1889
1890 val = I915_READ(PCH_PP_CONTROL);
1891 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1892
1893 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1894 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1895 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1896
1897 reg = PCH_ADPA;
1898 val = I915_READ(reg);
1899 if (adpa_pipe_enabled(dev_priv, pipe, val))
1900 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1901
1902 reg = PCH_LVDS;
1903 val = I915_READ(reg);
1904 if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1905 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1906 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1907 POSTING_READ(reg);
1908 udelay(100);
1909 }
1910
1911 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1912 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1913 disable_pch_hdmi(dev_priv, pipe, HDMID);
1914}
1915
1916int 1865int
1917intel_pin_and_fence_fb_obj(struct drm_device *dev, 1866intel_pin_and_fence_fb_obj(struct drm_device *dev,
1918 struct drm_i915_gem_object *obj, 1867 struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
2201 2150
2202static int 2151static int
2203intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2152intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2204 struct drm_framebuffer *old_fb) 2153 struct drm_framebuffer *fb)
2205{ 2154{
2206 struct drm_device *dev = crtc->dev; 2155 struct drm_device *dev = crtc->dev;
2207 struct drm_i915_private *dev_priv = dev->dev_private; 2156 struct drm_i915_private *dev_priv = dev->dev_private;
2208 struct drm_i915_master_private *master_priv; 2157 struct drm_i915_master_private *master_priv;
2209 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2159 struct drm_framebuffer *old_fb;
2210 int ret; 2160 int ret;
2211 2161
2212 /* no fb bound */ 2162 /* no fb bound */
2213 if (!crtc->fb) { 2163 if (!fb) {
2214 DRM_ERROR("No FB bound\n"); 2164 DRM_ERROR("No FB bound\n");
2215 return 0; 2165 return 0;
2216 } 2166 }
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2224 2174
2225 mutex_lock(&dev->struct_mutex); 2175 mutex_lock(&dev->struct_mutex);
2226 ret = intel_pin_and_fence_fb_obj(dev, 2176 ret = intel_pin_and_fence_fb_obj(dev,
2227 to_intel_framebuffer(crtc->fb)->obj, 2177 to_intel_framebuffer(fb)->obj,
2228 NULL); 2178 NULL);
2229 if (ret != 0) { 2179 if (ret != 0) {
2230 mutex_unlock(&dev->struct_mutex); 2180 mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2232 return ret; 2182 return ret;
2233 } 2183 }
2234 2184
2235 if (old_fb) 2185 if (crtc->fb)
2236 intel_finish_fb(old_fb); 2186 intel_finish_fb(crtc->fb);
2237 2187
2238 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); 2188 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2239 if (ret) { 2189 if (ret) {
2240 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 2190 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2241 mutex_unlock(&dev->struct_mutex); 2191 mutex_unlock(&dev->struct_mutex);
2242 DRM_ERROR("failed to update base address\n"); 2192 DRM_ERROR("failed to update base address\n");
2243 return ret; 2193 return ret;
2244 } 2194 }
2245 2195
2196 old_fb = crtc->fb;
2197 crtc->fb = fb;
2198 crtc->x = x;
2199 crtc->y = y;
2200
2246 if (old_fb) { 2201 if (old_fb) {
2247 intel_wait_for_vblank(dev, intel_crtc->pipe); 2202 intel_wait_for_vblank(dev, intel_crtc->pipe);
2248 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2203 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2709 DRM_DEBUG_KMS("FDI train done.\n"); 2664 DRM_DEBUG_KMS("FDI train done.\n");
2710} 2665}
2711 2666
2712static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) 2667static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2713{ 2668{
2714 struct drm_device *dev = crtc->dev; 2669 struct drm_device *dev = intel_crtc->base.dev;
2715 struct drm_i915_private *dev_priv = dev->dev_private; 2670 struct drm_i915_private *dev_priv = dev->dev_private;
2716 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2717 int pipe = intel_crtc->pipe; 2671 int pipe = intel_crtc->pipe;
2718 u32 reg, temp; 2672 u32 reg, temp;
2719 2673
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2754 } 2708 }
2755} 2709}
2756 2710
2711static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2712{
2713 struct drm_device *dev = intel_crtc->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 int pipe = intel_crtc->pipe;
2716 u32 reg, temp;
2717
2718 /* Switch from PCDclk to Rawclk */
2719 reg = FDI_RX_CTL(pipe);
2720 temp = I915_READ(reg);
2721 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2722
2723 /* Disable CPU FDI TX PLL */
2724 reg = FDI_TX_CTL(pipe);
2725 temp = I915_READ(reg);
2726 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2727
2728 POSTING_READ(reg);
2729 udelay(100);
2730
2731 reg = FDI_RX_CTL(pipe);
2732 temp = I915_READ(reg);
2733 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2734
2735 /* Wait for the clocks to turn off. */
2736 POSTING_READ(reg);
2737 udelay(100);
2738}
2739
2757static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) 2740static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2758{ 2741{
2759 struct drm_i915_private *dev_priv = dev->dev_private; 2742 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2838static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2821static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2839{ 2822{
2840 struct drm_device *dev = crtc->dev; 2823 struct drm_device *dev = crtc->dev;
2841 struct intel_encoder *encoder; 2824 struct intel_encoder *intel_encoder;
2842 2825
2843 /* 2826 /*
2844 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2827 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2845 * must be driven by its own crtc; no sharing is possible. 2828 * must be driven by its own crtc; no sharing is possible.
2846 */ 2829 */
2847 for_each_encoder_on_crtc(dev, crtc, encoder) { 2830 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2848 2831
2849 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell 2832 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2850 * CPU handles all others */ 2833 * CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2852 /* It is still unclear how this will work on PPT, so throw up a warning */ 2835 /* It is still unclear how this will work on PPT, so throw up a warning */
2853 WARN_ON(!HAS_PCH_LPT(dev)); 2836 WARN_ON(!HAS_PCH_LPT(dev));
2854 2837
2855 if (encoder->type == DRM_MODE_ENCODER_DAC) { 2838 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
2856 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); 2839 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2857 return true; 2840 return true;
2858 } else { 2841 } else {
2859 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", 2842 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2860 encoder->type); 2843 intel_encoder->type);
2861 return false; 2844 return false;
2862 } 2845 }
2863 } 2846 }
2864 2847
2865 switch (encoder->type) { 2848 switch (intel_encoder->type) {
2866 case INTEL_OUTPUT_EDP: 2849 case INTEL_OUTPUT_EDP:
2867 if (!intel_encoder_is_pch_edp(&encoder->base)) 2850 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2868 return false; 2851 return false;
2869 continue; 2852 continue;
2870 } 2853 }
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3181 struct drm_device *dev = crtc->dev; 3164 struct drm_device *dev = crtc->dev;
3182 struct drm_i915_private *dev_priv = dev->dev_private; 3165 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3166 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3167 struct intel_encoder *encoder;
3184 int pipe = intel_crtc->pipe; 3168 int pipe = intel_crtc->pipe;
3185 int plane = intel_crtc->plane; 3169 int plane = intel_crtc->plane;
3186 u32 temp; 3170 u32 temp;
3187 bool is_pch_port; 3171 bool is_pch_port;
3188 3172
3173 WARN_ON(!crtc->enabled);
3174
3189 if (intel_crtc->active) 3175 if (intel_crtc->active)
3190 return; 3176 return;
3191 3177
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3200 3186
3201 is_pch_port = intel_crtc_driving_pch(crtc); 3187 is_pch_port = intel_crtc_driving_pch(crtc);
3202 3188
3203 if (is_pch_port) 3189 if (is_pch_port) {
3204 ironlake_fdi_pll_enable(crtc); 3190 ironlake_fdi_pll_enable(intel_crtc);
3205 else 3191 } else {
3206 ironlake_fdi_disable(crtc); 3192 assert_fdi_tx_disabled(dev_priv, pipe);
3193 assert_fdi_rx_disabled(dev_priv, pipe);
3194 }
3195
3196 for_each_encoder_on_crtc(dev, crtc, encoder)
3197 if (encoder->pre_enable)
3198 encoder->pre_enable(encoder);
3207 3199
3208 /* Enable panel fitting for LVDS */ 3200 /* Enable panel fitting for LVDS */
3209 if (dev_priv->pch_pf_size && 3201 if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3234 mutex_unlock(&dev->struct_mutex); 3226 mutex_unlock(&dev->struct_mutex);
3235 3227
3236 intel_crtc_update_cursor(crtc, true); 3228 intel_crtc_update_cursor(crtc, true);
3229
3230 for_each_encoder_on_crtc(dev, crtc, encoder)
3231 encoder->enable(encoder);
3232
3233 if (HAS_PCH_CPT(dev))
3234 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3237} 3235}
3238 3236
3239static void ironlake_crtc_disable(struct drm_crtc *crtc) 3237static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3241 struct drm_device *dev = crtc->dev; 3239 struct drm_device *dev = crtc->dev;
3242 struct drm_i915_private *dev_priv = dev->dev_private; 3240 struct drm_i915_private *dev_priv = dev->dev_private;
3243 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3242 struct intel_encoder *encoder;
3244 int pipe = intel_crtc->pipe; 3243 int pipe = intel_crtc->pipe;
3245 int plane = intel_crtc->plane; 3244 int plane = intel_crtc->plane;
3246 u32 reg, temp; 3245 u32 reg, temp;
3247 3246
3247
3248 if (!intel_crtc->active) 3248 if (!intel_crtc->active)
3249 return; 3249 return;
3250 3250
3251 for_each_encoder_on_crtc(dev, crtc, encoder)
3252 encoder->disable(encoder);
3253
3251 intel_crtc_wait_for_pending_flips(crtc); 3254 intel_crtc_wait_for_pending_flips(crtc);
3252 drm_vblank_off(dev, pipe); 3255 drm_vblank_off(dev, pipe);
3253 intel_crtc_update_cursor(crtc, false); 3256 intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3263 I915_WRITE(PF_CTL(pipe), 0); 3266 I915_WRITE(PF_CTL(pipe), 0);
3264 I915_WRITE(PF_WIN_SZ(pipe), 0); 3267 I915_WRITE(PF_WIN_SZ(pipe), 0);
3265 3268
3266 ironlake_fdi_disable(crtc); 3269 for_each_encoder_on_crtc(dev, crtc, encoder)
3270 if (encoder->post_disable)
3271 encoder->post_disable(encoder);
3267 3272
3268 /* This is a horrible layering violation; we should be doing this in 3273 ironlake_fdi_disable(crtc);
3269 * the connector/encoder ->prepare instead, but we don't always have
3270 * enough information there about the config to know whether it will
3271 * actually be necessary or just cause undesired flicker.
3272 */
3273 intel_disable_pch_ports(dev_priv, pipe);
3274 3274
3275 intel_disable_transcoder(dev_priv, pipe); 3275 intel_disable_transcoder(dev_priv, pipe);
3276 3276
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3304 /* disable PCH DPLL */ 3304 /* disable PCH DPLL */
3305 intel_disable_pch_pll(intel_crtc); 3305 intel_disable_pch_pll(intel_crtc);
3306 3306
3307 /* Switch from PCDclk to Rawclk */ 3307 ironlake_fdi_pll_disable(intel_crtc);
3308 reg = FDI_RX_CTL(pipe);
3309 temp = I915_READ(reg);
3310 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3311
3312 /* Disable CPU FDI TX PLL */
3313 reg = FDI_TX_CTL(pipe);
3314 temp = I915_READ(reg);
3315 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3316
3317 POSTING_READ(reg);
3318 udelay(100);
3319
3320 reg = FDI_RX_CTL(pipe);
3321 temp = I915_READ(reg);
3322 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3323
3324 /* Wait for the clocks to turn off. */
3325 POSTING_READ(reg);
3326 udelay(100);
3327 3308
3328 intel_crtc->active = false; 3309 intel_crtc->active = false;
3329 intel_update_watermarks(dev); 3310 intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3333 mutex_unlock(&dev->struct_mutex); 3314 mutex_unlock(&dev->struct_mutex);
3334} 3315}
3335 3316
3336static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3337{
3338 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3339 int pipe = intel_crtc->pipe;
3340 int plane = intel_crtc->plane;
3341
3342 /* XXX: When our outputs are all unaware of DPMS modes other than off
3343 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3344 */
3345 switch (mode) {
3346 case DRM_MODE_DPMS_ON:
3347 case DRM_MODE_DPMS_STANDBY:
3348 case DRM_MODE_DPMS_SUSPEND:
3349 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3350 ironlake_crtc_enable(crtc);
3351 break;
3352
3353 case DRM_MODE_DPMS_OFF:
3354 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3355 ironlake_crtc_disable(crtc);
3356 break;
3357 }
3358}
3359
3360static void ironlake_crtc_off(struct drm_crtc *crtc) 3317static void ironlake_crtc_off(struct drm_crtc *crtc)
3361{ 3318{
3362 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3386 struct drm_device *dev = crtc->dev; 3343 struct drm_device *dev = crtc->dev;
3387 struct drm_i915_private *dev_priv = dev->dev_private; 3344 struct drm_i915_private *dev_priv = dev->dev_private;
3388 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3345 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3346 struct intel_encoder *encoder;
3389 int pipe = intel_crtc->pipe; 3347 int pipe = intel_crtc->pipe;
3390 int plane = intel_crtc->plane; 3348 int plane = intel_crtc->plane;
3391 3349
3350 WARN_ON(!crtc->enabled);
3351
3392 if (intel_crtc->active) 3352 if (intel_crtc->active)
3393 return; 3353 return;
3394 3354
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3405 /* Give the overlay scaler a chance to enable if it's on this pipe */ 3365 /* Give the overlay scaler a chance to enable if it's on this pipe */
3406 intel_crtc_dpms_overlay(intel_crtc, true); 3366 intel_crtc_dpms_overlay(intel_crtc, true);
3407 intel_crtc_update_cursor(crtc, true); 3367 intel_crtc_update_cursor(crtc, true);
3368
3369 for_each_encoder_on_crtc(dev, crtc, encoder)
3370 encoder->enable(encoder);
3408} 3371}
3409 3372
3410static void i9xx_crtc_disable(struct drm_crtc *crtc) 3373static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3412 struct drm_device *dev = crtc->dev; 3375 struct drm_device *dev = crtc->dev;
3413 struct drm_i915_private *dev_priv = dev->dev_private; 3376 struct drm_i915_private *dev_priv = dev->dev_private;
3414 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3378 struct intel_encoder *encoder;
3415 int pipe = intel_crtc->pipe; 3379 int pipe = intel_crtc->pipe;
3416 int plane = intel_crtc->plane; 3380 int plane = intel_crtc->plane;
3417 3381
3382
3418 if (!intel_crtc->active) 3383 if (!intel_crtc->active)
3419 return; 3384 return;
3420 3385
3386 for_each_encoder_on_crtc(dev, crtc, encoder)
3387 encoder->disable(encoder);
3388
3421 /* Give the overlay scaler a chance to disable if it's on this pipe */ 3389 /* Give the overlay scaler a chance to disable if it's on this pipe */
3422 intel_crtc_wait_for_pending_flips(crtc); 3390 intel_crtc_wait_for_pending_flips(crtc);
3423 drm_vblank_off(dev, pipe); 3391 drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3436 intel_update_watermarks(dev); 3404 intel_update_watermarks(dev);
3437} 3405}
3438 3406
3439static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3440{
3441 /* XXX: When our outputs are all unaware of DPMS modes other than off
3442 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3443 */
3444 switch (mode) {
3445 case DRM_MODE_DPMS_ON:
3446 case DRM_MODE_DPMS_STANDBY:
3447 case DRM_MODE_DPMS_SUSPEND:
3448 i9xx_crtc_enable(crtc);
3449 break;
3450 case DRM_MODE_DPMS_OFF:
3451 i9xx_crtc_disable(crtc);
3452 break;
3453 }
3454}
3455
3456static void i9xx_crtc_off(struct drm_crtc *crtc) 3407static void i9xx_crtc_off(struct drm_crtc *crtc)
3457{ 3408{
3458} 3409}
3459 3410
3460/** 3411static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3461 * Sets the power management mode of the pipe and plane. 3412 bool enabled)
3462 */
3463static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3464{ 3413{
3465 struct drm_device *dev = crtc->dev; 3414 struct drm_device *dev = crtc->dev;
3466 struct drm_i915_private *dev_priv = dev->dev_private;
3467 struct drm_i915_master_private *master_priv; 3415 struct drm_i915_master_private *master_priv;
3468 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3469 int pipe = intel_crtc->pipe; 3417 int pipe = intel_crtc->pipe;
3470 bool enabled;
3471
3472 if (intel_crtc->dpms_mode == mode)
3473 return;
3474
3475 intel_crtc->dpms_mode = mode;
3476
3477 dev_priv->display.dpms(crtc, mode);
3478 3418
3479 if (!dev->primary->master) 3419 if (!dev->primary->master)
3480 return; 3420 return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3483 if (!master_priv->sarea_priv) 3423 if (!master_priv->sarea_priv)
3484 return; 3424 return;
3485 3425
3486 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3487
3488 switch (pipe) { 3426 switch (pipe) {
3489 case 0: 3427 case 0:
3490 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 3428 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3500 } 3438 }
3501} 3439}
3502 3440
3441/**
3442 * Sets the power management mode of the pipe and plane.
3443 */
3444void intel_crtc_update_dpms(struct drm_crtc *crtc)
3445{
3446 struct drm_device *dev = crtc->dev;
3447 struct drm_i915_private *dev_priv = dev->dev_private;
3448 struct intel_encoder *intel_encoder;
3449 bool enable = false;
3450
3451 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3452 enable |= intel_encoder->connectors_active;
3453
3454 if (enable)
3455 dev_priv->display.crtc_enable(crtc);
3456 else
3457 dev_priv->display.crtc_disable(crtc);
3458
3459 intel_crtc_update_sarea(crtc, enable);
3460}
3461
3462static void intel_crtc_noop(struct drm_crtc *crtc)
3463{
3464}
3465
3503static void intel_crtc_disable(struct drm_crtc *crtc) 3466static void intel_crtc_disable(struct drm_crtc *crtc)
3504{ 3467{
3505 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3506 struct drm_device *dev = crtc->dev; 3468 struct drm_device *dev = crtc->dev;
3469 struct drm_connector *connector;
3507 struct drm_i915_private *dev_priv = dev->dev_private; 3470 struct drm_i915_private *dev_priv = dev->dev_private;
3508 3471
3509 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 3472 /* crtc should still be enabled when we disable it. */
3473 WARN_ON(!crtc->enabled);
3474
3475 dev_priv->display.crtc_disable(crtc);
3476 intel_crtc_update_sarea(crtc, false);
3510 dev_priv->display.off(crtc); 3477 dev_priv->display.off(crtc);
3511 3478
3512 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 3479 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
3516 mutex_lock(&dev->struct_mutex); 3483 mutex_lock(&dev->struct_mutex);
3517 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 3484 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3518 mutex_unlock(&dev->struct_mutex); 3485 mutex_unlock(&dev->struct_mutex);
3486 crtc->fb = NULL;
3487 }
3488
3489 /* Update computed state. */
3490 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3491 if (!connector->encoder || !connector->encoder->crtc)
3492 continue;
3493
3494 if (connector->encoder->crtc != crtc)
3495 continue;
3496
3497 connector->dpms = DRM_MODE_DPMS_OFF;
3498 to_intel_encoder(connector->encoder)->connectors_active = false;
3519 } 3499 }
3520} 3500}
3521 3501
3522/* Prepare for a mode set. 3502void intel_modeset_disable(struct drm_device *dev)
3523 *
3524 * Note we could be a lot smarter here. We need to figure out which outputs
3525 * will be enabled, which disabled (in short, how the config will changes)
3526 * and perform the minimum necessary steps to accomplish that, e.g. updating
3527 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3528 * panel fitting is in the proper state, etc.
3529 */
3530static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3531{ 3503{
3532 i9xx_crtc_disable(crtc); 3504 struct drm_crtc *crtc;
3505
3506 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3507 if (crtc->enabled)
3508 intel_crtc_disable(crtc);
3509 }
3533} 3510}
3534 3511
3535static void i9xx_crtc_commit(struct drm_crtc *crtc) 3512void intel_encoder_noop(struct drm_encoder *encoder)
3536{ 3513{
3537 i9xx_crtc_enable(crtc);
3538} 3514}
3539 3515
3540static void ironlake_crtc_prepare(struct drm_crtc *crtc) 3516void intel_encoder_destroy(struct drm_encoder *encoder)
3541{ 3517{
3542 ironlake_crtc_disable(crtc); 3518 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3519
3520 drm_encoder_cleanup(encoder);
3521 kfree(intel_encoder);
3543} 3522}
3544 3523
 3545static void ironlake_crtc_commit(struct drm_crtc *crtc) 3524/* Simple dpms helper for encoders with just one connector, no cloning and only
3525 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3526 * state of the entire output pipe. */
3527void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3546{ 3528{
3547 ironlake_crtc_enable(crtc); 3529 if (mode == DRM_MODE_DPMS_ON) {
3530 encoder->connectors_active = true;
3531
3532 intel_crtc_update_dpms(encoder->base.crtc);
3533 } else {
3534 encoder->connectors_active = false;
3535
3536 intel_crtc_update_dpms(encoder->base.crtc);
3537 }
3548} 3538}
3549 3539
 3550void intel_encoder_prepare(struct drm_encoder *encoder) 3540/* Cross check the actual hw state with our own modeset state tracking (and its
3541 * internal consistency). */
3542static void intel_connector_check_state(struct intel_connector *connector)
3551{ 3543{
3552 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3544 if (connector->get_hw_state(connector)) {
3553 /* lvds has its own version of prepare see intel_lvds_prepare */ 3545 struct intel_encoder *encoder = connector->encoder;
3554 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); 3546 struct drm_crtc *crtc;
3547 bool encoder_enabled;
3548 enum pipe pipe;
3549
3550 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3551 connector->base.base.id,
3552 drm_get_connector_name(&connector->base));
3553
3554 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3555 "wrong connector dpms state\n");
3556 WARN(connector->base.encoder != &encoder->base,
3557 "active connector not linked to encoder\n");
3558 WARN(!encoder->connectors_active,
3559 "encoder->connectors_active not set\n");
3560
3561 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3562 WARN(!encoder_enabled, "encoder not enabled\n");
3563 if (WARN_ON(!encoder->base.crtc))
3564 return;
3565
3566 crtc = encoder->base.crtc;
3567
3568 WARN(!crtc->enabled, "crtc not enabled\n");
3569 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3570 WARN(pipe != to_intel_crtc(crtc)->pipe,
3571 "encoder active on the wrong pipe\n");
3572 }
3555} 3573}
3556 3574
3557void intel_encoder_commit(struct drm_encoder *encoder) 3575/* Even simpler default implementation, if there's really no special case to
3576 * consider. */
3577void intel_connector_dpms(struct drm_connector *connector, int mode)
3558{ 3578{
3559 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3579 struct intel_encoder *encoder = intel_attached_encoder(connector);
3560 struct drm_device *dev = encoder->dev;
3561 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3562 3580
3563 /* lvds has its own version of commit see intel_lvds_commit */ 3581 /* All the simple cases only support two dpms states. */
3564 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 3582 if (mode != DRM_MODE_DPMS_ON)
3583 mode = DRM_MODE_DPMS_OFF;
3565 3584
3566 if (HAS_PCH_CPT(dev)) 3585 if (mode == connector->dpms)
3567 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 3586 return;
3587
3588 connector->dpms = mode;
3589
3590 /* Only need to change hw state when actually enabled */
3591 if (encoder->base.crtc)
3592 intel_encoder_dpms(encoder, mode);
3593 else
3594 WARN_ON(encoder->connectors_active != false);
3595
3596 intel_modeset_check_state(connector->dev);
3568} 3597}
3569 3598
3570void intel_encoder_destroy(struct drm_encoder *encoder) 3599/* Simple connector->get_hw_state implementation for encoders that support only
3600 * one connector and no cloning and hence the encoder state determines the state
3601 * of the connector. */
3602bool intel_connector_get_hw_state(struct intel_connector *connector)
3571{ 3603{
3572 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3604 enum pipe pipe = 0;
3605 struct intel_encoder *encoder = connector->encoder;
3573 3606
3574 drm_encoder_cleanup(encoder); 3607 return encoder->get_hw_state(encoder, &pipe);
3575 kfree(intel_encoder);
3576} 3608}
3577 3609
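For reference, a minimal sketch of how a single-connector, non-cloneable output would plug into the helpers above. The foo_* names and the FOO_* register bits are hypothetical placeholders used only for illustration; only intel_connector_dpms, intel_connector_get_hw_state, the get_hw_state hooks and drm_helper_probe_single_connector_modes come from the actual code:

static bool foo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	u32 tmp = I915_READ(FOO_CTL);	/* FOO_CTL: placeholder port control register */

	if (!(tmp & FOO_ENABLE))
		return false;

	/* report which pipe the port is currently attached to */
	*pipe = (tmp & FOO_PIPE_B_SELECT) ? PIPE_B : PIPE_A;
	return true;
}

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms = intel_connector_dpms,		/* generic two-state dpms helper from above */
	.detect = foo_detect,			/* hypothetical */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = foo_connector_destroy,	/* hypothetical */
};

static void foo_init_state_tracking(struct intel_encoder *intel_encoder,
				    struct intel_connector *intel_connector)
{
	/* hw readout callback for the encoder ... */
	intel_encoder->get_hw_state = foo_get_hw_state;
	/* ... and the trivial pass-through for its only connector */
	intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->encoder = intel_encoder;
}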
3578static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, 3610static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3593 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) 3625 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3594 drm_mode_set_crtcinfo(adjusted_mode, 0); 3626 drm_mode_set_crtcinfo(adjusted_mode, 0);
3595 3627
3628 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3629 * with a hsync front porch of 0.
3630 */
3631 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
3632 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
3633 return false;
3634
3596 return true; 3635 return true;
3597} 3636}
3598 3637
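For example, a hypothetical timing with hdisplay == hsync_start == 1024 (i.e. a zero-width hsync front porch) is now rejected by mode_fixup on Cantiga (G4X) and newer platforms instead of being programmed into the hardware.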
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3728 * true if they don't match). 3767 * true if they don't match).
3729 */ 3768 */
3730static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 3769static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3770 struct drm_framebuffer *fb,
3731 unsigned int *pipe_bpp, 3771 unsigned int *pipe_bpp,
3732 struct drm_display_mode *mode) 3772 struct drm_display_mode *mode)
3733{ 3773{
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3797 * also stays within the max display bpc discovered above. 3837 * also stays within the max display bpc discovered above.
3798 */ 3838 */
3799 3839
3800 switch (crtc->fb->depth) { 3840 switch (fb->depth) {
3801 case 8: 3841 case 8:
3802 bpc = 8; /* since we go through a colormap */ 3842 bpc = 8; /* since we go through a colormap */
3803 break; 3843 break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4216 struct drm_display_mode *mode, 4256 struct drm_display_mode *mode,
4217 struct drm_display_mode *adjusted_mode, 4257 struct drm_display_mode *adjusted_mode,
4218 int x, int y, 4258 int x, int y,
4219 struct drm_framebuffer *old_fb) 4259 struct drm_framebuffer *fb)
4220{ 4260{
4221 struct drm_device *dev = crtc->dev; 4261 struct drm_device *dev = crtc->dev;
4222 struct drm_i915_private *dev_priv = dev->dev_private; 4262 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4406 I915_WRITE(DSPCNTR(plane), dspcntr); 4446 I915_WRITE(DSPCNTR(plane), dspcntr);
4407 POSTING_READ(DSPCNTR(plane)); 4447 POSTING_READ(DSPCNTR(plane));
4408 4448
4409 ret = intel_pipe_set_base(crtc, x, y, old_fb); 4449 ret = intel_pipe_set_base(crtc, x, y, fb);
4410 4450
4411 intel_update_watermarks(dev); 4451 intel_update_watermarks(dev);
4412 4452
@@ -4560,24 +4600,130 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
4560 return 120000; 4600 return 120000;
4561} 4601}
4562 4602
4603static void ironlake_set_pipeconf(struct drm_crtc *crtc,
4604 struct drm_display_mode *adjusted_mode,
4605 bool dither)
4606{
4607 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4608 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4609 int pipe = intel_crtc->pipe;
4610 uint32_t val;
4611
4612 val = I915_READ(PIPECONF(pipe));
4613
4614 val &= ~PIPE_BPC_MASK;
4615 switch (intel_crtc->bpp) {
4616 case 18:
4617 val |= PIPE_6BPC;
4618 break;
4619 case 24:
4620 val |= PIPE_8BPC;
4621 break;
4622 case 30:
4623 val |= PIPE_10BPC;
4624 break;
4625 case 36:
4626 val |= PIPE_12BPC;
4627 break;
4628 default:
4629 val |= PIPE_8BPC;
4630 break;
4631 }
4632
4633 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4634 if (dither)
4635 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
4636
4637 val &= ~PIPECONF_INTERLACE_MASK;
4638 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4639 val |= PIPECONF_INTERLACED_ILK;
4640 else
4641 val |= PIPECONF_PROGRESSIVE;
4642
4643 I915_WRITE(PIPECONF(pipe), val);
4644 POSTING_READ(PIPECONF(pipe));
4645}
4646
4647static bool ironlake_compute_clocks(struct drm_crtc *crtc,
4648 struct drm_display_mode *adjusted_mode,
4649 intel_clock_t *clock,
4650 bool *has_reduced_clock,
4651 intel_clock_t *reduced_clock)
4652{
4653 struct drm_device *dev = crtc->dev;
4654 struct drm_i915_private *dev_priv = dev->dev_private;
4655 struct intel_encoder *intel_encoder;
4656 int refclk;
4657 const intel_limit_t *limit;
4658 bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
4659
4660 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4661 switch (intel_encoder->type) {
4662 case INTEL_OUTPUT_LVDS:
4663 is_lvds = true;
4664 break;
4665 case INTEL_OUTPUT_SDVO:
4666 case INTEL_OUTPUT_HDMI:
4667 is_sdvo = true;
4668 if (intel_encoder->needs_tv_clock)
4669 is_tv = true;
4670 break;
4671 case INTEL_OUTPUT_TVOUT:
4672 is_tv = true;
4673 break;
4674 }
4675 }
4676
4677 refclk = ironlake_get_refclk(crtc);
4678
4679 /*
4680 * Returns a set of divisors for the desired target clock with the given
4681 * refclk, or FALSE. The returned values represent the clock equation:
 4682 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4683 */
4684 limit = intel_limit(crtc, refclk);
4685 ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4686 clock);
4687 if (!ret)
4688 return false;
4689
4690 if (is_lvds && dev_priv->lvds_downclock_avail) {
4691 /*
4692 * Ensure we match the reduced clock's P to the target clock.
4693 * If the clocks don't match, we can't switch the display clock
4694 * by using the FP0/FP1. In such case we will disable the LVDS
4695 * downclock feature.
4696 */
4697 *has_reduced_clock = limit->find_pll(limit, crtc,
4698 dev_priv->lvds_downclock,
4699 refclk,
4700 clock,
4701 reduced_clock);
4702 }
4703
4704 if (is_sdvo && is_tv)
4705 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
4706
4707 return true;
4708}
4709
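As a quick arithmetic sanity check of the divisor equation quoted in the comment above (the divisor values here are invented for illustration and are not taken from the Ironlake limit tables): with the 120 MHz refclk visible in ironlake_get_refclk() above and m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 7, the resulting dot clock would be 120000 * (5 * (12 + 2) + (9 + 2)) / (3 + 2) / 2 / 7 = 120000 * 81 / 70 ≈ 138857 kHz, i.e. roughly a 138.9 MHz pixel clock.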
4563static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 4710static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4564 struct drm_display_mode *mode, 4711 struct drm_display_mode *mode,
4565 struct drm_display_mode *adjusted_mode, 4712 struct drm_display_mode *adjusted_mode,
4566 int x, int y, 4713 int x, int y,
4567 struct drm_framebuffer *old_fb) 4714 struct drm_framebuffer *fb)
4568{ 4715{
4569 struct drm_device *dev = crtc->dev; 4716 struct drm_device *dev = crtc->dev;
4570 struct drm_i915_private *dev_priv = dev->dev_private; 4717 struct drm_i915_private *dev_priv = dev->dev_private;
4571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4718 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4572 int pipe = intel_crtc->pipe; 4719 int pipe = intel_crtc->pipe;
4573 int plane = intel_crtc->plane; 4720 int plane = intel_crtc->plane;
4574 int refclk, num_connectors = 0; 4721 int num_connectors = 0;
4575 intel_clock_t clock, reduced_clock; 4722 intel_clock_t clock, reduced_clock;
4576 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4723 u32 dpll, fp = 0, fp2 = 0;
4577 bool ok, has_reduced_clock = false, is_sdvo = false; 4724 bool ok, has_reduced_clock = false, is_sdvo = false;
4578 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4725 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4579 struct intel_encoder *encoder, *edp_encoder = NULL; 4726 struct intel_encoder *encoder, *edp_encoder = NULL;
4580 const intel_limit_t *limit;
4581 int ret; 4727 int ret;
4582 struct fdi_m_n m_n = {0}; 4728 struct fdi_m_n m_n = {0};
4583 u32 temp; 4729 u32 temp;
@@ -4619,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4619 num_connectors++; 4765 num_connectors++;
4620 } 4766 }
4621 4767
4622 refclk = ironlake_get_refclk(crtc); 4768 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
4623 4769 &has_reduced_clock, &reduced_clock);
4624 /*
4625 * Returns a set of divisors for the desired target clock with the given
4626 * refclk, or FALSE. The returned values represent the clock equation:
4627 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4628 */
4629 limit = intel_limit(crtc, refclk);
4630 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4631 &clock);
4632 if (!ok) { 4770 if (!ok) {
4633 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4771 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4634 return -EINVAL; 4772 return -EINVAL;
@@ -4637,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4637 /* Ensure that the cursor is valid for the new mode before changing... */ 4775 /* Ensure that the cursor is valid for the new mode before changing... */
4638 intel_crtc_update_cursor(crtc, true); 4776 intel_crtc_update_cursor(crtc, true);
4639 4777
4640 if (is_lvds && dev_priv->lvds_downclock_avail) {
4641 /*
4642 * Ensure we match the reduced clock's P to the target clock.
4643 * If the clocks don't match, we can't switch the display clock
4644 * by using the FP0/FP1. In such case we will disable the LVDS
4645 * downclock feature.
4646 */
4647 has_reduced_clock = limit->find_pll(limit, crtc,
4648 dev_priv->lvds_downclock,
4649 refclk,
4650 &clock,
4651 &reduced_clock);
4652 }
4653
4654 if (is_sdvo && is_tv)
4655 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4656
4657
4658 /* FDI link */ 4778 /* FDI link */
4659 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4779 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4660 lane = 0; 4780 lane = 0;
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4682 target_clock = adjusted_mode->clock; 4802 target_clock = adjusted_mode->clock;
4683 4803
4684 /* determine panel color depth */ 4804 /* determine panel color depth */
4685 temp = I915_READ(PIPECONF(pipe)); 4805 dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
4686 temp &= ~PIPE_BPC_MASK; 4806 if (is_lvds && dev_priv->lvds_dither)
4687 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); 4807 dither = true;
4688 switch (pipe_bpp) { 4808
4689 case 18: 4809 if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
4690 temp |= PIPE_6BPC; 4810 pipe_bpp != 36) {
4691 break;
4692 case 24:
4693 temp |= PIPE_8BPC;
4694 break;
4695 case 30:
4696 temp |= PIPE_10BPC;
4697 break;
4698 case 36:
4699 temp |= PIPE_12BPC;
4700 break;
4701 default:
4702 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", 4811 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4703 pipe_bpp); 4812 pipe_bpp);
4704 temp |= PIPE_8BPC;
4705 pipe_bpp = 24; 4813 pipe_bpp = 24;
4706 break;
4707 } 4814 }
4708
4709 intel_crtc->bpp = pipe_bpp; 4815 intel_crtc->bpp = pipe_bpp;
4710 I915_WRITE(PIPECONF(pipe), temp);
4711 4816
4712 if (!lane) { 4817 if (!lane) {
4713 /* 4818 /*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4791 else 4896 else
4792 dpll |= PLL_REF_INPUT_DREFCLK; 4897 dpll |= PLL_REF_INPUT_DREFCLK;
4793 4898
4794 /* setup pipeconf */
4795 pipeconf = I915_READ(PIPECONF(pipe));
4796
4797 /* Set up the display plane register */
4798 dspcntr = DISPPLANE_GAMMA_ENABLE;
4799
4800 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 4899 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4801 drm_mode_debug_printmodeline(mode); 4900 drm_mode_debug_printmodeline(mode);
4802 4901
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4856 I915_WRITE(PCH_LVDS, temp); 4955 I915_WRITE(PCH_LVDS, temp);
4857 } 4956 }
4858 4957
4859 pipeconf &= ~PIPECONF_DITHER_EN;
4860 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4861 if ((is_lvds && dev_priv->lvds_dither) || dither) {
4862 pipeconf |= PIPECONF_DITHER_EN;
4863 pipeconf |= PIPECONF_DITHER_TYPE_SP;
4864 }
4865 if (is_dp && !is_cpu_edp) { 4958 if (is_dp && !is_cpu_edp) {
4866 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4959 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4867 } else { 4960 } else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4897 } 4990 }
4898 } 4991 }
4899 4992
4900 pipeconf &= ~PIPECONF_INTERLACE_MASK;
4901 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4993 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4902 pipeconf |= PIPECONF_INTERLACED_ILK;
4903 /* the chip adds 2 halflines automatically */ 4994 /* the chip adds 2 halflines automatically */
4904 adjusted_mode->crtc_vtotal -= 1; 4995 adjusted_mode->crtc_vtotal -= 1;
4905 adjusted_mode->crtc_vblank_end -= 1; 4996 adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4907 adjusted_mode->crtc_hsync_start 4998 adjusted_mode->crtc_hsync_start
4908 - adjusted_mode->crtc_htotal/2); 4999 - adjusted_mode->crtc_htotal/2);
4909 } else { 5000 } else {
4910 pipeconf |= PIPECONF_PROGRESSIVE;
4911 I915_WRITE(VSYNCSHIFT(pipe), 0); 5001 I915_WRITE(VSYNCSHIFT(pipe), 0);
4912 } 5002 }
4913 5003
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4945 if (is_cpu_edp) 5035 if (is_cpu_edp)
4946 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5036 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4947 5037
4948 I915_WRITE(PIPECONF(pipe), pipeconf); 5038 ironlake_set_pipeconf(crtc, adjusted_mode, dither);
4949 POSTING_READ(PIPECONF(pipe));
4950 5039
4951 intel_wait_for_vblank(dev, pipe); 5040 intel_wait_for_vblank(dev, pipe);
4952 5041
4953 I915_WRITE(DSPCNTR(plane), dspcntr); 5042 /* Set up the display plane register */
5043 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
4954 POSTING_READ(DSPCNTR(plane)); 5044 POSTING_READ(DSPCNTR(plane));
4955 5045
4956 ret = intel_pipe_set_base(crtc, x, y, old_fb); 5046 ret = intel_pipe_set_base(crtc, x, y, fb);
4957 5047
4958 intel_update_watermarks(dev); 5048 intel_update_watermarks(dev);
4959 5049
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4966 struct drm_display_mode *mode, 5056 struct drm_display_mode *mode,
4967 struct drm_display_mode *adjusted_mode, 5057 struct drm_display_mode *adjusted_mode,
4968 int x, int y, 5058 int x, int y,
4969 struct drm_framebuffer *old_fb) 5059 struct drm_framebuffer *fb)
4970{ 5060{
4971 struct drm_device *dev = crtc->dev; 5061 struct drm_device *dev = crtc->dev;
4972 struct drm_i915_private *dev_priv = dev->dev_private; 5062 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4977 drm_vblank_pre_modeset(dev, pipe); 5067 drm_vblank_pre_modeset(dev, pipe);
4978 5068
4979 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5069 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4980 x, y, old_fb); 5070 x, y, fb);
4981 drm_vblank_post_modeset(dev, pipe); 5071 drm_vblank_post_modeset(dev, pipe);
4982 5072
4983 if (ret)
4984 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4985 else
4986 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
4987
4988 return ret; 5073 return ret;
4989} 5074}
4990 5075
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
5057 I915_WRITE(G4X_AUD_CNTL_ST, i); 5142 I915_WRITE(G4X_AUD_CNTL_ST, i);
5058} 5143}
5059 5144
5145static void haswell_write_eld(struct drm_connector *connector,
5146 struct drm_crtc *crtc)
5147{
5148 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5149 uint8_t *eld = connector->eld;
5150 struct drm_device *dev = crtc->dev;
5151 uint32_t eldv;
5152 uint32_t i;
5153 int len;
5154 int pipe = to_intel_crtc(crtc)->pipe;
5155 int tmp;
5156
5157 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
5158 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
5159 int aud_config = HSW_AUD_CFG(pipe);
5160 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
5161
5162
5163 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
5164
5165 /* Audio output enable */
5166 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
5167 tmp = I915_READ(aud_cntrl_st2);
5168 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
5169 I915_WRITE(aud_cntrl_st2, tmp);
5170
5171 /* Wait for 1 vertical blank */
5172 intel_wait_for_vblank(dev, pipe);
5173
5174 /* Set ELD valid state */
5175 tmp = I915_READ(aud_cntrl_st2);
5176 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
5177 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
5178 I915_WRITE(aud_cntrl_st2, tmp);
5179 tmp = I915_READ(aud_cntrl_st2);
5180 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
5181
5182 /* Enable HDMI mode */
5183 tmp = I915_READ(aud_config);
5184 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
 5185 /* clear N_programming_enable and N_value_index */
5186 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
5187 I915_WRITE(aud_config, tmp);
5188
5189 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5190
5191 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
5192
5193 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5194 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5195 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5196 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5197 } else
5198 I915_WRITE(aud_config, 0);
5199
5200 if (intel_eld_uptodate(connector,
5201 aud_cntrl_st2, eldv,
5202 aud_cntl_st, IBX_ELD_ADDRESS,
5203 hdmiw_hdmiedid))
5204 return;
5205
5206 i = I915_READ(aud_cntrl_st2);
5207 i &= ~eldv;
5208 I915_WRITE(aud_cntrl_st2, i);
5209
5210 if (!eld[0])
5211 return;
5212
5213 i = I915_READ(aud_cntl_st);
5214 i &= ~IBX_ELD_ADDRESS;
5215 I915_WRITE(aud_cntl_st, i);
5216 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
5217 DRM_DEBUG_DRIVER("port num:%d\n", i);
5218
5219 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5220 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5221 for (i = 0; i < len; i++)
5222 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5223
5224 i = I915_READ(aud_cntrl_st2);
5225 i |= eldv;
5226 I915_WRITE(aud_cntrl_st2, i);
5227
5228}
5229
5060static void ironlake_write_eld(struct drm_connector *connector, 5230static void ironlake_write_eld(struct drm_connector *connector,
5061 struct drm_crtc *crtc) 5231 struct drm_crtc *crtc)
5062{ 5232{
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
5069 int aud_config; 5239 int aud_config;
5070 int aud_cntl_st; 5240 int aud_cntl_st;
5071 int aud_cntrl_st2; 5241 int aud_cntrl_st2;
5242 int pipe = to_intel_crtc(crtc)->pipe;
5072 5243
5073 if (HAS_PCH_IBX(connector->dev)) { 5244 if (HAS_PCH_IBX(connector->dev)) {
5074 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; 5245 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
5075 aud_config = IBX_AUD_CONFIG_A; 5246 aud_config = IBX_AUD_CFG(pipe);
5076 aud_cntl_st = IBX_AUD_CNTL_ST_A; 5247 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
5077 aud_cntrl_st2 = IBX_AUD_CNTL_ST2; 5248 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
5078 } else { 5249 } else {
5079 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; 5250 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
5080 aud_config = CPT_AUD_CONFIG_A; 5251 aud_config = CPT_AUD_CFG(pipe);
5081 aud_cntl_st = CPT_AUD_CNTL_ST_A; 5252 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
5082 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; 5253 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
5083 } 5254 }
5084 5255
5085 i = to_intel_crtc(crtc)->pipe; 5256 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5086 hdmiw_hdmiedid += i * 0x100;
5087 aud_cntl_st += i * 0x100;
5088 aud_config += i * 0x100;
5089
5090 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
5091 5257
5092 i = I915_READ(aud_cntl_st); 5258 i = I915_READ(aud_cntl_st);
5093 i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ 5259 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
5094 if (!i) { 5260 if (!i) {
5095 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 5261 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5096 /* operate blindly on all ports */ 5262 /* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5337 uint32_t addr; 5503 uint32_t addr;
5338 int ret; 5504 int ret;
5339 5505
5340 DRM_DEBUG_KMS("\n");
5341
5342 /* if we want to turn off the cursor ignore width and height */ 5506 /* if we want to turn off the cursor ignore width and height */
5343 if (!handle) { 5507 if (!handle) {
5344 DRM_DEBUG_KMS("cursor off\n"); 5508 DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
5584 return fb; 5748 return fb;
5585} 5749}
5586 5750
5587bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 5751bool intel_get_load_detect_pipe(struct drm_connector *connector,
5588 struct drm_connector *connector,
5589 struct drm_display_mode *mode, 5752 struct drm_display_mode *mode,
5590 struct intel_load_detect_pipe *old) 5753 struct intel_load_detect_pipe *old)
5591{ 5754{
5592 struct intel_crtc *intel_crtc; 5755 struct intel_crtc *intel_crtc;
5756 struct intel_encoder *intel_encoder =
5757 intel_attached_encoder(connector);
5593 struct drm_crtc *possible_crtc; 5758 struct drm_crtc *possible_crtc;
5594 struct drm_encoder *encoder = &intel_encoder->base; 5759 struct drm_encoder *encoder = &intel_encoder->base;
5595 struct drm_crtc *crtc = NULL; 5760 struct drm_crtc *crtc = NULL;
5596 struct drm_device *dev = encoder->dev; 5761 struct drm_device *dev = encoder->dev;
5597 struct drm_framebuffer *old_fb; 5762 struct drm_framebuffer *fb;
5598 int i = -1; 5763 int i = -1;
5599 5764
5600 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 5765 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5615 if (encoder->crtc) { 5780 if (encoder->crtc) {
5616 crtc = encoder->crtc; 5781 crtc = encoder->crtc;
5617 5782
5618 intel_crtc = to_intel_crtc(crtc); 5783 old->dpms_mode = connector->dpms;
5619 old->dpms_mode = intel_crtc->dpms_mode;
5620 old->load_detect_temp = false; 5784 old->load_detect_temp = false;
5621 5785
5622 /* Make sure the crtc and connector are running */ 5786 /* Make sure the crtc and connector are running */
5623 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5787 if (connector->dpms != DRM_MODE_DPMS_ON)
5624 struct drm_encoder_helper_funcs *encoder_funcs; 5788 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
5625 struct drm_crtc_helper_funcs *crtc_funcs;
5626
5627 crtc_funcs = crtc->helper_private;
5628 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5629
5630 encoder_funcs = encoder->helper_private;
5631 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5632 }
5633 5789
5634 return true; 5790 return true;
5635 } 5791 }
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5653 return false; 5809 return false;
5654 } 5810 }
5655 5811
5656 encoder->crtc = crtc; 5812 intel_encoder->new_crtc = to_intel_crtc(crtc);
5657 connector->encoder = encoder; 5813 to_intel_connector(connector)->new_encoder = intel_encoder;
5658 5814
5659 intel_crtc = to_intel_crtc(crtc); 5815 intel_crtc = to_intel_crtc(crtc);
5660 old->dpms_mode = intel_crtc->dpms_mode; 5816 old->dpms_mode = connector->dpms;
5661 old->load_detect_temp = true; 5817 old->load_detect_temp = true;
5662 old->release_fb = NULL; 5818 old->release_fb = NULL;
5663 5819
5664 if (!mode) 5820 if (!mode)
5665 mode = &load_detect_mode; 5821 mode = &load_detect_mode;
5666 5822
5667 old_fb = crtc->fb;
5668
5669 /* We need a framebuffer large enough to accommodate all accesses 5823 /* We need a framebuffer large enough to accommodate all accesses
5670 * that the plane may generate whilst we perform load detection. 5824 * that the plane may generate whilst we perform load detection.
5671 * We can not rely on the fbcon either being present (we get called 5825 * We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5673 * not even exist) or that it is large enough to satisfy the 5827 * not even exist) or that it is large enough to satisfy the
5674 * requested mode. 5828 * requested mode.
5675 */ 5829 */
5676 crtc->fb = mode_fits_in_fbdev(dev, mode); 5830 fb = mode_fits_in_fbdev(dev, mode);
5677 if (crtc->fb == NULL) { 5831 if (fb == NULL) {
5678 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 5832 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5679 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 5833 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
5680 old->release_fb = crtc->fb; 5834 old->release_fb = fb;
5681 } else 5835 } else
5682 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 5836 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5683 if (IS_ERR(crtc->fb)) { 5837 if (IS_ERR(fb)) {
5684 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 5838 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5685 crtc->fb = old_fb; 5839 goto fail;
5686 return false;
5687 } 5840 }
5688 5841
5689 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { 5842 if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
5690 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 5843 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5691 if (old->release_fb) 5844 if (old->release_fb)
5692 old->release_fb->funcs->destroy(old->release_fb); 5845 old->release_fb->funcs->destroy(old->release_fb);
5693 crtc->fb = old_fb; 5846 goto fail;
5694 return false;
5695 } 5847 }
5696 5848
5697 /* let the connector get through one full cycle before testing */ 5849 /* let the connector get through one full cycle before testing */
5698 intel_wait_for_vblank(dev, intel_crtc->pipe); 5850 intel_wait_for_vblank(dev, intel_crtc->pipe);
5699 5851
5700 return true; 5852 return true;
5853fail:
5854 connector->encoder = NULL;
5855 encoder->crtc = NULL;
5856 return false;
5701} 5857}
5702 5858
5703void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 5859void intel_release_load_detect_pipe(struct drm_connector *connector,
5704 struct drm_connector *connector,
5705 struct intel_load_detect_pipe *old) 5860 struct intel_load_detect_pipe *old)
5706{ 5861{
5862 struct intel_encoder *intel_encoder =
5863 intel_attached_encoder(connector);
5707 struct drm_encoder *encoder = &intel_encoder->base; 5864 struct drm_encoder *encoder = &intel_encoder->base;
5708 struct drm_device *dev = encoder->dev;
5709 struct drm_crtc *crtc = encoder->crtc;
5710 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5711 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5712 5865
5713 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 5866 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5714 connector->base.id, drm_get_connector_name(connector), 5867 connector->base.id, drm_get_connector_name(connector),
5715 encoder->base.id, drm_get_encoder_name(encoder)); 5868 encoder->base.id, drm_get_encoder_name(encoder));
5716 5869
5717 if (old->load_detect_temp) { 5870 if (old->load_detect_temp) {
5718 connector->encoder = NULL; 5871 struct drm_crtc *crtc = encoder->crtc;
5719 drm_helper_disable_unused_functions(dev); 5872
5873 to_intel_connector(connector)->new_encoder = NULL;
5874 intel_encoder->new_crtc = NULL;
5875 intel_set_mode(crtc, NULL, 0, 0, NULL);
5720 5876
5721 if (old->release_fb) 5877 if (old->release_fb)
5722 old->release_fb->funcs->destroy(old->release_fb); 5878 old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5725 } 5881 }
5726 5882
5727 /* Switch crtc and encoder back off if necessary */ 5883 /* Switch crtc and encoder back off if necessary */
5728 if (old->dpms_mode != DRM_MODE_DPMS_ON) { 5884 if (old->dpms_mode != DRM_MODE_DPMS_ON)
5729 encoder_funcs->dpms(encoder, old->dpms_mode); 5885 connector->funcs->dpms(connector, old->dpms_mode);
5730 crtc_funcs->dpms(crtc, old->dpms_mode);
5731 }
5732} 5886}
5733 5887
5734/* Returns the clock of the currently programmed mode of the given pipe. */ 5888/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5850 return mode; 6004 return mode;
5851} 6005}
5852 6006
5853#define GPU_IDLE_TIMEOUT 500 /* ms */
5854
5855/* When this timer fires, we've been idle for awhile */
5856static void intel_gpu_idle_timer(unsigned long arg)
5857{
5858 struct drm_device *dev = (struct drm_device *)arg;
5859 drm_i915_private_t *dev_priv = dev->dev_private;
5860
5861 if (!list_empty(&dev_priv->mm.active_list)) {
5862 /* Still processing requests, so just re-arm the timer. */
5863 mod_timer(&dev_priv->idle_timer, jiffies +
5864 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5865 return;
5866 }
5867
5868 dev_priv->busy = false;
5869 queue_work(dev_priv->wq, &dev_priv->idle_work);
5870}
5871
5872#define CRTC_IDLE_TIMEOUT 1000 /* ms */
5873
5874static void intel_crtc_idle_timer(unsigned long arg)
5875{
5876 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5877 struct drm_crtc *crtc = &intel_crtc->base;
5878 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5879 struct intel_framebuffer *intel_fb;
5880
5881 intel_fb = to_intel_framebuffer(crtc->fb);
5882 if (intel_fb && intel_fb->obj->active) {
5883 /* The framebuffer is still being accessed by the GPU. */
5884 mod_timer(&intel_crtc->idle_timer, jiffies +
5885 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5886 return;
5887 }
5888
5889 intel_crtc->busy = false;
5890 queue_work(dev_priv->wq, &dev_priv->idle_work);
5891}
5892
5893static void intel_increase_pllclock(struct drm_crtc *crtc) 6007static void intel_increase_pllclock(struct drm_crtc *crtc)
5894{ 6008{
5895 struct drm_device *dev = crtc->dev; 6009 struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
5919 if (dpll & DISPLAY_RATE_SELECT_FPA1) 6033 if (dpll & DISPLAY_RATE_SELECT_FPA1)
5920 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 6034 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
5921 } 6035 }
5922
5923 /* Schedule downclock */
5924 mod_timer(&intel_crtc->idle_timer, jiffies +
5925 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5926} 6036}
5927 6037
5928static void intel_decrease_pllclock(struct drm_crtc *crtc) 6038static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
5961 6071
5962} 6072}
5963 6073
5964/** 6074void intel_mark_busy(struct drm_device *dev)
5965 * intel_idle_update - adjust clocks for idleness
5966 * @work: work struct
5967 *
5968 * Either the GPU or display (or both) went idle. Check the busy status
5969 * here and adjust the CRTC and GPU clocks as necessary.
5970 */
5971static void intel_idle_update(struct work_struct *work)
5972{ 6075{
5973 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 6076 i915_update_gfx_val(dev->dev_private);
5974 idle_work); 6077}
5975 struct drm_device *dev = dev_priv->dev; 6078
6079void intel_mark_idle(struct drm_device *dev)
6080{
6081}
6082
6083void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6084{
6085 struct drm_device *dev = obj->base.dev;
5976 struct drm_crtc *crtc; 6086 struct drm_crtc *crtc;
5977 struct intel_crtc *intel_crtc;
5978 6087
5979 if (!i915_powersave) 6088 if (!i915_powersave)
5980 return; 6089 return;
5981 6090
5982 mutex_lock(&dev->struct_mutex);
5983
5984 i915_update_gfx_val(dev_priv);
5985
5986 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6091 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5987 /* Skip inactive CRTCs */
5988 if (!crtc->fb) 6092 if (!crtc->fb)
5989 continue; 6093 continue;
5990 6094
5991 intel_crtc = to_intel_crtc(crtc); 6095 if (to_intel_framebuffer(crtc->fb)->obj == obj)
5992 if (!intel_crtc->busy) 6096 intel_increase_pllclock(crtc);
5993 intel_decrease_pllclock(crtc);
5994 } 6097 }
5995
5996
5997 mutex_unlock(&dev->struct_mutex);
5998} 6098}
5999 6099
6000/** 6100void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
6001 * intel_mark_busy - mark the GPU and possibly the display busy
6002 * @dev: drm device
6003 * @obj: object we're operating on
6004 *
6005 * Callers can use this function to indicate that the GPU is busy processing
6006 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
6007 * buffer), we'll also mark the display as busy, so we know to increase its
6008 * clock frequency.
6009 */
6010void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
6011{ 6101{
6012 drm_i915_private_t *dev_priv = dev->dev_private; 6102 struct drm_device *dev = obj->base.dev;
6013 struct drm_crtc *crtc = NULL; 6103 struct drm_crtc *crtc;
6014 struct intel_framebuffer *intel_fb;
6015 struct intel_crtc *intel_crtc;
6016
6017 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6018 return;
6019
6020 if (!dev_priv->busy) {
6021 intel_sanitize_pm(dev);
6022 dev_priv->busy = true;
6023 } else
6024 mod_timer(&dev_priv->idle_timer, jiffies +
6025 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
6026 6104
6027 if (obj == NULL) 6105 if (!i915_powersave)
6028 return; 6106 return;
6029 6107
6030 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6108 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6031 if (!crtc->fb) 6109 if (!crtc->fb)
6032 continue; 6110 continue;
6033 6111
6034 intel_crtc = to_intel_crtc(crtc); 6112 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6035 intel_fb = to_intel_framebuffer(crtc->fb); 6113 intel_decrease_pllclock(crtc);
6036 if (intel_fb->obj == obj) {
6037 if (!intel_crtc->busy) {
6038 /* Non-busy -> busy, upclock */
6039 intel_increase_pllclock(crtc);
6040 intel_crtc->busy = true;
6041 } else {
6042 /* Busy -> busy, put off timer */
6043 mod_timer(&intel_crtc->idle_timer, jiffies +
6044 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6045 }
6046 }
6047 } 6114 }
6048} 6115}
6049 6116
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
6394 default: 6461 default:
6395 WARN_ONCE(1, "unknown plane in flip command\n"); 6462 WARN_ONCE(1, "unknown plane in flip command\n");
6396 ret = -ENODEV; 6463 ret = -ENODEV;
6397 goto err; 6464 goto err_unpin;
6398 } 6465 }
6399 6466
6400 ret = intel_ring_begin(ring, 4); 6467 ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6502 goto cleanup_pending; 6569 goto cleanup_pending;
6503 6570
6504 intel_disable_fbc(dev); 6571 intel_disable_fbc(dev);
6505 intel_mark_busy(dev, obj); 6572 intel_mark_fb_busy(obj);
6506 mutex_unlock(&dev->struct_mutex); 6573 mutex_unlock(&dev->struct_mutex);
6507 6574
6508 trace_i915_flip_request(intel_crtc->plane, obj); 6575 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
6527 return ret; 6594 return ret;
6528} 6595}
6529 6596
6530static void intel_sanitize_modesetting(struct drm_device *dev, 6597static struct drm_crtc_helper_funcs intel_helper_funcs = {
6531 int pipe, int plane) 6598 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6599 .load_lut = intel_crtc_load_lut,
6600 .disable = intel_crtc_noop,
6601};
6602
6603bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
6532{ 6604{
6533 struct drm_i915_private *dev_priv = dev->dev_private; 6605 struct intel_encoder *other_encoder;
6534 u32 reg, val; 6606 struct drm_crtc *crtc = &encoder->new_crtc->base;
6535 int i;
6536 6607
6537 /* Clear any frame start delays used for debugging left by the BIOS */ 6608 if (WARN_ON(!crtc))
6538 for_each_pipe(i) { 6609 return false;
6539 reg = PIPECONF(i); 6610
6540 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 6611 list_for_each_entry(other_encoder,
6612 &crtc->dev->mode_config.encoder_list,
6613 base.head) {
6614
6615 if (&other_encoder->new_crtc->base != crtc ||
6616 encoder == other_encoder)
6617 continue;
6618 else
6619 return true;
6541 } 6620 }
6542 6621
6543 if (HAS_PCH_SPLIT(dev)) 6622 return false;
6544 return; 6623}
6545 6624
6546 /* Who knows what state these registers were left in by the BIOS or 6625static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
6547 * grub? 6626 struct drm_crtc *crtc)
6548 * 6627{
6549 * If we leave the registers in a conflicting state (e.g. with the 6628 struct drm_device *dev;
6550 * display plane reading from the other pipe than the one we intend 6629 struct drm_crtc *tmp;
6551 * to use) then when we attempt to teardown the active mode, we will 6630 int crtc_mask = 1;
6552 * not disable the pipes and planes in the correct order -- leaving 6631
6553 * a plane reading from a disabled pipe and possibly leading to 6632 WARN(!crtc, "checking null crtc?\n");
6554 * undefined behaviour. 6633
6634 dev = crtc->dev;
6635
6636 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
6637 if (tmp == crtc)
6638 break;
6639 crtc_mask <<= 1;
6640 }
6641
6642 if (encoder->possible_crtcs & crtc_mask)
6643 return true;
6644 return false;
6645}
6646
6647/**
6648 * intel_modeset_update_staged_output_state
6649 *
6650 * Updates the staged output configuration state, e.g. after we've read out the
6651 * current hw state.
6652 */
6653static void intel_modeset_update_staged_output_state(struct drm_device *dev)
6654{
6655 struct intel_encoder *encoder;
6656 struct intel_connector *connector;
6657
6658 list_for_each_entry(connector, &dev->mode_config.connector_list,
6659 base.head) {
6660 connector->new_encoder =
6661 to_intel_encoder(connector->base.encoder);
6662 }
6663
6664 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6665 base.head) {
6666 encoder->new_crtc =
6667 to_intel_crtc(encoder->base.crtc);
6668 }
6669}
6670
6671/**
6672 * intel_modeset_commit_output_state
6673 *
 6674 * This function copies the staged display pipe configuration to the real one.
6675 */
6676static void intel_modeset_commit_output_state(struct drm_device *dev)
6677{
6678 struct intel_encoder *encoder;
6679 struct intel_connector *connector;
6680
6681 list_for_each_entry(connector, &dev->mode_config.connector_list,
6682 base.head) {
6683 connector->base.encoder = &connector->new_encoder->base;
6684 }
6685
6686 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6687 base.head) {
6688 encoder->base.crtc = &encoder->new_crtc->base;
6689 }
6690}
6691
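To make the staged/committed split concrete, a small sketch of the intended calling sequence (this mirrors what the load-detect code above already does; the connector, intel_encoder, crtc, mode and fb variables are assumed to be in scope and error handling is omitted):

	/* stage: point the connector at the encoder and the encoder at the crtc */
	to_intel_connector(connector)->new_encoder = intel_encoder;
	intel_encoder->new_crtc = to_intel_crtc(crtc);

	/* intel_set_mode() commits the staged pointers via
	 * intel_modeset_commit_output_state() and then enables the pipes */
	if (!intel_set_mode(crtc, mode, 0, 0, fb))
		DRM_DEBUG_KMS("modeset on staged configuration failed\n");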
6692static struct drm_display_mode *
6693intel_modeset_adjusted_mode(struct drm_crtc *crtc,
6694 struct drm_display_mode *mode)
6695{
6696 struct drm_device *dev = crtc->dev;
6697 struct drm_display_mode *adjusted_mode;
6698 struct drm_encoder_helper_funcs *encoder_funcs;
6699 struct intel_encoder *encoder;
6700
6701 adjusted_mode = drm_mode_duplicate(dev, mode);
6702 if (!adjusted_mode)
6703 return ERR_PTR(-ENOMEM);
6704
6705 /* Pass our mode to the connectors and the CRTC to give them a chance to
6706 * adjust it according to limitations or connector properties, and also
6707 * a chance to reject the mode entirely.
6555 */ 6708 */
6709 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6710 base.head) {
6556 6711
6557 reg = DSPCNTR(plane); 6712 if (&encoder->new_crtc->base != crtc)
6558 val = I915_READ(reg); 6713 continue;
6714 encoder_funcs = encoder->base.helper_private;
6715 if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
6716 adjusted_mode))) {
6717 DRM_DEBUG_KMS("Encoder fixup failed\n");
6718 goto fail;
6719 }
6720 }
6559 6721
6560 if ((val & DISPLAY_PLANE_ENABLE) == 0) 6722 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
6561 return; 6723 DRM_DEBUG_KMS("CRTC fixup failed\n");
6562 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) 6724 goto fail;
6563 return; 6725 }
6726 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
6564 6727
6565 /* This display plane is active and attached to the other CPU pipe. */ 6728 return adjusted_mode;
6566 pipe = !pipe; 6729fail:
6730 drm_mode_destroy(dev, adjusted_mode);
6731 return ERR_PTR(-EINVAL);
6732}
6567 6733
6568 /* Disable the plane and wait for it to stop reading from the pipe. */ 6734/* Computes which crtcs are affected and sets the relevant bits in the mask. For
6569 intel_disable_plane(dev_priv, plane, pipe); 6735 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
6570 intel_disable_pipe(dev_priv, pipe); 6736static void
6737intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
6738 unsigned *prepare_pipes, unsigned *disable_pipes)
6739{
6740 struct intel_crtc *intel_crtc;
6741 struct drm_device *dev = crtc->dev;
6742 struct intel_encoder *encoder;
6743 struct intel_connector *connector;
6744 struct drm_crtc *tmp_crtc;
6745
6746 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
6747
 6748 /* Check which crtcs have changed outputs connected to them; these need
6749 * to be part of the prepare_pipes mask. We don't (yet) support global
6750 * modeset across multiple crtcs, so modeset_pipes will only have one
6751 * bit set at most. */
6752 list_for_each_entry(connector, &dev->mode_config.connector_list,
6753 base.head) {
6754 if (connector->base.encoder == &connector->new_encoder->base)
6755 continue;
6756
6757 if (connector->base.encoder) {
6758 tmp_crtc = connector->base.encoder->crtc;
6759
6760 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
6761 }
6762
6763 if (connector->new_encoder)
6764 *prepare_pipes |=
6765 1 << connector->new_encoder->new_crtc->pipe;
6766 }
6767
6768 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6769 base.head) {
6770 if (encoder->base.crtc == &encoder->new_crtc->base)
6771 continue;
6772
6773 if (encoder->base.crtc) {
6774 tmp_crtc = encoder->base.crtc;
6775
6776 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
6777 }
6778
6779 if (encoder->new_crtc)
6780 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
6781 }
6782
6783 /* Check for any pipes that will be fully disabled ... */
6784 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
6785 base.head) {
6786 bool used = false;
6787
6788 /* Don't try to disable disabled crtcs. */
6789 if (!intel_crtc->base.enabled)
6790 continue;
6791
6792 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6793 base.head) {
6794 if (encoder->new_crtc == intel_crtc)
6795 used = true;
6796 }
6797
6798 if (!used)
6799 *disable_pipes |= 1 << intel_crtc->pipe;
6800 }
6801
6802
 6803 /* set_mode is also used to update properties on live display pipes. */
6804 intel_crtc = to_intel_crtc(crtc);
6805 if (crtc->enabled)
6806 *prepare_pipes |= 1 << intel_crtc->pipe;
6807
 6808 /* We only support modeset on one single crtc, hence we need to do that
 6809 * only for the passed-in crtc iff we change anything other than just
 6810 * disabling crtcs.
 6811 *
 6812 * This is actually not true: to be fully compatible with the old crtc
 6813 * helper we automatically disable _any_ output (i.e. it doesn't need to be
 6814 * connected to the crtc we're modesetting on) if it's disconnected.
 6815 * Which is a rather nutty api (since changing the output configuration
 6816 * without userspace's explicit request can lead to confusion), but
6817 * alas. Hence we currently need to modeset on all pipes we prepare. */
6818 if (*prepare_pipes)
6819 *modeset_pipes = *prepare_pipes;
6820
6821 /* ... and mask these out. */
6822 *modeset_pipes &= ~(*disable_pipes);
6823 *prepare_pipes &= ~(*disable_pipes);
6571} 6824}
6572 6825
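A concrete (made-up) example of the three masks: suppose the only active connector is moved from an encoder whose current crtc drives pipe A to a new encoder staged on the pipe B crtc, and nothing else is staged for the pipe A crtc. The connector and encoder loops then set prepare_pipes = 0x3 (old and new pipe), the unused-crtc scan sets disable_pipes = 0x1, and after the final masking we end up with modeset_pipes = prepare_pipes = 0x2: a full modeset on pipe B plus a plain disable of pipe A.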
6573static void intel_crtc_reset(struct drm_crtc *crtc) 6826static bool intel_crtc_in_use(struct drm_crtc *crtc)
6574{ 6827{
6828 struct drm_encoder *encoder;
6575 struct drm_device *dev = crtc->dev; 6829 struct drm_device *dev = crtc->dev;
6576 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6577 6830
6578 /* Reset flags back to the 'unknown' status so that they 6831 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
6579 * will be correctly set on the initial modeset. 6832 if (encoder->crtc == crtc)
6833 return true;
6834
6835 return false;
6836}
6837
6838static void
6839intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
6840{
6841 struct intel_encoder *intel_encoder;
6842 struct intel_crtc *intel_crtc;
6843 struct drm_connector *connector;
6844
6845 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
6846 base.head) {
6847 if (!intel_encoder->base.crtc)
6848 continue;
6849
6850 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
6851
6852 if (prepare_pipes & (1 << intel_crtc->pipe))
6853 intel_encoder->connectors_active = false;
6854 }
6855
6856 intel_modeset_commit_output_state(dev);
6857
6858 /* Update computed state. */
6859 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
6860 base.head) {
6861 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
6862 }
6863
6864 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
6865 if (!connector->encoder || !connector->encoder->crtc)
6866 continue;
6867
6868 intel_crtc = to_intel_crtc(connector->encoder->crtc);
6869
6870 if (prepare_pipes & (1 << intel_crtc->pipe)) {
6871 struct drm_property *dpms_property =
6872 dev->mode_config.dpms_property;
6873
6874 connector->dpms = DRM_MODE_DPMS_ON;
6875 drm_connector_property_set_value(connector,
6876 dpms_property,
6877 DRM_MODE_DPMS_ON);
6878
6879 intel_encoder = to_intel_encoder(connector->encoder);
6880 intel_encoder->connectors_active = true;
6881 }
6882 }
6883
6884}
6885
6886#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
6887 list_for_each_entry((intel_crtc), \
6888 &(dev)->mode_config.crtc_list, \
6889 base.head) \
6890 if (mask & (1 <<(intel_crtc)->pipe)) \
6891
6892void
6893intel_modeset_check_state(struct drm_device *dev)
6894{
6895 struct intel_crtc *crtc;
6896 struct intel_encoder *encoder;
6897 struct intel_connector *connector;
6898
6899 list_for_each_entry(connector, &dev->mode_config.connector_list,
6900 base.head) {
6901 /* This also checks the encoder/connector hw state with the
6902 * ->get_hw_state callbacks. */
6903 intel_connector_check_state(connector);
6904
6905 WARN(&connector->new_encoder->base != connector->base.encoder,
6906 "connector's staged encoder doesn't match current encoder\n");
6907 }
6908
6909 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6910 base.head) {
6911 bool enabled = false;
6912 bool active = false;
6913 enum pipe pipe, tracked_pipe;
6914
6915 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
6916 encoder->base.base.id,
6917 drm_get_encoder_name(&encoder->base));
6918
6919 WARN(&encoder->new_crtc->base != encoder->base.crtc,
 6920 "encoder's staged crtc doesn't match current crtc\n");
6921 WARN(encoder->connectors_active && !encoder->base.crtc,
 6922 "encoder's connectors_active set, but no crtc\n");
6923
6924 list_for_each_entry(connector, &dev->mode_config.connector_list,
6925 base.head) {
6926 if (connector->base.encoder != &encoder->base)
6927 continue;
6928 enabled = true;
6929 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
6930 active = true;
6931 }
6932 WARN(!!encoder->base.crtc != enabled,
6933 "encoder's enabled state mismatch "
6934 "(expected %i, found %i)\n",
6935 !!encoder->base.crtc, enabled);
6936 WARN(active && !encoder->base.crtc,
6937 "active encoder with no crtc\n");
6938
6939 WARN(encoder->connectors_active != active,
6940 "encoder's computed active state doesn't match tracked active state "
6941 "(expected %i, found %i)\n", active, encoder->connectors_active);
6942
6943 active = encoder->get_hw_state(encoder, &pipe);
6944 WARN(active != encoder->connectors_active,
6945 "encoder's hw state doesn't match sw tracking "
6946 "(expected %i, found %i)\n",
6947 encoder->connectors_active, active);
6948
6949 if (!encoder->base.crtc)
6950 continue;
6951
6952 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
6953 WARN(active && pipe != tracked_pipe,
6954 "active encoder's pipe doesn't match"
6955 "(expected %i, found %i)\n",
6956 tracked_pipe, pipe);
6957
6958 }
6959
6960 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
6961 base.head) {
6962 bool enabled = false;
6963 bool active = false;
6964
6965 DRM_DEBUG_KMS("[CRTC:%d]\n",
6966 crtc->base.base.id);
6967
6968 WARN(crtc->active && !crtc->base.enabled,
6969 "active crtc, but not enabled in sw tracking\n");
6970
6971 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6972 base.head) {
6973 if (encoder->base.crtc != &crtc->base)
6974 continue;
6975 enabled = true;
6976 if (encoder->connectors_active)
6977 active = true;
6978 }
6979 WARN(active != crtc->active,
6980 "crtc's computed active state doesn't match tracked active state "
6981 "(expected %i, found %i)\n", active, crtc->active);
6982 WARN(enabled != crtc->base.enabled,
6983 "crtc's computed enabled state doesn't match tracked enabled state "
6984 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
6985
6986 assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
6987 }
6988}
6989
6990bool intel_set_mode(struct drm_crtc *crtc,
6991 struct drm_display_mode *mode,
6992 int x, int y, struct drm_framebuffer *fb)
6993{
6994 struct drm_device *dev = crtc->dev;
6995 drm_i915_private_t *dev_priv = dev->dev_private;
6996 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
6997 struct drm_encoder_helper_funcs *encoder_funcs;
6998 struct drm_encoder *encoder;
6999 struct intel_crtc *intel_crtc;
7000 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7001 bool ret = true;
7002
7003 intel_modeset_affected_pipes(crtc, &modeset_pipes,
7004 &prepare_pipes, &disable_pipes);
7005
7006 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7007 modeset_pipes, prepare_pipes, disable_pipes);
7008
7009 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7010 intel_crtc_disable(&intel_crtc->base);
7011
7012 saved_hwmode = crtc->hwmode;
7013 saved_mode = crtc->mode;
7014
7015 /* Hack: Because we don't (yet) support global modeset on multiple
7016 * crtcs, we don't keep track of the new mode for more than one crtc.
7017 * Hence simply check whether any bit is set in modeset_pipes in all the
 7018 * pieces of code that are not yet converted to deal with multiple crtcs
7019 * changing their mode at the same time. */
7020 adjusted_mode = NULL;
7021 if (modeset_pipes) {
7022 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7023 if (IS_ERR(adjusted_mode)) {
7024 return false;
7025 }
7026 }
7027
7028 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7029 if (intel_crtc->base.enabled)
7030 dev_priv->display.crtc_disable(&intel_crtc->base);
7031 }
7032
7033 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
 7034 * to set it here already, even though we pass it down the callchain.
6580 */ 7035 */
6581 intel_crtc->dpms_mode = -1; 7036 if (modeset_pipes)
7037 crtc->mode = *mode;
6582 7038
6583 /* We need to fix up any BIOS configuration that conflicts with 7039 /* Only after disabling all output pipelines that will be changed can we
 6584 * our expectations. 7040 * update the output configuration. */
7041 intel_modeset_update_state(dev, prepare_pipes);
7042
7043 /* Set up the DPLL and any encoders state that needs to adjust or depend
7044 * on the DPLL.
6585 */ 7045 */
6586 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 7046 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7047 ret = !intel_crtc_mode_set(&intel_crtc->base,
7048 mode, adjusted_mode,
7049 x, y, fb);
7050 if (!ret)
7051 goto done;
7052
7053 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7054
7055 if (encoder->crtc != &intel_crtc->base)
7056 continue;
7057
7058 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7059 encoder->base.id, drm_get_encoder_name(encoder),
7060 mode->base.id, mode->name);
7061 encoder_funcs = encoder->helper_private;
7062 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
7063 }
7064 }
7065
7066 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7067 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7068 dev_priv->display.crtc_enable(&intel_crtc->base);
7069
7070 if (modeset_pipes) {
7071 /* Store real post-adjustment hardware mode. */
7072 crtc->hwmode = *adjusted_mode;
7073
7074 /* Calculate and store various constants which
7075 * are later needed by vblank and swap-completion
7076 * timestamping. They are derived from true hwmode.
7077 */
7078 drm_calc_timestamping_constants(crtc);
7079 }
7080
7081 /* FIXME: add subpixel order */
7082done:
7083 drm_mode_destroy(dev, adjusted_mode);
7084 if (!ret && crtc->enabled) {
7085 crtc->hwmode = saved_hwmode;
7086 crtc->mode = saved_mode;
7087 } else {
7088 intel_modeset_check_state(dev);
7089 }
7090
7091 return ret;
6587} 7092}
6588 7093
6589static struct drm_crtc_helper_funcs intel_helper_funcs = { 7094#undef for_each_intel_crtc_masked
6590 .dpms = intel_crtc_dpms, 7095
6591 .mode_fixup = intel_crtc_mode_fixup, 7096static void intel_set_config_free(struct intel_set_config *config)
6592 .mode_set = intel_crtc_mode_set, 7097{
6593 .mode_set_base = intel_pipe_set_base, 7098 if (!config)
6594 .mode_set_base_atomic = intel_pipe_set_base_atomic, 7099 return;
6595 .load_lut = intel_crtc_load_lut, 7100
6596 .disable = intel_crtc_disable, 7101 kfree(config->save_connector_encoders);
6597}; 7102 kfree(config->save_encoder_crtcs);
7103 kfree(config);
7104}
7105
7106static int intel_set_config_save_state(struct drm_device *dev,
7107 struct intel_set_config *config)
7108{
7109 struct drm_encoder *encoder;
7110 struct drm_connector *connector;
7111 int count;
7112
7113 config->save_encoder_crtcs =
7114 kcalloc(dev->mode_config.num_encoder,
7115 sizeof(struct drm_crtc *), GFP_KERNEL);
7116 if (!config->save_encoder_crtcs)
7117 return -ENOMEM;
7118
7119 config->save_connector_encoders =
7120 kcalloc(dev->mode_config.num_connector,
7121 sizeof(struct drm_encoder *), GFP_KERNEL);
7122 if (!config->save_connector_encoders)
7123 return -ENOMEM;
7124
7125 /* Copy data. Note that driver private data is not affected.
 7126 * Should anything bad happen, only the expected state is
 7127 * restored, not the driver's personal bookkeeping.
7128 */
7129 count = 0;
7130 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7131 config->save_encoder_crtcs[count++] = encoder->crtc;
7132 }
7133
7134 count = 0;
7135 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7136 config->save_connector_encoders[count++] = connector->encoder;
7137 }
7138
7139 return 0;
7140}
7141
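intel_set_config_save_state snapshots only the link pointers (encoder->crtc and connector->encoder) so that a failed set_config can be rolled back without touching driver-private bookkeeping. A standalone sketch of that snapshot idea, with calloc standing in for kcalloc and invented stand-in structs:

#include <stdio.h>
#include <stdlib.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; };
struct connector { struct encoder *encoder; };

struct saved_state {
        struct crtc **encoder_crtcs;
        struct encoder **connector_encoders;
};

static int save_state(struct saved_state *s,
                      const struct encoder *enc, int n_enc,
                      const struct connector *con, int n_con)
{
        int i;

        s->encoder_crtcs = calloc(n_enc, sizeof(*s->encoder_crtcs));
        s->connector_encoders = calloc(n_con, sizeof(*s->connector_encoders));
        if (!s->encoder_crtcs || !s->connector_encoders)
                return -1;      /* caller frees both halves, as with -ENOMEM above */

        /* Only the link pointers are copied, nothing driver-private. */
        for (i = 0; i < n_enc; i++)
                s->encoder_crtcs[i] = enc[i].crtc;
        for (i = 0; i < n_con; i++)
                s->connector_encoders[i] = con[i].encoder;
        return 0;
}

int main(void)
{
        struct crtc pipe_a = { .id = 0 };
        struct encoder e = { .crtc = &pipe_a };
        struct connector c = { .encoder = &e };
        struct saved_state s;

        if (save_state(&s, &e, 1, &c, 1) == 0)
                printf("saved crtc id %d\n", s.encoder_crtcs[0]->id);
        free(s.encoder_crtcs);
        free(s.connector_encoders);
        return 0;
}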
7142static void intel_set_config_restore_state(struct drm_device *dev,
7143 struct intel_set_config *config)
7144{
7145 struct intel_encoder *encoder;
7146 struct intel_connector *connector;
7147 int count;
7148
7149 count = 0;
7150 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7151 encoder->new_crtc =
7152 to_intel_crtc(config->save_encoder_crtcs[count++]);
7153 }
7154
7155 count = 0;
7156 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
7157 connector->new_encoder =
7158 to_intel_encoder(config->save_connector_encoders[count++]);
7159 }
7160}
7161
7162static void
7163intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7164 struct intel_set_config *config)
7165{
7166
7167 /* We should be able to check here if the fb has the same properties
7168 * and then just flip_or_move it */
7169 if (set->crtc->fb != set->fb) {
7170 /* If we have no fb then treat it as a full mode set */
7171 if (set->crtc->fb == NULL) {
7172 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
7173 config->mode_changed = true;
7174 } else if (set->fb == NULL) {
7175 config->mode_changed = true;
7176 } else if (set->fb->depth != set->crtc->fb->depth) {
7177 config->mode_changed = true;
7178 } else if (set->fb->bits_per_pixel !=
7179 set->crtc->fb->bits_per_pixel) {
7180 config->mode_changed = true;
7181 } else
7182 config->fb_changed = true;
7183 }
7184
7185 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
7186 config->fb_changed = true;
7187
7188 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
7189 DRM_DEBUG_KMS("modes are different, full mode set\n");
7190 drm_mode_debug_printmodeline(&set->crtc->mode);
7191 drm_mode_debug_printmodeline(set->mode);
7192 config->mode_changed = true;
7193 }
7194}
7195
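The helper above boils the decision down to two flags: mode_changed forces a full modeset, while fb_changed only requires a base update. A hedged, standalone restatement of that decision logic, using stand-in types rather than the drm framebuffer/mode structs:

#include <stdbool.h>
#include <stdio.h>

struct fb { int depth, bpp; };

/* Full modeset when fb presence, depth or bpp differ, or the mode itself
 * changed; a mere base/offset change only needs an fb update. */
static void compute_changes(const struct fb *old_fb, const struct fb *new_fb,
                            bool offsets_differ, bool modes_differ,
                            bool *mode_changed, bool *fb_changed)
{
        *mode_changed = false;
        *fb_changed = false;

        if (old_fb != new_fb) {
                if (!old_fb || !new_fb ||
                    old_fb->depth != new_fb->depth ||
                    old_fb->bpp != new_fb->bpp)
                        *mode_changed = true;
                else
                        *fb_changed = true;
        }

        if (new_fb && offsets_differ)
                *fb_changed = true;
        if (modes_differ)
                *mode_changed = true;
}

int main(void)
{
        struct fb old_fb = { 24, 32 }, new_fb = { 24, 32 };
        bool mode_changed, fb_changed;

        compute_changes(&old_fb, &new_fb, true, false,
                        &mode_changed, &fb_changed);
        printf("mode_changed=%d fb_changed=%d\n", mode_changed, fb_changed);
        return 0;
}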
7196static int
7197intel_modeset_stage_output_state(struct drm_device *dev,
7198 struct drm_mode_set *set,
7199 struct intel_set_config *config)
7200{
7201 struct drm_crtc *new_crtc;
7202 struct intel_connector *connector;
7203 struct intel_encoder *encoder;
7204 int count, ro;
7205
 7206 /* The upper layers ensure that we either disable a crtc or have a list
7207 * of connectors. For paranoia, double-check this. */
7208 WARN_ON(!set->fb && (set->num_connectors != 0));
7209 WARN_ON(set->fb && (set->num_connectors == 0));
7210
7211 count = 0;
7212 list_for_each_entry(connector, &dev->mode_config.connector_list,
7213 base.head) {
7214 /* Otherwise traverse passed in connector list and get encoders
7215 * for them. */
7216 for (ro = 0; ro < set->num_connectors; ro++) {
7217 if (set->connectors[ro] == &connector->base) {
7218 connector->new_encoder = connector->encoder;
7219 break;
7220 }
7221 }
7222
7223 /* If we disable the crtc, disable all its connectors. Also, if
7224 * the connector is on the changing crtc but not on the new
7225 * connector list, disable it. */
7226 if ((!set->fb || ro == set->num_connectors) &&
7227 connector->base.encoder &&
7228 connector->base.encoder->crtc == set->crtc) {
7229 connector->new_encoder = NULL;
7230
7231 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
7232 connector->base.base.id,
7233 drm_get_connector_name(&connector->base));
7234 }
7235
7236
7237 if (&connector->new_encoder->base != connector->base.encoder) {
7238 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
7239 config->mode_changed = true;
7240 }
7241
7242 /* Disable all disconnected encoders. */
7243 if (connector->base.status == connector_status_disconnected)
7244 connector->new_encoder = NULL;
7245 }
7246 /* connector->new_encoder is now updated for all connectors. */
7247
7248 /* Update crtc of enabled connectors. */
7249 count = 0;
7250 list_for_each_entry(connector, &dev->mode_config.connector_list,
7251 base.head) {
7252 if (!connector->new_encoder)
7253 continue;
7254
7255 new_crtc = connector->new_encoder->base.crtc;
7256
7257 for (ro = 0; ro < set->num_connectors; ro++) {
7258 if (set->connectors[ro] == &connector->base)
7259 new_crtc = set->crtc;
7260 }
7261
7262 /* Make sure the new CRTC will work with the encoder */
7263 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
7264 new_crtc)) {
7265 return -EINVAL;
7266 }
7267 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
7268
7269 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
7270 connector->base.base.id,
7271 drm_get_connector_name(&connector->base),
7272 new_crtc->base.id);
7273 }
7274
 7275 /* Check for any encoders that need to be disabled. */
7276 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7277 base.head) {
7278 list_for_each_entry(connector,
7279 &dev->mode_config.connector_list,
7280 base.head) {
7281 if (connector->new_encoder == encoder) {
7282 WARN_ON(!connector->new_encoder->new_crtc);
7283
7284 goto next_encoder;
7285 }
7286 }
7287 encoder->new_crtc = NULL;
7288next_encoder:
7289 /* Only now check for crtc changes so we don't miss encoders
7290 * that will be disabled. */
7291 if (&encoder->new_crtc->base != encoder->base.crtc) {
7292 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
7293 config->mode_changed = true;
7294 }
7295 }
7296 /* Now we've also updated encoder->new_crtc for all encoders. */
7297
7298 return 0;
7299}
7300
7301static int intel_crtc_set_config(struct drm_mode_set *set)
7302{
7303 struct drm_device *dev;
7304 struct drm_mode_set save_set;
7305 struct intel_set_config *config;
7306 int ret;
7307
7308 BUG_ON(!set);
7309 BUG_ON(!set->crtc);
7310 BUG_ON(!set->crtc->helper_private);
7311
7312 if (!set->mode)
7313 set->fb = NULL;
7314
7315 /* The fb helper likes to play gross jokes with ->mode_set_config.
7316 * Unfortunately the crtc helper doesn't do much at all for this case,
7317 * so we have to cope with this madness until the fb helper is fixed up. */
7318 if (set->fb && set->num_connectors == 0)
7319 return 0;
7320
7321 if (set->fb) {
7322 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
7323 set->crtc->base.id, set->fb->base.id,
7324 (int)set->num_connectors, set->x, set->y);
7325 } else {
7326 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
7327 }
7328
7329 dev = set->crtc->dev;
7330
7331 ret = -ENOMEM;
7332 config = kzalloc(sizeof(*config), GFP_KERNEL);
7333 if (!config)
7334 goto out_config;
7335
7336 ret = intel_set_config_save_state(dev, config);
7337 if (ret)
7338 goto out_config;
7339
7340 save_set.crtc = set->crtc;
7341 save_set.mode = &set->crtc->mode;
7342 save_set.x = set->crtc->x;
7343 save_set.y = set->crtc->y;
7344 save_set.fb = set->crtc->fb;
7345
7346 /* Compute whether we need a full modeset, only an fb base update or no
7347 * change at all. In the future we might also check whether only the
7348 * mode changed, e.g. for LVDS where we only change the panel fitter in
7349 * such cases. */
7350 intel_set_config_compute_mode_changes(set, config);
7351
7352 ret = intel_modeset_stage_output_state(dev, set, config);
7353 if (ret)
7354 goto fail;
7355
7356 if (config->mode_changed) {
7357 if (set->mode) {
7358 DRM_DEBUG_KMS("attempting to set mode from"
7359 " userspace\n");
7360 drm_mode_debug_printmodeline(set->mode);
7361 }
7362
7363 if (!intel_set_mode(set->crtc, set->mode,
7364 set->x, set->y, set->fb)) {
7365 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
7366 set->crtc->base.id);
7367 ret = -EINVAL;
7368 goto fail;
7369 }
7370 } else if (config->fb_changed) {
7371 ret = intel_pipe_set_base(set->crtc,
7372 set->x, set->y, set->fb);
7373 }
7374
7375 intel_set_config_free(config);
7376
7377 return 0;
7378
7379fail:
7380 intel_set_config_restore_state(dev, config);
7381
7382 /* Try to restore the config */
7383 if (config->mode_changed &&
7384 !intel_set_mode(save_set.crtc, save_set.mode,
7385 save_set.x, save_set.y, save_set.fb))
7386 DRM_ERROR("failed to restore config after modeset failure\n");
7387
7388out_config:
7389 intel_set_config_free(config);
7390 return ret;
7391}
6598 7392
6599static const struct drm_crtc_funcs intel_crtc_funcs = { 7393static const struct drm_crtc_funcs intel_crtc_funcs = {
6600 .reset = intel_crtc_reset,
6601 .cursor_set = intel_crtc_cursor_set, 7394 .cursor_set = intel_crtc_cursor_set,
6602 .cursor_move = intel_crtc_cursor_move, 7395 .cursor_move = intel_crtc_cursor_move,
6603 .gamma_set = intel_crtc_gamma_set, 7396 .gamma_set = intel_crtc_gamma_set,
6604 .set_config = drm_crtc_helper_set_config, 7397 .set_config = intel_crtc_set_config,
6605 .destroy = intel_crtc_destroy, 7398 .destroy = intel_crtc_destroy,
6606 .page_flip = intel_crtc_page_flip, 7399 .page_flip = intel_crtc_page_flip,
6607}; 7400};
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6655 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 7448 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6656 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 7449 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6657 7450
6658 intel_crtc_reset(&intel_crtc->base);
6659 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6660 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 7451 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6661 7452
6662 if (HAS_PCH_SPLIT(dev)) {
6663 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6664 intel_helper_funcs.commit = ironlake_crtc_commit;
6665 } else {
6666 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6667 intel_helper_funcs.commit = i9xx_crtc_commit;
6668 }
6669
6670 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 7453 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6671
6672 intel_crtc->busy = false;
6673
6674 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6675 (unsigned long)intel_crtc);
6676} 7454}
6677 7455
6678int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 7456int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6699 return 0; 7477 return 0;
6700} 7478}
6701 7479
6702static int intel_encoder_clones(struct drm_device *dev, int type_mask) 7480static int intel_encoder_clones(struct intel_encoder *encoder)
6703{ 7481{
6704 struct intel_encoder *encoder; 7482 struct drm_device *dev = encoder->base.dev;
7483 struct intel_encoder *source_encoder;
6705 int index_mask = 0; 7484 int index_mask = 0;
6706 int entry = 0; 7485 int entry = 0;
6707 7486
6708 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 7487 list_for_each_entry(source_encoder,
6709 if (type_mask & encoder->clone_mask) 7488 &dev->mode_config.encoder_list, base.head) {
7489
7490 if (encoder == source_encoder)
6710 index_mask |= (1 << entry); 7491 index_mask |= (1 << entry);
7492
 7493 /* Intel hw has only one MUX where encoders could be cloned. */
7494 if (encoder->cloneable && source_encoder->cloneable)
7495 index_mask |= (1 << entry);
7496
6711 entry++; 7497 entry++;
6712 } 7498 }
6713 7499
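With the per-type clone_mask gone, possible_clones is now derived purely from the cloneable flag: an encoder can always be "cloned" with itself, and with another encoder only if both are cloneable. A small standalone sketch of that mask computation (the encoder indices and example set are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static unsigned int clone_mask(const bool *cloneable, int n, int self)
{
        unsigned int mask = 0;
        int j;

        for (j = 0; j < n; j++) {
                if (j == self)
                        mask |= 1u << j;                /* own bit is always set */
                else if (cloneable[self] && cloneable[j])
                        mask |= 1u << j;                /* both sides must be cloneable */
        }
        return mask;
}

int main(void)
{
        bool cloneable[] = { true, true, false };       /* e.g. analog, HDMI, LVDS */
        int i;

        for (i = 0; i < 3; i++)
                printf("encoder %d possible_clones = 0x%x\n",
                       i, clone_mask(cloneable, 3, i));
        return 0;
}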
@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
6748 dpd_is_edp = intel_dpd_is_edp(dev); 7534 dpd_is_edp = intel_dpd_is_edp(dev);
6749 7535
6750 if (has_edp_a(dev)) 7536 if (has_edp_a(dev))
6751 intel_dp_init(dev, DP_A); 7537 intel_dp_init(dev, DP_A, PORT_A);
6752 7538
6753 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 7539 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6754 intel_dp_init(dev, PCH_DP_D); 7540 intel_dp_init(dev, PCH_DP_D, PORT_D);
6755 } 7541 }
6756 7542
6757 intel_crt_init(dev); 7543 intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
6782 /* PCH SDVOB multiplex with HDMIB */ 7568 /* PCH SDVOB multiplex with HDMIB */
6783 found = intel_sdvo_init(dev, PCH_SDVOB, true); 7569 found = intel_sdvo_init(dev, PCH_SDVOB, true);
6784 if (!found) 7570 if (!found)
6785 intel_hdmi_init(dev, HDMIB); 7571 intel_hdmi_init(dev, HDMIB, PORT_B);
6786 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 7572 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6787 intel_dp_init(dev, PCH_DP_B); 7573 intel_dp_init(dev, PCH_DP_B, PORT_B);
6788 } 7574 }
6789 7575
6790 if (I915_READ(HDMIC) & PORT_DETECTED) 7576 if (I915_READ(HDMIC) & PORT_DETECTED)
6791 intel_hdmi_init(dev, HDMIC); 7577 intel_hdmi_init(dev, HDMIC, PORT_C);
6792 7578
6793 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) 7579 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
6794 intel_hdmi_init(dev, HDMID); 7580 intel_hdmi_init(dev, HDMID, PORT_D);
6795 7581
6796 if (I915_READ(PCH_DP_C) & DP_DETECTED) 7582 if (I915_READ(PCH_DP_C) & DP_DETECTED)
6797 intel_dp_init(dev, PCH_DP_C); 7583 intel_dp_init(dev, PCH_DP_C, PORT_C);
6798 7584
6799 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 7585 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6800 intel_dp_init(dev, PCH_DP_D); 7586 intel_dp_init(dev, PCH_DP_D, PORT_D);
6801 } else if (IS_VALLEYVIEW(dev)) { 7587 } else if (IS_VALLEYVIEW(dev)) {
6802 int found; 7588 int found;
6803 7589
@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
6805 /* SDVOB multiplex with HDMIB */ 7591 /* SDVOB multiplex with HDMIB */
6806 found = intel_sdvo_init(dev, SDVOB, true); 7592 found = intel_sdvo_init(dev, SDVOB, true);
6807 if (!found) 7593 if (!found)
6808 intel_hdmi_init(dev, SDVOB); 7594 intel_hdmi_init(dev, SDVOB, PORT_B);
6809 if (!found && (I915_READ(DP_B) & DP_DETECTED)) 7595 if (!found && (I915_READ(DP_B) & DP_DETECTED))
6810 intel_dp_init(dev, DP_B); 7596 intel_dp_init(dev, DP_B, PORT_B);
6811 } 7597 }
6812 7598
6813 if (I915_READ(SDVOC) & PORT_DETECTED) 7599 if (I915_READ(SDVOC) & PORT_DETECTED)
6814 intel_hdmi_init(dev, SDVOC); 7600 intel_hdmi_init(dev, SDVOC, PORT_C);
6815 7601
6816 /* Shares lanes with HDMI on SDVOC */ 7602 /* Shares lanes with HDMI on SDVOC */
6817 if (I915_READ(DP_C) & DP_DETECTED) 7603 if (I915_READ(DP_C) & DP_DETECTED)
6818 intel_dp_init(dev, DP_C); 7604 intel_dp_init(dev, DP_C, PORT_C);
6819 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 7605 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
6820 bool found = false; 7606 bool found = false;
6821 7607
@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
6824 found = intel_sdvo_init(dev, SDVOB, true); 7610 found = intel_sdvo_init(dev, SDVOB, true);
6825 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 7611 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6826 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 7612 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
6827 intel_hdmi_init(dev, SDVOB); 7613 intel_hdmi_init(dev, SDVOB, PORT_B);
6828 } 7614 }
6829 7615
6830 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 7616 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6831 DRM_DEBUG_KMS("probing DP_B\n"); 7617 DRM_DEBUG_KMS("probing DP_B\n");
6832 intel_dp_init(dev, DP_B); 7618 intel_dp_init(dev, DP_B, PORT_B);
6833 } 7619 }
6834 } 7620 }
6835 7621
@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)
6844 7630
6845 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 7631 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6846 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 7632 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
6847 intel_hdmi_init(dev, SDVOC); 7633 intel_hdmi_init(dev, SDVOC, PORT_C);
6848 } 7634 }
6849 if (SUPPORTS_INTEGRATED_DP(dev)) { 7635 if (SUPPORTS_INTEGRATED_DP(dev)) {
6850 DRM_DEBUG_KMS("probing DP_C\n"); 7636 DRM_DEBUG_KMS("probing DP_C\n");
6851 intel_dp_init(dev, DP_C); 7637 intel_dp_init(dev, DP_C, PORT_C);
6852 } 7638 }
6853 } 7639 }
6854 7640
6855 if (SUPPORTS_INTEGRATED_DP(dev) && 7641 if (SUPPORTS_INTEGRATED_DP(dev) &&
6856 (I915_READ(DP_D) & DP_DETECTED)) { 7642 (I915_READ(DP_D) & DP_DETECTED)) {
6857 DRM_DEBUG_KMS("probing DP_D\n"); 7643 DRM_DEBUG_KMS("probing DP_D\n");
6858 intel_dp_init(dev, DP_D); 7644 intel_dp_init(dev, DP_D, PORT_D);
6859 } 7645 }
6860 } else if (IS_GEN2(dev)) 7646 } else if (IS_GEN2(dev))
6861 intel_dvo_init(dev); 7647 intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
6866 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 7652 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6867 encoder->base.possible_crtcs = encoder->crtc_mask; 7653 encoder->base.possible_crtcs = encoder->crtc_mask;
6868 encoder->base.possible_clones = 7654 encoder->base.possible_clones =
6869 intel_encoder_clones(dev, encoder->clone_mask); 7655 intel_encoder_clones(encoder);
6870 } 7656 }
6871 7657
6872 /* disable all the possible outputs/crtcs before entering KMS mode */
6873 drm_helper_disable_unused_functions(dev);
6874
6875 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 7658 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
6876 ironlake_init_pch_refclk(dev); 7659 ironlake_init_pch_refclk(dev);
6877} 7660}
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)
6973 7756
6974 /* We always want a DPMS function */ 7757 /* We always want a DPMS function */
6975 if (HAS_PCH_SPLIT(dev)) { 7758 if (HAS_PCH_SPLIT(dev)) {
6976 dev_priv->display.dpms = ironlake_crtc_dpms;
6977 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 7759 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7760 dev_priv->display.crtc_enable = ironlake_crtc_enable;
7761 dev_priv->display.crtc_disable = ironlake_crtc_disable;
6978 dev_priv->display.off = ironlake_crtc_off; 7762 dev_priv->display.off = ironlake_crtc_off;
6979 dev_priv->display.update_plane = ironlake_update_plane; 7763 dev_priv->display.update_plane = ironlake_update_plane;
6980 } else { 7764 } else {
6981 dev_priv->display.dpms = i9xx_crtc_dpms;
6982 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 7765 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7766 dev_priv->display.crtc_enable = i9xx_crtc_enable;
7767 dev_priv->display.crtc_disable = i9xx_crtc_disable;
6983 dev_priv->display.off = i9xx_crtc_off; 7768 dev_priv->display.off = i9xx_crtc_off;
6984 dev_priv->display.update_plane = i9xx_update_plane; 7769 dev_priv->display.update_plane = i9xx_update_plane;
6985 } 7770 }
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
7023 dev_priv->display.write_eld = ironlake_write_eld; 7808 dev_priv->display.write_eld = ironlake_write_eld;
7024 } else if (IS_HASWELL(dev)) { 7809 } else if (IS_HASWELL(dev)) {
7025 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 7810 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
7026 dev_priv->display.write_eld = ironlake_write_eld; 7811 dev_priv->display.write_eld = haswell_write_eld;
7027 } else 7812 } else
7028 dev_priv->display.update_wm = NULL; 7813 dev_priv->display.update_wm = NULL;
7029 } else if (IS_G4X(dev)) { 7814 } else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
7101 /* HP Mini needs pipe A force quirk (LP: #322104) */ 7886 /* HP Mini needs pipe A force quirk (LP: #322104) */
7102 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 7887 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
7103 7888
7104 /* Thinkpad R31 needs pipe A force quirk */
7105 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
7106 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 7889 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
7107 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 7890 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
7108 7891
7109 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
7110 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
7111 /* ThinkPad X40 needs pipe A force quirk */
7112
7113 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 7892 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7114 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 7893 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7115 7894
7116 /* 855 & before need to leave pipe A & dpll A up */ 7895 /* 855 & before need to leave pipe A & dpll A up */
7117 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7896 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7118 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7897 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7898 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7119 7899
7120 /* Lenovo U160 cannot use SSC on LVDS */ 7900 /* Lenovo U160 cannot use SSC on LVDS */
7121 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 7901 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
7231 /* Just disable it once at startup */ 8011 /* Just disable it once at startup */
7232 i915_disable_vga(dev); 8012 i915_disable_vga(dev);
7233 intel_setup_outputs(dev); 8013 intel_setup_outputs(dev);
8014}
7234 8015
7235 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 8016static void
7236 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 8017intel_connector_break_all_links(struct intel_connector *connector)
7237 (unsigned long)dev); 8018{
8019 connector->base.dpms = DRM_MODE_DPMS_OFF;
8020 connector->base.encoder = NULL;
8021 connector->encoder->connectors_active = false;
8022 connector->encoder->base.crtc = NULL;
8023}
8024
8025static void intel_enable_pipe_a(struct drm_device *dev)
8026{
8027 struct intel_connector *connector;
8028 struct drm_connector *crt = NULL;
8029 struct intel_load_detect_pipe load_detect_temp;
8030
8031 /* We can't just switch on the pipe A, we need to set things up with a
8032 * proper mode and output configuration. As a gross hack, enable pipe A
8033 * by enabling the load detect pipe once. */
8034 list_for_each_entry(connector,
8035 &dev->mode_config.connector_list,
8036 base.head) {
8037 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8038 crt = &connector->base;
8039 break;
8040 }
8041 }
8042
8043 if (!crt)
8044 return;
8045
8046 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8047 intel_release_load_detect_pipe(crt, &load_detect_temp);
8048
8049
8050}
8051
8052static void intel_sanitize_crtc(struct intel_crtc *crtc)
8053{
8054 struct drm_device *dev = crtc->base.dev;
8055 struct drm_i915_private *dev_priv = dev->dev_private;
8056 u32 reg, val;
8057
8058 /* Clear any frame start delays used for debugging left by the BIOS */
8059 reg = PIPECONF(crtc->pipe);
8060 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8061
8062 /* We need to sanitize the plane -> pipe mapping first because this will
8063 * disable the crtc (and hence change the state) if it is wrong. */
8064 if (!HAS_PCH_SPLIT(dev)) {
8065 struct intel_connector *connector;
8066 bool plane;
8067
8068 reg = DSPCNTR(crtc->plane);
8069 val = I915_READ(reg);
8070
8071 if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
8072 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8073 goto ok;
8074
8075 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8076 crtc->base.base.id);
8077
8078 /* Pipe has the wrong plane attached and the plane is active.
8079 * Temporarily change the plane mapping and disable everything
8080 * ... */
8081 plane = crtc->plane;
8082 crtc->plane = !plane;
8083 dev_priv->display.crtc_disable(&crtc->base);
8084 crtc->plane = plane;
8085
8086 /* ... and break all links. */
8087 list_for_each_entry(connector, &dev->mode_config.connector_list,
8088 base.head) {
8089 if (connector->encoder->base.crtc != &crtc->base)
8090 continue;
8091
8092 intel_connector_break_all_links(connector);
8093 }
8094
8095 WARN_ON(crtc->active);
8096 crtc->base.enabled = false;
8097 }
8098ok:
8099
8100 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8101 crtc->pipe == PIPE_A && !crtc->active) {
8102 /* BIOS forgot to enable pipe A, this mostly happens after
8103 * resume. Force-enable the pipe to fix this, the update_dpms
 8104 * call below will restore the pipe to the right state, but leave
8105 * the required bits on. */
8106 intel_enable_pipe_a(dev);
8107 }
8108
8109 /* Adjust the state of the output pipe according to whether we
8110 * have active connectors/encoders. */
8111 intel_crtc_update_dpms(&crtc->base);
8112
8113 if (crtc->active != crtc->base.enabled) {
8114 struct intel_encoder *encoder;
8115
8116 /* This can happen either due to bugs in the get_hw_state
8117 * functions or because the pipe is force-enabled due to the
8118 * pipe A quirk. */
8119 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
8120 crtc->base.base.id,
8121 crtc->base.enabled ? "enabled" : "disabled",
8122 crtc->active ? "enabled" : "disabled");
8123
8124 crtc->base.enabled = crtc->active;
8125
8126 /* Because we only establish the connector -> encoder ->
8127 * crtc links if something is active, this means the
8128 * crtc is now deactivated. Break the links. connector
8129 * -> encoder links are only establish when things are
8130 * actually up, hence no need to break them. */
8131 WARN_ON(crtc->active);
8132
8133 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
8134 WARN_ON(encoder->connectors_active);
8135 encoder->base.crtc = NULL;
8136 }
8137 }
8138}
8139
8140static void intel_sanitize_encoder(struct intel_encoder *encoder)
8141{
8142 struct intel_connector *connector;
8143 struct drm_device *dev = encoder->base.dev;
8144
8145 /* We need to check both for a crtc link (meaning that the
8146 * encoder is active and trying to read from a pipe) and the
8147 * pipe itself being active. */
8148 bool has_active_crtc = encoder->base.crtc &&
8149 to_intel_crtc(encoder->base.crtc)->active;
8150
8151 if (encoder->connectors_active && !has_active_crtc) {
8152 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
8153 encoder->base.base.id,
8154 drm_get_encoder_name(&encoder->base));
8155
8156 /* Connector is active, but has no active pipe. This is
8157 * fallout from our resume register restoring. Disable
8158 * the encoder manually again. */
8159 if (encoder->base.crtc) {
8160 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
8161 encoder->base.base.id,
8162 drm_get_encoder_name(&encoder->base));
8163 encoder->disable(encoder);
8164 }
8165
8166 /* Inconsistent output/port/pipe state happens presumably due to
8167 * a bug in one of the get_hw_state functions. Or someplace else
8168 * in our code, like the register restore mess on resume. Clamp
8169 * things to off as a safer default. */
8170 list_for_each_entry(connector,
8171 &dev->mode_config.connector_list,
8172 base.head) {
8173 if (connector->encoder != encoder)
8174 continue;
8175
8176 intel_connector_break_all_links(connector);
8177 }
8178 }
8179 /* Enabled encoders without active connectors will be fixed in
8180 * the crtc fixup. */
8181}
8182
8183/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
8184 * and i915 state tracking structures. */
8185void intel_modeset_setup_hw_state(struct drm_device *dev)
8186{
8187 struct drm_i915_private *dev_priv = dev->dev_private;
8188 enum pipe pipe;
8189 u32 tmp;
8190 struct intel_crtc *crtc;
8191 struct intel_encoder *encoder;
8192 struct intel_connector *connector;
8193
8194 for_each_pipe(pipe) {
8195 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8196
8197 tmp = I915_READ(PIPECONF(pipe));
8198 if (tmp & PIPECONF_ENABLE)
8199 crtc->active = true;
8200 else
8201 crtc->active = false;
8202
8203 crtc->base.enabled = crtc->active;
8204
8205 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
8206 crtc->base.base.id,
8207 crtc->active ? "enabled" : "disabled");
8208 }
8209
8210 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8211 base.head) {
8212 pipe = 0;
8213
8214 if (encoder->get_hw_state(encoder, &pipe)) {
8215 encoder->base.crtc =
8216 dev_priv->pipe_to_crtc_mapping[pipe];
8217 } else {
8218 encoder->base.crtc = NULL;
8219 }
8220
8221 encoder->connectors_active = false;
8222 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
8223 encoder->base.base.id,
8224 drm_get_encoder_name(&encoder->base),
8225 encoder->base.crtc ? "enabled" : "disabled",
8226 pipe);
8227 }
8228
8229 list_for_each_entry(connector, &dev->mode_config.connector_list,
8230 base.head) {
8231 if (connector->get_hw_state(connector)) {
8232 connector->base.dpms = DRM_MODE_DPMS_ON;
8233 connector->encoder->connectors_active = true;
8234 connector->base.encoder = &connector->encoder->base;
8235 } else {
8236 connector->base.dpms = DRM_MODE_DPMS_OFF;
8237 connector->base.encoder = NULL;
8238 }
8239 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
8240 connector->base.base.id,
8241 drm_get_connector_name(&connector->base),
8242 connector->base.encoder ? "enabled" : "disabled");
8243 }
8244
8245 /* HW state is read out, now we need to sanitize this mess. */
8246 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8247 base.head) {
8248 intel_sanitize_encoder(encoder);
8249 }
8250
8251 for_each_pipe(pipe) {
8252 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8253 intel_sanitize_crtc(crtc);
8254 }
8255
8256 intel_modeset_update_staged_output_state(dev);
8257
8258 intel_modeset_check_state(dev);
7238} 8259}
7239 8260
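The readout pass trusts only the hardware: crtc->active comes straight from the PIPECONF enable bit, and encoder/connector links are rebuilt from get_hw_state before anything is sanitized. A tiny sketch of that "derive the sw flags from a register snapshot" step; the register layout and bit position below are made-up stand-ins, not the real PIPECONF definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PIPECONF_ENABLE (1u << 31)         /* placeholder bit, not the hw one */

int main(void)
{
        uint32_t pipeconf[2] = { FAKE_PIPECONF_ENABLE, 0 };     /* fake hw snapshot */
        bool active[2];
        int pipe;

        for (pipe = 0; pipe < 2; pipe++) {
                /* sw tracking is seeded from the register, not the other way round */
                active[pipe] = pipeconf[pipe] & FAKE_PIPECONF_ENABLE;
                printf("pipe %c hw state readout: %s\n", 'A' + pipe,
                       active[pipe] ? "enabled" : "disabled");
        }
        return 0;
}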
7240void intel_modeset_gem_init(struct drm_device *dev) 8261void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
7242 intel_modeset_init_hw(dev); 8263 intel_modeset_init_hw(dev);
7243 8264
7244 intel_setup_overlay(dev); 8265 intel_setup_overlay(dev);
8266
8267 intel_modeset_setup_hw_state(dev);
7245} 8268}
7246 8269
7247void intel_modeset_cleanup(struct drm_device *dev) 8270void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
7280 * enqueue unpin/hotplug work. */ 8303 * enqueue unpin/hotplug work. */
7281 drm_irq_uninstall(dev); 8304 drm_irq_uninstall(dev);
7282 cancel_work_sync(&dev_priv->hotplug_work); 8305 cancel_work_sync(&dev_priv->hotplug_work);
7283 cancel_work_sync(&dev_priv->rps_work); 8306 cancel_work_sync(&dev_priv->rps.work);
7284 8307
7285 /* flush any delayed tasks or pending work */ 8308 /* flush any delayed tasks or pending work */
7286 flush_scheduled_work(); 8309 flush_scheduled_work();
7287 8310
7288 /* Shut off idle work before the crtcs get freed. */
7289 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7290 intel_crtc = to_intel_crtc(crtc);
7291 del_timer_sync(&intel_crtc->idle_timer);
7292 }
7293 del_timer_sync(&dev_priv->idle_timer);
7294 cancel_work_sync(&dev_priv->idle_work);
7295
7296 drm_mode_config_cleanup(dev); 8311 drm_mode_config_cleanup(dev);
7297} 8312}
7298 8313
@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
7338 u32 position; 8353 u32 position;
7339 u32 base; 8354 u32 base;
7340 u32 size; 8355 u32 size;
7341 } cursor[2]; 8356 } cursor[I915_MAX_PIPES];
7342 8357
7343 struct intel_pipe_error_state { 8358 struct intel_pipe_error_state {
7344 u32 conf; 8359 u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
7350 u32 vtotal; 8365 u32 vtotal;
7351 u32 vblank; 8366 u32 vblank;
7352 u32 vsync; 8367 u32 vsync;
7353 } pipe[2]; 8368 } pipe[I915_MAX_PIPES];
7354 8369
7355 struct intel_plane_error_state { 8370 struct intel_plane_error_state {
7356 u32 control; 8371 u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
7360 u32 addr; 8375 u32 addr;
7361 u32 surface; 8376 u32 surface;
7362 u32 tile_offset; 8377 u32 tile_offset;
7363 } plane[2]; 8378 } plane[I915_MAX_PIPES];
7364}; 8379};
7365 8380
7366struct intel_display_error_state * 8381struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
7374 if (error == NULL) 8389 if (error == NULL)
7375 return NULL; 8390 return NULL;
7376 8391
7377 for (i = 0; i < 2; i++) { 8392 for_each_pipe(i) {
7378 error->cursor[i].control = I915_READ(CURCNTR(i)); 8393 error->cursor[i].control = I915_READ(CURCNTR(i));
7379 error->cursor[i].position = I915_READ(CURPOS(i)); 8394 error->cursor[i].position = I915_READ(CURPOS(i));
7380 error->cursor[i].base = I915_READ(CURBASE(i)); 8395 error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
7407 struct drm_device *dev, 8422 struct drm_device *dev,
7408 struct intel_display_error_state *error) 8423 struct intel_display_error_state *error)
7409{ 8424{
8425 drm_i915_private_t *dev_priv = dev->dev_private;
7410 int i; 8426 int i;
7411 8427
7412 for (i = 0; i < 2; i++) { 8428 seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
8429 for_each_pipe(i) {
7413 seq_printf(m, "Pipe [%d]:\n", i); 8430 seq_printf(m, "Pipe [%d]:\n", i);
7414 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); 8431 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
7415 seq_printf(m, " SRC: %08x\n", error->pipe[i].source); 8432 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f1bd4f4cd667..6c8746c030c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -35,42 +35,10 @@
35#include "intel_drv.h" 35#include "intel_drv.h"
36#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include <drm/drm_dp_helper.h>
39 38
40#define DP_RECEIVER_CAP_SIZE 0xf
41#define DP_LINK_STATUS_SIZE 6 39#define DP_LINK_STATUS_SIZE 6
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43 41
44#define DP_LINK_CONFIGURATION_SIZE 9
45
46struct intel_dp {
47 struct intel_encoder base;
48 uint32_t output_reg;
49 uint32_t DP;
50 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
51 bool has_audio;
52 enum hdmi_force_audio force_audio;
53 uint32_t color_range;
54 int dpms_mode;
55 uint8_t link_bw;
56 uint8_t lane_count;
57 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
58 struct i2c_adapter adapter;
59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp;
61 uint8_t train_set[4];
62 int panel_power_up_delay;
63 int panel_power_down_delay;
64 int panel_power_cycle_delay;
65 int backlight_on_delay;
66 int backlight_off_delay;
67 struct drm_display_mode *panel_fixed_mode; /* for eDP */
68 struct delayed_work panel_vdd_work;
69 bool want_panel_vdd;
70 struct edid *edid; /* cached EDID for eDP */
71 int edid_mode_count;
72};
73
74/** 42/**
75 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 43 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
76 * @intel_dp: DP struct 44 * @intel_dp: DP struct
@@ -839,9 +807,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
839 } 807 }
840} 808}
841 809
842static void ironlake_edp_pll_on(struct drm_encoder *encoder);
843static void ironlake_edp_pll_off(struct drm_encoder *encoder);
844
845static void 810static void
846intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 811intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
847 struct drm_display_mode *adjusted_mode) 812 struct drm_display_mode *adjusted_mode)
@@ -852,14 +817,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 struct drm_crtc *crtc = intel_dp->base.base.crtc; 817 struct drm_crtc *crtc = intel_dp->base.base.crtc;
853 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 818 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
854 819
855 /* Turn on the eDP PLL if needed */
856 if (is_edp(intel_dp)) {
857 if (!is_pch_edp(intel_dp))
858 ironlake_edp_pll_on(encoder);
859 else
860 ironlake_edp_pll_off(encoder);
861 }
862
863 /* 820 /*
864 * There are four kinds of DP registers: 821 * There are four kinds of DP registers:
865 * 822 *
@@ -881,10 +838,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
881 * supposed to be read-only. 838 * supposed to be read-only.
882 */ 839 */
883 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 840 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
884 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
885 841
886 /* Handle DP bits in common between all three register formats */ 842 /* Handle DP bits in common between all three register formats */
887
888 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 843 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
889 844
890 switch (intel_dp->lane_count) { 845 switch (intel_dp->lane_count) {
@@ -931,7 +886,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
931 intel_dp->DP |= intel_crtc->pipe << 29; 886 intel_dp->DP |= intel_crtc->pipe << 29;
932 887
933 /* don't miss out required setting for eDP */ 888 /* don't miss out required setting for eDP */
934 intel_dp->DP |= DP_PLL_ENABLE;
935 if (adjusted_mode->clock < 200000) 889 if (adjusted_mode->clock < 200000)
936 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 890 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
937 else 891 else
@@ -953,7 +907,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
953 907
954 if (is_cpu_edp(intel_dp)) { 908 if (is_cpu_edp(intel_dp)) {
955 /* don't miss out required setting for eDP */ 909 /* don't miss out required setting for eDP */
956 intel_dp->DP |= DP_PLL_ENABLE;
957 if (adjusted_mode->clock < 200000) 910 if (adjusted_mode->clock < 200000)
958 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 911 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
959 else 912 else
@@ -1224,27 +1177,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1224 msleep(intel_dp->backlight_off_delay); 1177 msleep(intel_dp->backlight_off_delay);
1225} 1178}
1226 1179
1227static void ironlake_edp_pll_on(struct drm_encoder *encoder) 1180static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1228{ 1181{
1229 struct drm_device *dev = encoder->dev; 1182 struct drm_device *dev = intel_dp->base.base.dev;
1183 struct drm_crtc *crtc = intel_dp->base.base.crtc;
1230 struct drm_i915_private *dev_priv = dev->dev_private; 1184 struct drm_i915_private *dev_priv = dev->dev_private;
1231 u32 dpa_ctl; 1185 u32 dpa_ctl;
1232 1186
1187 assert_pipe_disabled(dev_priv,
1188 to_intel_crtc(crtc)->pipe);
1189
1233 DRM_DEBUG_KMS("\n"); 1190 DRM_DEBUG_KMS("\n");
1234 dpa_ctl = I915_READ(DP_A); 1191 dpa_ctl = I915_READ(DP_A);
1235 dpa_ctl |= DP_PLL_ENABLE; 1192 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1236 I915_WRITE(DP_A, dpa_ctl); 1193 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1194
1195 /* We don't adjust intel_dp->DP while tearing down the link, to
1196 * facilitate link retraining (e.g. after hotplug). Hence clear all
1197 * enable bits here to ensure that we don't enable too much. */
1198 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1199 intel_dp->DP |= DP_PLL_ENABLE;
1200 I915_WRITE(DP_A, intel_dp->DP);
1237 POSTING_READ(DP_A); 1201 POSTING_READ(DP_A);
1238 udelay(200); 1202 udelay(200);
1239} 1203}
1240 1204
1241static void ironlake_edp_pll_off(struct drm_encoder *encoder) 1205static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1242{ 1206{
1243 struct drm_device *dev = encoder->dev; 1207 struct drm_device *dev = intel_dp->base.base.dev;
1208 struct drm_crtc *crtc = intel_dp->base.base.crtc;
1244 struct drm_i915_private *dev_priv = dev->dev_private; 1209 struct drm_i915_private *dev_priv = dev->dev_private;
1245 u32 dpa_ctl; 1210 u32 dpa_ctl;
1246 1211
1212 assert_pipe_disabled(dev_priv,
1213 to_intel_crtc(crtc)->pipe);
1214
1247 dpa_ctl = I915_READ(DP_A); 1215 dpa_ctl = I915_READ(DP_A);
1216 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1217 "dp pll off, should be on\n");
1218 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1219
1220 /* We can't rely on the value tracked for the DP register in
1221 * intel_dp->DP because link_down must not change that (otherwise link
 1222 * re-training will fail). */
1248 dpa_ctl &= ~DP_PLL_ENABLE; 1223 dpa_ctl &= ~DP_PLL_ENABLE;
1249 I915_WRITE(DP_A, dpa_ctl); 1224 I915_WRITE(DP_A, dpa_ctl);
1250 POSTING_READ(DP_A); 1225 POSTING_READ(DP_A);
@@ -1281,10 +1256,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1281 } 1256 }
1282} 1257}
1283 1258
1284static void intel_dp_prepare(struct drm_encoder *encoder) 1259static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1260 enum pipe *pipe)
1285{ 1261{
1286 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1262 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1263 struct drm_device *dev = encoder->base.dev;
1264 struct drm_i915_private *dev_priv = dev->dev_private;
1265 u32 tmp = I915_READ(intel_dp->output_reg);
1287 1266
1267 if (!(tmp & DP_PORT_EN))
1268 return false;
1269
1270 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
1271 *pipe = PORT_TO_PIPE_CPT(tmp);
1272 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
1273 *pipe = PORT_TO_PIPE(tmp);
1274 } else {
1275 u32 trans_sel;
1276 u32 trans_dp;
1277 int i;
1278
1279 switch (intel_dp->output_reg) {
1280 case PCH_DP_B:
1281 trans_sel = TRANS_DP_PORT_SEL_B;
1282 break;
1283 case PCH_DP_C:
1284 trans_sel = TRANS_DP_PORT_SEL_C;
1285 break;
1286 case PCH_DP_D:
1287 trans_sel = TRANS_DP_PORT_SEL_D;
1288 break;
1289 default:
1290 return true;
1291 }
1292
1293 for_each_pipe(i) {
1294 trans_dp = I915_READ(TRANS_DP_CTL(i));
1295 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1296 *pipe = i;
1297 return true;
1298 }
1299 }
1300 }
1301
1302 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
1303
1304 return true;
1305}
1306
1307static void intel_disable_dp(struct intel_encoder *encoder)
1308{
1309 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1288 1310
1289 /* Make sure the panel is off before trying to change the mode. But also 1311 /* Make sure the panel is off before trying to change the mode. But also
1290 * ensure that we have vdd while we switch off the panel. */ 1312 * ensure that we have vdd while we switch off the panel. */
@@ -1292,14 +1314,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
1292 ironlake_edp_backlight_off(intel_dp); 1314 ironlake_edp_backlight_off(intel_dp);
1293 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1315 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1294 ironlake_edp_panel_off(intel_dp); 1316 ironlake_edp_panel_off(intel_dp);
1295 intel_dp_link_down(intel_dp); 1317
 1318 /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
1319 if (!is_cpu_edp(intel_dp))
1320 intel_dp_link_down(intel_dp);
1296} 1321}
1297 1322
1298static void intel_dp_commit(struct drm_encoder *encoder) 1323static void intel_post_disable_dp(struct intel_encoder *encoder)
1299{ 1324{
1300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1325 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1301 struct drm_device *dev = encoder->dev; 1326
1302 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); 1327 if (is_cpu_edp(intel_dp)) {
1328 intel_dp_link_down(intel_dp);
1329 ironlake_edp_pll_off(intel_dp);
1330 }
1331}
1332
1333static void intel_enable_dp(struct intel_encoder *encoder)
1334{
1335 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1336 struct drm_device *dev = encoder->base.dev;
1337 struct drm_i915_private *dev_priv = dev->dev_private;
1338 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1339
1340 if (WARN_ON(dp_reg & DP_PORT_EN))
1341 return;
1303 1342
1304 ironlake_edp_panel_vdd_on(intel_dp); 1343 ironlake_edp_panel_vdd_on(intel_dp);
1305 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1344 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1308,47 +1347,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1308 ironlake_edp_panel_vdd_off(intel_dp, true); 1347 ironlake_edp_panel_vdd_off(intel_dp, true);
1309 intel_dp_complete_link_train(intel_dp); 1348 intel_dp_complete_link_train(intel_dp);
1310 ironlake_edp_backlight_on(intel_dp); 1349 ironlake_edp_backlight_on(intel_dp);
1311
1312 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1313
1314 if (HAS_PCH_CPT(dev))
1315 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
1316} 1350}
1317 1351
1318static void 1352static void intel_pre_enable_dp(struct intel_encoder *encoder)
1319intel_dp_dpms(struct drm_encoder *encoder, int mode)
1320{ 1353{
1321 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1354 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1322 struct drm_device *dev = encoder->dev;
1323 struct drm_i915_private *dev_priv = dev->dev_private;
1324 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1325 1355
1326 if (mode != DRM_MODE_DPMS_ON) { 1356 if (is_cpu_edp(intel_dp))
1327 /* Switching the panel off requires vdd. */ 1357 ironlake_edp_pll_on(intel_dp);
1328 ironlake_edp_panel_vdd_on(intel_dp);
1329 ironlake_edp_backlight_off(intel_dp);
1330 intel_dp_sink_dpms(intel_dp, mode);
1331 ironlake_edp_panel_off(intel_dp);
1332 intel_dp_link_down(intel_dp);
1333
1334 if (is_cpu_edp(intel_dp))
1335 ironlake_edp_pll_off(encoder);
1336 } else {
1337 if (is_cpu_edp(intel_dp))
1338 ironlake_edp_pll_on(encoder);
1339
1340 ironlake_edp_panel_vdd_on(intel_dp);
1341 intel_dp_sink_dpms(intel_dp, mode);
1342 if (!(dp_reg & DP_PORT_EN)) {
1343 intel_dp_start_link_train(intel_dp);
1344 ironlake_edp_panel_on(intel_dp);
1345 ironlake_edp_panel_vdd_off(intel_dp, true);
1346 intel_dp_complete_link_train(intel_dp);
1347 } else
1348 ironlake_edp_panel_vdd_off(intel_dp, false);
1349 ironlake_edp_backlight_on(intel_dp);
1350 }
1351 intel_dp->dpms_mode = mode;
1352} 1358}
1353 1359
1354/* 1360/*
@@ -1667,6 +1673,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1667 struct drm_i915_private *dev_priv = dev->dev_private; 1673 struct drm_i915_private *dev_priv = dev->dev_private;
1668 int ret; 1674 int ret;
1669 1675
1676 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1677 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1678
1679 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1680 case DP_TRAINING_PATTERN_DISABLE:
1681 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1682 break;
1683 case DP_TRAINING_PATTERN_1:
1684 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1685 break;
1686 case DP_TRAINING_PATTERN_2:
1687 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1688 break;
1689 case DP_TRAINING_PATTERN_3:
1690 DRM_ERROR("DP training pattern 3 not supported\n");
1691 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1692 break;
1693 }
1694
1695 } else {
1696 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1697
1698 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1699 case DP_TRAINING_PATTERN_DISABLE:
1700 dp_reg_value |= DP_LINK_TRAIN_OFF;
1701 break;
1702 case DP_TRAINING_PATTERN_1:
1703 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1704 break;
1705 case DP_TRAINING_PATTERN_2:
1706 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1707 break;
1708 case DP_TRAINING_PATTERN_3:
1709 DRM_ERROR("DP training pattern 3 not supported\n");
1710 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1711 break;
1712 }
1713 }
1714
1670 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1715 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1671 POSTING_READ(intel_dp->output_reg); 1716 POSTING_READ(intel_dp->output_reg);
1672 1717
@@ -1674,12 +1719,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1674 DP_TRAINING_PATTERN_SET, 1719 DP_TRAINING_PATTERN_SET,
1675 dp_train_pat); 1720 dp_train_pat);
1676 1721
1677 ret = intel_dp_aux_native_write(intel_dp, 1722 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1678 DP_TRAINING_LANE0_SET, 1723 DP_TRAINING_PATTERN_DISABLE) {
1679 intel_dp->train_set, 1724 ret = intel_dp_aux_native_write(intel_dp,
1680 intel_dp->lane_count); 1725 DP_TRAINING_LANE0_SET,
1681 if (ret != intel_dp->lane_count) 1726 intel_dp->train_set,
1682 return false; 1727 intel_dp->lane_count);
1728 if (ret != intel_dp->lane_count)
1729 return false;
1730 }
1683 1731
1684 return true; 1732 return true;
1685} 1733}
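intel_dp_set_link_train now folds the CPT/non-CPT difference into one place: the training-pattern field is masked out and re-encoded according to the register variant before the write. A rough standalone sketch of that select-mask-and-encode step; the bit positions below are placeholders, not the real DP/CPT register definitions:

#include <stdint.h>
#include <stdio.h>

#define FAKE_TRAIN_MASK         0x30000000u     /* placeholder for DP_LINK_TRAIN_MASK */
#define FAKE_TRAIN_MASK_CPT     0x00000300u     /* placeholder for the CPT variant */

enum pattern { PAT_DISABLE, PAT_1, PAT_2 };

static uint32_t set_train_pattern(uint32_t reg, enum pattern pat, int cpt)
{
        uint32_t mask = cpt ? FAKE_TRAIN_MASK_CPT : FAKE_TRAIN_MASK;
        int shift = cpt ? 8 : 28;

        reg &= ~mask;                           /* clear the old pattern field */
        reg |= (uint32_t)pat << shift;          /* encode the new pattern */
        return reg;
}

int main(void)
{
        printf("CPT pattern 1:     0x%08x\n", set_train_pattern(0, PAT_1, 1));
        printf("non-CPT pattern 2: 0x%08x\n", set_train_pattern(0, PAT_2, 0));
        return 0;
}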
@@ -1689,26 +1737,12 @@ static void
1689intel_dp_start_link_train(struct intel_dp *intel_dp) 1737intel_dp_start_link_train(struct intel_dp *intel_dp)
1690{ 1738{
1691 struct drm_device *dev = intel_dp->base.base.dev; 1739 struct drm_device *dev = intel_dp->base.base.dev;
1692 struct drm_i915_private *dev_priv = dev->dev_private;
1693 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1694 int i; 1740 int i;
1695 uint8_t voltage; 1741 uint8_t voltage;
1696 bool clock_recovery = false; 1742 bool clock_recovery = false;
1697 int voltage_tries, loop_tries; 1743 int voltage_tries, loop_tries;
1698 u32 reg;
1699 uint32_t DP = intel_dp->DP; 1744 uint32_t DP = intel_dp->DP;
1700 1745
1701 /*
1702 * On CPT we have to enable the port in training pattern 1, which
1703 * will happen below in intel_dp_set_link_train. Otherwise, enable
1704 * the port and wait for it to become active.
1705 */
1706 if (!HAS_PCH_CPT(dev)) {
1707 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1708 POSTING_READ(intel_dp->output_reg);
1709 intel_wait_for_vblank(dev, intel_crtc->pipe);
1710 }
1711
1712 /* Write the link configuration data */ 1746 /* Write the link configuration data */
1713 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1747 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1714 intel_dp->link_configuration, 1748 intel_dp->link_configuration,
@@ -1716,10 +1750,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1716 1750
1717 DP |= DP_PORT_EN; 1751 DP |= DP_PORT_EN;
1718 1752
1719 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1720 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1721 else
1722 DP &= ~DP_LINK_TRAIN_MASK;
1723 memset(intel_dp->train_set, 0, 4); 1753 memset(intel_dp->train_set, 0, 4);
1724 voltage = 0xff; 1754 voltage = 0xff;
1725 voltage_tries = 0; 1755 voltage_tries = 0;
@@ -1743,12 +1773,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1743 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1773 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1744 } 1774 }
1745 1775
1746 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1776 if (!intel_dp_set_link_train(intel_dp, DP,
1747 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1748 else
1749 reg = DP | DP_LINK_TRAIN_PAT_1;
1750
1751 if (!intel_dp_set_link_train(intel_dp, reg,
1752 DP_TRAINING_PATTERN_1 | 1777 DP_TRAINING_PATTERN_1 |
1753 DP_LINK_SCRAMBLING_DISABLE)) 1778 DP_LINK_SCRAMBLING_DISABLE))
1754 break; 1779 break;
@@ -1803,10 +1828,8 @@ static void
1803intel_dp_complete_link_train(struct intel_dp *intel_dp) 1828intel_dp_complete_link_train(struct intel_dp *intel_dp)
1804{ 1829{
1805 struct drm_device *dev = intel_dp->base.base.dev; 1830 struct drm_device *dev = intel_dp->base.base.dev;
1806 struct drm_i915_private *dev_priv = dev->dev_private;
1807 bool channel_eq = false; 1831 bool channel_eq = false;
1808 int tries, cr_tries; 1832 int tries, cr_tries;
1809 u32 reg;
1810 uint32_t DP = intel_dp->DP; 1833 uint32_t DP = intel_dp->DP;
1811 1834
1812 /* channel equalization */ 1835 /* channel equalization */
@@ -1835,13 +1858,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1835 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1858 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1836 } 1859 }
1837 1860
1838 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1839 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1840 else
1841 reg = DP | DP_LINK_TRAIN_PAT_2;
1842
1843 /* channel eq pattern */ 1861 /* channel eq pattern */
1844 if (!intel_dp_set_link_train(intel_dp, reg, 1862 if (!intel_dp_set_link_train(intel_dp, DP,
1845 DP_TRAINING_PATTERN_2 | 1863 DP_TRAINING_PATTERN_2 |
1846 DP_LINK_SCRAMBLING_DISABLE)) 1864 DP_LINK_SCRAMBLING_DISABLE))
1847 break; 1865 break;
@@ -1876,15 +1894,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1876 ++tries; 1894 ++tries;
1877 } 1895 }
1878 1896
1879 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1897 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
1880 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1881 else
1882 reg = DP | DP_LINK_TRAIN_OFF;
1883
1884 I915_WRITE(intel_dp->output_reg, reg);
1885 POSTING_READ(intel_dp->output_reg);
1886 intel_dp_aux_native_write_1(intel_dp,
1887 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1888} 1898}
1889 1899
1890static void 1900static void
@@ -1894,18 +1904,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1894 struct drm_i915_private *dev_priv = dev->dev_private; 1904 struct drm_i915_private *dev_priv = dev->dev_private;
1895 uint32_t DP = intel_dp->DP; 1905 uint32_t DP = intel_dp->DP;
1896 1906
1897 if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) 1907 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1898 return; 1908 return;
1899 1909
1900 DRM_DEBUG_KMS("\n"); 1910 DRM_DEBUG_KMS("\n");
1901 1911
1902 if (is_edp(intel_dp)) {
1903 DP &= ~DP_PLL_ENABLE;
1904 I915_WRITE(intel_dp->output_reg, DP);
1905 POSTING_READ(intel_dp->output_reg);
1906 udelay(100);
1907 }
1908
1909 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1912 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1910 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1913 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1911 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1914 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1917,13 +1920,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1917 1920
1918 msleep(17); 1921 msleep(17);
1919 1922
1920 if (is_edp(intel_dp)) {
1921 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1922 DP |= DP_LINK_TRAIN_OFF_CPT;
1923 else
1924 DP |= DP_LINK_TRAIN_OFF;
1925 }
1926
1927 if (HAS_PCH_IBX(dev) && 1923 if (HAS_PCH_IBX(dev) &&
1928 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1924 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1929 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1925 struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2032,10 +2028,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2032 u8 sink_irq_vector; 2028 u8 sink_irq_vector;
2033 u8 link_status[DP_LINK_STATUS_SIZE]; 2029 u8 link_status[DP_LINK_STATUS_SIZE];
2034 2030
2035 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 2031 if (!intel_dp->base.connectors_active)
2036 return; 2032 return;
2037 2033
2038 if (!intel_dp->base.base.crtc) 2034 if (WARN_ON(!intel_dp->base.base.crtc))
2039 return; 2035 return;
2040 2036
2041 /* Try to read receiver status if the link appears to be up */ 2037 /* Try to read receiver status if the link appears to be up */
@@ -2159,7 +2155,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2159 ret = drm_add_edid_modes(connector, intel_dp->edid); 2155 ret = drm_add_edid_modes(connector, intel_dp->edid);
2160 drm_edid_to_eld(connector, 2156 drm_edid_to_eld(connector,
2161 intel_dp->edid); 2157 intel_dp->edid);
2162 connector->display_info.raw_edid = NULL;
2163 return intel_dp->edid_mode_count; 2158 return intel_dp->edid_mode_count;
2164 } 2159 }
2165 2160
@@ -2205,7 +2200,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2205 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2200 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2206 if (edid) { 2201 if (edid) {
2207 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2202 intel_dp->has_audio = drm_detect_monitor_audio(edid);
2208 connector->display_info.raw_edid = NULL;
2209 kfree(edid); 2203 kfree(edid);
2210 } 2204 }
2211 } 2205 }
@@ -2270,8 +2264,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
2270 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2264 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2271 if (edid) { 2265 if (edid) {
2272 has_audio = drm_detect_monitor_audio(edid); 2266 has_audio = drm_detect_monitor_audio(edid);
2273
2274 connector->display_info.raw_edid = NULL;
2275 kfree(edid); 2267 kfree(edid);
2276 } 2268 }
2277 2269
@@ -2325,9 +2317,8 @@ intel_dp_set_property(struct drm_connector *connector,
2325done: 2317done:
2326 if (intel_dp->base.base.crtc) { 2318 if (intel_dp->base.base.crtc) {
2327 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2319 struct drm_crtc *crtc = intel_dp->base.base.crtc;
2328 drm_crtc_helper_set_mode(crtc, &crtc->mode, 2320 intel_set_mode(crtc, &crtc->mode,
2329 crtc->x, crtc->y, 2321 crtc->x, crtc->y, crtc->fb);
2330 crtc->fb);
2331 } 2322 }
2332 2323
2333 return 0; 2324 return 0;
@@ -2361,15 +2352,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2361} 2352}
2362 2353
2363static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2354static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2364 .dpms = intel_dp_dpms,
2365 .mode_fixup = intel_dp_mode_fixup, 2355 .mode_fixup = intel_dp_mode_fixup,
2366 .prepare = intel_dp_prepare,
2367 .mode_set = intel_dp_mode_set, 2356 .mode_set = intel_dp_mode_set,
2368 .commit = intel_dp_commit, 2357 .disable = intel_encoder_noop,
2369}; 2358};
2370 2359
2371static const struct drm_connector_funcs intel_dp_connector_funcs = { 2360static const struct drm_connector_funcs intel_dp_connector_funcs = {
2372 .dpms = drm_helper_connector_dpms, 2361 .dpms = intel_connector_dpms,
2373 .detect = intel_dp_detect, 2362 .detect = intel_dp_detect,
2374 .fill_modes = drm_helper_probe_single_connector_modes, 2363 .fill_modes = drm_helper_probe_single_connector_modes,
2375 .set_property = intel_dp_set_property, 2364 .set_property = intel_dp_set_property,
@@ -2440,7 +2429,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
2440} 2429}
2441 2430
2442void 2431void
2443intel_dp_init(struct drm_device *dev, int output_reg) 2432intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2444{ 2433{
2445 struct drm_i915_private *dev_priv = dev->dev_private; 2434 struct drm_i915_private *dev_priv = dev->dev_private;
2446 struct drm_connector *connector; 2435 struct drm_connector *connector;
@@ -2455,7 +2444,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2455 return; 2444 return;
2456 2445
2457 intel_dp->output_reg = output_reg; 2446 intel_dp->output_reg = output_reg;
2458 intel_dp->dpms_mode = -1; 2447 intel_dp->port = port;
2448 /* Preserve the current hw state. */
2449 intel_dp->DP = I915_READ(intel_dp->output_reg);
2459 2450
2460 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2451 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2461 if (!intel_connector) { 2452 if (!intel_connector) {
@@ -2482,18 +2473,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2482 2473
2483 connector->polled = DRM_CONNECTOR_POLL_HPD; 2474 connector->polled = DRM_CONNECTOR_POLL_HPD;
2484 2475
2485 if (output_reg == DP_B || output_reg == PCH_DP_B) 2476 intel_encoder->cloneable = false;
2486 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
2487 else if (output_reg == DP_C || output_reg == PCH_DP_C)
2488 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
2489 else if (output_reg == DP_D || output_reg == PCH_DP_D)
2490 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
2491 2477
2492 if (is_edp(intel_dp)) { 2478 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2493 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 2479 ironlake_panel_vdd_work);
2494 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2495 ironlake_panel_vdd_work);
2496 }
2497 2480
2498 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2481 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2499 2482
@@ -2507,29 +2490,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2507 intel_connector_attach_encoder(intel_connector, intel_encoder); 2490 intel_connector_attach_encoder(intel_connector, intel_encoder);
2508 drm_sysfs_connector_add(connector); 2491 drm_sysfs_connector_add(connector);
2509 2492
2493 intel_encoder->enable = intel_enable_dp;
2494 intel_encoder->pre_enable = intel_pre_enable_dp;
2495 intel_encoder->disable = intel_disable_dp;
2496 intel_encoder->post_disable = intel_post_disable_dp;
2497 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2498 intel_connector->get_hw_state = intel_connector_get_hw_state;
2499
2510 /* Set up the DDC bus. */ 2500 /* Set up the DDC bus. */
2511 switch (output_reg) { 2501 switch (port) {
2512 case DP_A: 2502 case PORT_A:
2513 name = "DPDDC-A"; 2503 name = "DPDDC-A";
2514 break; 2504 break;
2515 case DP_B: 2505 case PORT_B:
2516 case PCH_DP_B: 2506 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
2517 dev_priv->hotplug_supported_mask |= 2507 name = "DPDDC-B";
2518 DPB_HOTPLUG_INT_STATUS; 2508 break;
2519 name = "DPDDC-B"; 2509 case PORT_C:
2520 break; 2510 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
2521 case DP_C: 2511 name = "DPDDC-C";
2522 case PCH_DP_C: 2512 break;
2523 dev_priv->hotplug_supported_mask |= 2513 case PORT_D:
2524 DPC_HOTPLUG_INT_STATUS; 2514 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
2525 name = "DPDDC-C"; 2515 name = "DPDDC-D";
2526 break; 2516 break;
2527 case DP_D: 2517 default:
2528 case PCH_DP_D: 2518 WARN(1, "Invalid port %c\n", port_name(port));
2529 dev_priv->hotplug_supported_mask |= 2519 break;
2530 DPD_HOTPLUG_INT_STATUS;
2531 name = "DPDDC-D";
2532 break;
2533 } 2520 }
2534 2521
2535 /* Cache some DPCD data in the eDP case */ 2522 /* Cache some DPCD data in the eDP case */
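Editor's note on the hunks above: every removed block repeated the same decision — CPT-style versus GMCH-style DP_LINK_TRAIN bits — before each training step, and after this patch the callers hand intel_dp_set_link_train() the bare DP value and leave the selection to the helper. The helper's body is not part of the hunks shown here, so the standalone C sketch below only restates the selection logic the deleted call-site code performed; the TOY_* constants are invented stand-ins, not the real i915_reg.h values.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in values only; the real DP_LINK_TRAIN_* masks and patterns are
 * defined in i915_reg.h and differ between the GMCH and CPT layouts. */
#define TOY_LINK_TRAIN_MASK      (3u << 28)
#define TOY_LINK_TRAIN_PAT_1     (1u << 28)
#define TOY_LINK_TRAIN_MASK_CPT  (3u << 8)
#define TOY_LINK_TRAIN_PAT_1_CPT (1u << 8)

/* Mirrors the deleted call-site logic: in the driver, use_cpt corresponds to
 * HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)). */
static uint32_t toy_select_train_pat_1(uint32_t dp, bool use_cpt)
{
	if (use_cpt) {
		dp &= ~TOY_LINK_TRAIN_MASK_CPT;
		dp |= TOY_LINK_TRAIN_PAT_1_CPT;
	} else {
		dp &= ~TOY_LINK_TRAIN_MASK;
		dp |= TOY_LINK_TRAIN_PAT_1;
	}
	return dp;
}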
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7db849052a98..05cc7c372fc5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h> 33#include <drm/drm_fb_helper.h>
34#include <drm/drm_dp_helper.h>
34 35
35#define _wait_for(COND, MS, W) ({ \ 36#define _wait_for(COND, MS, W) ({ \
36 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -40,7 +41,11 @@
40 ret__ = -ETIMEDOUT; \ 41 ret__ = -ETIMEDOUT; \
41 break; \ 42 break; \
42 } \ 43 } \
43 if (W && drm_can_sleep()) msleep(W); \ 44 if (W && drm_can_sleep()) { \
45 msleep(W); \
46 } else { \
47 cpu_relax(); \
48 } \
44 } \ 49 } \
45 ret__; \ 50 ret__; \
46}) 51})
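Editor's note: the only behavioural change to _wait_for() in this hunk is the new else branch — when a back-off interval was requested but sleeping is not allowed (drm_can_sleep() returns false, e.g. in atomic context), the macro now spins with cpu_relax() instead of skipping the back-off entirely. A rough userspace analogue of the same poll-until-timeout pattern, with clock_gettime()/nanosleep() standing in for jiffies and msleep(); wait_for_cond and its parameters are illustrative, not driver API.

#include <stdbool.h>
#include <time.h>

/* Poll cond() until it returns true or timeout_ms elapses. Sleep wait_ms
 * between polls when sleeping is permitted, otherwise just keep re-checking
 * (the userspace stand-in for cpu_relax()). Assumes wait_ms < 1000. */
static int wait_for_cond(bool (*cond)(void), long timeout_ms,
			 long wait_ms, bool can_sleep)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* the macro returns -ETIMEDOUT */

		if (wait_ms && can_sleep) {
			struct timespec ts = { 0, wait_ms * 1000000L };
			nanosleep(&ts, NULL);
		}
		/* else: busy-poll, where the kernel macro calls cpu_relax() */
	}
}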
@@ -90,25 +95,6 @@
90#define INTEL_OUTPUT_DISPLAYPORT 7 95#define INTEL_OUTPUT_DISPLAYPORT 7
91#define INTEL_OUTPUT_EDP 8 96#define INTEL_OUTPUT_EDP 8
92 97
93/* Intel Pipe Clone Bit */
94#define INTEL_HDMIB_CLONE_BIT 1
95#define INTEL_HDMIC_CLONE_BIT 2
96#define INTEL_HDMID_CLONE_BIT 3
97#define INTEL_HDMIE_CLONE_BIT 4
98#define INTEL_HDMIF_CLONE_BIT 5
99#define INTEL_SDVO_NON_TV_CLONE_BIT 6
100#define INTEL_SDVO_TV_CLONE_BIT 7
101#define INTEL_SDVO_LVDS_CLONE_BIT 8
102#define INTEL_ANALOG_CLONE_BIT 9
103#define INTEL_TV_CLONE_BIT 10
104#define INTEL_DP_B_CLONE_BIT 11
105#define INTEL_DP_C_CLONE_BIT 12
106#define INTEL_DP_D_CLONE_BIT 13
107#define INTEL_LVDS_CLONE_BIT 14
108#define INTEL_DVO_TMDS_CLONE_BIT 15
109#define INTEL_DVO_LVDS_CLONE_BIT 16
110#define INTEL_EDP_CLONE_BIT 17
111
112#define INTEL_DVO_CHIP_NONE 0 98#define INTEL_DVO_CHIP_NONE 0
113#define INTEL_DVO_CHIP_LVDS 1 99#define INTEL_DVO_CHIP_LVDS 1
114#define INTEL_DVO_CHIP_TMDS 2 100#define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
151 137
152struct intel_encoder { 138struct intel_encoder {
153 struct drm_encoder base; 139 struct drm_encoder base;
140 /*
141 * The new crtc this encoder will be driven from. Only differs from
142 * base->crtc while a modeset is in progress.
143 */
144 struct intel_crtc *new_crtc;
145
154 int type; 146 int type;
155 bool needs_tv_clock; 147 bool needs_tv_clock;
148 /*
149 * Intel hw has only one MUX where encoders could be cloned, hence a
150 * simple flag is enough to compute the possible_clones mask.
151 */
152 bool cloneable;
153 bool connectors_active;
156 void (*hot_plug)(struct intel_encoder *); 154 void (*hot_plug)(struct intel_encoder *);
155 void (*pre_enable)(struct intel_encoder *);
156 void (*enable)(struct intel_encoder *);
157 void (*disable)(struct intel_encoder *);
158 void (*post_disable)(struct intel_encoder *);
159 /* Read out the current hw state of this encoder, returning true if
160 * the encoder is active. If the encoder is enabled it also sets the pipe
161 * it is connected to in the pipe parameter. */
162 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
157 int crtc_mask; 163 int crtc_mask;
158 int clone_mask;
159}; 164};
160 165
161struct intel_connector { 166struct intel_connector {
162 struct drm_connector base; 167 struct drm_connector base;
168 /*
169 * The fixed encoder this connector is connected to.
170 */
163 struct intel_encoder *encoder; 171 struct intel_encoder *encoder;
172
173 /*
174 * The new encoder this connector will be driven by. Only differs from
175 * encoder while a modeset is in progress.
176 */
177 struct intel_encoder *new_encoder;
178
179 /* Reads out the current hw state, returning true if the connector is enabled
180 * and active (i.e. dpms ON state). */
181 bool (*get_hw_state)(struct intel_connector *);
164}; 182};
165 183
166struct intel_crtc { 184struct intel_crtc {
@@ -168,11 +186,13 @@ struct intel_crtc {
168 enum pipe pipe; 186 enum pipe pipe;
169 enum plane plane; 187 enum plane plane;
170 u8 lut_r[256], lut_g[256], lut_b[256]; 188 u8 lut_r[256], lut_g[256], lut_b[256];
171 int dpms_mode; 189 /*
172 bool active; /* is the crtc on? independent of the dpms mode */ 190 * Whether the crtc and the connected output pipeline are active. Implies
191 * that crtc->enabled is set, i.e. the current mode configuration has
192 * some outputs connected to this crtc.
193 */
194 bool active;
173 bool primary_disabled; /* is the crtc obscured by a plane? */ 195 bool primary_disabled; /* is the crtc obscured by a plane? */
174 bool busy; /* is scanout buffer being updated frequently? */
175 struct timer_list idle_timer;
176 bool lowfreq_avail; 196 bool lowfreq_avail;
177 struct intel_overlay *overlay; 197 struct intel_overlay *overlay;
178 struct intel_unpin_work *unpin_work; 198 struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
311 struct drm_display_mode *adjusted_mode); 331 struct drm_display_mode *adjusted_mode);
312}; 332};
313 333
334#define DP_RECEIVER_CAP_SIZE 0xf
335#define DP_LINK_CONFIGURATION_SIZE 9
336
337struct intel_dp {
338 struct intel_encoder base;
339 uint32_t output_reg;
340 uint32_t DP;
341 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
342 bool has_audio;
343 enum hdmi_force_audio force_audio;
344 enum port port;
345 uint32_t color_range;
346 uint8_t link_bw;
347 uint8_t lane_count;
348 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
349 struct i2c_adapter adapter;
350 struct i2c_algo_dp_aux_data algo;
351 bool is_pch_edp;
352 uint8_t train_set[4];
353 int panel_power_up_delay;
354 int panel_power_down_delay;
355 int panel_power_cycle_delay;
356 int backlight_on_delay;
357 int backlight_off_delay;
358 struct drm_display_mode *panel_fixed_mode; /* for eDP */
359 struct delayed_work panel_vdd_work;
360 bool want_panel_vdd;
361 struct edid *edid; /* cached EDID for eDP */
362 int edid_mode_count;
363};
364
314static inline struct drm_crtc * 365static inline struct drm_crtc *
315intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 366intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
316{ 367{
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
350extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 401extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
351 402
352extern void intel_crt_init(struct drm_device *dev); 403extern void intel_crt_init(struct drm_device *dev);
353extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 404extern void intel_hdmi_init(struct drm_device *dev,
405 int sdvox_reg, enum port port);
354extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 406extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
355extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 407extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
356extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 408extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
357 bool is_sdvob); 409 bool is_sdvob);
358extern void intel_dvo_init(struct drm_device *dev); 410extern void intel_dvo_init(struct drm_device *dev);
359extern void intel_tv_init(struct drm_device *dev); 411extern void intel_tv_init(struct drm_device *dev);
360extern void intel_mark_busy(struct drm_device *dev, 412extern void intel_mark_busy(struct drm_device *dev);
361 struct drm_i915_gem_object *obj); 413extern void intel_mark_idle(struct drm_device *dev);
414extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
415extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
362extern bool intel_lvds_init(struct drm_device *dev); 416extern bool intel_lvds_init(struct drm_device *dev);
363extern void intel_dp_init(struct drm_device *dev, int dp_reg); 417extern void intel_dp_init(struct drm_device *dev, int output_reg,
418 enum port port);
364void 419void
365intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 420intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
366 struct drm_display_mode *adjusted_mode); 421 struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
373extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, 428extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
374 enum plane plane); 429 enum plane plane);
375 430
376void intel_sanitize_pm(struct drm_device *dev);
377
378/* intel_panel.c */ 431/* intel_panel.c */
379extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 432extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
380 struct drm_display_mode *adjusted_mode); 433 struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
391extern void intel_panel_destroy_backlight(struct drm_device *dev); 444extern void intel_panel_destroy_backlight(struct drm_device *dev);
392extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 445extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
393 446
447struct intel_set_config {
448 struct drm_encoder **save_connector_encoders;
449 struct drm_crtc **save_encoder_crtcs;
450
451 bool fb_changed;
452 bool mode_changed;
453};
454
455extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
456 int x, int y, struct drm_framebuffer *old_fb);
457extern void intel_modeset_disable(struct drm_device *dev);
394extern void intel_crtc_load_lut(struct drm_crtc *crtc); 458extern void intel_crtc_load_lut(struct drm_crtc *crtc);
395extern void intel_encoder_prepare(struct drm_encoder *encoder); 459extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
396extern void intel_encoder_commit(struct drm_encoder *encoder); 460extern void intel_encoder_noop(struct drm_encoder *encoder);
397extern void intel_encoder_destroy(struct drm_encoder *encoder); 461extern void intel_encoder_destroy(struct drm_encoder *encoder);
462extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
463extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
464extern void intel_connector_dpms(struct drm_connector *, int mode);
465extern bool intel_connector_get_hw_state(struct intel_connector *connector);
466extern void intel_modeset_check_state(struct drm_device *dev);
467
398 468
399static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 469static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
400{ 470{
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
417 bool load_detect_temp; 487 bool load_detect_temp;
418 int dpms_mode; 488 int dpms_mode;
419}; 489};
420extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 490extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
421 struct drm_connector *connector,
422 struct drm_display_mode *mode, 491 struct drm_display_mode *mode,
423 struct intel_load_detect_pipe *old); 492 struct intel_load_detect_pipe *old);
424extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 493extern void intel_release_load_detect_pipe(struct drm_connector *connector,
425 struct drm_connector *connector,
426 struct intel_load_detect_pipe *old); 494 struct intel_load_detect_pipe *old);
427 495
428extern void intelfb_restore(void); 496extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
503extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); 571extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
504extern void ironlake_teardown_rc6(struct drm_device *dev); 572extern void ironlake_teardown_rc6(struct drm_device *dev);
505 573
506extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); 574extern void intel_enable_ddi(struct intel_encoder *encoder);
575extern void intel_disable_ddi(struct intel_encoder *encoder);
576extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
577 enum pipe *pipe);
507extern void intel_ddi_mode_set(struct drm_encoder *encoder, 578extern void intel_ddi_mode_set(struct drm_encoder *encoder,
508 struct drm_display_mode *mode, 579 struct drm_display_mode *mode,
509 struct drm_display_mode *adjusted_mode); 580 struct drm_display_mode *adjusted_mode);
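Editor's note on the intel_drv.h changes above: the new enable/disable/get_hw_state hooks replace the drm CRTC-helper dpms callbacks with driver-owned on/off paths plus a hardware read-back, and connectors_active records what the driver believes the hardware is doing. That read-back is presumably what intel_modeset_check_state(), declared above and called from the reworked dpms paths below, uses to cross-check software bookkeeping against the registers. A toy, self-contained illustration of that cross-check idea — the struct layout and all toy_* names are made up, not the driver's real types:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy encoder: software bookkeeping plus a hw-readout callback. */
struct toy_encoder {
	bool connectors_active;	/* what the driver thinks */
	bool (*get_hw_state)(const struct toy_encoder *enc); /* what the hw says */
};

/* Loosely modelled on the idea behind intel_modeset_check_state(): after
 * every modeset or dpms change, software and hardware state must agree. */
static void toy_check_state(const struct toy_encoder *enc)
{
	bool hw_on = enc->get_hw_state(enc);

	assert(hw_on == enc->connectors_active &&
	       "encoder hw state does not match software tracking");
	(void)hw_on;
}

static bool toy_fake_hw_readout(const struct toy_encoder *enc)
{
	(void)enc;
	return true;	/* pretend the port enable bit reads back as set */
}

int main(void)
{
	struct toy_encoder enc = { .connectors_active = true,
				   .get_hw_state = toy_fake_hw_readout };

	toy_check_state(&enc);
	printf("state check passed\n");
	return 0;
}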
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ac9f2dd5648a..15da99533e5b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -36,6 +36,7 @@
36#define SIL164_ADDR 0x38 36#define SIL164_ADDR 0x38
37#define CH7xxx_ADDR 0x76 37#define CH7xxx_ADDR 0x76
38#define TFP410_ADDR 0x38 38#define TFP410_ADDR 0x38
39#define NS2501_ADDR 0x38
39 40
40static const struct intel_dvo_device intel_dvo_devices[] = { 41static const struct intel_dvo_device intel_dvo_devices[] = {
41 { 42 {
@@ -73,7 +74,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
73 .slave_addr = 0x75, 74 .slave_addr = 0x75,
74 .gpio = GMBUS_PORT_DPB, 75 .gpio = GMBUS_PORT_DPB,
75 .dev_ops = &ch7017_ops, 76 .dev_ops = &ch7017_ops,
76 } 77 },
78 {
79 .type = INTEL_DVO_CHIP_TMDS,
80 .name = "ns2501",
81 .dvo_reg = DVOC,
82 .slave_addr = NS2501_ADDR,
83 .dev_ops = &ns2501_ops,
84 }
77}; 85};
78 86
79struct intel_dvo { 87struct intel_dvo {
@@ -96,22 +104,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
96 struct intel_dvo, base); 104 struct intel_dvo, base);
97} 105}
98 106
99static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 107static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
100{ 108{
101 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 109 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
102 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 110
111 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
112}
113
114static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
115 enum pipe *pipe)
116{
117 struct drm_device *dev = encoder->base.dev;
118 struct drm_i915_private *dev_priv = dev->dev_private;
119 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
120 u32 tmp;
121
122 tmp = I915_READ(intel_dvo->dev.dvo_reg);
123
124 if (!(tmp & DVO_ENABLE))
125 return false;
126
127 *pipe = PORT_TO_PIPE(tmp);
128
129 return true;
130}
131
132static void intel_disable_dvo(struct intel_encoder *encoder)
133{
134 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
135 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
136 u32 dvo_reg = intel_dvo->dev.dvo_reg;
137 u32 temp = I915_READ(dvo_reg);
138
139 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
140 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
141 I915_READ(dvo_reg);
142}
143
144static void intel_enable_dvo(struct intel_encoder *encoder)
145{
146 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
147 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
103 u32 dvo_reg = intel_dvo->dev.dvo_reg; 148 u32 dvo_reg = intel_dvo->dev.dvo_reg;
104 u32 temp = I915_READ(dvo_reg); 149 u32 temp = I915_READ(dvo_reg);
105 150
151 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
152 I915_READ(dvo_reg);
153 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
154}
155
156static void intel_dvo_dpms(struct drm_connector *connector, int mode)
157{
158 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
159 struct drm_crtc *crtc;
160
161 /* dvo supports only 2 dpms states. */
162 if (mode != DRM_MODE_DPMS_ON)
163 mode = DRM_MODE_DPMS_OFF;
164
165 if (mode == connector->dpms)
166 return;
167
168 connector->dpms = mode;
169
170 /* Only need to change hw state when actually enabled */
171 crtc = intel_dvo->base.base.crtc;
172 if (!crtc) {
173 intel_dvo->base.connectors_active = false;
174 return;
175 }
176
106 if (mode == DRM_MODE_DPMS_ON) { 177 if (mode == DRM_MODE_DPMS_ON) {
107 I915_WRITE(dvo_reg, temp | DVO_ENABLE); 178 intel_dvo->base.connectors_active = true;
108 I915_READ(dvo_reg); 179
109 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); 180 intel_crtc_update_dpms(crtc);
181
182 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
110 } else { 183 } else {
111 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); 184 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
112 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); 185
113 I915_READ(dvo_reg); 186 intel_dvo->base.connectors_active = false;
187
188 intel_crtc_update_dpms(crtc);
114 } 189 }
190
191 intel_modeset_check_state(connector->dev);
115} 192}
116 193
117static int intel_dvo_mode_valid(struct drm_connector *connector, 194static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -266,15 +343,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
266} 343}
267 344
268static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 345static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
269 .dpms = intel_dvo_dpms,
270 .mode_fixup = intel_dvo_mode_fixup, 346 .mode_fixup = intel_dvo_mode_fixup,
271 .prepare = intel_encoder_prepare,
272 .mode_set = intel_dvo_mode_set, 347 .mode_set = intel_dvo_mode_set,
273 .commit = intel_encoder_commit, 348 .disable = intel_encoder_noop,
274}; 349};
275 350
276static const struct drm_connector_funcs intel_dvo_connector_funcs = { 351static const struct drm_connector_funcs intel_dvo_connector_funcs = {
277 .dpms = drm_helper_connector_dpms, 352 .dpms = intel_dvo_dpms,
278 .detect = intel_dvo_detect, 353 .detect = intel_dvo_detect,
279 .destroy = intel_dvo_destroy, 354 .destroy = intel_dvo_destroy,
280 .fill_modes = drm_helper_probe_single_connector_modes, 355 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -363,6 +438,11 @@ void intel_dvo_init(struct drm_device *dev)
363 drm_encoder_init(dev, &intel_encoder->base, 438 drm_encoder_init(dev, &intel_encoder->base,
364 &intel_dvo_enc_funcs, encoder_type); 439 &intel_dvo_enc_funcs, encoder_type);
365 440
441 intel_encoder->disable = intel_disable_dvo;
442 intel_encoder->enable = intel_enable_dvo;
443 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
444 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
445
366 /* Now, try to find a controller */ 446 /* Now, try to find a controller */
367 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 447 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
368 struct drm_connector *connector = &intel_connector->base; 448 struct drm_connector *connector = &intel_connector->base;
@@ -395,17 +475,14 @@ void intel_dvo_init(struct drm_device *dev)
395 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 475 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
396 switch (dvo->type) { 476 switch (dvo->type) {
397 case INTEL_DVO_CHIP_TMDS: 477 case INTEL_DVO_CHIP_TMDS:
398 intel_encoder->clone_mask = 478 intel_encoder->cloneable = true;
399 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
400 (1 << INTEL_ANALOG_CLONE_BIT);
401 drm_connector_init(dev, connector, 479 drm_connector_init(dev, connector,
402 &intel_dvo_connector_funcs, 480 &intel_dvo_connector_funcs,
403 DRM_MODE_CONNECTOR_DVII); 481 DRM_MODE_CONNECTOR_DVII);
404 encoder_type = DRM_MODE_ENCODER_TMDS; 482 encoder_type = DRM_MODE_ENCODER_TMDS;
405 break; 483 break;
406 case INTEL_DVO_CHIP_LVDS: 484 case INTEL_DVO_CHIP_LVDS:
407 intel_encoder->clone_mask = 485 intel_encoder->cloneable = false;
408 (1 << INTEL_DVO_LVDS_CLONE_BIT);
409 drm_connector_init(dev, connector, 486 drm_connector_init(dev, connector,
410 &intel_dvo_connector_funcs, 487 &intel_dvo_connector_funcs,
411 DRM_MODE_CONNECTOR_LVDS); 488 DRM_MODE_CONNECTOR_LVDS);
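Editor's note on the intel_dvo.c rework above: intel_dvo_dpms() now lives on the connector and only knows two real states — anything other than DPMS_ON is folded to OFF, connectors_active is updated, and intel_crtc_update_dpms() is left to decide whether the crtc still has any active outputs. A stripped-down sketch of that fold-and-update flow; all toy_* names and types are placeholders, not driver code:

#include <stdbool.h>

enum toy_dpms_mode { TOY_DPMS_ON, TOY_DPMS_STANDBY, TOY_DPMS_SUSPEND, TOY_DPMS_OFF };

struct toy_connector {
	enum toy_dpms_mode dpms;
	bool *connectors_active;	/* points at the attached encoder's flag */
	bool has_crtc;			/* is the connector part of the active config? */
};

/* Placeholder for intel_crtc_update_dpms(): in the driver this re-evaluates
 * whether the crtc still drives any active encoder and toggles the pipe. */
static void toy_crtc_update_dpms(void) { }

static void toy_connector_dpms(struct toy_connector *conn, enum toy_dpms_mode mode)
{
	/* Only two hw states exist, so fold the intermediate modes to OFF. */
	if (mode != TOY_DPMS_ON)
		mode = TOY_DPMS_OFF;

	if (mode == conn->dpms)
		return;
	conn->dpms = mode;

	if (!conn->has_crtc) {		/* nothing to switch on or off */
		*conn->connectors_active = false;
		return;
	}

	*conn->connectors_active = (mode == TOY_DPMS_ON);
	toy_crtc_update_dpms();
}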
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 025be7dd2a27..9ba0aaed7ee8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -150,6 +150,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
150 I915_WRITE(VIDEO_DIP_DATA, *data); 150 I915_WRITE(VIDEO_DIP_DATA, *data);
151 data++; 151 data++;
152 } 152 }
153 /* Write every possible data byte to force correct ECC calculation. */
154 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
155 I915_WRITE(VIDEO_DIP_DATA, 0);
153 mmiowb(); 156 mmiowb();
154 157
155 val |= g4x_infoframe_enable(frame); 158 val |= g4x_infoframe_enable(frame);
@@ -185,6 +188,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
185 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 188 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
186 data++; 189 data++;
187 } 190 }
191 /* Write every possible data byte to force correct ECC calculation. */
192 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
193 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
188 mmiowb(); 194 mmiowb();
189 195
190 val |= g4x_infoframe_enable(frame); 196 val |= g4x_infoframe_enable(frame);
@@ -223,6 +229,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
223 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 229 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
224 data++; 230 data++;
225 } 231 }
232 /* Write every possible data byte to force correct ECC calculation. */
233 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
234 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
226 mmiowb(); 235 mmiowb();
227 236
228 val |= g4x_infoframe_enable(frame); 237 val |= g4x_infoframe_enable(frame);
@@ -258,6 +267,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
258 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 267 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
259 data++; 268 data++;
260 } 269 }
270 /* Write every possible data byte to force correct ECC calculation. */
271 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
272 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
261 mmiowb(); 273 mmiowb();
262 274
263 val |= g4x_infoframe_enable(frame); 275 val |= g4x_infoframe_enable(frame);
@@ -291,6 +303,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
291 I915_WRITE(data_reg + i, *data); 303 I915_WRITE(data_reg + i, *data);
292 data++; 304 data++;
293 } 305 }
306 /* Write every possible data byte to force correct ECC calculation. */
307 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
308 I915_WRITE(data_reg + i, 0);
294 mmiowb(); 309 mmiowb();
295 310
296 val |= hsw_infoframe_enable(frame); 311 val |= hsw_infoframe_enable(frame);
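Editor's note on the five write_infoframe hunks above: each adds the same padding loop because the DIP data buffer is written 32 bits at a time and the hardware calculates the ECC over the whole buffer, presumably so that bytes left over from an earlier, longer infoframe cannot corrupt the checksum. VIDEO_DIP_DATA_SIZE is the full buffer size from i915_reg.h; the sketch below uses a stand-in value and a plain array instead of register writes, and assumes len (payload size in bytes) is at most the buffer size:

#include <stdint.h>

#define TOY_DIP_DATA_SIZE 32	/* stand-in for VIDEO_DIP_DATA_SIZE */

/* Emulates the new write pattern: copy the frame payload in 32-bit chunks,
 * then keep writing zeros until every slot of the DIP buffer has been
 * touched, so the hardware computes its ECC over well-defined bytes. */
static void toy_write_infoframe(uint32_t dip_regs[TOY_DIP_DATA_SIZE / 4],
				const uint32_t *frame, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i += 4)
		dip_regs[i / 4] = frame[i / 4];
	for (; i < TOY_DIP_DATA_SIZE; i += 4)
		dip_regs[i / 4] = 0;
}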
@@ -376,6 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
376 port = VIDEO_DIP_PORT_C; 391 port = VIDEO_DIP_PORT_C;
377 break; 392 break;
378 default: 393 default:
394 BUG();
379 return; 395 return;
380 } 396 }
381 397
@@ -434,6 +450,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
434 port = VIDEO_DIP_PORT_D; 450 port = VIDEO_DIP_PORT_D;
435 break; 451 break;
436 default: 452 default:
453 BUG();
437 return; 454 return;
438 } 455 }
439 456
@@ -600,15 +617,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
600 intel_hdmi->set_infoframes(encoder, adjusted_mode); 617 intel_hdmi->set_infoframes(encoder, adjusted_mode);
601} 618}
602 619
603static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) 620static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
621 enum pipe *pipe)
604{ 622{
605 struct drm_device *dev = encoder->dev; 623 struct drm_device *dev = encoder->base.dev;
606 struct drm_i915_private *dev_priv = dev->dev_private; 624 struct drm_i915_private *dev_priv = dev->dev_private;
607 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 625 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
626 u32 tmp;
627
628 tmp = I915_READ(intel_hdmi->sdvox_reg);
629
630 if (!(tmp & SDVO_ENABLE))
631 return false;
632
633 if (HAS_PCH_CPT(dev))
634 *pipe = PORT_TO_PIPE_CPT(tmp);
635 else
636 *pipe = PORT_TO_PIPE(tmp);
637
638 return true;
639}
640
641static void intel_enable_hdmi(struct intel_encoder *encoder)
642{
643 struct drm_device *dev = encoder->base.dev;
644 struct drm_i915_private *dev_priv = dev->dev_private;
645 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
608 u32 temp; 646 u32 temp;
609 u32 enable_bits = SDVO_ENABLE; 647 u32 enable_bits = SDVO_ENABLE;
610 648
611 if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) 649 if (intel_hdmi->has_audio)
612 enable_bits |= SDVO_AUDIO_ENABLE; 650 enable_bits |= SDVO_AUDIO_ENABLE;
613 651
614 temp = I915_READ(intel_hdmi->sdvox_reg); 652 temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -616,31 +654,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
616 /* HW workaround for IBX, we need to move the port to transcoder A 654 /* HW workaround for IBX, we need to move the port to transcoder A
617 * before disabling it. */ 655 * before disabling it. */
618 if (HAS_PCH_IBX(dev)) { 656 if (HAS_PCH_IBX(dev)) {
619 struct drm_crtc *crtc = encoder->crtc; 657 struct drm_crtc *crtc = encoder->base.crtc;
620 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; 658 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
621 659
622 if (mode != DRM_MODE_DPMS_ON) { 660 /* Restore the transcoder select bit. */
623 if (temp & SDVO_PIPE_B_SELECT) { 661 if (pipe == PIPE_B)
624 temp &= ~SDVO_PIPE_B_SELECT; 662 enable_bits |= SDVO_PIPE_B_SELECT;
625 I915_WRITE(intel_hdmi->sdvox_reg, temp);
626 POSTING_READ(intel_hdmi->sdvox_reg);
627
628 /* Again we need to write this twice. */
629 I915_WRITE(intel_hdmi->sdvox_reg, temp);
630 POSTING_READ(intel_hdmi->sdvox_reg);
631
632 /* Transcoder selection bits only update
633 * effectively on vblank. */
634 if (crtc)
635 intel_wait_for_vblank(dev, pipe);
636 else
637 msleep(50);
638 }
639 } else {
640 /* Restore the transcoder select bit. */
641 if (pipe == PIPE_B)
642 enable_bits |= SDVO_PIPE_B_SELECT;
643 }
644 } 663 }
645 664
646 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 665 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
@@ -651,12 +670,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
651 POSTING_READ(intel_hdmi->sdvox_reg); 670 POSTING_READ(intel_hdmi->sdvox_reg);
652 } 671 }
653 672
654 if (mode != DRM_MODE_DPMS_ON) { 673 temp |= enable_bits;
655 temp &= ~enable_bits; 674
656 } else { 675 I915_WRITE(intel_hdmi->sdvox_reg, temp);
657 temp |= enable_bits; 676 POSTING_READ(intel_hdmi->sdvox_reg);
677
678 /* HW workaround, need to write this twice for an issue that may result
679 * in the first write getting masked.
680 */
681 if (HAS_PCH_SPLIT(dev)) {
682 I915_WRITE(intel_hdmi->sdvox_reg, temp);
683 POSTING_READ(intel_hdmi->sdvox_reg);
684 }
685}
686
687static void intel_disable_hdmi(struct intel_encoder *encoder)
688{
689 struct drm_device *dev = encoder->base.dev;
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
692 u32 temp;
693 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
694
695 temp = I915_READ(intel_hdmi->sdvox_reg);
696
697 /* HW workaround for IBX, we need to move the port to transcoder A
698 * before disabling it. */
699 if (HAS_PCH_IBX(dev)) {
700 struct drm_crtc *crtc = encoder->base.crtc;
701 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
702
703 if (temp & SDVO_PIPE_B_SELECT) {
704 temp &= ~SDVO_PIPE_B_SELECT;
705 I915_WRITE(intel_hdmi->sdvox_reg, temp);
706 POSTING_READ(intel_hdmi->sdvox_reg);
707
708 /* Again we need to write this twice. */
709 I915_WRITE(intel_hdmi->sdvox_reg, temp);
710 POSTING_READ(intel_hdmi->sdvox_reg);
711
712 /* Transcoder selection bits only update
713 * effectively on vblank. */
714 if (crtc)
715 intel_wait_for_vblank(dev, pipe);
716 else
717 msleep(50);
718 }
719 }
720
721 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
722 * we do this anyway since it has proved more stable in testing.
723 */
724 if (HAS_PCH_SPLIT(dev)) {
725 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
726 POSTING_READ(intel_hdmi->sdvox_reg);
658 } 727 }
659 728
729 temp &= ~enable_bits;
730
660 I915_WRITE(intel_hdmi->sdvox_reg, temp); 731 I915_WRITE(intel_hdmi->sdvox_reg, temp);
661 POSTING_READ(intel_hdmi->sdvox_reg); 732 POSTING_READ(intel_hdmi->sdvox_reg);
662 733
@@ -736,7 +807,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
736 drm_detect_hdmi_monitor(edid); 807 drm_detect_hdmi_monitor(edid);
737 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 808 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
738 } 809 }
739 connector->display_info.raw_edid = NULL;
740 kfree(edid); 810 kfree(edid);
741 } 811 }
742 812
@@ -777,8 +847,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
777 if (edid) { 847 if (edid) {
778 if (edid->input & DRM_EDID_INPUT_DIGITAL) 848 if (edid->input & DRM_EDID_INPUT_DIGITAL)
779 has_audio = drm_detect_monitor_audio(edid); 849 has_audio = drm_detect_monitor_audio(edid);
780
781 connector->display_info.raw_edid = NULL;
782 kfree(edid); 850 kfree(edid);
783 } 851 }
784 852
@@ -832,9 +900,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
832done: 900done:
833 if (intel_hdmi->base.base.crtc) { 901 if (intel_hdmi->base.base.crtc) {
834 struct drm_crtc *crtc = intel_hdmi->base.base.crtc; 902 struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
835 drm_crtc_helper_set_mode(crtc, &crtc->mode, 903 intel_set_mode(crtc, &crtc->mode,
836 crtc->x, crtc->y, 904 crtc->x, crtc->y, crtc->fb);
837 crtc->fb);
838 } 905 }
839 906
840 return 0; 907 return 0;
@@ -848,23 +915,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
848} 915}
849 916
850static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { 917static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
851 .dpms = intel_ddi_dpms,
852 .mode_fixup = intel_hdmi_mode_fixup, 918 .mode_fixup = intel_hdmi_mode_fixup,
853 .prepare = intel_encoder_prepare,
854 .mode_set = intel_ddi_mode_set, 919 .mode_set = intel_ddi_mode_set,
855 .commit = intel_encoder_commit, 920 .disable = intel_encoder_noop,
856}; 921};
857 922
858static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 923static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
859 .dpms = intel_hdmi_dpms,
860 .mode_fixup = intel_hdmi_mode_fixup, 924 .mode_fixup = intel_hdmi_mode_fixup,
861 .prepare = intel_encoder_prepare,
862 .mode_set = intel_hdmi_mode_set, 925 .mode_set = intel_hdmi_mode_set,
863 .commit = intel_encoder_commit, 926 .disable = intel_encoder_noop,
864}; 927};
865 928
866static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 929static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
867 .dpms = drm_helper_connector_dpms, 930 .dpms = intel_connector_dpms,
868 .detect = intel_hdmi_detect, 931 .detect = intel_hdmi_detect,
869 .fill_modes = drm_helper_probe_single_connector_modes, 932 .fill_modes = drm_helper_probe_single_connector_modes,
870 .set_property = intel_hdmi_set_property, 933 .set_property = intel_hdmi_set_property,
@@ -888,7 +951,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
888 intel_attach_broadcast_rgb_property(connector); 951 intel_attach_broadcast_rgb_property(connector);
889} 952}
890 953
891void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 954void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
892{ 955{
893 struct drm_i915_private *dev_priv = dev->dev_private; 956 struct drm_i915_private *dev_priv = dev->dev_private;
894 struct drm_connector *connector; 957 struct drm_connector *connector;
@@ -922,48 +985,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
922 connector->doublescan_allowed = 0; 985 connector->doublescan_allowed = 0;
923 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 986 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
924 987
925 /* Set up the DDC bus. */ 988 intel_encoder->cloneable = false;
926 if (sdvox_reg == SDVOB) { 989
927 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 990 intel_hdmi->ddi_port = port;
928 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 991 switch (port) {
929 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 992 case PORT_B:
930 } else if (sdvox_reg == SDVOC) {
931 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
932 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
933 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
934 } else if (sdvox_reg == HDMIB) {
935 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
936 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
937 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
938 } else if (sdvox_reg == HDMIC) {
939 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
940 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
941 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
942 } else if (sdvox_reg == HDMID) {
943 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
944 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
945 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
946 } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
947 DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
948 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
949 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 993 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
950 intel_hdmi->ddi_port = PORT_B;
951 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 994 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
952 } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { 995 break;
953 DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); 996 case PORT_C:
954 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
955 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 997 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
956 intel_hdmi->ddi_port = PORT_C;
957 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 998 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
958 } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { 999 break;
959 DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); 1000 case PORT_D:
960 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
961 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1001 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
962 intel_hdmi->ddi_port = PORT_D;
963 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 1002 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
964 } else { 1003 break;
965 /* If we got an unknown sdvox_reg, things are pretty much broken 1004 case PORT_A:
966 * in a way that we should let the kernel know about it */ 1005 /* Internal port only for eDP. */
1006 default:
967 BUG(); 1007 BUG();
968 } 1008 }
969 1009
@@ -986,10 +1026,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
986 intel_hdmi->set_infoframes = cpt_set_infoframes; 1026 intel_hdmi->set_infoframes = cpt_set_infoframes;
987 } 1027 }
988 1028
989 if (IS_HASWELL(dev)) 1029 if (IS_HASWELL(dev)) {
990 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw); 1030 intel_encoder->enable = intel_enable_ddi;
991 else 1031 intel_encoder->disable = intel_disable_ddi;
992 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 1032 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1033 drm_encoder_helper_add(&intel_encoder->base,
1034 &intel_hdmi_helper_funcs_hsw);
1035 } else {
1036 intel_encoder->enable = intel_enable_hdmi;
1037 intel_encoder->disable = intel_disable_hdmi;
1038 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1039 drm_encoder_helper_add(&intel_encoder->base,
1040 &intel_hdmi_helper_funcs);
1041 }
1042 intel_connector->get_hw_state = intel_connector_get_hw_state;
1043
993 1044
994 intel_hdmi_add_properties(intel_hdmi, connector); 1045 intel_hdmi_add_properties(intel_hdmi, connector);
995 1046
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8552be9f5db1..e3166df55daa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -64,13 +64,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
64 struct intel_lvds, base); 64 struct intel_lvds, base);
65} 65}
66 66
67static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
68 enum pipe *pipe)
69{
70 struct drm_device *dev = encoder->base.dev;
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 lvds_reg, tmp;
73
74 if (HAS_PCH_SPLIT(dev)) {
75 lvds_reg = PCH_LVDS;
76 } else {
77 lvds_reg = LVDS;
78 }
79
80 tmp = I915_READ(lvds_reg);
81
82 if (!(tmp & LVDS_PORT_EN))
83 return false;
84
85 if (HAS_PCH_CPT(dev))
86 *pipe = PORT_TO_PIPE_CPT(tmp);
87 else
88 *pipe = PORT_TO_PIPE(tmp);
89
90 return true;
91}
92
67/** 93/**
68 * Sets the power state for the panel. 94 * Sets the power state for the panel.
69 */ 95 */
70static void intel_lvds_enable(struct intel_lvds *intel_lvds) 96static void intel_enable_lvds(struct intel_encoder *encoder)
71{ 97{
72 struct drm_device *dev = intel_lvds->base.base.dev; 98 struct drm_device *dev = encoder->base.dev;
73 struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); 99 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
100 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
74 struct drm_i915_private *dev_priv = dev->dev_private; 101 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 ctl_reg, lvds_reg, stat_reg; 102 u32 ctl_reg, lvds_reg, stat_reg;
76 103
@@ -110,9 +137,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
110 intel_panel_enable_backlight(dev, intel_crtc->pipe); 137 intel_panel_enable_backlight(dev, intel_crtc->pipe);
111} 138}
112 139
113static void intel_lvds_disable(struct intel_lvds *intel_lvds) 140static void intel_disable_lvds(struct intel_encoder *encoder)
114{ 141{
115 struct drm_device *dev = intel_lvds->base.base.dev; 142 struct drm_device *dev = encoder->base.dev;
143 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
116 struct drm_i915_private *dev_priv = dev->dev_private; 144 struct drm_i915_private *dev_priv = dev->dev_private;
117 u32 ctl_reg, lvds_reg, stat_reg; 145 u32 ctl_reg, lvds_reg, stat_reg;
118 146
@@ -141,18 +169,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
141 POSTING_READ(lvds_reg); 169 POSTING_READ(lvds_reg);
142} 170}
143 171
144static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
145{
146 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
147
148 if (mode == DRM_MODE_DPMS_ON)
149 intel_lvds_enable(intel_lvds);
150 else
151 intel_lvds_disable(intel_lvds);
152
153 /* XXX: We never power down the LVDS pairs. */
154}
155
156static int intel_lvds_mode_valid(struct drm_connector *connector, 172static int intel_lvds_mode_valid(struct drm_connector *connector,
157 struct drm_display_mode *mode) 173 struct drm_display_mode *mode)
158{ 174{
@@ -233,9 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
233{ 249{
234 struct drm_device *dev = encoder->dev; 250 struct drm_device *dev = encoder->dev;
235 struct drm_i915_private *dev_priv = dev->dev_private; 251 struct drm_i915_private *dev_priv = dev->dev_private;
236 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
237 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 252 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
238 struct intel_encoder *tmp_encoder; 253 struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
239 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 254 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
240 int pipe; 255 int pipe;
241 256
@@ -245,14 +260,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
245 return false; 260 return false;
246 } 261 }
247 262
248 /* Should never happen!! */ 263 if (intel_encoder_check_is_cloned(&intel_lvds->base))
249 for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { 264 return false;
250 if (&tmp_encoder->base != encoder) {
251 DRM_ERROR("Can't enable LVDS and another "
252 "encoder on the same pipe\n");
253 return false;
254 }
255 }
256 265
257 /* 266 /*
258 * We have timings from the BIOS for the panel, put them in 267 * We have timings from the BIOS for the panel, put them in
@@ -404,23 +413,6 @@ out:
404 return true; 413 return true;
405} 414}
406 415
407static void intel_lvds_prepare(struct drm_encoder *encoder)
408{
409 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
410
411 intel_lvds_disable(intel_lvds);
412}
413
414static void intel_lvds_commit(struct drm_encoder *encoder)
415{
416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
417
418 /* Always do a full power on as we do not know what state
419 * we were left in.
420 */
421 intel_lvds_enable(intel_lvds);
422}
423
424static void intel_lvds_mode_set(struct drm_encoder *encoder, 416static void intel_lvds_mode_set(struct drm_encoder *encoder,
425 struct drm_display_mode *mode, 417 struct drm_display_mode *mode,
426 struct drm_display_mode *adjusted_mode) 418 struct drm_display_mode *adjusted_mode)
@@ -534,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
534 dev_priv->modeset_on_lid = 0; 526 dev_priv->modeset_on_lid = 0;
535 527
536 mutex_lock(&dev->mode_config.mutex); 528 mutex_lock(&dev->mode_config.mutex);
537 drm_helper_resume_force_mode(dev); 529 intel_modeset_check_state(dev);
538 mutex_unlock(&dev->mode_config.mutex); 530 mutex_unlock(&dev->mode_config.mutex);
539 531
540 return NOTIFY_OK; 532 return NOTIFY_OK;
@@ -586,8 +578,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
586 * If the CRTC is enabled, the display will be changed 578 * If the CRTC is enabled, the display will be changed
587 * according to the new panel fitting mode. 579 * according to the new panel fitting mode.
588 */ 580 */
589 drm_crtc_helper_set_mode(crtc, &crtc->mode, 581 intel_set_mode(crtc, &crtc->mode,
590 crtc->x, crtc->y, crtc->fb); 582 crtc->x, crtc->y, crtc->fb);
591 } 583 }
592 } 584 }
593 585
@@ -595,11 +587,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
595} 587}
596 588
597static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 589static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
598 .dpms = intel_lvds_dpms,
599 .mode_fixup = intel_lvds_mode_fixup, 590 .mode_fixup = intel_lvds_mode_fixup,
600 .prepare = intel_lvds_prepare,
601 .mode_set = intel_lvds_mode_set, 591 .mode_set = intel_lvds_mode_set,
602 .commit = intel_lvds_commit, 592 .disable = intel_encoder_noop,
603}; 593};
604 594
605static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 595static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -609,7 +599,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
609}; 599};
610 600
611static const struct drm_connector_funcs intel_lvds_connector_funcs = { 601static const struct drm_connector_funcs intel_lvds_connector_funcs = {
612 .dpms = drm_helper_connector_dpms, 602 .dpms = intel_connector_dpms,
613 .detect = intel_lvds_detect, 603 .detect = intel_lvds_detect,
614 .fill_modes = drm_helper_probe_single_connector_modes, 604 .fill_modes = drm_helper_probe_single_connector_modes,
615 .set_property = intel_lvds_set_property, 605 .set_property = intel_lvds_set_property,
@@ -971,10 +961,15 @@ bool intel_lvds_init(struct drm_device *dev)
971 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 961 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
972 DRM_MODE_ENCODER_LVDS); 962 DRM_MODE_ENCODER_LVDS);
973 963
964 intel_encoder->enable = intel_enable_lvds;
965 intel_encoder->disable = intel_disable_lvds;
966 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
967 intel_connector->get_hw_state = intel_connector_get_hw_state;
968
974 intel_connector_attach_encoder(intel_connector, intel_encoder); 969 intel_connector_attach_encoder(intel_connector, intel_encoder);
975 intel_encoder->type = INTEL_OUTPUT_LVDS; 970 intel_encoder->type = INTEL_OUTPUT_LVDS;
976 971
977 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 972 intel_encoder->cloneable = false;
978 if (HAS_PCH_SPLIT(dev)) 973 if (HAS_PCH_SPLIT(dev))
979 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 974 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
980 else if (IS_GEN4(dev)) 975 else if (IS_GEN4(dev))
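Editor's note: intel_lvds_get_hw_state() added above follows the same shape as the DVO and HDMI variants in this patch — read the port control register, report "off" if the enable bit is clear, otherwise decode the pipe from the pipe-select field (PORT_TO_PIPE or PORT_TO_PIPE_CPT depending on the PCH). The bit positions in this sketch are invented stand-ins; the real layout lives in i915_reg.h:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in bit layout; the real LVDS_PORT_EN / pipe-select macros differ
 * between GMCH and CPT parts. */
#define TOY_PORT_ENABLE   (1u << 31)
#define TOY_PIPE_SEL_MASK (1u << 30)

/* Shape of the new ->get_hw_state() hooks: report whether the port is on,
 * and if so which pipe its select bit points at. */
static bool toy_get_hw_state(uint32_t port_reg_value, int *pipe)
{
	if (!(port_reg_value & TOY_PORT_ENABLE))
		return false;

	*pipe = (port_reg_value & TOY_PIPE_SEL_MASK) ? 1 : 0;
	return true;
}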
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cc71fd9aaed5..cabd84bf66eb 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
45 drm_mode_connector_update_edid_property(connector, edid); 45 drm_mode_connector_update_edid_property(connector, edid);
46 ret = drm_add_edid_modes(connector, edid); 46 ret = drm_add_edid_modes(connector, edid);
47 drm_edid_to_eld(connector, edid); 47 drm_edid_to_eld(connector, edid);
48 connector->display_info.raw_edid = NULL;
49 kfree(edid); 48 kfree(edid);
50 49
51 return ret; 50 return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5cc624eb6133..5530413213d8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
427 goto end; 427 goto end;
428} 428}
429 429
430static void intel_setup_cadls(struct drm_device *dev)
431{
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_opregion *opregion = &dev_priv->opregion;
434 int i = 0;
435 u32 disp_id;
436
437 /* Initialize the CADL field by duplicating the DIDL values.
438 * Technically, this is not always correct as display outputs may exist
439 * but not be active. This initialization is necessary for some Clevo
440 * laptops that check this field before processing the brightness and
441 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
442 * there are fewer than eight devices. */
443 do {
444 disp_id = ioread32(&opregion->acpi->didl[i]);
445 iowrite32(disp_id, &opregion->acpi->cadl[i]);
446 } while (++i < 8 && disp_id != 0);
447}
448
430void intel_opregion_init(struct drm_device *dev) 449void intel_opregion_init(struct drm_device *dev)
431{ 450{
432 struct drm_i915_private *dev_priv = dev->dev_private; 451 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
436 return; 455 return;
437 456
438 if (opregion->acpi) { 457 if (opregion->acpi) {
439 if (drm_core_check_feature(dev, DRIVER_MODESET)) 458 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
440 intel_didl_outputs(dev); 459 intel_didl_outputs(dev);
460 intel_setup_cadls(dev);
461 }
441 462
442 /* Notify BIOS we are ready to handle ACPI video ext notifs. 463 /* Notify BIOS we are ready to handle ACPI video ext notifs.
443 * Right now, all the events are handled by the ACPI video module. 464 * Right now, all the events are handled by the ACPI video module.
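Editor's note on intel_setup_cadls() above: it simply mirrors the DIDL list into CADL so that firmware which consults CADL before handling the brightness and display-switching hotkeys (the Clevo case mentioned in the comment) sees sane values; copying stops after the first zero (terminator) entry or once all eight slots are filled. The same loop written against plain arrays, with the opregion ioread32/iowrite32 accesses stripped out — the array size and types here are assumptions for the sketch:

#include <stdint.h>
#include <stdio.h>

#define TOY_ACPI_SLOTS 8	/* DIDL and CADL both have eight entries */

/* Duplicate didl[] into cadl[], stopping once a zero (terminator) entry has
 * been copied, like the do/while loop in intel_setup_cadls(). */
static void toy_setup_cadls(const uint32_t didl[TOY_ACPI_SLOTS],
			    uint32_t cadl[TOY_ACPI_SLOTS])
{
	uint32_t disp_id;
	int i = 0;

	do {
		disp_id = didl[i];
		cadl[i] = disp_id;
	} while (++i < TOY_ACPI_SLOTS && disp_id != 0);
}

int main(void)
{
	uint32_t didl[TOY_ACPI_SLOTS] = { 0x0400, 0x0301, 0 };
	uint32_t cadl[TOY_ACPI_SLOTS] = { 0 };

	toy_setup_cadls(didl, cadl);
	printf("cadl[0]=0x%x cadl[1]=0x%x cadl[2]=0x%x\n",
	       cadl[0], cadl[1], cadl[2]);
	return 0;
}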
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10510221d763..ebff850a9ab6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,54 +234,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
234 return 0; 234 return 0;
235} 235}
236 236
237/* Workaround for i830 bug where pipe a must be enable to change control regs */
238static int
239i830_activate_pipe_a(struct drm_device *dev)
240{
241 drm_i915_private_t *dev_priv = dev->dev_private;
242 struct intel_crtc *crtc;
243 struct drm_crtc_helper_funcs *crtc_funcs;
244 struct drm_display_mode vesa_640x480 = {
245 DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
246 752, 800, 0, 480, 489, 492, 525, 0,
247 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
248 }, *mode;
249
250 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
251 if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
252 return 0;
253
254 /* most i8xx have pipe a forced on, so don't trust dpms mode */
255 if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
256 return 0;
257
258 crtc_funcs = crtc->base.helper_private;
259 if (crtc_funcs->dpms == NULL)
260 return 0;
261
262 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
263
264 mode = drm_mode_duplicate(dev, &vesa_640x480);
265
266 if (!drm_crtc_helper_set_mode(&crtc->base, mode,
267 crtc->base.x, crtc->base.y,
268 crtc->base.fb))
269 return 0;
270
271 crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
272 return 1;
273}
274
275static void
276i830_deactivate_pipe_a(struct drm_device *dev)
277{
278 drm_i915_private_t *dev_priv = dev->dev_private;
279 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
280 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
281
282 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
283}
284
285/* overlay needs to be disable in OCMD reg */ 237/* overlay needs to be disable in OCMD reg */
286static int intel_overlay_on(struct intel_overlay *overlay) 238static int intel_overlay_on(struct intel_overlay *overlay)
287{ 239{
@@ -289,17 +241,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
289 struct drm_i915_private *dev_priv = dev->dev_private; 241 struct drm_i915_private *dev_priv = dev->dev_private;
290 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 242 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
291 struct drm_i915_gem_request *request; 243 struct drm_i915_gem_request *request;
292 int pipe_a_quirk = 0;
293 int ret; 244 int ret;
294 245
295 BUG_ON(overlay->active); 246 BUG_ON(overlay->active);
296 overlay->active = 1; 247 overlay->active = 1;
297 248
298 if (IS_I830(dev)) { 249 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
299 pipe_a_quirk = i830_activate_pipe_a(dev);
300 if (pipe_a_quirk < 0)
301 return pipe_a_quirk;
302 }
303 250
304 request = kzalloc(sizeof(*request), GFP_KERNEL); 251 request = kzalloc(sizeof(*request), GFP_KERNEL);
305 if (request == NULL) { 252 if (request == NULL) {
@@ -321,9 +268,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
321 268
322 ret = intel_overlay_do_wait_request(overlay, request, NULL); 269 ret = intel_overlay_do_wait_request(overlay, request, NULL);
323out: 270out:
324 if (pipe_a_quirk)
325 i830_deactivate_pipe_a(dev);
326
327 return ret; 271 return ret;
328} 272}
329 273
@@ -1438,7 +1382,7 @@ void intel_setup_overlay(struct drm_device *dev)
1438 } 1382 }
1439 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1383 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
1440 } else { 1384 } else {
1441 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); 1385 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
1442 if (ret) { 1386 if (ret) {
1443 DRM_ERROR("failed to pin overlay register bo\n"); 1387 DRM_ERROR("failed to pin overlay register bo\n");
1444 goto out_free_bo; 1388 goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757a..d69f8f49beb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33 33
34#define FORCEWAKE_ACK_TIMEOUT_MS 2
35
34/* FBC, or Frame Buffer Compression, is a technique employed to compress the 36/* FBC, or Frame Buffer Compression, is a technique employed to compress the
35 * framebuffer contents in-memory, aiming at reducing the required bandwidth 37 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 36 * during in-memory transfers and, therefore, reduce the power consumption. 38 * during in-memory transfers and, therefore, reduce the power consumption.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
593 break; 595 break;
594 } 596 }
595 597
596 dev_priv->r_t = dev_priv->mem_freq; 598 dev_priv->ips.r_t = dev_priv->mem_freq;
597 599
598 switch (csipll & 0x3ff) { 600 switch (csipll & 0x3ff) {
599 case 0x00c: 601 case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
625 } 627 }
626 628
627 if (dev_priv->fsb_freq == 3200) { 629 if (dev_priv->fsb_freq == 3200) {
628 dev_priv->c_m = 0; 630 dev_priv->ips.c_m = 0;
629 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 631 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
630 dev_priv->c_m = 1; 632 dev_priv->ips.c_m = 1;
631 } else { 633 } else {
632 dev_priv->c_m = 2; 634 dev_priv->ips.c_m = 2;
633 } 635 }
634} 636}
635 637
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
2138 return NULL; 2140 return NULL;
2139 } 2141 }
2140 2142
2141 ret = i915_gem_object_pin(ctx, 4096, true); 2143 ret = i915_gem_object_pin(ctx, 4096, true, false);
2142 if (ret) { 2144 if (ret) {
2143 DRM_ERROR("failed to pin power context: %d\n", ret); 2145 DRM_ERROR("failed to pin power context: %d\n", ret);
2144 goto err_unref; 2146 goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
2160 return NULL; 2162 return NULL;
2161} 2163}
2162 2164
2165/**
2166 * Lock protecting IPS related data structures
2167 */
2168DEFINE_SPINLOCK(mchdev_lock);
2169
2170/* Global for IPS driver to get at the current i915 device. Protected by
2171 * mchdev_lock. */
2172static struct drm_i915_private *i915_mch_dev;
2173
2163bool ironlake_set_drps(struct drm_device *dev, u8 val) 2174bool ironlake_set_drps(struct drm_device *dev, u8 val)
2164{ 2175{
2165 struct drm_i915_private *dev_priv = dev->dev_private; 2176 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u16 rgvswctl; 2177 u16 rgvswctl;
2167 2178
2179 assert_spin_locked(&mchdev_lock);
2180
2168 rgvswctl = I915_READ16(MEMSWCTL); 2181 rgvswctl = I915_READ16(MEMSWCTL);
2169 if (rgvswctl & MEMCTL_CMD_STS) { 2182 if (rgvswctl & MEMCTL_CMD_STS) {
2170 DRM_DEBUG("gpu busy, RCS change rejected\n"); 2183 DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
2188 u32 rgvmodectl = I915_READ(MEMMODECTL); 2201 u32 rgvmodectl = I915_READ(MEMMODECTL);
2189 u8 fmax, fmin, fstart, vstart; 2202 u8 fmax, fmin, fstart, vstart;
2190 2203
2204 spin_lock_irq(&mchdev_lock);
2205
2191 /* Enable temp reporting */ 2206 /* Enable temp reporting */
2192 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 2207 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2193 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 2208 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
2211 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 2226 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2212 PXVFREQ_PX_SHIFT; 2227 PXVFREQ_PX_SHIFT;
2213 2228
2214 dev_priv->fmax = fmax; /* IPS callback will increase this */ 2229 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2215 dev_priv->fstart = fstart; 2230 dev_priv->ips.fstart = fstart;
2216 2231
2217 dev_priv->max_delay = fstart; 2232 dev_priv->ips.max_delay = fstart;
2218 dev_priv->min_delay = fmin; 2233 dev_priv->ips.min_delay = fmin;
2219 dev_priv->cur_delay = fstart; 2234 dev_priv->ips.cur_delay = fstart;
2220 2235
2221 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 2236 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2222 fmax, fmin, fstart); 2237 fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
2233 rgvmodectl |= MEMMODE_SWMODE_EN; 2248 rgvmodectl |= MEMMODE_SWMODE_EN;
2234 I915_WRITE(MEMMODECTL, rgvmodectl); 2249 I915_WRITE(MEMMODECTL, rgvmodectl);
2235 2250
2236 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 2251 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2237 DRM_ERROR("stuck trying to change perf mode\n"); 2252 DRM_ERROR("stuck trying to change perf mode\n");
2238 msleep(1); 2253 mdelay(1);
2239 2254
2240 ironlake_set_drps(dev, fstart); 2255 ironlake_set_drps(dev, fstart);
2241 2256
2242 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 2257 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2243 I915_READ(0x112e0); 2258 I915_READ(0x112e0);
2244 dev_priv->last_time1 = jiffies_to_msecs(jiffies); 2259 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2245 dev_priv->last_count2 = I915_READ(0x112f4); 2260 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2246 getrawmonotonic(&dev_priv->last_time2); 2261 getrawmonotonic(&dev_priv->ips.last_time2);
2262
2263 spin_unlock_irq(&mchdev_lock);
2247} 2264}
2248 2265
2249static void ironlake_disable_drps(struct drm_device *dev) 2266static void ironlake_disable_drps(struct drm_device *dev)
2250{ 2267{
2251 struct drm_i915_private *dev_priv = dev->dev_private; 2268 struct drm_i915_private *dev_priv = dev->dev_private;
2252 u16 rgvswctl = I915_READ16(MEMSWCTL); 2269 u16 rgvswctl;
2270
2271 spin_lock_irq(&mchdev_lock);
2272
2273 rgvswctl = I915_READ16(MEMSWCTL);
2253 2274
2254 /* Ack interrupts, disable EFC interrupt */ 2275 /* Ack interrupts, disable EFC interrupt */
2255 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 2276 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
2259 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 2280 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2260 2281
2261 /* Go back to the starting frequency */ 2282 /* Go back to the starting frequency */
2262 ironlake_set_drps(dev, dev_priv->fstart); 2283 ironlake_set_drps(dev, dev_priv->ips.fstart);
2263 msleep(1); 2284 mdelay(1);
2264 rgvswctl |= MEMCTL_CMD_STS; 2285 rgvswctl |= MEMCTL_CMD_STS;
2265 I915_WRITE(MEMSWCTL, rgvswctl); 2286 I915_WRITE(MEMSWCTL, rgvswctl);
2266 msleep(1); 2287 mdelay(1);
2267 2288
2289 spin_unlock_irq(&mchdev_lock);
2268} 2290}
2269 2291
2270void gen6_set_rps(struct drm_device *dev, u8 val) 2292/* There's a funny hw issue where the hw returns all 0 when reading from
2293 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2294 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 2295 * all limits and leaving the gpu stuck at whatever frequency it is currently at).
2296 */
2297static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2271{ 2298{
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2273 u32 limits; 2299 u32 limits;
2274 2300
2275 limits = 0; 2301 limits = 0;
2276 if (val >= dev_priv->max_delay)
2277 val = dev_priv->max_delay;
2278 else
2279 limits |= dev_priv->max_delay << 24;
2280 2302
2281 if (val <= dev_priv->min_delay) 2303 if (*val >= dev_priv->rps.max_delay)
2282 val = dev_priv->min_delay; 2304 *val = dev_priv->rps.max_delay;
2283 else 2305 limits |= dev_priv->rps.max_delay << 24;
2284 limits |= dev_priv->min_delay << 16; 2306
2307 /* Only set the down limit when we've reached the lowest level to avoid
2308 * getting more interrupts, otherwise leave this clear. This prevents a
2309 * race in the hw when coming out of rc6: There's a tiny window where
2310 * the hw runs at the minimal clock before selecting the desired
 2311 * frequency; if the down threshold expires in that window we will not
2312 * receive a down interrupt. */
2313 if (*val <= dev_priv->rps.min_delay) {
2314 *val = dev_priv->rps.min_delay;
2315 limits |= dev_priv->rps.min_delay << 16;
2316 }
2285 2317
2286 if (val == dev_priv->cur_delay) 2318 return limits;
2319}
2320
2321void gen6_set_rps(struct drm_device *dev, u8 val)
2322{
2323 struct drm_i915_private *dev_priv = dev->dev_private;
2324 u32 limits = gen6_rps_limits(dev_priv, &val);
2325
2326 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2327 WARN_ON(val > dev_priv->rps.max_delay);
2328 WARN_ON(val < dev_priv->rps.min_delay);
2329
2330 if (val == dev_priv->rps.cur_delay)
2287 return; 2331 return;
2288 2332
2289 I915_WRITE(GEN6_RPNSWREQ, 2333 I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2296 */ 2340 */
2297 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); 2341 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
2298 2342
2299 dev_priv->cur_delay = val; 2343 POSTING_READ(GEN6_RPNSWREQ);
2344
2345 dev_priv->rps.cur_delay = val;
2346
2347 trace_intel_gpu_freq_change(val * 50);
2300} 2348}
2301 2349
2302static void gen6_disable_rps(struct drm_device *dev) 2350static void gen6_disable_rps(struct drm_device *dev)
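The gen6_rps_limits() helper above packs both limits into a freshly computed register value rather than read-modify-writing the old one. Below is a minimal standalone model of that computation, assuming only the bit layout visible in the code (upper limit in bits 31:24, lower limit in bits 23:16); the names are illustrative, not the driver's API.

#include <stdint.h>
#include <stdio.h>

/* Recompute the limits value from scratch (no read-modify-write, since the
 * hardware register can read back as 0) and clamp the requested delay. */
static uint32_t rps_limits(uint8_t max_delay, uint8_t min_delay, uint8_t *val)
{
	uint32_t limits = 0;

	if (*val >= max_delay)
		*val = max_delay;
	limits |= (uint32_t)max_delay << 24;

	/* Only program the down limit once we sit at the floor, so the
	 * hardware keeps delivering down interrupts while above it. */
	if (*val <= min_delay) {
		*val = min_delay;
		limits |= (uint32_t)min_delay << 16;
	}

	return limits;
}

int main(void)
{
	uint8_t val = 9;
	uint32_t limits = rps_limits(12, 4, &val);

	printf("limits = 0x%08x, clamped val = %u\n", (unsigned)limits, (unsigned)val);
	return 0;
}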
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
2312 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 2360 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2313 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 2361 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2314 2362
2315 spin_lock_irq(&dev_priv->rps_lock); 2363 spin_lock_irq(&dev_priv->rps.lock);
2316 dev_priv->pm_iir = 0; 2364 dev_priv->rps.pm_iir = 0;
2317 spin_unlock_irq(&dev_priv->rps_lock); 2365 spin_unlock_irq(&dev_priv->rps.lock);
2318 2366
2319 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2367 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2320} 2368}
2321 2369
2322int intel_enable_rc6(const struct drm_device *dev) 2370int intel_enable_rc6(const struct drm_device *dev)
2323{ 2371{
2324 /* 2372 /* Respect the kernel parameter if it is set */
2325 * Respect the kernel parameter if it is set
2326 */
2327 if (i915_enable_rc6 >= 0) 2373 if (i915_enable_rc6 >= 0)
2328 return i915_enable_rc6; 2374 return i915_enable_rc6;
2329 2375
2330 /* 2376 if (INTEL_INFO(dev)->gen == 5) {
2331 * Disable RC6 on Ironlake 2377#ifdef CONFIG_INTEL_IOMMU
2332 */ 2378 /* Disable rc6 on ilk if VT-d is on. */
2333 if (INTEL_INFO(dev)->gen == 5) 2379 if (intel_iommu_gfx_mapped)
2334 return 0; 2380 return false;
2381#endif
2382 DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
2383 return INTEL_RC6_ENABLE;
2384 }
2335 2385
2336 /* On Haswell, only RC6 is available. So let's enable it by default to 2386 if (IS_HASWELL(dev)) {
2337 * provide better testing and coverage since the beginning. 2387 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
2338 */
2339 if (IS_HASWELL(dev))
2340 return INTEL_RC6_ENABLE; 2388 return INTEL_RC6_ENABLE;
2389 }
2341 2390
2342 /* 2391 /* snb/ivb have more than one rc6 state. */
2343 * Disable rc6 on Sandybridge
2344 */
2345 if (INTEL_INFO(dev)->gen == 6) { 2392 if (INTEL_INFO(dev)->gen == 6) {
2346 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); 2393 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2347 return INTEL_RC6_ENABLE; 2394 return INTEL_RC6_ENABLE;
2348 } 2395 }
2396
2349 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); 2397 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2350 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 2398 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2351} 2399}
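The resulting RC6 policy is easiest to read as a small decision function. The sketch below models the branches above with the RC6 flags as plain bit masks and hypothetical parameters standing in for the hardware checks; it mirrors the selection logic only, not the driver's functions.

#include <stdio.h>

#define RC6_ENABLE  (1 << 0)
#define RC6p_ENABLE (1 << 1)

/* param >= 0 means the kernel parameter was set and wins outright. */
static int enable_rc6(int param, int gen, int is_haswell, int iommu_on)
{
	if (param >= 0)
		return param;
	if (gen == 5)
		return iommu_on ? 0 : RC6_ENABLE; /* ilk: plain RC6, off with VT-d */
	if (is_haswell || gen == 6)
		return RC6_ENABLE;                /* hsw/snb: only plain RC6 */
	return RC6_ENABLE | RC6p_ENABLE;          /* ivb: RC6 and deep RC6 */
}

int main(void)
{
	printf("gen6 default: %d\n", enable_rc6(-1, 6, 0, 0));
	printf("gen7 default: %d\n", enable_rc6(-1, 7, 0, 0));
	printf("forced off:   %d\n", enable_rc6(0, 7, 0, 0));
	return 0;
}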
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
2383 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2431 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2384 2432
2385 /* In units of 100MHz */ 2433 /* In units of 100MHz */
2386 dev_priv->max_delay = rp_state_cap & 0xff; 2434 dev_priv->rps.max_delay = rp_state_cap & 0xff;
2387 dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16; 2435 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2388 dev_priv->cur_delay = 0; 2436 dev_priv->rps.cur_delay = 0;
2389 2437
2390 /* disable the counters and set deterministic thresholds */ 2438 /* disable the counters and set deterministic thresholds */
2391 I915_WRITE(GEN6_RC_CONTROL, 0); 2439 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
2438 2486
2439 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 2487 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2440 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 2488 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2441 dev_priv->max_delay << 24 | 2489 dev_priv->rps.max_delay << 24 |
2442 dev_priv->min_delay << 16); 2490 dev_priv->rps.min_delay << 16);
2443 2491
2444 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 2492 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2445 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 2493 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
2477 500)) 2525 500))
2478 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2526 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2479 if (pcu_mbox & (1<<31)) { /* OC supported */ 2527 if (pcu_mbox & (1<<31)) { /* OC supported */
2480 dev_priv->max_delay = pcu_mbox & 0xff; 2528 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2481 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2529 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2482 } 2530 }
2483 2531
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
2485 2533
2486 /* requires MSI enabled */ 2534 /* requires MSI enabled */
2487 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); 2535 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
2488 spin_lock_irq(&dev_priv->rps_lock); 2536 spin_lock_irq(&dev_priv->rps.lock);
2489 WARN_ON(dev_priv->pm_iir != 0); 2537 WARN_ON(dev_priv->rps.pm_iir != 0);
2490 I915_WRITE(GEN6_PMIMR, 0); 2538 I915_WRITE(GEN6_PMIMR, 0);
2491 spin_unlock_irq(&dev_priv->rps_lock); 2539 spin_unlock_irq(&dev_priv->rps.lock);
2492 /* enable all PM interrupts */ 2540 /* enable all PM interrupts */
2493 I915_WRITE(GEN6_PMINTRMSK, 0); 2541 I915_WRITE(GEN6_PMINTRMSK, 0);
2494 2542
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2520 * to use for memory access. We do this by specifying the IA frequency 2568 * to use for memory access. We do this by specifying the IA frequency
2521 * the PCU should use as a reference to determine the ring frequency. 2569 * the PCU should use as a reference to determine the ring frequency.
2522 */ 2570 */
2523 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; 2571 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2524 gpu_freq--) { 2572 gpu_freq--) {
2525 int diff = dev_priv->max_delay - gpu_freq; 2573 int diff = dev_priv->rps.max_delay - gpu_freq;
2526 2574
2527 /* 2575 /*
2528 * For GPU frequencies less than 750MHz, just use the lowest 2576 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
2686 { 0, 800, 231, 23784 }, 2734 { 0, 800, 231, 23784 },
2687}; 2735};
2688 2736
2689unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 2737static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
2690{ 2738{
2691 u64 total_count, diff, ret; 2739 u64 total_count, diff, ret;
2692 u32 count1, count2, count3, m = 0, c = 0; 2740 u32 count1, count2, count3, m = 0, c = 0;
2693 unsigned long now = jiffies_to_msecs(jiffies), diff1; 2741 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2694 int i; 2742 int i;
2695 2743
2696 diff1 = now - dev_priv->last_time1; 2744 assert_spin_locked(&mchdev_lock);
2745
2746 diff1 = now - dev_priv->ips.last_time1;
2697 2747
2698 /* Prevent division-by-zero if we are asking too fast. 2748 /* Prevent division-by-zero if we are asking too fast.
2699 * Also, we don't get interesting results if we are polling 2749 * Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2701 * in such cases. 2751 * in such cases.
2702 */ 2752 */
2703 if (diff1 <= 10) 2753 if (diff1 <= 10)
2704 return dev_priv->chipset_power; 2754 return dev_priv->ips.chipset_power;
2705 2755
2706 count1 = I915_READ(DMIEC); 2756 count1 = I915_READ(DMIEC);
2707 count2 = I915_READ(DDREC); 2757 count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2710 total_count = count1 + count2 + count3; 2760 total_count = count1 + count2 + count3;
2711 2761
2712 /* FIXME: handle per-counter overflow */ 2762 /* FIXME: handle per-counter overflow */
2713 if (total_count < dev_priv->last_count1) { 2763 if (total_count < dev_priv->ips.last_count1) {
2714 diff = ~0UL - dev_priv->last_count1; 2764 diff = ~0UL - dev_priv->ips.last_count1;
2715 diff += total_count; 2765 diff += total_count;
2716 } else { 2766 } else {
2717 diff = total_count - dev_priv->last_count1; 2767 diff = total_count - dev_priv->ips.last_count1;
2718 } 2768 }
2719 2769
2720 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 2770 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2721 if (cparams[i].i == dev_priv->c_m && 2771 if (cparams[i].i == dev_priv->ips.c_m &&
2722 cparams[i].t == dev_priv->r_t) { 2772 cparams[i].t == dev_priv->ips.r_t) {
2723 m = cparams[i].m; 2773 m = cparams[i].m;
2724 c = cparams[i].c; 2774 c = cparams[i].c;
2725 break; 2775 break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2730 ret = ((m * diff) + c); 2780 ret = ((m * diff) + c);
2731 ret = div_u64(ret, 10); 2781 ret = div_u64(ret, 10);
2732 2782
2733 dev_priv->last_count1 = total_count; 2783 dev_priv->ips.last_count1 = total_count;
2734 dev_priv->last_time1 = now; 2784 dev_priv->ips.last_time1 = now;
2735 2785
2736 dev_priv->chipset_power = ret; 2786 dev_priv->ips.chipset_power = ret;
2737 2787
2738 return ret; 2788 return ret;
2739} 2789}
2740 2790
2791unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2792{
2793 unsigned long val;
2794
2795 if (dev_priv->info->gen != 5)
2796 return 0;
2797
2798 spin_lock_irq(&mchdev_lock);
2799
2800 val = __i915_chipset_val(dev_priv);
2801
2802 spin_unlock_irq(&mchdev_lock);
2803
2804 return val;
2805}
2806
2741unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 2807unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2742{ 2808{
2743 unsigned long m, x, b; 2809 unsigned long m, x, b;
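The __i915_chipset_val()/i915_chipset_val() split follows a common locking pattern: the double-underscore helper assumes mchdev_lock is already held (and asserts it), while the plain-named wrapper takes the lock itself, so a caller such as i915_read_mch_val() can combine several helpers under one acquisition. Below is a generic sketch of that pattern with a pthread mutex in place of the spinlock; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sample_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long sample_value;

/* Internal helper: caller must already hold sample_lock
 * (the kernel version uses assert_spin_locked(&mchdev_lock)). */
static unsigned long __read_sample(void)
{
	return sample_value;
}

/* Public entry point: takes the lock, calls the internal helper. */
static unsigned long read_sample(void)
{
	unsigned long val;

	pthread_mutex_lock(&sample_lock);
	val = __read_sample();
	pthread_mutex_unlock(&sample_lock);

	return val;
}

/* A caller needing several reads under one critical section uses the
 * __ variants directly, mirroring i915_read_mch_val(). */
static unsigned long read_two_samples(void)
{
	unsigned long sum;

	pthread_mutex_lock(&sample_lock);
	sum = __read_sample() + __read_sample();
	pthread_mutex_unlock(&sample_lock);

	return sum;
}

int main(void)
{
	sample_value = 21;
	printf("%lu %lu\n", read_sample(), read_two_samples());
	return 0;
}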
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2894 return v_table[pxvid].vd; 2960 return v_table[pxvid].vd;
2895} 2961}
2896 2962
2897void i915_update_gfx_val(struct drm_i915_private *dev_priv) 2963static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
2898{ 2964{
2899 struct timespec now, diff1; 2965 struct timespec now, diff1;
2900 u64 diff; 2966 u64 diff;
2901 unsigned long diffms; 2967 unsigned long diffms;
2902 u32 count; 2968 u32 count;
2903 2969
2904 if (dev_priv->info->gen != 5) 2970 assert_spin_locked(&mchdev_lock);
2905 return;
2906 2971
2907 getrawmonotonic(&now); 2972 getrawmonotonic(&now);
2908 diff1 = timespec_sub(now, dev_priv->last_time2); 2973 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
2909 2974
2910 /* Don't divide by 0 */ 2975 /* Don't divide by 0 */
2911 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 2976 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2914 2979
2915 count = I915_READ(GFXEC); 2980 count = I915_READ(GFXEC);
2916 2981
2917 if (count < dev_priv->last_count2) { 2982 if (count < dev_priv->ips.last_count2) {
2918 diff = ~0UL - dev_priv->last_count2; 2983 diff = ~0UL - dev_priv->ips.last_count2;
2919 diff += count; 2984 diff += count;
2920 } else { 2985 } else {
2921 diff = count - dev_priv->last_count2; 2986 diff = count - dev_priv->ips.last_count2;
2922 } 2987 }
2923 2988
2924 dev_priv->last_count2 = count; 2989 dev_priv->ips.last_count2 = count;
2925 dev_priv->last_time2 = now; 2990 dev_priv->ips.last_time2 = now;
2926 2991
2927 /* More magic constants... */ 2992 /* More magic constants... */
2928 diff = diff * 1181; 2993 diff = diff * 1181;
2929 diff = div_u64(diff, diffms * 10); 2994 diff = div_u64(diff, diffms * 10);
2930 dev_priv->gfx_power = diff; 2995 dev_priv->ips.gfx_power = diff;
2931} 2996}
2932 2997
2933unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 2998void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2999{
3000 if (dev_priv->info->gen != 5)
3001 return;
3002
3003 spin_lock_irq(&mchdev_lock);
3004
3005 __i915_update_gfx_val(dev_priv);
3006
3007 spin_unlock_irq(&mchdev_lock);
3008}
3009
3010static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
2934{ 3011{
2935 unsigned long t, corr, state1, corr2, state2; 3012 unsigned long t, corr, state1, corr2, state2;
2936 u32 pxvid, ext_v; 3013 u32 pxvid, ext_v;
2937 3014
2938 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 3015 assert_spin_locked(&mchdev_lock);
3016
3017 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
2939 pxvid = (pxvid >> 24) & 0x7f; 3018 pxvid = (pxvid >> 24) & 0x7f;
2940 ext_v = pvid_to_extvid(dev_priv, pxvid); 3019 ext_v = pvid_to_extvid(dev_priv, pxvid);
2941 3020
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2955 3034
2956 corr = corr * ((150142 * state1) / 10000 - 78642); 3035 corr = corr * ((150142 * state1) / 10000 - 78642);
2957 corr /= 100000; 3036 corr /= 100000;
2958 corr2 = (corr * dev_priv->corr); 3037 corr2 = (corr * dev_priv->ips.corr);
2959 3038
2960 state2 = (corr2 * state1) / 10000; 3039 state2 = (corr2 * state1) / 10000;
2961 state2 /= 100; /* convert to mW */ 3040 state2 /= 100; /* convert to mW */
2962 3041
2963 i915_update_gfx_val(dev_priv); 3042 __i915_update_gfx_val(dev_priv);
2964 3043
2965 return dev_priv->gfx_power + state2; 3044 return dev_priv->ips.gfx_power + state2;
2966} 3045}
2967 3046
2968/* Global for IPS driver to get at the current i915 device */ 3047unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2969static struct drm_i915_private *i915_mch_dev; 3048{
2970/* 3049 unsigned long val;
2971 * Lock protecting IPS related data structures 3050
2972 * - i915_mch_dev 3051 if (dev_priv->info->gen != 5)
2973 * - dev_priv->max_delay 3052 return 0;
2974 * - dev_priv->min_delay 3053
2975 * - dev_priv->fmax 3054 spin_lock_irq(&mchdev_lock);
2976 * - dev_priv->gpu_busy 3055
2977 */ 3056 val = __i915_gfx_val(dev_priv);
2978static DEFINE_SPINLOCK(mchdev_lock); 3057
3058 spin_unlock_irq(&mchdev_lock);
3059
3060 return val;
3061}
2979 3062
2980/** 3063/**
2981 * i915_read_mch_val - return value for IPS use 3064 * i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
2988 struct drm_i915_private *dev_priv; 3071 struct drm_i915_private *dev_priv;
2989 unsigned long chipset_val, graphics_val, ret = 0; 3072 unsigned long chipset_val, graphics_val, ret = 0;
2990 3073
2991 spin_lock(&mchdev_lock); 3074 spin_lock_irq(&mchdev_lock);
2992 if (!i915_mch_dev) 3075 if (!i915_mch_dev)
2993 goto out_unlock; 3076 goto out_unlock;
2994 dev_priv = i915_mch_dev; 3077 dev_priv = i915_mch_dev;
2995 3078
2996 chipset_val = i915_chipset_val(dev_priv); 3079 chipset_val = __i915_chipset_val(dev_priv);
2997 graphics_val = i915_gfx_val(dev_priv); 3080 graphics_val = __i915_gfx_val(dev_priv);
2998 3081
2999 ret = chipset_val + graphics_val; 3082 ret = chipset_val + graphics_val;
3000 3083
3001out_unlock: 3084out_unlock:
3002 spin_unlock(&mchdev_lock); 3085 spin_unlock_irq(&mchdev_lock);
3003 3086
3004 return ret; 3087 return ret;
3005} 3088}
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
3015 struct drm_i915_private *dev_priv; 3098 struct drm_i915_private *dev_priv;
3016 bool ret = true; 3099 bool ret = true;
3017 3100
3018 spin_lock(&mchdev_lock); 3101 spin_lock_irq(&mchdev_lock);
3019 if (!i915_mch_dev) { 3102 if (!i915_mch_dev) {
3020 ret = false; 3103 ret = false;
3021 goto out_unlock; 3104 goto out_unlock;
3022 } 3105 }
3023 dev_priv = i915_mch_dev; 3106 dev_priv = i915_mch_dev;
3024 3107
3025 if (dev_priv->max_delay > dev_priv->fmax) 3108 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
3026 dev_priv->max_delay--; 3109 dev_priv->ips.max_delay--;
3027 3110
3028out_unlock: 3111out_unlock:
3029 spin_unlock(&mchdev_lock); 3112 spin_unlock_irq(&mchdev_lock);
3030 3113
3031 return ret; 3114 return ret;
3032} 3115}
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
3043 struct drm_i915_private *dev_priv; 3126 struct drm_i915_private *dev_priv;
3044 bool ret = true; 3127 bool ret = true;
3045 3128
3046 spin_lock(&mchdev_lock); 3129 spin_lock_irq(&mchdev_lock);
3047 if (!i915_mch_dev) { 3130 if (!i915_mch_dev) {
3048 ret = false; 3131 ret = false;
3049 goto out_unlock; 3132 goto out_unlock;
3050 } 3133 }
3051 dev_priv = i915_mch_dev; 3134 dev_priv = i915_mch_dev;
3052 3135
3053 if (dev_priv->max_delay < dev_priv->min_delay) 3136 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
3054 dev_priv->max_delay++; 3137 dev_priv->ips.max_delay++;
3055 3138
3056out_unlock: 3139out_unlock:
3057 spin_unlock(&mchdev_lock); 3140 spin_unlock_irq(&mchdev_lock);
3058 3141
3059 return ret; 3142 return ret;
3060} 3143}
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
3068bool i915_gpu_busy(void) 3151bool i915_gpu_busy(void)
3069{ 3152{
3070 struct drm_i915_private *dev_priv; 3153 struct drm_i915_private *dev_priv;
3154 struct intel_ring_buffer *ring;
3071 bool ret = false; 3155 bool ret = false;
3156 int i;
3072 3157
3073 spin_lock(&mchdev_lock); 3158 spin_lock_irq(&mchdev_lock);
3074 if (!i915_mch_dev) 3159 if (!i915_mch_dev)
3075 goto out_unlock; 3160 goto out_unlock;
3076 dev_priv = i915_mch_dev; 3161 dev_priv = i915_mch_dev;
3077 3162
3078 ret = dev_priv->busy; 3163 for_each_ring(ring, dev_priv, i)
3164 ret |= !list_empty(&ring->request_list);
3079 3165
3080out_unlock: 3166out_unlock:
3081 spin_unlock(&mchdev_lock); 3167 spin_unlock_irq(&mchdev_lock);
3082 3168
3083 return ret; 3169 return ret;
3084} 3170}
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
3095 struct drm_i915_private *dev_priv; 3181 struct drm_i915_private *dev_priv;
3096 bool ret = true; 3182 bool ret = true;
3097 3183
3098 spin_lock(&mchdev_lock); 3184 spin_lock_irq(&mchdev_lock);
3099 if (!i915_mch_dev) { 3185 if (!i915_mch_dev) {
3100 ret = false; 3186 ret = false;
3101 goto out_unlock; 3187 goto out_unlock;
3102 } 3188 }
3103 dev_priv = i915_mch_dev; 3189 dev_priv = i915_mch_dev;
3104 3190
3105 dev_priv->max_delay = dev_priv->fstart; 3191 dev_priv->ips.max_delay = dev_priv->ips.fstart;
3106 3192
3107 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 3193 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
3108 ret = false; 3194 ret = false;
3109 3195
3110out_unlock: 3196out_unlock:
3111 spin_unlock(&mchdev_lock); 3197 spin_unlock_irq(&mchdev_lock);
3112 3198
3113 return ret; 3199 return ret;
3114} 3200}
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
3136 3222
3137void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 3223void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3138{ 3224{
3139 spin_lock(&mchdev_lock); 3225 /* We only register the i915 ips part with intel-ips once everything is
3226 * set up, to avoid intel-ips sneaking in and reading bogus values. */
3227 spin_lock_irq(&mchdev_lock);
3140 i915_mch_dev = dev_priv; 3228 i915_mch_dev = dev_priv;
3141 dev_priv->mchdev_lock = &mchdev_lock; 3229 spin_unlock_irq(&mchdev_lock);
3142 spin_unlock(&mchdev_lock);
3143 3230
3144 ips_ping_for_i915_load(); 3231 ips_ping_for_i915_load();
3145} 3232}
3146 3233
3147void intel_gpu_ips_teardown(void) 3234void intel_gpu_ips_teardown(void)
3148{ 3235{
3149 spin_lock(&mchdev_lock); 3236 spin_lock_irq(&mchdev_lock);
3150 i915_mch_dev = NULL; 3237 i915_mch_dev = NULL;
3151 spin_unlock(&mchdev_lock); 3238 spin_unlock_irq(&mchdev_lock);
3152} 3239}
3153static void intel_init_emon(struct drm_device *dev) 3240static void intel_init_emon(struct drm_device *dev)
3154{ 3241{
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
3218 3305
3219 lcfuse = I915_READ(LCFUSE02); 3306 lcfuse = I915_READ(LCFUSE02);
3220 3307
3221 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 3308 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
3222} 3309}
3223 3310
3224void intel_disable_gt_powersave(struct drm_device *dev) 3311void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
3731 dev_priv->display.init_pch_clock_gating(dev); 3818 dev_priv->display.init_pch_clock_gating(dev);
3732} 3819}
3733 3820
3734static void gen6_sanitize_pm(struct drm_device *dev)
3735{
3736 struct drm_i915_private *dev_priv = dev->dev_private;
3737 u32 limits, delay, old;
3738
3739 gen6_gt_force_wake_get(dev_priv);
3740
3741 old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
3742 /* Make sure we continue to get interrupts
3743 * until we hit the minimum or maximum frequencies.
3744 */
3745 limits &= ~(0x3f << 16 | 0x3f << 24);
3746 delay = dev_priv->cur_delay;
3747 if (delay < dev_priv->max_delay)
3748 limits |= (dev_priv->max_delay & 0x3f) << 24;
3749 if (delay > dev_priv->min_delay)
3750 limits |= (dev_priv->min_delay & 0x3f) << 16;
3751
3752 if (old != limits) {
3753 /* Note that the known failure case is to read back 0. */
3754 DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
3755 "expected %08x, was %08x\n", limits, old);
3756 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3757 }
3758
3759 gen6_gt_force_wake_put(dev_priv);
3760}
3761
3762void intel_sanitize_pm(struct drm_device *dev)
3763{
3764 struct drm_i915_private *dev_priv = dev->dev_private;
3765
3766 if (dev_priv->display.sanitize_pm)
3767 dev_priv->display.sanitize_pm(dev);
3768}
3769
3770/* Starting with Haswell, we have different power wells for 3821/* Starting with Haswell, we have different power wells for
3771 * different parts of the GPU. This attempts to enable them all. 3822 * different parts of the GPU. This attempts to enable them all.
3772 */ 3823 */
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
3852 dev_priv->display.update_wm = NULL; 3903 dev_priv->display.update_wm = NULL;
3853 } 3904 }
3854 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 3905 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3855 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3856 } else if (IS_IVYBRIDGE(dev)) { 3906 } else if (IS_IVYBRIDGE(dev)) {
3857 /* FIXME: detect B0+ stepping and use auto training */ 3907 /* FIXME: detect B0+ stepping and use auto training */
3858 if (SNB_READ_WM0_LATENCY()) { 3908 if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
3864 dev_priv->display.update_wm = NULL; 3914 dev_priv->display.update_wm = NULL;
3865 } 3915 }
3866 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 3916 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3867 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3868 } else if (IS_HASWELL(dev)) { 3917 } else if (IS_HASWELL(dev)) {
3869 if (SNB_READ_WM0_LATENCY()) { 3918 if (SNB_READ_WM0_LATENCY()) {
3870 dev_priv->display.update_wm = sandybridge_update_wm; 3919 dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
3876 dev_priv->display.update_wm = NULL; 3925 dev_priv->display.update_wm = NULL;
3877 } 3926 }
3878 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 3927 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
3879 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3880 } else 3928 } else
3881 dev_priv->display.update_wm = NULL; 3929 dev_priv->display.update_wm = NULL;
3882 } else if (IS_VALLEYVIEW(dev)) { 3930 } else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
3955 else 4003 else
3956 forcewake_ack = FORCEWAKE_ACK; 4004 forcewake_ack = FORCEWAKE_ACK;
3957 4005
3958 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) 4006 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3959 DRM_ERROR("Force wake wait timed out\n"); 4007 FORCEWAKE_ACK_TIMEOUT_MS))
4008 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3960 4009
3961 I915_WRITE_NOTRACE(FORCEWAKE, 1); 4010 I915_WRITE_NOTRACE(FORCEWAKE, 1);
3962 POSTING_READ(FORCEWAKE); 4011 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3963 4012
3964 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 4013 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3965 DRM_ERROR("Force wake wait timed out\n"); 4014 FORCEWAKE_ACK_TIMEOUT_MS))
4015 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3966 4016
3967 __gen6_gt_wait_for_thread_c0(dev_priv); 4017 __gen6_gt_wait_for_thread_c0(dev_priv);
3968} 4018}
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
3976 else 4026 else
3977 forcewake_ack = FORCEWAKE_MT_ACK; 4027 forcewake_ack = FORCEWAKE_MT_ACK;
3978 4028
3979 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) 4029 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3980 DRM_ERROR("Force wake wait timed out\n"); 4030 FORCEWAKE_ACK_TIMEOUT_MS))
4031 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3981 4032
3982 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); 4033 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
3983 POSTING_READ(FORCEWAKE_MT); 4034 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3984 4035
3985 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 4036 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3986 DRM_ERROR("Force wake wait timed out\n"); 4037 FORCEWAKE_ACK_TIMEOUT_MS))
4038 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3987 4039
3988 __gen6_gt_wait_for_thread_c0(dev_priv); 4040 __gen6_gt_wait_for_thread_c0(dev_priv);
3989} 4041}
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4016static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4068static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4017{ 4069{
4018 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4070 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4019 POSTING_READ(FORCEWAKE); 4071 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4020 gen6_gt_check_fifodbg(dev_priv); 4072 gen6_gt_check_fifodbg(dev_priv);
4021} 4073}
4022 4074
4023static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4075static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4024{ 4076{
4025 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); 4077 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
4026 POSTING_READ(FORCEWAKE_MT); 4078 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4027 gen6_gt_check_fifodbg(dev_priv); 4079 gen6_gt_check_fifodbg(dev_priv);
4028} 4080}
4029 4081
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4062 4114
4063static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4115static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4064{ 4116{
4065 /* Already awake? */ 4117 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4066 if ((I915_READ(0x130094) & 0xa1) == 0xa1) 4118 FORCEWAKE_ACK_TIMEOUT_MS))
4067 return; 4119 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4068 4120
4069 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); 4121 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
4070 POSTING_READ(FORCEWAKE_VLV);
4071 4122
4072 if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) 4123 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4073 DRM_ERROR("Force wake wait timed out\n"); 4124 FORCEWAKE_ACK_TIMEOUT_MS))
4125 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4074 4126
4075 __gen6_gt_wait_for_thread_c0(dev_priv); 4127 __gen6_gt_wait_for_thread_c0(dev_priv);
4076} 4128}
4077 4129
4078static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4130static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4079{ 4131{
4080 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); 4132 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
4081 /* FIXME: confirm VLV behavior with Punit folks */ 4133 /* The below doubles as a POSTING_READ */
4082 POSTING_READ(FORCEWAKE_VLV); 4134 gen6_gt_check_fifodbg(dev_priv);
4083} 4135}
4084 4136
4085void intel_gt_init(struct drm_device *dev) 4137void intel_gt_init(struct drm_device *dev)
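Each forcewake routine above now follows the same bounded handshake: wait for the previous ack to clear, write the wake request, wait for the ack to assert, and report an error if either wait exceeds FORCEWAKE_ACK_TIMEOUT_MS. The sketch below is a self-contained model of that handshake; an in-memory variable stands in for the MMIO register, the loop count stands in for the 2 ms timeout, and all names are made up.

#include <stdbool.h>
#include <stdio.h>

#define ACK_TIMEOUT_LOOPS 1000  /* stand-in for the 2 ms timeout */

static int fake_ack;  /* models the FORCEWAKE ack register bit */

static void write_wake_request(int on)
{
	/* Real hardware raises the ack some time after the request;
	 * this fake register reacts immediately. */
	fake_ack = on;
}

static bool wait_for_ack(int expected)
{
	int tries;

	for (tries = 0; tries < ACK_TIMEOUT_LOOPS; tries++) {
		if ((fake_ack & 1) == expected)
			return true;
	}
	return false;  /* would be a DRM_ERROR timeout in the driver */
}

static void force_wake_get(void)
{
	if (!wait_for_ack(0))
		printf("timed out waiting for old ack to clear\n");

	write_wake_request(1);

	if (!wait_for_ack(1))
		printf("timed out waiting for ack\n");
}

static void force_wake_put(void)
{
	write_wake_request(0);
}

int main(void)
{
	force_wake_get();
	printf("ack = %d (awake)\n", fake_ack);
	force_wake_put();
	printf("ack = %d (released)\n", fake_ack);
	return 0;
}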
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aef516cc6fa..ecbc5c5dbbbc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
261 return 0; 261 return 0;
262} 262}
263 263
264static int
265gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
266{
267 int ret;
268
269 ret = intel_ring_begin(ring, 4);
270 if (ret)
271 return ret;
272
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
275 PIPE_CONTROL_STALL_AT_SCOREBOARD);
276 intel_ring_emit(ring, 0);
277 intel_ring_emit(ring, 0);
278 intel_ring_advance(ring);
279
280 return 0;
281}
282
283static int
284gen7_render_ring_flush(struct intel_ring_buffer *ring,
285 u32 invalidate_domains, u32 flush_domains)
286{
287 u32 flags = 0;
288 struct pipe_control *pc = ring->private;
289 u32 scratch_addr = pc->gtt_offset + 128;
290 int ret;
291
292 /*
293 * Ensure that any following seqno writes only happen when the render
294 * cache is indeed flushed.
295 *
296 * Workaround: 4th PIPE_CONTROL command (except the ones with only
297 * read-cache invalidate bits set) must have the CS_STALL bit set. We
298 * don't try to be clever and just set it unconditionally.
299 */
300 flags |= PIPE_CONTROL_CS_STALL;
301
302 /* Just flush everything. Experiments have shown that reducing the
303 * number of bits based on the write domains has little performance
304 * impact.
305 */
306 if (flush_domains) {
307 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
308 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
309 }
310 if (invalidate_domains) {
311 flags |= PIPE_CONTROL_TLB_INVALIDATE;
312 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
313 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
314 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
315 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
317 /*
318 * TLB invalidate requires a post-sync write.
319 */
320 flags |= PIPE_CONTROL_QW_WRITE;
321
322 /* Workaround: we must issue a pipe_control with CS-stall bit
323 * set before a pipe_control command that has the state cache
324 * invalidate bit set. */
325 gen7_render_ring_cs_stall_wa(ring);
326 }
327
328 ret = intel_ring_begin(ring, 4);
329 if (ret)
330 return ret;
331
332 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
333 intel_ring_emit(ring, flags);
334 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
335 intel_ring_emit(ring, 0);
336 intel_ring_advance(ring);
337
338 return 0;
339}
340
264static void ring_write_tail(struct intel_ring_buffer *ring, 341static void ring_write_tail(struct intel_ring_buffer *ring,
265 u32 value) 342 u32 value)
266{ 343{
@@ -381,12 +458,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
381 458
382 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 459 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
383 460
384 ret = i915_gem_object_pin(obj, 4096, true); 461 ret = i915_gem_object_pin(obj, 4096, true, false);
385 if (ret) 462 if (ret)
386 goto err_unref; 463 goto err_unref;
387 464
388 pc->gtt_offset = obj->gtt_offset; 465 pc->gtt_offset = obj->gtt_offset;
389 pc->cpu_page = kmap(obj->pages[0]); 466 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
390 if (pc->cpu_page == NULL) 467 if (pc->cpu_page == NULL)
391 goto err_unpin; 468 goto err_unpin;
392 469
@@ -413,7 +490,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
413 return; 490 return;
414 491
415 obj = pc->obj; 492 obj = pc->obj;
416 kunmap(obj->pages[0]); 493
494 kunmap(sg_page(obj->pages->sgl));
417 i915_gem_object_unpin(obj); 495 i915_gem_object_unpin(obj);
418 drm_gem_object_unreference(&obj->base); 496 drm_gem_object_unreference(&obj->base);
419 497
@@ -461,7 +539,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
461 if (INTEL_INFO(dev)->gen >= 6) 539 if (INTEL_INFO(dev)->gen >= 6)
462 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 540 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
463 541
464 if (IS_IVYBRIDGE(dev)) 542 if (HAS_L3_GPU_CACHE(dev))
465 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 543 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
466 544
467 return ret; 545 return ret;
@@ -627,26 +705,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
627} 705}
628 706
629static u32 707static u32
630gen6_ring_get_seqno(struct intel_ring_buffer *ring) 708gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
631{ 709{
632 struct drm_device *dev = ring->dev;
633
634 /* Workaround to force correct ordering between irq and seqno writes on 710 /* Workaround to force correct ordering between irq and seqno writes on
635 * ivb (and maybe also on snb) by reading from a CS register (like 711 * ivb (and maybe also on snb) by reading from a CS register (like
636 * ACTHD) before reading the status page. */ 712 * ACTHD) before reading the status page. */
637 if (IS_GEN6(dev) || IS_GEN7(dev)) 713 if (!lazy_coherency)
638 intel_ring_get_active_head(ring); 714 intel_ring_get_active_head(ring);
639 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 715 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
640} 716}
641 717
642static u32 718static u32
643ring_get_seqno(struct intel_ring_buffer *ring) 719ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
644{ 720{
645 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 721 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
646} 722}
647 723
648static u32 724static u32
649pc_render_get_seqno(struct intel_ring_buffer *ring) 725pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
650{ 726{
651 struct pipe_control *pc = ring->private; 727 struct pipe_control *pc = ring->private;
652 return pc->cpu_page[0]; 728 return pc->cpu_page[0];
@@ -851,7 +927,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
851 927
852 spin_lock_irqsave(&dev_priv->irq_lock, flags); 928 spin_lock_irqsave(&dev_priv->irq_lock, flags);
853 if (ring->irq_refcount++ == 0) { 929 if (ring->irq_refcount++ == 0) {
854 if (IS_IVYBRIDGE(dev) && ring->id == RCS) 930 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
855 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | 931 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
856 GEN6_RENDER_L3_PARITY_ERROR)); 932 GEN6_RENDER_L3_PARITY_ERROR));
857 else 933 else
@@ -874,7 +950,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
874 950
875 spin_lock_irqsave(&dev_priv->irq_lock, flags); 951 spin_lock_irqsave(&dev_priv->irq_lock, flags);
876 if (--ring->irq_refcount == 0) { 952 if (--ring->irq_refcount == 0) {
877 if (IS_IVYBRIDGE(dev) && ring->id == RCS) 953 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
878 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 954 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
879 else 955 else
880 I915_WRITE_IMR(ring, ~0); 956 I915_WRITE_IMR(ring, ~0);
@@ -950,7 +1026,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
950 if (obj == NULL) 1026 if (obj == NULL)
951 return; 1027 return;
952 1028
953 kunmap(obj->pages[0]); 1029 kunmap(sg_page(obj->pages->sgl));
954 i915_gem_object_unpin(obj); 1030 i915_gem_object_unpin(obj);
955 drm_gem_object_unreference(&obj->base); 1031 drm_gem_object_unreference(&obj->base);
956 ring->status_page.obj = NULL; 1032 ring->status_page.obj = NULL;
@@ -971,13 +1047,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
971 1047
972 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1048 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
973 1049
974 ret = i915_gem_object_pin(obj, 4096, true); 1050 ret = i915_gem_object_pin(obj, 4096, true, false);
975 if (ret != 0) { 1051 if (ret != 0) {
976 goto err_unref; 1052 goto err_unref;
977 } 1053 }
978 1054
979 ring->status_page.gfx_addr = obj->gtt_offset; 1055 ring->status_page.gfx_addr = obj->gtt_offset;
980 ring->status_page.page_addr = kmap(obj->pages[0]); 1056 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
981 if (ring->status_page.page_addr == NULL) { 1057 if (ring->status_page.page_addr == NULL) {
982 ret = -ENOMEM; 1058 ret = -ENOMEM;
983 goto err_unpin; 1059 goto err_unpin;
@@ -1009,7 +1085,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1009 ring->dev = dev; 1085 ring->dev = dev;
1010 INIT_LIST_HEAD(&ring->active_list); 1086 INIT_LIST_HEAD(&ring->active_list);
1011 INIT_LIST_HEAD(&ring->request_list); 1087 INIT_LIST_HEAD(&ring->request_list);
1012 INIT_LIST_HEAD(&ring->gpu_write_list);
1013 ring->size = 32 * PAGE_SIZE; 1088 ring->size = 32 * PAGE_SIZE;
1014 1089
1015 init_waitqueue_head(&ring->irq_queue); 1090 init_waitqueue_head(&ring->irq_queue);
@@ -1029,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1029 1104
1030 ring->obj = obj; 1105 ring->obj = obj;
1031 1106
1032 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 1107 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
1033 if (ret) 1108 if (ret)
1034 goto err_unref; 1109 goto err_unref;
1035 1110
@@ -1378,7 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1378 1453
1379 if (INTEL_INFO(dev)->gen >= 6) { 1454 if (INTEL_INFO(dev)->gen >= 6) {
1380 ring->add_request = gen6_add_request; 1455 ring->add_request = gen6_add_request;
1381 ring->flush = gen6_render_ring_flush; 1456 ring->flush = gen7_render_ring_flush;
1457 if (INTEL_INFO(dev)->gen == 6)
1458 ring->flush = gen6_render_ring_flush;
1382 ring->irq_get = gen6_ring_get_irq; 1459 ring->irq_get = gen6_ring_get_irq;
1383 ring->irq_put = gen6_ring_put_irq; 1460 ring->irq_put = gen6_ring_put_irq;
1384 ring->irq_enable_mask = GT_USER_INTERRUPT; 1461 ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1480,7 +1557,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1480 ring->dev = dev; 1557 ring->dev = dev;
1481 INIT_LIST_HEAD(&ring->active_list); 1558 INIT_LIST_HEAD(&ring->active_list);
1482 INIT_LIST_HEAD(&ring->request_list); 1559 INIT_LIST_HEAD(&ring->request_list);
1483 INIT_LIST_HEAD(&ring->gpu_write_list);
1484 1560
1485 ring->size = size; 1561 ring->size = size;
1486 ring->effective_size = ring->size; 1562 ring->effective_size = ring->size;
@@ -1573,3 +1649,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1573 1649
1574 return intel_init_ring_buffer(dev, ring); 1650 return intel_init_ring_buffer(dev, ring);
1575} 1651}
1652
1653int
1654intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
1655{
1656 int ret;
1657
1658 if (!ring->gpu_caches_dirty)
1659 return 0;
1660
1661 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
1662 if (ret)
1663 return ret;
1664
1665 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
1666
1667 ring->gpu_caches_dirty = false;
1668 return 0;
1669}
1670
1671int
1672intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
1673{
1674 uint32_t flush_domains;
1675 int ret;
1676
1677 flush_domains = 0;
1678 if (ring->gpu_caches_dirty)
1679 flush_domains = I915_GEM_GPU_DOMAINS;
1680
1681 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1682 if (ret)
1683 return ret;
1684
1685 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1686
1687 ring->gpu_caches_dirty = false;
1688 return 0;
1689}
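The two helpers above hinge on a single gpu_caches_dirty flag: emitting commands marks the caches dirty, flushing clears the flag, and invalidation piggy-backs a flush when writes are still pending. Below is a compact userspace model of that bookkeeping; the names are illustrative and a printf stands in for ring->flush().

#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool gpu_caches_dirty;
};

static void emit_flush(const char *what)
{
	printf("emitting flush: %s\n", what);
}

static void ring_write(struct ring *ring)
{
	/* Any rendering leaves the caches dirty until the next flush. */
	ring->gpu_caches_dirty = true;
}

static void flush_all_caches(struct ring *ring)
{
	if (!ring->gpu_caches_dirty)
		return;
	emit_flush("flush GPU domains");
	ring->gpu_caches_dirty = false;
}

static void invalidate_all_caches(struct ring *ring)
{
	/* Invalidate always, but piggy-back a flush if writes are pending. */
	emit_flush(ring->gpu_caches_dirty ?
		   "invalidate + flush GPU domains" : "invalidate GPU domains");
	ring->gpu_caches_dirty = false;
}

int main(void)
{
	struct ring ring = { .gpu_caches_dirty = false };

	ring_write(&ring);
	flush_all_caches(&ring);      /* emits one flush */
	flush_all_caches(&ring);      /* no-op, caches already clean */
	invalidate_all_caches(&ring); /* invalidate only */
	return 0;
}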
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..2ea7a311a1f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
72 u32 flush_domains); 72 u32 flush_domains);
73 int (*add_request)(struct intel_ring_buffer *ring, 73 int (*add_request)(struct intel_ring_buffer *ring,
74 u32 *seqno); 74 u32 *seqno);
75 u32 (*get_seqno)(struct intel_ring_buffer *ring); 75 /* Some chipsets are not quite as coherent as advertised and need
76 * an expensive kick to force a true read of the up-to-date seqno.
77 * However, the up-to-date seqno is not always required and the last
78 * seen value is good enough. Note that the seqno will always be
79 * monotonic, even if not coherent.
80 */
81 u32 (*get_seqno)(struct intel_ring_buffer *ring,
82 bool lazy_coherency);
76 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 83 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
77 u32 offset, u32 length); 84 u32 offset, u32 length);
78 void (*cleanup)(struct intel_ring_buffer *ring); 85 void (*cleanup)(struct intel_ring_buffer *ring);
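A sketch of what the new lazy_coherency argument buys a caller: the cheap read may return a slightly stale (but still monotonic) value, while the non-lazy read pays for the extra register access to get the current one. The example below uses plain variables and made-up names; it models the semantics only, not the driver's read path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ring {
	uint32_t hws_seqno;   /* status page copy, may lag behind */
	uint32_t true_seqno;  /* what the hardware has really completed */
};

static uint32_t get_seqno(struct fake_ring *ring, bool lazy_coherency)
{
	if (!lazy_coherency)
		ring->hws_seqno = ring->true_seqno; /* the "expensive kick" */
	return ring->hws_seqno;
}

int main(void)
{
	struct fake_ring ring = { .hws_seqno = 41, .true_seqno = 42 };

	printf("lazy read:     %u\n", (unsigned)get_seqno(&ring, true));
	printf("coherent read: %u\n", (unsigned)get_seqno(&ring, false));
	return 0;
}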
@@ -101,15 +108,6 @@ struct intel_ring_buffer {
101 struct list_head request_list; 108 struct list_head request_list;
102 109
103 /** 110 /**
104 * List of objects currently pending a GPU write flush.
105 *
106 * All elements on this list will belong to either the
107 * active_list or flushing_list, last_rendering_seqno can
108 * be used to differentiate between the two elements.
109 */
110 struct list_head gpu_write_list;
111
112 /**
113 * Do we have some not yet emitted requests outstanding? 111 * Do we have some not yet emitted requests outstanding?
114 */ 112 */
115 u32 outstanding_lazy_request; 113 u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
204void intel_ring_advance(struct intel_ring_buffer *ring); 202void intel_ring_advance(struct intel_ring_buffer *ring);
205 203
206u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); 204u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
205int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
206int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
207 207
208int intel_init_render_ring_buffer(struct drm_device *dev); 208int intel_init_render_ring_buffer(struct drm_device *dev);
209int intel_init_bsd_ring_buffer(struct drm_device *dev); 209int intel_init_bsd_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d251d9d7a06c..0007a4d9bf6e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -96,7 +96,7 @@ struct intel_sdvo {
96 /* 96 /*
97 * Hotplug activation bits for this device 97 * Hotplug activation bits for this device
98 */ 98 */
99 uint8_t hotplug_active[2]; 99 uint16_t hotplug_active;
100 100
101 /** 101 /**
102 * This is used to select the color range of RBG outputs in HDMI mode. 102 * This is used to select the color range of RBG outputs in HDMI mode.
@@ -627,6 +627,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
627 &outputs, sizeof(outputs)); 627 &outputs, sizeof(outputs));
628} 628}
629 629
630static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
631 u16 *outputs)
632{
633 return intel_sdvo_get_value(intel_sdvo,
634 SDVO_CMD_GET_ACTIVE_OUTPUTS,
635 outputs, sizeof(*outputs));
636}
637
630static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, 638static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
631 int mode) 639 int mode)
632{ 640{
@@ -1141,51 +1149,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1141 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1149 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1142} 1150}
1143 1151
1144static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1152static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
1145{ 1153{
1146 struct drm_device *dev = encoder->dev; 1154 struct intel_sdvo_connector *intel_sdvo_connector =
1155 to_intel_sdvo_connector(&connector->base);
1156 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
1157 u16 active_outputs;
1158
1159 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1160
1161 if (active_outputs & intel_sdvo_connector->output_flag)
1162 return true;
1163 else
1164 return false;
1165}
1166
1167static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1168 enum pipe *pipe)
1169{
1170 struct drm_device *dev = encoder->base.dev;
1147 struct drm_i915_private *dev_priv = dev->dev_private; 1171 struct drm_i915_private *dev_priv = dev->dev_private;
1148 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1172 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1149 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1173 u32 tmp;
1174
1175 tmp = I915_READ(intel_sdvo->sdvo_reg);
1176
1177 if (!(tmp & SDVO_ENABLE))
1178 return false;
1179
1180 if (HAS_PCH_CPT(dev))
1181 *pipe = PORT_TO_PIPE_CPT(tmp);
1182 else
1183 *pipe = PORT_TO_PIPE(tmp);
1184
1185 return true;
1186}
1187
1188static void intel_disable_sdvo(struct intel_encoder *encoder)
1189{
1190 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1191 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1192 u32 temp;
1193
1194 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1195 if (0)
1196 intel_sdvo_set_encoder_power_state(intel_sdvo,
1197 DRM_MODE_DPMS_OFF);
1198
1199 temp = I915_READ(intel_sdvo->sdvo_reg);
1200 if ((temp & SDVO_ENABLE) != 0) {
1201 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1202 }
1203}
1204
1205static void intel_enable_sdvo(struct intel_encoder *encoder)
1206{
1207 struct drm_device *dev = encoder->base.dev;
1208 struct drm_i915_private *dev_priv = dev->dev_private;
1209 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1210 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1150 u32 temp; 1211 u32 temp;
1212 bool input1, input2;
1213 int i;
1214 u8 status;
1215
1216 temp = I915_READ(intel_sdvo->sdvo_reg);
1217 if ((temp & SDVO_ENABLE) == 0)
1218 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1219 for (i = 0; i < 2; i++)
1220 intel_wait_for_vblank(dev, intel_crtc->pipe);
1221
1222 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1223 /* Warn if the device reported failure to sync.
1224 * A lot of SDVO devices fail to notify of sync, but it's
1225	 * a given that if the status is a success, we succeeded.
1226 */
1227 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1228 DRM_DEBUG_KMS("First %s output reported failure to "
1229 "sync\n", SDVO_NAME(intel_sdvo));
1230 }
1231
1232 if (0)
1233 intel_sdvo_set_encoder_power_state(intel_sdvo,
1234 DRM_MODE_DPMS_ON);
1235 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1236}
1237
1238static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1239{
1240 struct drm_crtc *crtc;
1241 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1242
1243	/* SDVO supports only 2 DPMS states. */
1244 if (mode != DRM_MODE_DPMS_ON)
1245 mode = DRM_MODE_DPMS_OFF;
1246
1247 if (mode == connector->dpms)
1248 return;
1249
1250 connector->dpms = mode;
1251
1252 /* Only need to change hw state when actually enabled */
1253 crtc = intel_sdvo->base.base.crtc;
1254 if (!crtc) {
1255 intel_sdvo->base.connectors_active = false;
1256 return;
1257 }
1151 1258
1152 if (mode != DRM_MODE_DPMS_ON) { 1259 if (mode != DRM_MODE_DPMS_ON) {
1153 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1260 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1154 if (0) 1261 if (0)
1155 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1262 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1156 1263
1157 if (mode == DRM_MODE_DPMS_OFF) { 1264 intel_sdvo->base.connectors_active = false;
1158 temp = I915_READ(intel_sdvo->sdvo_reg); 1265
1159 if ((temp & SDVO_ENABLE) != 0) { 1266 intel_crtc_update_dpms(crtc);
1160 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1161 }
1162 }
1163 } else { 1267 } else {
1164 bool input1, input2; 1268 intel_sdvo->base.connectors_active = true;
1165 int i; 1269
1166 u8 status; 1270 intel_crtc_update_dpms(crtc);
1167
1168 temp = I915_READ(intel_sdvo->sdvo_reg);
1169 if ((temp & SDVO_ENABLE) == 0)
1170 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1171 for (i = 0; i < 2; i++)
1172 intel_wait_for_vblank(dev, intel_crtc->pipe);
1173
1174 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1175 /* Warn if the device reported failure to sync.
1176 * A lot of SDVO devices fail to notify of sync, but it's
1177 * a given it the status is a success, we succeeded.
1178 */
1179 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1180 DRM_DEBUG_KMS("First %s output reported failure to "
1181 "sync\n", SDVO_NAME(intel_sdvo));
1182 }
1183 1271
1184 if (0) 1272 if (0)
1185 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1273 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1186 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); 1274 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1187 } 1275 }
1188 return; 1276
1277 intel_modeset_check_state(connector->dev);
1189} 1278}
1190 1279
1191static int intel_sdvo_mode_valid(struct drm_connector *connector, 1280static int intel_sdvo_mode_valid(struct drm_connector *connector,
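
This hunk is the core of the SDVO conversion: the encoder-helper dpms callback is split into intel_enable_sdvo()/intel_disable_sdvo() plus an intel_sdvo_get_hw_state() readback, while the connector-level dpms only clamps the mode, updates connectors_active and defers to intel_crtc_update_dpms(). The intel_modeset_check_state() call at the end is not shown in this diff; purely as an illustration of the kind of cross-check the new ->get_hw_state() hook enables (the helper name and message below are made up), it could assert something like:

	/* Hypothetical consistency check; not code from this patch. */
	static void assert_encoder_state(struct intel_encoder *encoder)
	{
		enum pipe pipe;
		bool hw_on = encoder->get_hw_state(encoder, &pipe);

		WARN(hw_on != encoder->connectors_active,
		     "encoder hw state (%d) doesn't match sw tracking (%d)\n",
		     hw_on, encoder->connectors_active);
	}
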
@@ -1250,25 +1339,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1250 return true; 1339 return true;
1251} 1340}
1252 1341
1253static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) 1342static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
1254{ 1343{
1255 struct drm_device *dev = intel_sdvo->base.base.dev; 1344 struct drm_device *dev = intel_sdvo->base.base.dev;
1256 u8 response[2]; 1345 uint16_t hotplug;
1257 1346
1258 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1347 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
1259 * on the line. */ 1348 * on the line. */
1260 if (IS_I945G(dev) || IS_I945GM(dev)) 1349 if (IS_I945G(dev) || IS_I945GM(dev))
1261 return false; 1350 return 0;
1262 1351
1263 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1352 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1264 &response, 2) && response[0]; 1353 &hotplug, sizeof(hotplug)))
1354 return 0;
1355
1356 return hotplug;
1265} 1357}
1266 1358
1267static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) 1359static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1268{ 1360{
1269 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1361 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1270 1362
1271 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); 1363 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
1364 &intel_sdvo->hotplug_active, 2);
1272} 1365}
1273 1366
1274static bool 1367static bool
@@ -1344,7 +1437,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1344 } 1437 }
1345 } else 1438 } else
1346 status = connector_status_disconnected; 1439 status = connector_status_disconnected;
1347 connector->display_info.raw_edid = NULL;
1348 kfree(edid); 1440 kfree(edid);
1349 } 1441 }
1350 1442
@@ -1418,7 +1510,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1418 else 1510 else
1419 ret = connector_status_disconnected; 1511 ret = connector_status_disconnected;
1420 1512
1421 connector->display_info.raw_edid = NULL;
1422 kfree(edid); 1513 kfree(edid);
1423 } else 1514 } else
1424 ret = connector_status_connected; 1515 ret = connector_status_connected;
@@ -1464,7 +1555,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1464 drm_add_edid_modes(connector, edid); 1555 drm_add_edid_modes(connector, edid);
1465 } 1556 }
1466 1557
1467 connector->display_info.raw_edid = NULL;
1468 kfree(edid); 1558 kfree(edid);
1469 } 1559 }
1470} 1560}
@@ -1836,8 +1926,8 @@ set_value:
1836done: 1926done:
1837 if (intel_sdvo->base.base.crtc) { 1927 if (intel_sdvo->base.base.crtc) {
1838 struct drm_crtc *crtc = intel_sdvo->base.base.crtc; 1928 struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
1839 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1929 intel_set_mode(crtc, &crtc->mode,
1840 crtc->y, crtc->fb); 1930 crtc->x, crtc->y, crtc->fb);
1841 } 1931 }
1842 1932
1843 return 0; 1933 return 0;
@@ -1845,15 +1935,13 @@ done:
1845} 1935}
1846 1936
1847static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { 1937static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
1848 .dpms = intel_sdvo_dpms,
1849 .mode_fixup = intel_sdvo_mode_fixup, 1938 .mode_fixup = intel_sdvo_mode_fixup,
1850 .prepare = intel_encoder_prepare,
1851 .mode_set = intel_sdvo_mode_set, 1939 .mode_set = intel_sdvo_mode_set,
1852 .commit = intel_encoder_commit, 1940 .disable = intel_encoder_noop,
1853}; 1941};
1854 1942
1855static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 1943static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
1856 .dpms = drm_helper_connector_dpms, 1944 .dpms = intel_sdvo_dpms,
1857 .detect = intel_sdvo_detect, 1945 .detect = intel_sdvo_detect,
1858 .fill_modes = drm_helper_probe_single_connector_modes, 1946 .fill_modes = drm_helper_probe_single_connector_modes,
1859 .set_property = intel_sdvo_set_property, 1947 .set_property = intel_sdvo_set_property,
@@ -2025,6 +2113,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2025 connector->base.base.interlace_allowed = 1; 2113 connector->base.base.interlace_allowed = 1;
2026 connector->base.base.doublescan_allowed = 0; 2114 connector->base.base.doublescan_allowed = 0;
2027 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2115 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2116 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2028 2117
2029 intel_connector_attach_encoder(&connector->base, &encoder->base); 2118 intel_connector_attach_encoder(&connector->base, &encoder->base);
2030 drm_sysfs_connector_add(&connector->base.base); 2119 drm_sysfs_connector_add(&connector->base.base);
@@ -2063,17 +2152,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2063 2152
2064 intel_connector = &intel_sdvo_connector->base; 2153 intel_connector = &intel_sdvo_connector->base;
2065 connector = &intel_connector->base; 2154 connector = &intel_connector->base;
2066 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { 2155 if (intel_sdvo_get_hotplug_support(intel_sdvo) &
2156 intel_sdvo_connector->output_flag) {
2067 connector->polled = DRM_CONNECTOR_POLL_HPD; 2157 connector->polled = DRM_CONNECTOR_POLL_HPD;
2068 intel_sdvo->hotplug_active[0] |= 1 << device; 2158 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
2069 /* Some SDVO devices have one-shot hotplug interrupts. 2159 /* Some SDVO devices have one-shot hotplug interrupts.
2070 * Ensure that they get re-enabled when an interrupt happens. 2160 * Ensure that they get re-enabled when an interrupt happens.
2071 */ 2161 */
2072 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2162 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2073 intel_sdvo_enable_hotplug(intel_encoder); 2163 intel_sdvo_enable_hotplug(intel_encoder);
2074 } 2164 } else {
2075 else
2076 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2165 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2166 }
2077 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2167 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2078 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2168 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2079 2169
@@ -2081,8 +2171,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2081 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2171 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2082 intel_sdvo->is_hdmi = true; 2172 intel_sdvo->is_hdmi = true;
2083 } 2173 }
2084 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2174 intel_sdvo->base.cloneable = true;
2085 (1 << INTEL_ANALOG_CLONE_BIT));
2086 2175
2087 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2176 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2088 if (intel_sdvo->is_hdmi) 2177 if (intel_sdvo->is_hdmi)
@@ -2113,7 +2202,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2113 2202
2114 intel_sdvo->is_tv = true; 2203 intel_sdvo->is_tv = true;
2115 intel_sdvo->base.needs_tv_clock = true; 2204 intel_sdvo->base.needs_tv_clock = true;
2116 intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2205 intel_sdvo->base.cloneable = false;
2117 2206
2118 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2207 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2119 2208
@@ -2156,8 +2245,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2156 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2245 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2157 } 2246 }
2158 2247
2159 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2248 intel_sdvo->base.cloneable = true;
2160 (1 << INTEL_ANALOG_CLONE_BIT));
2161 2249
2162 intel_sdvo_connector_init(intel_sdvo_connector, 2250 intel_sdvo_connector_init(intel_sdvo_connector,
2163 intel_sdvo); 2251 intel_sdvo);
@@ -2189,8 +2277,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2189 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2277 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2190 } 2278 }
2191 2279
2192 intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | 2280 /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
2193 (1 << INTEL_SDVO_LVDS_CLONE_BIT)); 2281 * as opposed to native LVDS, where we upscale with the panel-fitter
2282 * (and hence only the native LVDS resolution could be cloned). */
2283 intel_sdvo->base.cloneable = true;
2194 2284
2195 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2196 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2286 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2575,6 +2665,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2575 2665
2576 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); 2666 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
2577 2667
2668 intel_encoder->disable = intel_disable_sdvo;
2669 intel_encoder->enable = intel_enable_sdvo;
2670 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2671
2578 /* In default case sdvo lvds is false */ 2672 /* In default case sdvo lvds is false */
2579 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2673 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2580 goto err; 2674 goto err;
@@ -2589,7 +2683,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2589 /* Only enable the hotplug irq if we need it, to work around noisy 2683 /* Only enable the hotplug irq if we need it, to work around noisy
2590 * hotplug lines. 2684 * hotplug lines.
2591 */ 2685 */
2592 if (intel_sdvo->hotplug_active[0]) 2686 if (intel_sdvo->hotplug_active)
2593 dev_priv->hotplug_supported_mask |= hotplug_mask; 2687 dev_priv->hotplug_supported_mask |= hotplug_mask;
2594 2688
2595 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 2689 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ccfb2ff4c31d..62bb048c135e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -835,22 +835,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
835 base); 835 base);
836} 836}
837 837
838static bool
839intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
840{
841 struct drm_device *dev = encoder->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private;
843 u32 tmp = I915_READ(TV_CTL);
844
845 if (!(tmp & TV_ENC_ENABLE))
846 return false;
847
848 *pipe = PORT_TO_PIPE(tmp);
849
850 return true;
851}
852
838static void 853static void
839intel_tv_dpms(struct drm_encoder *encoder, int mode) 854intel_enable_tv(struct intel_encoder *encoder)
840{ 855{
841 struct drm_device *dev = encoder->dev; 856 struct drm_device *dev = encoder->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private; 857 struct drm_i915_private *dev_priv = dev->dev_private;
843 858
844 switch (mode) { 859 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
845 case DRM_MODE_DPMS_ON: 860}
846 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); 861
847 break; 862static void
848 case DRM_MODE_DPMS_STANDBY: 863intel_disable_tv(struct intel_encoder *encoder)
849 case DRM_MODE_DPMS_SUSPEND: 864{
850 case DRM_MODE_DPMS_OFF: 865 struct drm_device *dev = encoder->base.dev;
851 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); 866 struct drm_i915_private *dev_priv = dev->dev_private;
852 break; 867
853 } 868 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
854} 869}
855 870
856static const struct tv_mode * 871static const struct tv_mode *
@@ -894,17 +909,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
894 const struct drm_display_mode *mode, 909 const struct drm_display_mode *mode,
895 struct drm_display_mode *adjusted_mode) 910 struct drm_display_mode *adjusted_mode)
896{ 911{
897 struct drm_device *dev = encoder->dev;
898 struct intel_tv *intel_tv = enc_to_intel_tv(encoder); 912 struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
899 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 913 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
900 struct intel_encoder *other_encoder;
901 914
902 if (!tv_mode) 915 if (!tv_mode)
903 return false; 916 return false;
904 917
905 for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) 918 if (intel_encoder_check_is_cloned(&intel_tv->base))
906 if (&other_encoder->base != encoder) 919 return false;
907 return false;
908 920
909 adjusted_mode->clock = tv_mode->clock; 921 adjusted_mode->clock = tv_mode->clock;
910 return true; 922 return true;
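
The open-coded clone check that the hunk removes is replaced by intel_encoder_check_is_cloned(), whose body lives outside this diff. Judging from the deleted loop, the helper plausibly looks like the sketch below; apart from the function name and for_each_encoder_on_crtc(), which both appear in the hunk, the details are assumptions:

	/* Sketch inferred from the removed loop; illustrative only. */
	bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
	{
		struct intel_encoder *other;
		struct drm_device *dev = encoder->base.dev;

		if (!encoder->base.crtc)
			return false;

		/* Another encoder on the same crtc means this output is cloned. */
		for_each_encoder_on_crtc(dev, encoder->base.crtc, other)
			if (other != encoder)
				return true;

		return false;
	}
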
@@ -1302,12 +1314,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1302 if (force) { 1314 if (force) {
1303 struct intel_load_detect_pipe tmp; 1315 struct intel_load_detect_pipe tmp;
1304 1316
1305 if (intel_get_load_detect_pipe(&intel_tv->base, connector, 1317 if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
1306 &mode, &tmp)) {
1307 type = intel_tv_detect_type(intel_tv, connector); 1318 type = intel_tv_detect_type(intel_tv, connector);
1308 intel_release_load_detect_pipe(&intel_tv->base, 1319 intel_release_load_detect_pipe(connector, &tmp);
1309 connector,
1310 &tmp);
1311 } else 1320 } else
1312 return connector_status_unknown; 1321 return connector_status_unknown;
1313 } else 1322 } else
@@ -1473,22 +1482,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1473 } 1482 }
1474 1483
1475 if (changed && crtc) 1484 if (changed && crtc)
1476 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1485 intel_set_mode(crtc, &crtc->mode,
1477 crtc->y, crtc->fb); 1486 crtc->x, crtc->y, crtc->fb);
1478out: 1487out:
1479 return ret; 1488 return ret;
1480} 1489}
1481 1490
1482static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { 1491static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1483 .dpms = intel_tv_dpms,
1484 .mode_fixup = intel_tv_mode_fixup, 1492 .mode_fixup = intel_tv_mode_fixup,
1485 .prepare = intel_encoder_prepare,
1486 .mode_set = intel_tv_mode_set, 1493 .mode_set = intel_tv_mode_set,
1487 .commit = intel_encoder_commit, 1494 .disable = intel_encoder_noop,
1488}; 1495};
1489 1496
1490static const struct drm_connector_funcs intel_tv_connector_funcs = { 1497static const struct drm_connector_funcs intel_tv_connector_funcs = {
1491 .dpms = drm_helper_connector_dpms, 1498 .dpms = intel_connector_dpms,
1492 .detect = intel_tv_detect, 1499 .detect = intel_tv_detect,
1493 .destroy = intel_tv_destroy, 1500 .destroy = intel_tv_destroy,
1494 .set_property = intel_tv_set_property, 1501 .set_property = intel_tv_set_property,
@@ -1618,10 +1625,15 @@ intel_tv_init(struct drm_device *dev)
1618 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1625 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1619 DRM_MODE_ENCODER_TVDAC); 1626 DRM_MODE_ENCODER_TVDAC);
1620 1627
1628 intel_encoder->enable = intel_enable_tv;
1629 intel_encoder->disable = intel_disable_tv;
1630 intel_encoder->get_hw_state = intel_tv_get_hw_state;
1631 intel_connector->get_hw_state = intel_connector_get_hw_state;
1632
1621 intel_connector_attach_encoder(intel_connector, intel_encoder); 1633 intel_connector_attach_encoder(intel_connector, intel_encoder);
1622 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1634 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1623 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1635 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1624 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1636 intel_encoder->cloneable = false;
1625 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); 1637 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
1626 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1638 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1627 intel_tv->type = DRM_MODE_CONNECTOR_Unknown; 1639 intel_tv->type = DRM_MODE_CONNECTOR_Unknown;