aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c9
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c16
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c37
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c34
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c393
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c451
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c299
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h274
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1215
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c214
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c23
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c567
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c146
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h280
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c94
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c172
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h57
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c124
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1832
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c386
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h144
-rw-r--r--drivers/gpu/drm/i915/intel_dp_i2c.c273
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h64
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c93
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c16
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c97
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c291
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c23
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c1406
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c849
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c154
35 files changed, 6950 insertions, 3131 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096bc..9929f84ec3e1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
15 intel_lvds.o \ 15 intel_lvds.o \
16 intel_bios.o \ 16 intel_bios.o \
17 intel_dp.o \ 17 intel_dp.o \
18 intel_dp_i2c.o \
19 intel_hdmi.o \ 18 intel_hdmi.o \
20 intel_sdvo.o \ 19 intel_sdvo.o \
21 intel_modes.o \ 20 intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
23 intel_fb.o \ 22 intel_fb.o \
24 intel_tv.o \ 23 intel_tv.o \
25 intel_dvo.o \ 24 intel_dvo.o \
25 intel_overlay.o \
26 dvo_ch7xxx.o \ 26 dvo_ch7xxx.o \
27 dvo_ch7017.o \ 27 dvo_ch7017.o \
28 dvo_ivch.o \ 28 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531db..1184c14ba87d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
249 if (val != CH7017_DEVICE_ID_VALUE && 249 if (val != CH7017_DEVICE_ID_VALUE &&
250 val != CH7018_DEVICE_ID_VALUE && 250 val != CH7018_DEVICE_ID_VALUE &&
251 val != CH7019_DEVICE_ID_VALUE) { 251 val != CH7019_DEVICE_ID_VALUE) {
252 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", 252 DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
253 "Slave %d.\n",
253 val, i2cbus->adapter.name,dvo->slave_addr); 254 val, i2cbus->adapter.name,dvo->slave_addr);
254 goto fail; 255 goto fail;
255 } 256 }
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
284 uint8_t horizontal_active_pixel_output, vertical_active_line_output; 285 uint8_t horizontal_active_pixel_output, vertical_active_line_output;
285 uint8_t active_input_line_output; 286 uint8_t active_input_line_output;
286 287
287 DRM_DEBUG("Registers before mode setting\n"); 288 DRM_DEBUG_KMS("Registers before mode setting\n");
288 ch7017_dump_regs(dvo); 289 ch7017_dump_regs(dvo);
289 290
290 /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ 291 /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
346 /* Turn the LVDS back on with new settings. */ 347 /* Turn the LVDS back on with new settings. */
347 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); 348 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
348 349
349 DRM_DEBUG("Registers after mode setting\n"); 350 DRM_DEBUG_KMS("Registers after mode setting\n");
350 ch7017_dump_regs(dvo); 351 ch7017_dump_regs(dvo);
351} 352}
352 353
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
386#define DUMP(reg) \ 387#define DUMP(reg) \
387do { \ 388do { \
388 ch7017_read(dvo, reg, &val); \ 389 ch7017_read(dvo, reg, &val); \
389 DRM_DEBUG(#reg ": %02x\n", val); \ 390 DRM_DEBUG_KMS(#reg ": %02x\n", val); \
390} while (0) 391} while (0)
391 392
392 DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); 393 DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b896289680..d56ff5cc22b2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
152 }; 152 };
153 153
154 if (!ch7xxx->quiet) { 154 if (!ch7xxx->quiet) {
155 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 155 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
156 addr, i2cbus->adapter.name, dvo->slave_addr); 156 addr, i2cbus->adapter.name, dvo->slave_addr);
157 } 157 }
158 return false; 158 return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
179 return true; 179 return true;
180 180
181 if (!ch7xxx->quiet) { 181 if (!ch7xxx->quiet) {
182 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 182 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
183 addr, i2cbus->adapter.name, dvo->slave_addr); 183 addr, i2cbus->adapter.name, dvo->slave_addr);
184 } 184 }
185 185
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
207 207
208 name = ch7xxx_get_id(vendor); 208 name = ch7xxx_get_id(vendor);
209 if (!name) { 209 if (!name) {
210 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 210 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
211 "slave %d.\n",
211 vendor, adapter->name, dvo->slave_addr); 212 vendor, adapter->name, dvo->slave_addr);
212 goto out; 213 goto out;
213 } 214 }
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
217 goto out; 218 goto out;
218 219
219 if (device != CH7xxx_DID) { 220 if (device != CH7xxx_DID) {
220 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 221 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
222 "slave %d.\n",
221 vendor, adapter->name, dvo->slave_addr); 223 vendor, adapter->name, dvo->slave_addr);
222 goto out; 224 goto out;
223 } 225 }
224 226
225 ch7xxx->quiet = false; 227 ch7xxx->quiet = false;
226 DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", 228 DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
227 name, vendor, device); 229 name, vendor, device);
228 return true; 230 return true;
229out: 231out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
315 317
316 for (i = 0; i < CH7xxx_NUM_REGS; i++) { 318 for (i = 0; i < CH7xxx_NUM_REGS; i++) {
317 if ((i % 8) == 0 ) 319 if ((i % 8) == 0 )
318 DRM_DEBUG("\n %02X: ", i); 320 DRM_LOG_KMS("\n %02X: ", i);
319 DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]); 321 DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
320 } 322 }
321} 323}
322 324
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921fe..24169e528f0f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
202 }; 202 };
203 203
204 if (!priv->quiet) { 204 if (!priv->quiet) {
205 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 205 DRM_DEBUG_KMS("Unable to read register 0x%02x from "
206 "%s:%02x.\n",
206 addr, i2cbus->adapter.name, dvo->slave_addr); 207 addr, i2cbus->adapter.name, dvo->slave_addr);
207 } 208 }
208 return false; 209 return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
230 return true; 231 return true;
231 232
232 if (!priv->quiet) { 233 if (!priv->quiet) {
233 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 234 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
234 addr, i2cbus->adapter.name, dvo->slave_addr); 235 addr, i2cbus->adapter.name, dvo->slave_addr);
235 } 236 }
236 237
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
261 * the address it's responding on. 262 * the address it's responding on.
262 */ 263 */
263 if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { 264 if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
264 DRM_DEBUG("ivch detect failed due to address mismatch " 265 DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
265 "(%d vs %d)\n", 266 "(%d vs %d)\n",
266 (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); 267 (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
267 goto out; 268 goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
367 uint16_t val; 368 uint16_t val;
368 369
369 ivch_read(dvo, VR00, &val); 370 ivch_read(dvo, VR00, &val);
370 DRM_DEBUG("VR00: 0x%04x\n", val); 371 DRM_LOG_KMS("VR00: 0x%04x\n", val);
371 ivch_read(dvo, VR01, &val); 372 ivch_read(dvo, VR01, &val);
372 DRM_DEBUG("VR01: 0x%04x\n", val); 373 DRM_LOG_KMS("VR01: 0x%04x\n", val);
373 ivch_read(dvo, VR30, &val); 374 ivch_read(dvo, VR30, &val);
374 DRM_DEBUG("VR30: 0x%04x\n", val); 375 DRM_LOG_KMS("VR30: 0x%04x\n", val);
375 ivch_read(dvo, VR40, &val); 376 ivch_read(dvo, VR40, &val);
376 DRM_DEBUG("VR40: 0x%04x\n", val); 377 DRM_LOG_KMS("VR40: 0x%04x\n", val);
377 378
378 /* GPIO registers */ 379 /* GPIO registers */
379 ivch_read(dvo, VR80, &val); 380 ivch_read(dvo, VR80, &val);
380 DRM_DEBUG("VR80: 0x%04x\n", val); 381 DRM_LOG_KMS("VR80: 0x%04x\n", val);
381 ivch_read(dvo, VR81, &val); 382 ivch_read(dvo, VR81, &val);
382 DRM_DEBUG("VR81: 0x%04x\n", val); 383 DRM_LOG_KMS("VR81: 0x%04x\n", val);
383 ivch_read(dvo, VR82, &val); 384 ivch_read(dvo, VR82, &val);
384 DRM_DEBUG("VR82: 0x%04x\n", val); 385 DRM_LOG_KMS("VR82: 0x%04x\n", val);
385 ivch_read(dvo, VR83, &val); 386 ivch_read(dvo, VR83, &val);
386 DRM_DEBUG("VR83: 0x%04x\n", val); 387 DRM_LOG_KMS("VR83: 0x%04x\n", val);
387 ivch_read(dvo, VR84, &val); 388 ivch_read(dvo, VR84, &val);
388 DRM_DEBUG("VR84: 0x%04x\n", val); 389 DRM_LOG_KMS("VR84: 0x%04x\n", val);
389 ivch_read(dvo, VR85, &val); 390 ivch_read(dvo, VR85, &val);
390 DRM_DEBUG("VR85: 0x%04x\n", val); 391 DRM_LOG_KMS("VR85: 0x%04x\n", val);
391 ivch_read(dvo, VR86, &val); 392 ivch_read(dvo, VR86, &val);
392 DRM_DEBUG("VR86: 0x%04x\n", val); 393 DRM_LOG_KMS("VR86: 0x%04x\n", val);
393 ivch_read(dvo, VR87, &val); 394 ivch_read(dvo, VR87, &val);
394 DRM_DEBUG("VR87: 0x%04x\n", val); 395 DRM_LOG_KMS("VR87: 0x%04x\n", val);
395 ivch_read(dvo, VR88, &val); 396 ivch_read(dvo, VR88, &val);
396 DRM_DEBUG("VR88: 0x%04x\n", val); 397 DRM_LOG_KMS("VR88: 0x%04x\n", val);
397 398
398 /* Scratch register 0 - AIM Panel type */ 399 /* Scratch register 0 - AIM Panel type */
399 ivch_read(dvo, VR8E, &val); 400 ivch_read(dvo, VR8E, &val);
400 DRM_DEBUG("VR8E: 0x%04x\n", val); 401 DRM_LOG_KMS("VR8E: 0x%04x\n", val);
401 402
402 /* Scratch register 1 - Status register */ 403 /* Scratch register 1 - Status register */
403 ivch_read(dvo, VR8F, &val); 404 ivch_read(dvo, VR8F, &val);
404 DRM_DEBUG("VR8F: 0x%04x\n", val); 405 DRM_LOG_KMS("VR8F: 0x%04x\n", val);
405} 406}
406 407
407static void ivch_save(struct intel_dvo_device *dvo) 408static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5c..0001c13f0a80 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
105 }; 105 };
106 106
107 if (!sil->quiet) { 107 if (!sil->quiet) {
108 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 108 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
109 addr, i2cbus->adapter.name, dvo->slave_addr); 109 addr, i2cbus->adapter.name, dvo->slave_addr);
110 } 110 }
111 return false; 111 return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
131 return true; 131 return true;
132 132
133 if (!sil->quiet) { 133 if (!sil->quiet) {
134 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 134 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
135 addr, i2cbus->adapter.name, dvo->slave_addr); 135 addr, i2cbus->adapter.name, dvo->slave_addr);
136 } 136 }
137 137
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
158 goto out; 158 goto out;
159 159
160 if (ch != (SIL164_VID & 0xff)) { 160 if (ch != (SIL164_VID & 0xff)) {
161 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 161 DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
162 ch, adapter->name, dvo->slave_addr); 162 ch, adapter->name, dvo->slave_addr);
163 goto out; 163 goto out;
164 } 164 }
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
167 goto out; 167 goto out;
168 168
169 if (ch != (SIL164_DID & 0xff)) { 169 if (ch != (SIL164_DID & 0xff)) {
170 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 170 DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
171 ch, adapter->name, dvo->slave_addr); 171 ch, adapter->name, dvo->slave_addr);
172 goto out; 172 goto out;
173 } 173 }
174 sil->quiet = false; 174 sil->quiet = false;
175 175
176 DRM_DEBUG("init sil164 dvo controller successfully!\n"); 176 DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
177 return true; 177 return true;
178 178
179out: 179out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
241 uint8_t val; 241 uint8_t val;
242 242
243 sil164_readb(dvo, SIL164_FREQ_LO, &val); 243 sil164_readb(dvo, SIL164_FREQ_LO, &val);
244 DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val); 244 DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
245 sil164_readb(dvo, SIL164_FREQ_HI, &val); 245 sil164_readb(dvo, SIL164_FREQ_HI, &val);
246 DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val); 246 DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
247 sil164_readb(dvo, SIL164_REG8, &val); 247 sil164_readb(dvo, SIL164_REG8, &val);
248 DRM_DEBUG("SIL164_REG8: 0x%02x\n", val); 248 DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
249 sil164_readb(dvo, SIL164_REG9, &val); 249 sil164_readb(dvo, SIL164_REG9, &val);
250 DRM_DEBUG("SIL164_REG9: 0x%02x\n", val); 250 DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
251 sil164_readb(dvo, SIL164_REGC, &val); 251 sil164_readb(dvo, SIL164_REGC, &val);
252 DRM_DEBUG("SIL164_REGC: 0x%02x\n", val); 252 DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
253} 253}
254 254
255static void sil164_save(struct intel_dvo_device *dvo) 255static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384ec..c7c391bc116a 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
130 }; 130 };
131 131
132 if (!tfp->quiet) { 132 if (!tfp->quiet) {
133 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 133 DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
134 addr, i2cbus->adapter.name, dvo->slave_addr); 134 addr, i2cbus->adapter.name, dvo->slave_addr);
135 } 135 }
136 return false; 136 return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
156 return true; 156 return true;
157 157
158 if (!tfp->quiet) { 158 if (!tfp->quiet) {
159 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 159 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
160 addr, i2cbus->adapter.name, dvo->slave_addr); 160 addr, i2cbus->adapter.name, dvo->slave_addr);
161 } 161 }
162 162
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
191 tfp->quiet = true; 191 tfp->quiet = true;
192 192
193 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { 193 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
194 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", 194 DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
195 "Slave %d.\n",
195 id, adapter->name, dvo->slave_addr); 196 id, adapter->name, dvo->slave_addr);
196 goto out; 197 goto out;
197 } 198 }
198 199
199 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { 200 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
200 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", 201 DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
202 "Slave %d.\n",
201 id, adapter->name, dvo->slave_addr); 203 id, adapter->name, dvo->slave_addr);
202 goto out; 204 goto out;
203 } 205 }
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
262 uint8_t val, val2; 264 uint8_t val, val2;
263 265
264 tfp410_readb(dvo, TFP410_REV, &val); 266 tfp410_readb(dvo, TFP410_REV, &val);
265 DRM_DEBUG("TFP410_REV: 0x%02X\n", val); 267 DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
266 tfp410_readb(dvo, TFP410_CTL_1, &val); 268 tfp410_readb(dvo, TFP410_CTL_1, &val);
267 DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val); 269 DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
268 tfp410_readb(dvo, TFP410_CTL_2, &val); 270 tfp410_readb(dvo, TFP410_CTL_2, &val);
269 DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val); 271 DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
270 tfp410_readb(dvo, TFP410_CTL_3, &val); 272 tfp410_readb(dvo, TFP410_CTL_3, &val);
271 DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val); 273 DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
272 tfp410_readb(dvo, TFP410_USERCFG, &val); 274 tfp410_readb(dvo, TFP410_USERCFG, &val);
273 DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val); 275 DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
274 tfp410_readb(dvo, TFP410_DE_DLY, &val); 276 tfp410_readb(dvo, TFP410_DE_DLY, &val);
275 DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val); 277 DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
276 tfp410_readb(dvo, TFP410_DE_CTL, &val); 278 tfp410_readb(dvo, TFP410_DE_CTL, &val);
277 DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val); 279 DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
278 tfp410_readb(dvo, TFP410_DE_TOP, &val); 280 tfp410_readb(dvo, TFP410_DE_TOP, &val);
279 DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val); 281 DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
280 tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); 282 tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
281 tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); 283 tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
282 DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); 284 DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
283 tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); 285 tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
284 tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); 286 tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
285 DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); 287 DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
286 tfp410_readb(dvo, TFP410_H_RES_LO, &val); 288 tfp410_readb(dvo, TFP410_H_RES_LO, &val);
287 tfp410_readb(dvo, TFP410_H_RES_HI, &val2); 289 tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
288 DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val); 290 DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
289 tfp410_readb(dvo, TFP410_V_RES_LO, &val); 291 tfp410_readb(dvo, TFP410_V_RES_LO, &val);
290 tfp410_readb(dvo, TFP410_V_RES_HI, &val2); 292 tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
291 DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val); 293 DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
292} 294}
293 295
294static void tfp410_save(struct intel_dvo_device *dvo) 296static void tfp410_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3cb..a0b8447b06e7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
27 */ 27 */
28 28
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/debugfs.h>
31#include <linux/slab.h>
30#include "drmP.h" 32#include "drmP.h"
31#include "drm.h" 33#include "drm.h"
32#include "i915_drm.h" 34#include "i915_drm.h"
@@ -96,13 +98,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
96 { 98 {
97 struct drm_gem_object *obj = obj_priv->obj; 99 struct drm_gem_object *obj = obj_priv->obj;
98 100
99 seq_printf(m, " %p: %s %8zd %08x %08x %d %s", 101 seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
100 obj, 102 obj,
101 get_pin_flag(obj_priv), 103 get_pin_flag(obj_priv),
102 obj->size, 104 obj->size,
103 obj->read_domains, obj->write_domain, 105 obj->read_domains, obj->write_domain,
104 obj_priv->last_rendering_seqno, 106 obj_priv->last_rendering_seqno,
105 obj_priv->dirty ? "dirty" : ""); 107 obj_priv->dirty ? " dirty" : "",
108 obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
106 109
107 if (obj->name) 110 if (obj->name)
108 seq_printf(m, " (name: %d)", obj->name); 111 seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +163,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
160 struct drm_device *dev = node->minor->dev; 163 struct drm_device *dev = node->minor->dev;
161 drm_i915_private_t *dev_priv = dev->dev_private; 164 drm_i915_private_t *dev_priv = dev->dev_private;
162 165
163 if (!IS_IGDNG(dev)) { 166 if (!HAS_PCH_SPLIT(dev)) {
164 seq_printf(m, "Interrupt enable: %08x\n", 167 seq_printf(m, "Interrupt enable: %08x\n",
165 I915_READ(IER)); 168 I915_READ(IER));
166 seq_printf(m, "Interrupt identity: %08x\n", 169 seq_printf(m, "Interrupt identity: %08x\n",
@@ -223,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
223 } else { 226 } else {
224 struct drm_i915_gem_object *obj_priv; 227 struct drm_i915_gem_object *obj_priv;
225 228
226 obj_priv = obj->driver_private; 229 obj_priv = to_intel_bo(obj);
227 seq_printf(m, "Fenced object[%2d] = %p: %s " 230 seq_printf(m, "Fenced object[%2d] = %p: %s "
228 "%08x %08zx %08x %s %08x %08x %d", 231 "%08x %08zx %08x %s %08x %08x %d",
229 i, obj, get_pin_flag(obj_priv), 232 i, obj, get_pin_flag(obj_priv),
@@ -270,7 +273,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
270 mem = kmap_atomic(pages[page], KM_USER0); 273 mem = kmap_atomic(pages[page], KM_USER0);
271 for (i = 0; i < PAGE_SIZE; i += 4) 274 for (i = 0; i < PAGE_SIZE; i += 4)
272 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 275 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
273 kunmap_atomic(pages[page], KM_USER0); 276 kunmap_atomic(mem, KM_USER0);
274 } 277 }
275} 278}
276 279
@@ -288,7 +291,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
288 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 291 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
289 obj = obj_priv->obj; 292 obj = obj_priv->obj;
290 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 293 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
291 ret = i915_gem_object_get_pages(obj); 294 ret = i915_gem_object_get_pages(obj, 0);
292 if (ret) { 295 if (ret) {
293 DRM_ERROR("Failed to get pages: %d\n", ret); 296 DRM_ERROR("Failed to get pages: %d\n", ret);
294 spin_unlock(&dev_priv->mm.active_list_lock); 297 spin_unlock(&dev_priv->mm.active_list_lock);
@@ -348,6 +351,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
348 return 0; 351 return 0;
349} 352}
350 353
354static const char *pin_flag(int pinned)
355{
356 if (pinned > 0)
357 return " P";
358 else if (pinned < 0)
359 return " p";
360 else
361 return "";
362}
363
364static const char *tiling_flag(int tiling)
365{
366 switch (tiling) {
367 default:
368 case I915_TILING_NONE: return "";
369 case I915_TILING_X: return " X";
370 case I915_TILING_Y: return " Y";
371 }
372}
373
374static const char *dirty_flag(int dirty)
375{
376 return dirty ? " dirty" : "";
377}
378
379static const char *purgeable_flag(int purgeable)
380{
381 return purgeable ? " purgeable" : "";
382}
383
351static int i915_error_state(struct seq_file *m, void *unused) 384static int i915_error_state(struct seq_file *m, void *unused)
352{ 385{
353 struct drm_info_node *node = (struct drm_info_node *) m->private; 386 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -355,6 +388,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
355 drm_i915_private_t *dev_priv = dev->dev_private; 388 drm_i915_private_t *dev_priv = dev->dev_private;
356 struct drm_i915_error_state *error; 389 struct drm_i915_error_state *error;
357 unsigned long flags; 390 unsigned long flags;
391 int i, page, offset, elt;
358 392
359 spin_lock_irqsave(&dev_priv->error_lock, flags); 393 spin_lock_irqsave(&dev_priv->error_lock, flags);
360 if (!dev_priv->first_error) { 394 if (!dev_priv->first_error) {
@@ -366,6 +400,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
366 400
367 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 401 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
368 error->time.tv_usec); 402 error->time.tv_usec);
403 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
369 seq_printf(m, "EIR: 0x%08x\n", error->eir); 404 seq_printf(m, "EIR: 0x%08x\n", error->eir);
370 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); 405 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
371 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 406 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -377,6 +412,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
377 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 412 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
378 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 413 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
379 } 414 }
415 seq_printf(m, "seqno: 0x%08x\n", error->seqno);
416
417 if (error->active_bo_count) {
418 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
419
420 for (i = 0; i < error->active_bo_count; i++) {
421 seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
422 error->active_bo[i].gtt_offset,
423 error->active_bo[i].size,
424 error->active_bo[i].read_domains,
425 error->active_bo[i].write_domain,
426 error->active_bo[i].seqno,
427 pin_flag(error->active_bo[i].pinned),
428 tiling_flag(error->active_bo[i].tiling),
429 dirty_flag(error->active_bo[i].dirty),
430 purgeable_flag(error->active_bo[i].purgeable));
431
432 if (error->active_bo[i].name)
433 seq_printf(m, " (name: %d)", error->active_bo[i].name);
434 if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
435 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
436
437 seq_printf(m, "\n");
438 }
439 }
440
441 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
442 if (error->batchbuffer[i]) {
443 struct drm_i915_error_object *obj = error->batchbuffer[i];
444
445 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
446 offset = 0;
447 for (page = 0; page < obj->page_count; page++) {
448 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
449 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
450 offset += 4;
451 }
452 }
453 }
454 }
455
456 if (error->ringbuffer) {
457 struct drm_i915_error_object *obj = error->ringbuffer;
458
459 seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
460 offset = 0;
461 for (page = 0; page < obj->page_count; page++) {
462 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
463 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
464 offset += 4;
465 }
466 }
467 }
380 468
381out: 469out:
382 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 470 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -384,37 +472,270 @@ out:
384 return 0; 472 return 0;
385} 473}
386 474
387static int i915_registers_info(struct seq_file *m, void *data) { 475static int i915_rstdby_delays(struct seq_file *m, void *unused)
476{
388 struct drm_info_node *node = (struct drm_info_node *) m->private; 477 struct drm_info_node *node = (struct drm_info_node *) m->private;
389 struct drm_device *dev = node->minor->dev; 478 struct drm_device *dev = node->minor->dev;
390 drm_i915_private_t *dev_priv = dev->dev_private; 479 drm_i915_private_t *dev_priv = dev->dev_private;
391 uint32_t reg; 480 u16 crstanddelay = I915_READ16(CRSTANDVID);
392 481
393#define DUMP_RANGE(start, end) \ 482 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
394 for (reg=start; reg < end; reg += 4) \
395 seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
396
397 DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
398 DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
399 DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
400 DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
401 DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
402 DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
403 DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
404 DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
405 DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
406 DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
407 DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
408 DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
409 DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
410 DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
411 483
412 return 0; 484 return 0;
413} 485}
414 486
487static int i915_cur_delayinfo(struct seq_file *m, void *unused)
488{
489 struct drm_info_node *node = (struct drm_info_node *) m->private;
490 struct drm_device *dev = node->minor->dev;
491 drm_i915_private_t *dev_priv = dev->dev_private;
492 u16 rgvswctl = I915_READ16(MEMSWCTL);
493
494 seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
495 seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
496 seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
497 rgvswctl & 0x3f);
498
499 return 0;
500}
501
502static int i915_delayfreq_table(struct seq_file *m, void *unused)
503{
504 struct drm_info_node *node = (struct drm_info_node *) m->private;
505 struct drm_device *dev = node->minor->dev;
506 drm_i915_private_t *dev_priv = dev->dev_private;
507 u32 delayfreq;
508 int i;
509
510 for (i = 0; i < 16; i++) {
511 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
512 seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
513 }
514
515 return 0;
516}
517
518static inline int MAP_TO_MV(int map)
519{
520 return 1250 - (map * 25);
521}
522
523static int i915_inttoext_table(struct seq_file *m, void *unused)
524{
525 struct drm_info_node *node = (struct drm_info_node *) m->private;
526 struct drm_device *dev = node->minor->dev;
527 drm_i915_private_t *dev_priv = dev->dev_private;
528 u32 inttoext;
529 int i;
530
531 for (i = 1; i <= 32; i++) {
532 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
533 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
534 }
535
536 return 0;
537}
538
539static int i915_drpc_info(struct seq_file *m, void *unused)
540{
541 struct drm_info_node *node = (struct drm_info_node *) m->private;
542 struct drm_device *dev = node->minor->dev;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544 u32 rgvmodectl = I915_READ(MEMMODECTL);
545
546 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
547 "yes" : "no");
548 seq_printf(m, "Boost freq: %d\n",
549 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
550 MEMMODE_BOOST_FREQ_SHIFT);
551 seq_printf(m, "HW control enabled: %s\n",
552 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
553 seq_printf(m, "SW control enabled: %s\n",
554 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
555 seq_printf(m, "Gated voltage change: %s\n",
556 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
557 seq_printf(m, "Starting frequency: P%d\n",
558 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
559 seq_printf(m, "Max frequency: P%d\n",
560 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
561 seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
562
563 return 0;
564}
565
566static int i915_fbc_status(struct seq_file *m, void *unused)
567{
568 struct drm_info_node *node = (struct drm_info_node *) m->private;
569 struct drm_device *dev = node->minor->dev;
570 struct drm_crtc *crtc;
571 drm_i915_private_t *dev_priv = dev->dev_private;
572 bool fbc_enabled = false;
573
574 if (!dev_priv->display.fbc_enabled) {
575 seq_printf(m, "FBC unsupported on this chipset\n");
576 return 0;
577 }
578
579 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
580 if (!crtc->enabled)
581 continue;
582 if (dev_priv->display.fbc_enabled(crtc))
583 fbc_enabled = true;
584 }
585
586 if (fbc_enabled) {
587 seq_printf(m, "FBC enabled\n");
588 } else {
589 seq_printf(m, "FBC disabled: ");
590 switch (dev_priv->no_fbc_reason) {
591 case FBC_STOLEN_TOO_SMALL:
592 seq_printf(m, "not enough stolen memory");
593 break;
594 case FBC_UNSUPPORTED_MODE:
595 seq_printf(m, "mode not supported");
596 break;
597 case FBC_MODE_TOO_LARGE:
598 seq_printf(m, "mode too large");
599 break;
600 case FBC_BAD_PLANE:
601 seq_printf(m, "FBC unsupported on plane");
602 break;
603 case FBC_NOT_TILED:
604 seq_printf(m, "scanout buffer not tiled");
605 break;
606 default:
607 seq_printf(m, "unknown reason");
608 }
609 seq_printf(m, "\n");
610 }
611 return 0;
612}
613
614static int i915_sr_status(struct seq_file *m, void *unused)
615{
616 struct drm_info_node *node = (struct drm_info_node *) m->private;
617 struct drm_device *dev = node->minor->dev;
618 drm_i915_private_t *dev_priv = dev->dev_private;
619 bool sr_enabled = false;
620
621 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
622 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
623 else if (IS_I915GM(dev))
624 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
625 else if (IS_PINEVIEW(dev))
626 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
627
628 seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
629 "disabled");
630
631 return 0;
632}
633
634static int
635i915_wedged_open(struct inode *inode,
636 struct file *filp)
637{
638 filp->private_data = inode->i_private;
639 return 0;
640}
641
642static ssize_t
643i915_wedged_read(struct file *filp,
644 char __user *ubuf,
645 size_t max,
646 loff_t *ppos)
647{
648 struct drm_device *dev = filp->private_data;
649 drm_i915_private_t *dev_priv = dev->dev_private;
650 char buf[80];
651 int len;
652
653 len = snprintf(buf, sizeof (buf),
654 "wedged : %d\n",
655 atomic_read(&dev_priv->mm.wedged));
656
657 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
658}
659
660static ssize_t
661i915_wedged_write(struct file *filp,
662 const char __user *ubuf,
663 size_t cnt,
664 loff_t *ppos)
665{
666 struct drm_device *dev = filp->private_data;
667 drm_i915_private_t *dev_priv = dev->dev_private;
668 char buf[20];
669 int val = 1;
670
671 if (cnt > 0) {
672 if (cnt > sizeof (buf) - 1)
673 return -EINVAL;
674
675 if (copy_from_user(buf, ubuf, cnt))
676 return -EFAULT;
677 buf[cnt] = 0;
678
679 val = simple_strtoul(buf, NULL, 0);
680 }
681
682 DRM_INFO("Manually setting wedged to %d\n", val);
683
684 atomic_set(&dev_priv->mm.wedged, val);
685 if (val) {
686 DRM_WAKEUP(&dev_priv->irq_queue);
687 queue_work(dev_priv->wq, &dev_priv->error_work);
688 }
689
690 return cnt;
691}
692
693static const struct file_operations i915_wedged_fops = {
694 .owner = THIS_MODULE,
695 .open = i915_wedged_open,
696 .read = i915_wedged_read,
697 .write = i915_wedged_write,
698};
699
700/* As the drm_debugfs_init() routines are called before dev->dev_private is
701 * allocated we need to hook into the minor for release. */
702static int
703drm_add_fake_info_node(struct drm_minor *minor,
704 struct dentry *ent,
705 const void *key)
706{
707 struct drm_info_node *node;
708
709 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
710 if (node == NULL) {
711 debugfs_remove(ent);
712 return -ENOMEM;
713 }
714
715 node->minor = minor;
716 node->dent = ent;
717 node->info_ent = (void *) key;
718 list_add(&node->list, &minor->debugfs_nodes.list);
719
720 return 0;
721}
722
723static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
724{
725 struct drm_device *dev = minor->dev;
726 struct dentry *ent;
727
728 ent = debugfs_create_file("i915_wedged",
729 S_IRUGO | S_IWUSR,
730 root, dev,
731 &i915_wedged_fops);
732 if (IS_ERR(ent))
733 return PTR_ERR(ent);
734
735 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
736}
415 737
416static struct drm_info_list i915_debugfs_list[] = { 738static struct drm_info_list i915_debugfs_list[] = {
417 {"i915_regs", i915_registers_info, 0},
418 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 739 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
419 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 740 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
420 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 741 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -427,11 +748,24 @@ static struct drm_info_list i915_debugfs_list[] = {
427 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 748 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
428 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 749 {"i915_batchbuffers", i915_batchbuffer_info, 0},
429 {"i915_error_state", i915_error_state, 0}, 750 {"i915_error_state", i915_error_state, 0},
751 {"i915_rstdby_delays", i915_rstdby_delays, 0},
752 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
753 {"i915_delayfreq_table", i915_delayfreq_table, 0},
754 {"i915_inttoext_table", i915_inttoext_table, 0},
755 {"i915_drpc_info", i915_drpc_info, 0},
756 {"i915_fbc_status", i915_fbc_status, 0},
757 {"i915_sr_status", i915_sr_status, 0},
430}; 758};
431#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 759#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
432 760
433int i915_debugfs_init(struct drm_minor *minor) 761int i915_debugfs_init(struct drm_minor *minor)
434{ 762{
763 int ret;
764
765 ret = i915_wedged_create(minor->debugfs_root, minor);
766 if (ret)
767 return ret;
768
435 return drm_debugfs_create_files(i915_debugfs_list, 769 return drm_debugfs_create_files(i915_debugfs_list,
436 I915_DEBUGFS_ENTRIES, 770 I915_DEBUGFS_ENTRIES,
437 minor->debugfs_root, minor); 771 minor->debugfs_root, minor);
@@ -441,7 +775,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
441{ 775{
442 drm_debugfs_remove_files(i915_debugfs_list, 776 drm_debugfs_remove_files(i915_debugfs_list,
443 I915_DEBUGFS_ENTRIES, minor); 777 I915_DEBUGFS_ENTRIES, minor);
778 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
779 1, minor);
444} 780}
445 781
446#endif /* CONFIG_DEBUG_FS */ 782#endif /* CONFIG_DEBUG_FS */
447
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45fa..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,10 @@
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "i915_trace.h" 36#include "i915_trace.h"
37#include <linux/vgaarb.h> 37#include <linux/vgaarb.h>
38#include <linux/acpi.h>
39#include <linux/pnp.h>
40#include <linux/vga_switcheroo.h>
41#include <linux/slab.h>
38 42
39/* Really want an OS-independent resettable timer. Would like to have 43/* Really want an OS-independent resettable timer. Would like to have
40 * this loop run for (eg) 3 sec, but have the timer reset every time 44 * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -123,7 +127,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
123 drm_i915_private_t *dev_priv = dev->dev_private; 127 drm_i915_private_t *dev_priv = dev->dev_private;
124 /* Program Hardware Status Page */ 128 /* Program Hardware Status Page */
125 dev_priv->status_page_dmah = 129 dev_priv->status_page_dmah =
126 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 130 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
127 131
128 if (!dev_priv->status_page_dmah) { 132 if (!dev_priv->status_page_dmah) {
129 DRM_ERROR("Can not allocate hardware status page\n"); 133 DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +138,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
134 138
135 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 139 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
136 140
141 if (IS_I965G(dev))
142 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
143 0xf0;
144
137 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 145 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
138 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 146 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
139 return 0; 147 return 0;
@@ -731,8 +739,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
731 if (cmdbuf->num_cliprects) { 739 if (cmdbuf->num_cliprects) {
732 cliprects = kcalloc(cmdbuf->num_cliprects, 740 cliprects = kcalloc(cmdbuf->num_cliprects,
733 sizeof(struct drm_clip_rect), GFP_KERNEL); 741 sizeof(struct drm_clip_rect), GFP_KERNEL);
734 if (cliprects == NULL) 742 if (cliprects == NULL) {
743 ret = -ENOMEM;
735 goto fail_batch_free; 744 goto fail_batch_free;
745 }
736 746
737 ret = copy_from_user(cliprects, cmdbuf->cliprects, 747 ret = copy_from_user(cliprects, cmdbuf->cliprects,
738 cmdbuf->num_cliprects * 748 cmdbuf->num_cliprects *
@@ -807,9 +817,19 @@ static int i915_getparam(struct drm_device *dev, void *data,
807 case I915_PARAM_NUM_FENCES_AVAIL: 817 case I915_PARAM_NUM_FENCES_AVAIL:
808 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 818 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
809 break; 819 break;
820 case I915_PARAM_HAS_OVERLAY:
821 value = dev_priv->overlay ? 1 : 0;
822 break;
823 case I915_PARAM_HAS_PAGEFLIPPING:
824 value = 1;
825 break;
826 case I915_PARAM_HAS_EXECBUF2:
827 /* depends on GEM */
828 value = dev_priv->has_gem;
829 break;
810 default: 830 default:
811 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 831 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
812 param->param); 832 param->param);
813 return -EINVAL; 833 return -EINVAL;
814 } 834 }
815 835
@@ -917,6 +937,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
917 return 0; 937 return 0;
918} 938}
919 939
940#define MCHBAR_I915 0x44
941#define MCHBAR_I965 0x48
942#define MCHBAR_SIZE (4*4096)
943
944#define DEVEN_REG 0x54
945#define DEVEN_MCHBAR_EN (1 << 28)
946
947/* Allocate space for the MCH regs if needed, return nonzero on error */
948static int
949intel_alloc_mchbar_resource(struct drm_device *dev)
950{
951 drm_i915_private_t *dev_priv = dev->dev_private;
952 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
953 u32 temp_lo, temp_hi = 0;
954 u64 mchbar_addr;
955 int ret = 0;
956
957 if (IS_I965G(dev))
958 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
959 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
960 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
961
962 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
963#ifdef CONFIG_PNP
964 if (mchbar_addr &&
965 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
966 ret = 0;
967 goto out;
968 }
969#endif
970
971 /* Get some space for it */
972 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
973 MCHBAR_SIZE, MCHBAR_SIZE,
974 PCIBIOS_MIN_MEM,
975 0, pcibios_align_resource,
976 dev_priv->bridge_dev);
977 if (ret) {
978 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
979 dev_priv->mch_res.start = 0;
980 goto out;
981 }
982
983 if (IS_I965G(dev))
984 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
985 upper_32_bits(dev_priv->mch_res.start));
986
987 pci_write_config_dword(dev_priv->bridge_dev, reg,
988 lower_32_bits(dev_priv->mch_res.start));
989out:
990 return ret;
991}
992
993/* Setup MCHBAR if possible, return true if we should disable it again */
994static void
995intel_setup_mchbar(struct drm_device *dev)
996{
997 drm_i915_private_t *dev_priv = dev->dev_private;
998 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
999 u32 temp;
1000 bool enabled;
1001
1002 dev_priv->mchbar_need_disable = false;
1003
1004 if (IS_I915G(dev) || IS_I915GM(dev)) {
1005 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1006 enabled = !!(temp & DEVEN_MCHBAR_EN);
1007 } else {
1008 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1009 enabled = temp & 1;
1010 }
1011
1012 /* If it's already enabled, don't have to do anything */
1013 if (enabled)
1014 return;
1015
1016 if (intel_alloc_mchbar_resource(dev))
1017 return;
1018
1019 dev_priv->mchbar_need_disable = true;
1020
1021 /* Space is allocated or reserved, so enable it. */
1022 if (IS_I915G(dev) || IS_I915GM(dev)) {
1023 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
1024 temp | DEVEN_MCHBAR_EN);
1025 } else {
1026 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1027 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
1028 }
1029}
1030
1031static void
1032intel_teardown_mchbar(struct drm_device *dev)
1033{
1034 drm_i915_private_t *dev_priv = dev->dev_private;
1035 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
1036 u32 temp;
1037
1038 if (dev_priv->mchbar_need_disable) {
1039 if (IS_I915G(dev) || IS_I915GM(dev)) {
1040 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1041 temp &= ~DEVEN_MCHBAR_EN;
1042 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1043 } else {
1044 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1045 temp &= ~1;
1046 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1047 }
1048 }
1049
1050 if (dev_priv->mch_res.start)
1051 release_resource(&dev_priv->mch_res);
1052}
1053
920/** 1054/**
921 * i915_probe_agp - get AGP bootup configuration 1055 * i915_probe_agp - get AGP bootup configuration
922 * @pdev: PCI device 1056 * @pdev: PCI device
@@ -962,59 +1096,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
962 * Some of the preallocated space is taken by the GTT 1096 * Some of the preallocated space is taken by the GTT
963 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 1097 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
964 */ 1098 */
965 if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev)) 1099 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
966 overhead = 4096; 1100 overhead = 4096;
967 else 1101 else
968 overhead = (*aperture_size / 1024) + 4096; 1102 overhead = (*aperture_size / 1024) + 4096;
969 1103
970 switch (tmp & INTEL_GMCH_GMS_MASK) { 1104 if (IS_GEN6(dev)) {
971 case INTEL_855_GMCH_GMS_DISABLED: 1105 /* SNB has memory control reg at 0x50.w */
972 DRM_ERROR("video memory is disabled\n"); 1106 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
973 return -1; 1107
974 case INTEL_855_GMCH_GMS_STOLEN_1M: 1108 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
975 stolen = 1 * 1024 * 1024; 1109 case INTEL_855_GMCH_GMS_DISABLED:
976 break; 1110 DRM_ERROR("video memory is disabled\n");
977 case INTEL_855_GMCH_GMS_STOLEN_4M: 1111 return -1;
978 stolen = 4 * 1024 * 1024; 1112 case SNB_GMCH_GMS_STOLEN_32M:
979 break; 1113 stolen = 32 * 1024 * 1024;
980 case INTEL_855_GMCH_GMS_STOLEN_8M: 1114 break;
981 stolen = 8 * 1024 * 1024; 1115 case SNB_GMCH_GMS_STOLEN_64M:
982 break; 1116 stolen = 64 * 1024 * 1024;
983 case INTEL_855_GMCH_GMS_STOLEN_16M: 1117 break;
984 stolen = 16 * 1024 * 1024; 1118 case SNB_GMCH_GMS_STOLEN_96M:
985 break; 1119 stolen = 96 * 1024 * 1024;
986 case INTEL_855_GMCH_GMS_STOLEN_32M: 1120 break;
987 stolen = 32 * 1024 * 1024; 1121 case SNB_GMCH_GMS_STOLEN_128M:
988 break; 1122 stolen = 128 * 1024 * 1024;
989 case INTEL_915G_GMCH_GMS_STOLEN_48M: 1123 break;
990 stolen = 48 * 1024 * 1024; 1124 case SNB_GMCH_GMS_STOLEN_160M:
991 break; 1125 stolen = 160 * 1024 * 1024;
992 case INTEL_915G_GMCH_GMS_STOLEN_64M: 1126 break;
993 stolen = 64 * 1024 * 1024; 1127 case SNB_GMCH_GMS_STOLEN_192M:
994 break; 1128 stolen = 192 * 1024 * 1024;
995 case INTEL_GMCH_GMS_STOLEN_128M: 1129 break;
996 stolen = 128 * 1024 * 1024; 1130 case SNB_GMCH_GMS_STOLEN_224M:
997 break; 1131 stolen = 224 * 1024 * 1024;
998 case INTEL_GMCH_GMS_STOLEN_256M: 1132 break;
999 stolen = 256 * 1024 * 1024; 1133 case SNB_GMCH_GMS_STOLEN_256M:
1000 break; 1134 stolen = 256 * 1024 * 1024;
1001 case INTEL_GMCH_GMS_STOLEN_96M: 1135 break;
1002 stolen = 96 * 1024 * 1024; 1136 case SNB_GMCH_GMS_STOLEN_288M:
1003 break; 1137 stolen = 288 * 1024 * 1024;
1004 case INTEL_GMCH_GMS_STOLEN_160M: 1138 break;
1005 stolen = 160 * 1024 * 1024; 1139 case SNB_GMCH_GMS_STOLEN_320M:
1006 break; 1140 stolen = 320 * 1024 * 1024;
1007 case INTEL_GMCH_GMS_STOLEN_224M: 1141 break;
1008 stolen = 224 * 1024 * 1024; 1142 case SNB_GMCH_GMS_STOLEN_352M:
1009 break; 1143 stolen = 352 * 1024 * 1024;
1010 case INTEL_GMCH_GMS_STOLEN_352M: 1144 break;
1011 stolen = 352 * 1024 * 1024; 1145 case SNB_GMCH_GMS_STOLEN_384M:
1012 break; 1146 stolen = 384 * 1024 * 1024;
1013 default: 1147 break;
1014 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 1148 case SNB_GMCH_GMS_STOLEN_416M:
1015 tmp & INTEL_GMCH_GMS_MASK); 1149 stolen = 416 * 1024 * 1024;
1016 return -1; 1150 break;
1151 case SNB_GMCH_GMS_STOLEN_448M:
1152 stolen = 448 * 1024 * 1024;
1153 break;
1154 case SNB_GMCH_GMS_STOLEN_480M:
1155 stolen = 480 * 1024 * 1024;
1156 break;
1157 case SNB_GMCH_GMS_STOLEN_512M:
1158 stolen = 512 * 1024 * 1024;
1159 break;
1160 default:
1161 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1162 tmp & SNB_GMCH_GMS_STOLEN_MASK);
1163 return -1;
1164 }
1165 } else {
1166 switch (tmp & INTEL_GMCH_GMS_MASK) {
1167 case INTEL_855_GMCH_GMS_DISABLED:
1168 DRM_ERROR("video memory is disabled\n");
1169 return -1;
1170 case INTEL_855_GMCH_GMS_STOLEN_1M:
1171 stolen = 1 * 1024 * 1024;
1172 break;
1173 case INTEL_855_GMCH_GMS_STOLEN_4M:
1174 stolen = 4 * 1024 * 1024;
1175 break;
1176 case INTEL_855_GMCH_GMS_STOLEN_8M:
1177 stolen = 8 * 1024 * 1024;
1178 break;
1179 case INTEL_855_GMCH_GMS_STOLEN_16M:
1180 stolen = 16 * 1024 * 1024;
1181 break;
1182 case INTEL_855_GMCH_GMS_STOLEN_32M:
1183 stolen = 32 * 1024 * 1024;
1184 break;
1185 case INTEL_915G_GMCH_GMS_STOLEN_48M:
1186 stolen = 48 * 1024 * 1024;
1187 break;
1188 case INTEL_915G_GMCH_GMS_STOLEN_64M:
1189 stolen = 64 * 1024 * 1024;
1190 break;
1191 case INTEL_GMCH_GMS_STOLEN_128M:
1192 stolen = 128 * 1024 * 1024;
1193 break;
1194 case INTEL_GMCH_GMS_STOLEN_256M:
1195 stolen = 256 * 1024 * 1024;
1196 break;
1197 case INTEL_GMCH_GMS_STOLEN_96M:
1198 stolen = 96 * 1024 * 1024;
1199 break;
1200 case INTEL_GMCH_GMS_STOLEN_160M:
1201 stolen = 160 * 1024 * 1024;
1202 break;
1203 case INTEL_GMCH_GMS_STOLEN_224M:
1204 stolen = 224 * 1024 * 1024;
1205 break;
1206 case INTEL_GMCH_GMS_STOLEN_352M:
1207 stolen = 352 * 1024 * 1024;
1208 break;
1209 default:
1210 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1211 tmp & INTEL_GMCH_GMS_MASK);
1212 return -1;
1213 }
1017 } 1214 }
1215
1018 *preallocated_size = stolen - overhead; 1216 *preallocated_size = stolen - overhead;
1019 *start = overhead; 1217 *start = overhead;
1020 1218
@@ -1048,7 +1246,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1048 int gtt_offset, gtt_size; 1246 int gtt_offset, gtt_size;
1049 1247
1050 if (IS_I965G(dev)) { 1248 if (IS_I965G(dev)) {
1051 if (IS_G4X(dev) || IS_IGDNG(dev)) { 1249 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1052 gtt_offset = 2*1024*1024; 1250 gtt_offset = 2*1024*1024;
1053 gtt_size = 2*1024*1024; 1251 gtt_size = 2*1024*1024;
1054 } else { 1252 } else {
@@ -1070,7 +1268,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1070 1268
1071 entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); 1269 entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
1072 1270
1073 DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); 1271 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
1074 1272
1075 /* Mask out these reserved bits on this hardware. */ 1273 /* Mask out these reserved bits on this hardware. */
1076 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || 1274 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1294,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1096 phys =(entry & PTE_ADDRESS_MASK) | 1294 phys =(entry & PTE_ADDRESS_MASK) |
1097 ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); 1295 ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
1098 1296
1099 DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); 1297 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
1100 1298
1101 return phys; 1299 return phys;
1102} 1300}
@@ -1111,11 +1309,13 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1111{ 1309{
1112 struct drm_i915_private *dev_priv = dev->dev_private; 1310 struct drm_i915_private *dev_priv = dev->dev_private;
1113 struct drm_mm_node *compressed_fb, *compressed_llb; 1311 struct drm_mm_node *compressed_fb, *compressed_llb;
1114 unsigned long cfb_base, ll_base; 1312 unsigned long cfb_base;
1313 unsigned long ll_base = 0;
1115 1314
1116 /* Leave 1M for line length buffer & misc. */ 1315 /* Leave 1M for line length buffer & misc. */
1117 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1316 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
1118 if (!compressed_fb) { 1317 if (!compressed_fb) {
1318 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1119 i915_warn_stolen(dev); 1319 i915_warn_stolen(dev);
1120 return; 1320 return;
1121 } 1321 }
@@ -1123,6 +1323,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1123 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1323 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1124 if (!compressed_fb) { 1324 if (!compressed_fb) {
1125 i915_warn_stolen(dev); 1325 i915_warn_stolen(dev);
1326 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1126 return; 1327 return;
1127 } 1328 }
1128 1329
@@ -1156,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1156 1357
1157 dev_priv->cfb_size = size; 1358 dev_priv->cfb_size = size;
1158 1359
1360 dev_priv->compressed_fb = compressed_fb;
1361
1159 if (IS_GM45(dev)) { 1362 if (IS_GM45(dev)) {
1160 g4x_disable_fbc(dev); 1363 g4x_disable_fbc(dev);
1161 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1364 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1163,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1163 i8xx_disable_fbc(dev); 1366 i8xx_disable_fbc(dev);
1164 I915_WRITE(FBC_CFB_BASE, cfb_base); 1367 I915_WRITE(FBC_CFB_BASE, cfb_base);
1165 I915_WRITE(FBC_LL_BASE, ll_base); 1368 I915_WRITE(FBC_LL_BASE, ll_base);
1369 dev_priv->compressed_llb = compressed_llb;
1166 } 1370 }
1167 1371
1168 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1372 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
1169 ll_base, size >> 20); 1373 ll_base, size >> 20);
1170} 1374}
1171 1375
1376static void i915_cleanup_compression(struct drm_device *dev)
1377{
1378 struct drm_i915_private *dev_priv = dev->dev_private;
1379
1380 drm_mm_put_block(dev_priv->compressed_fb);
1381 if (!IS_GM45(dev))
1382 drm_mm_put_block(dev_priv->compressed_llb);
1383}
1384
1172/* true = enable decode, false = disable decoder */ 1385/* true = enable decode, false = disable decoder */
1173static unsigned int i915_vga_set_decode(void *cookie, bool state) 1386static unsigned int i915_vga_set_decode(void *cookie, bool state)
1174{ 1387{
@@ -1182,6 +1395,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
1182 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1395 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1183} 1396}
1184 1397
1398static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1399{
1400 struct drm_device *dev = pci_get_drvdata(pdev);
1401 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1402 if (state == VGA_SWITCHEROO_ON) {
1403 printk(KERN_INFO "i915: switched off\n");
1404 /* i915 resume handler doesn't set to D0 */
1405 pci_set_power_state(dev->pdev, PCI_D0);
1406 i915_resume(dev);
1407 } else {
1408 printk(KERN_ERR "i915: switched off\n");
1409 i915_suspend(dev, pmm);
1410 }
1411}
1412
1413static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1414{
1415 struct drm_device *dev = pci_get_drvdata(pdev);
1416 bool can_switch;
1417
1418 spin_lock(&dev->count_lock);
1419 can_switch = (dev->open_count == 0);
1420 spin_unlock(&dev->count_lock);
1421 return can_switch;
1422}
1423
1185static int i915_load_modeset_init(struct drm_device *dev, 1424static int i915_load_modeset_init(struct drm_device *dev,
1186 unsigned long prealloc_start, 1425 unsigned long prealloc_start,
1187 unsigned long prealloc_size, 1426 unsigned long prealloc_size,
@@ -1194,14 +1433,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
1194 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 1433 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
1195 0xff000000; 1434 0xff000000;
1196 1435
1197 if (IS_MOBILE(dev) || IS_I9XX(dev))
1198 dev_priv->cursor_needs_physical = true;
1199 else
1200 dev_priv->cursor_needs_physical = false;
1201
1202 if (IS_I965G(dev) || IS_G33(dev))
1203 dev_priv->cursor_needs_physical = false;
1204
1205 /* Basic memrange allocator for stolen space (aka vram) */ 1436 /* Basic memrange allocator for stolen space (aka vram) */
1206 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1437 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1207 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); 1438 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1251,6 +1482,14 @@ static int i915_load_modeset_init(struct drm_device *dev,
1251 if (ret) 1482 if (ret)
1252 goto destroy_ringbuffer; 1483 goto destroy_ringbuffer;
1253 1484
1485 ret = vga_switcheroo_register_client(dev->pdev,
1486 i915_switcheroo_set_state,
1487 i915_switcheroo_can_switch);
1488 if (ret)
1489 goto destroy_ringbuffer;
1490
1491 intel_modeset_init(dev);
1492
1254 ret = drm_irq_install(dev); 1493 ret = drm_irq_install(dev);
1255 if (ret) 1494 if (ret)
1256 goto destroy_ringbuffer; 1495 goto destroy_ringbuffer;
@@ -1265,14 +1504,14 @@ static int i915_load_modeset_init(struct drm_device *dev,
1265 1504
1266 I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); 1505 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1267 1506
1268 intel_modeset_init(dev);
1269
1270 drm_helper_initial_config(dev); 1507 drm_helper_initial_config(dev);
1271 1508
1272 return 0; 1509 return 0;
1273 1510
1274destroy_ringbuffer: 1511destroy_ringbuffer:
1512 mutex_lock(&dev->struct_mutex);
1275 i915_gem_cleanup_ringbuffer(dev); 1513 i915_gem_cleanup_ringbuffer(dev);
1514 mutex_unlock(&dev->struct_mutex);
1276out: 1515out:
1277 return ret; 1516 return ret;
1278} 1517}
@@ -1306,7 +1545,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
1306 drm_i915_private_t *dev_priv = dev->dev_private; 1545 drm_i915_private_t *dev_priv = dev->dev_private;
1307 u32 tmp; 1546 u32 tmp;
1308 1547
1309 if (!IS_IGD(dev)) 1548 if (!IS_PINEVIEW(dev))
1310 return; 1549 return;
1311 1550
1312 tmp = I915_READ(CLKCFG); 1551 tmp = I915_READ(CLKCFG);
@@ -1354,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1354{ 1593{
1355 struct drm_i915_private *dev_priv = dev->dev_private; 1594 struct drm_i915_private *dev_priv = dev->dev_private;
1356 resource_size_t base, size; 1595 resource_size_t base, size;
1357 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; 1596 int ret = 0, mmio_bar;
1358 uint32_t agp_size, prealloc_size, prealloc_start; 1597 uint32_t agp_size, prealloc_size, prealloc_start;
1359 1598
1360 /* i915 has 4 more counters */ 1599 /* i915 has 4 more counters */
@@ -1370,8 +1609,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1370 1609
1371 dev->dev_private = (void *)dev_priv; 1610 dev->dev_private = (void *)dev_priv;
1372 dev_priv->dev = dev; 1611 dev_priv->dev = dev;
1612 dev_priv->info = (struct intel_device_info *) flags;
1373 1613
1374 /* Add register map (needed for suspend/resume) */ 1614 /* Add register map (needed for suspend/resume) */
1615 mmio_bar = IS_I9XX(dev) ? 0 : 1;
1375 base = drm_get_resource_start(dev, mmio_bar); 1616 base = drm_get_resource_start(dev, mmio_bar);
1376 size = drm_get_resource_len(dev, mmio_bar); 1617 size = drm_get_resource_len(dev, mmio_bar);
1377 1618
@@ -1413,7 +1654,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1413 if (ret) 1654 if (ret)
1414 goto out_iomapfree; 1655 goto out_iomapfree;
1415 1656
1416 dev_priv->wq = create_workqueue("i915"); 1657 dev_priv->wq = create_singlethread_workqueue("i915");
1417 if (dev_priv->wq == NULL) { 1658 if (dev_priv->wq == NULL) {
1418 DRM_ERROR("Failed to create our workqueue.\n"); 1659 DRM_ERROR("Failed to create our workqueue.\n");
1419 ret = -ENOMEM; 1660 ret = -ENOMEM;
@@ -1434,11 +1675,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1434 1675
1435 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1676 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1436 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1677 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1437 if (IS_G4X(dev) || IS_IGDNG(dev)) { 1678 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1438 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1679 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1439 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1680 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1440 } 1681 }
1441 1682
1683 /* Try to make sure MCHBAR is enabled before poking at it */
1684 intel_setup_mchbar(dev);
1685
1442 i915_gem_load(dev); 1686 i915_gem_load(dev);
1443 1687
1444 /* Init HWS */ 1688 /* Init HWS */
@@ -1489,9 +1733,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1489 } 1733 }
1490 1734
1491 /* Must be done after probing outputs */ 1735 /* Must be done after probing outputs */
1492 /* FIXME: verify on IGDNG */ 1736 intel_opregion_init(dev, 0);
1493 if (!IS_IGDNG(dev))
1494 intel_opregion_init(dev, 0);
1495 1737
1496 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 1738 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1497 (unsigned long) dev); 1739 (unsigned long) dev);
@@ -1514,6 +1756,8 @@ int i915_driver_unload(struct drm_device *dev)
1514{ 1756{
1515 struct drm_i915_private *dev_priv = dev->dev_private; 1757 struct drm_i915_private *dev_priv = dev->dev_private;
1516 1758
1759 i915_destroy_error_state(dev);
1760
1517 destroy_workqueue(dev_priv->wq); 1761 destroy_workqueue(dev_priv->wq);
1518 del_timer_sync(&dev_priv->hangcheck_timer); 1762 del_timer_sync(&dev_priv->hangcheck_timer);
1519 1763
@@ -1525,7 +1769,17 @@ int i915_driver_unload(struct drm_device *dev)
1525 } 1769 }
1526 1770
1527 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1771 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1772 /*
1773 * free the memory space allocated for the child device
1774 * config parsed from VBT
1775 */
1776 if (dev_priv->child_dev && dev_priv->child_dev_num) {
1777 kfree(dev_priv->child_dev);
1778 dev_priv->child_dev = NULL;
1779 dev_priv->child_dev_num = 0;
1780 }
1528 drm_irq_uninstall(dev); 1781 drm_irq_uninstall(dev);
1782 vga_switcheroo_unregister_client(dev->pdev);
1529 vga_client_register(dev->pdev, NULL, NULL, NULL); 1783 vga_client_register(dev->pdev, NULL, NULL, NULL);
1530 } 1784 }
1531 1785
@@ -1535,8 +1789,7 @@ int i915_driver_unload(struct drm_device *dev)
1535 if (dev_priv->regs != NULL) 1789 if (dev_priv->regs != NULL)
1536 iounmap(dev_priv->regs); 1790 iounmap(dev_priv->regs);
1537 1791
1538 if (!IS_IGDNG(dev)) 1792 intel_opregion_free(dev, 0);
1539 intel_opregion_free(dev, 0);
1540 1793
1541 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1794 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1542 intel_modeset_cleanup(dev); 1795 intel_modeset_cleanup(dev);
@@ -1546,10 +1799,16 @@ int i915_driver_unload(struct drm_device *dev)
1546 mutex_lock(&dev->struct_mutex); 1799 mutex_lock(&dev->struct_mutex);
1547 i915_gem_cleanup_ringbuffer(dev); 1800 i915_gem_cleanup_ringbuffer(dev);
1548 mutex_unlock(&dev->struct_mutex); 1801 mutex_unlock(&dev->struct_mutex);
1802 if (I915_HAS_FBC(dev) && i915_powersave)
1803 i915_cleanup_compression(dev);
1549 drm_mm_takedown(&dev_priv->vram); 1804 drm_mm_takedown(&dev_priv->vram);
1550 i915_gem_lastclose(dev); 1805 i915_gem_lastclose(dev);
1806
1807 intel_cleanup_overlay(dev);
1551 } 1808 }
1552 1809
1810 intel_teardown_mchbar(dev);
1811
1553 pci_dev_put(dev_priv->bridge_dev); 1812 pci_dev_put(dev_priv->bridge_dev);
1554 kfree(dev->dev_private); 1813 kfree(dev->dev_private);
1555 1814
@@ -1592,6 +1851,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1592 1851
1593 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 1852 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1594 drm_fb_helper_restore(); 1853 drm_fb_helper_restore();
1854 vga_switcheroo_process_delayed_switch();
1595 return; 1855 return;
1596 } 1856 }
1597 1857
@@ -1636,26 +1896,29 @@ struct drm_ioctl_desc i915_ioctls[] = {
1636 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 1896 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
1637 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 1897 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1638 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1898 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1639 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1899 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1640 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 1900 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1641 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1901 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
1642 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1902 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1643 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), 1903 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1644 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), 1904 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1645 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1905 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1646 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1906 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1647 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), 1907 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1648 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), 1908 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
1649 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), 1909 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1650 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), 1910 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1651 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), 1911 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
1652 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), 1912 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1653 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), 1913 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1654 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), 1914 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1655 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), 1915 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
1656 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), 1916 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
1657 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 1917 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1658 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), 1918 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1919 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1920 DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1921 DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1659}; 1922};
1660 1923
1661int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1924int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f6..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35 35
36#include "drm_pciids.h"
37#include <linux/console.h> 36#include <linux/console.h>
38#include "drm_crtc_helper.h" 37#include "drm_crtc_helper.h"
39 38
@@ -46,36 +45,163 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
46unsigned int i915_powersave = 1; 45unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0400); 46module_param_named(powersave, i915_powersave, int, 0400);
48 47
48unsigned int i915_lvds_downclock = 0;
49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
50
49static struct drm_driver driver; 51static struct drm_driver driver;
52extern int intel_agp_enabled;
53
54#define INTEL_VGA_DEVICE(id, info) { \
55 .class = PCI_CLASS_DISPLAY_VGA << 8, \
56 .class_mask = 0xffff00, \
57 .vendor = 0x8086, \
58 .device = id, \
59 .subvendor = PCI_ANY_ID, \
60 .subdevice = PCI_ANY_ID, \
61 .driver_data = (unsigned long) info }
62
63const static struct intel_device_info intel_i830_info = {
64 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
65};
66
67const static struct intel_device_info intel_845g_info = {
68 .is_i8xx = 1,
69};
70
71const static struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1,
74};
75
76const static struct intel_device_info intel_i865g_info = {
77 .is_i8xx = 1,
78};
79
80const static struct intel_device_info intel_i915g_info = {
81 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
82};
83const static struct intel_device_info intel_i915gm_info = {
84 .is_i9xx = 1, .is_mobile = 1,
85 .cursor_needs_physical = 1,
86};
87const static struct intel_device_info intel_i945g_info = {
88 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
89};
90const static struct intel_device_info intel_i945gm_info = {
91 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
92 .has_hotplug = 1, .cursor_needs_physical = 1,
93};
94
95const static struct intel_device_info intel_i965g_info = {
96 .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
97};
98
99const static struct intel_device_info intel_i965gm_info = {
100 .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
102 .has_hotplug = 1,
103};
104
105const static struct intel_device_info intel_g33_info = {
106 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
107 .has_hotplug = 1,
108};
109
110const static struct intel_device_info intel_g45_info = {
111 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
112 .has_pipe_cxsr = 1,
113 .has_hotplug = 1,
114};
115
116const static struct intel_device_info intel_gm45_info = {
117 .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
119 .has_pipe_cxsr = 1,
120 .has_hotplug = 1,
121};
122
123const static struct intel_device_info intel_pineview_info = {
124 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
125 .need_gfx_hws = 1,
126 .has_hotplug = 1,
127};
128
129const static struct intel_device_info intel_ironlake_d_info = {
130 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
131 .has_pipe_cxsr = 1,
132 .has_hotplug = 1,
133};
50 134
51static struct pci_device_id pciidlist[] = { 135const static struct intel_device_info intel_ironlake_m_info = {
52 i915_PCI_IDS 136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
137 .need_gfx_hws = 1, .has_rc6 = 1,
138 .has_hotplug = 1,
139};
140
141const static struct intel_device_info intel_sandybridge_d_info = {
142 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
143 .has_hotplug = 1, .is_gen6 = 1,
144};
145
146const static struct intel_device_info intel_sandybridge_m_info = {
147 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
148 .has_hotplug = 1, .is_gen6 = 1,
149};
150
151const static struct pci_device_id pciidlist[] = {
152 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
153 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
154 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
155 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
156 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
157 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
158 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
159 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
160 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
161 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
162 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
163 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
164 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
165 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
166 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
167 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
168 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
169 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
170 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
171 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
172 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
173 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
174 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
175 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
176 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
177 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
178 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
179 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
181 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
182 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
183 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
184 {0, 0, 0}
53}; 185};
54 186
55#if defined(CONFIG_DRM_I915_KMS) 187#if defined(CONFIG_DRM_I915_KMS)
56MODULE_DEVICE_TABLE(pci, pciidlist); 188MODULE_DEVICE_TABLE(pci, pciidlist);
57#endif 189#endif
58 190
59static int i915_suspend(struct drm_device *dev, pm_message_t state) 191static int i915_drm_freeze(struct drm_device *dev)
60{ 192{
61 struct drm_i915_private *dev_priv = dev->dev_private; 193 struct drm_i915_private *dev_priv = dev->dev_private;
62 194
63 if (!dev || !dev_priv) {
64 DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
65 DRM_ERROR("DRM not initialized, aborting suspend.\n");
66 return -ENODEV;
67 }
68
69 if (state.event == PM_EVENT_PRETHAW)
70 return 0;
71
72 pci_save_state(dev->pdev); 195 pci_save_state(dev->pdev);
73 196
74 /* If KMS is active, we do the leavevt stuff here */ 197 /* If KMS is active, we do the leavevt stuff here */
75 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 198 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
76 if (i915_gem_idle(dev)) 199 int error = i915_gem_idle(dev);
200 if (error) {
77 dev_err(&dev->pdev->dev, 201 dev_err(&dev->pdev->dev,
78 "GEM idle failed, resume may fail\n"); 202 "GEM idle failed, resume might fail\n");
203 return error;
204 }
79 drm_irq_uninstall(dev); 205 drm_irq_uninstall(dev);
80 } 206 }
81 207
@@ -83,26 +209,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
83 209
84 intel_opregion_free(dev, 1); 210 intel_opregion_free(dev, 1);
85 211
212 /* Modeset on resume, not lid events */
213 dev_priv->modeset_on_lid = 0;
214
215 return 0;
216}
217
218int i915_suspend(struct drm_device *dev, pm_message_t state)
219{
220 int error;
221
222 if (!dev || !dev->dev_private) {
223 DRM_ERROR("dev: %p\n", dev);
224 DRM_ERROR("DRM not initialized, aborting suspend.\n");
225 return -ENODEV;
226 }
227
228 if (state.event == PM_EVENT_PRETHAW)
229 return 0;
230
231 error = i915_drm_freeze(dev);
232 if (error)
233 return error;
234
86 if (state.event == PM_EVENT_SUSPEND) { 235 if (state.event == PM_EVENT_SUSPEND) {
87 /* Shut down the device */ 236 /* Shut down the device */
88 pci_disable_device(dev->pdev); 237 pci_disable_device(dev->pdev);
89 pci_set_power_state(dev->pdev, PCI_D3hot); 238 pci_set_power_state(dev->pdev, PCI_D3hot);
90 } 239 }
91 240
92 /* Modeset on resume, not lid events */
93 dev_priv->modeset_on_lid = 0;
94
95 return 0; 241 return 0;
96} 242}
97 243
98static int i915_resume(struct drm_device *dev) 244static int i915_drm_thaw(struct drm_device *dev)
99{ 245{
100 struct drm_i915_private *dev_priv = dev->dev_private; 246 struct drm_i915_private *dev_priv = dev->dev_private;
101 int ret = 0; 247 int error = 0;
102
103 if (pci_enable_device(dev->pdev))
104 return -1;
105 pci_set_master(dev->pdev);
106 248
107 i915_restore_state(dev); 249 i915_restore_state(dev);
108 250
@@ -113,21 +255,28 @@ static int i915_resume(struct drm_device *dev)
113 mutex_lock(&dev->struct_mutex); 255 mutex_lock(&dev->struct_mutex);
114 dev_priv->mm.suspended = 0; 256 dev_priv->mm.suspended = 0;
115 257
116 ret = i915_gem_init_ringbuffer(dev); 258 error = i915_gem_init_ringbuffer(dev);
117 if (ret != 0)
118 ret = -1;
119 mutex_unlock(&dev->struct_mutex); 259 mutex_unlock(&dev->struct_mutex);
120 260
121 drm_irq_install(dev); 261 drm_irq_install(dev);
122 } 262
123 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
124 /* Resume the modeset for every activated CRTC */ 263 /* Resume the modeset for every activated CRTC */
125 drm_helper_resume_force_mode(dev); 264 drm_helper_resume_force_mode(dev);
126 } 265 }
127 266
128 dev_priv->modeset_on_lid = 0; 267 dev_priv->modeset_on_lid = 0;
129 268
130 return ret; 269 return error;
270}
271
272int i915_resume(struct drm_device *dev)
273{
274 if (pci_enable_device(dev->pdev))
275 return -EIO;
276
277 pci_set_master(dev->pdev);
278
279 return i915_drm_thaw(dev);
131} 280}
132 281
133/** 282/**
@@ -213,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
213 !dev_priv->mm.suspended) { 362 !dev_priv->mm.suspended) {
214 drm_i915_ring_buffer_t *ring = &dev_priv->ring; 363 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
215 struct drm_gem_object *obj = ring->ring_obj; 364 struct drm_gem_object *obj = ring->ring_obj;
216 struct drm_i915_gem_object *obj_priv = obj->driver_private; 365 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
217 dev_priv->mm.suspended = 0; 366 dev_priv->mm.suspended = 0;
218 367
219 /* Stop the ring if it's running. */ 368 /* Stop the ring if it's running. */
@@ -268,22 +417,73 @@ i915_pci_remove(struct pci_dev *pdev)
268 drm_put_dev(dev); 417 drm_put_dev(dev);
269} 418}
270 419
271static int 420static int i915_pm_suspend(struct device *dev)
272i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
273{ 421{
274 struct drm_device *dev = pci_get_drvdata(pdev); 422 struct pci_dev *pdev = to_pci_dev(dev);
423 struct drm_device *drm_dev = pci_get_drvdata(pdev);
424 int error;
275 425
276 return i915_suspend(dev, state); 426 if (!drm_dev || !drm_dev->dev_private) {
427 dev_err(dev, "DRM not initialized, aborting suspend.\n");
428 return -ENODEV;
429 }
430
431 error = i915_drm_freeze(drm_dev);
432 if (error)
433 return error;
434
435 pci_disable_device(pdev);
436 pci_set_power_state(pdev, PCI_D3hot);
437
438 return 0;
277} 439}
278 440
279static int 441static int i915_pm_resume(struct device *dev)
280i915_pci_resume(struct pci_dev *pdev)
281{ 442{
282 struct drm_device *dev = pci_get_drvdata(pdev); 443 struct pci_dev *pdev = to_pci_dev(dev);
444 struct drm_device *drm_dev = pci_get_drvdata(pdev);
445
446 return i915_resume(drm_dev);
447}
448
449static int i915_pm_freeze(struct device *dev)
450{
451 struct pci_dev *pdev = to_pci_dev(dev);
452 struct drm_device *drm_dev = pci_get_drvdata(pdev);
453
454 if (!drm_dev || !drm_dev->dev_private) {
455 dev_err(dev, "DRM not initialized, aborting suspend.\n");
456 return -ENODEV;
457 }
458
459 return i915_drm_freeze(drm_dev);
460}
461
462static int i915_pm_thaw(struct device *dev)
463{
464 struct pci_dev *pdev = to_pci_dev(dev);
465 struct drm_device *drm_dev = pci_get_drvdata(pdev);
466
467 return i915_drm_thaw(drm_dev);
468}
469
470static int i915_pm_poweroff(struct device *dev)
471{
472 struct pci_dev *pdev = to_pci_dev(dev);
473 struct drm_device *drm_dev = pci_get_drvdata(pdev);
283 474
284 return i915_resume(dev); 475 return i915_drm_freeze(drm_dev);
285} 476}
286 477
478const struct dev_pm_ops i915_pm_ops = {
479 .suspend = i915_pm_suspend,
480 .resume = i915_pm_resume,
481 .freeze = i915_pm_freeze,
482 .thaw = i915_pm_thaw,
483 .poweroff = i915_pm_poweroff,
484 .restore = i915_pm_resume,
485};
486
287static struct vm_operations_struct i915_gem_vm_ops = { 487static struct vm_operations_struct i915_gem_vm_ops = {
288 .fault = i915_gem_fault, 488 .fault = i915_gem_fault,
289 .open = drm_gem_vm_open, 489 .open = drm_gem_vm_open,
@@ -303,8 +503,11 @@ static struct drm_driver driver = {
303 .lastclose = i915_driver_lastclose, 503 .lastclose = i915_driver_lastclose,
304 .preclose = i915_driver_preclose, 504 .preclose = i915_driver_preclose,
305 .postclose = i915_driver_postclose, 505 .postclose = i915_driver_postclose,
506
507 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
306 .suspend = i915_suspend, 508 .suspend = i915_suspend,
307 .resume = i915_resume, 509 .resume = i915_resume,
510
308 .device_is_agp = i915_driver_device_is_agp, 511 .device_is_agp = i915_driver_device_is_agp,
309 .enable_vblank = i915_enable_vblank, 512 .enable_vblank = i915_enable_vblank,
310 .disable_vblank = i915_disable_vblank, 513 .disable_vblank = i915_disable_vblank,
@@ -329,10 +532,11 @@ static struct drm_driver driver = {
329 .owner = THIS_MODULE, 532 .owner = THIS_MODULE,
330 .open = drm_open, 533 .open = drm_open,
331 .release = drm_release, 534 .release = drm_release,
332 .ioctl = drm_ioctl, 535 .unlocked_ioctl = drm_ioctl,
333 .mmap = drm_gem_mmap, 536 .mmap = drm_gem_mmap,
334 .poll = drm_poll, 537 .poll = drm_poll,
335 .fasync = drm_fasync, 538 .fasync = drm_fasync,
539 .read = drm_read,
336#ifdef CONFIG_COMPAT 540#ifdef CONFIG_COMPAT
337 .compat_ioctl = i915_compat_ioctl, 541 .compat_ioctl = i915_compat_ioctl,
338#endif 542#endif
@@ -343,10 +547,7 @@ static struct drm_driver driver = {
343 .id_table = pciidlist, 547 .id_table = pciidlist,
344 .probe = i915_pci_probe, 548 .probe = i915_pci_probe,
345 .remove = i915_pci_remove, 549 .remove = i915_pci_remove,
346#ifdef CONFIG_PM 550 .driver.pm = &i915_pm_ops,
347 .resume = i915_pci_resume,
348 .suspend = i915_pci_suspend,
349#endif
350 }, 551 },
351 552
352 .name = DRIVER_NAME, 553 .name = DRIVER_NAME,
@@ -359,6 +560,11 @@ static struct drm_driver driver = {
359 560
360static int __init i915_init(void) 561static int __init i915_init(void)
361{ 562{
563 if (!intel_agp_enabled) {
564 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
565 return -ENODEV;
566 }
567
362 driver.num_ioctls = i915_max_ioctl; 568 driver.num_ioctls = i915_max_ioctl;
363 569
364 i915_gem_shrinker_init(); 570 i915_gem_shrinker_init();
@@ -384,6 +590,11 @@ static int __init i915_init(void)
384 driver.driver_features &= ~DRIVER_MODESET; 590 driver.driver_features &= ~DRIVER_MODESET;
385#endif 591#endif
386 592
593 if (!(driver.driver_features & DRIVER_MODESET)) {
594 driver.suspend = i915_suspend;
595 driver.resume = i915_resume;
596 }
597
387 return drm_init(&driver); 598 return drm_init(&driver);
388} 599}
389 600
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f6591192..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
150 u32 instps; 150 u32 instps;
151 u32 instdone1; 151 u32 instdone1;
152 u32 seqno; 152 u32 seqno;
153 u64 bbaddr;
153 struct timeval time; 154 struct timeval time;
155 struct drm_i915_error_object {
156 int page_count;
157 u32 gtt_offset;
158 u32 *pages[0];
159 } *ringbuffer, *batchbuffer[2];
160 struct drm_i915_error_buffer {
161 size_t size;
162 u32 name;
163 u32 seqno;
164 u32 gtt_offset;
165 u32 read_domains;
166 u32 write_domain;
167 u32 fence_reg;
168 s32 pinned:2;
169 u32 tiling:2;
170 u32 dirty:1;
171 u32 purgeable:1;
172 } *active_bo;
173 u32 active_bo_count;
154}; 174};
155 175
156struct drm_i915_display_funcs { 176struct drm_i915_display_funcs {
@@ -170,9 +190,43 @@ struct drm_i915_display_funcs {
170 /* clock gating init */ 190 /* clock gating init */
171}; 191};
172 192
193struct intel_overlay;
194
195struct intel_device_info {
196 u8 is_mobile : 1;
197 u8 is_i8xx : 1;
198 u8 is_i85x : 1;
199 u8 is_i915g : 1;
200 u8 is_i9xx : 1;
201 u8 is_i945gm : 1;
202 u8 is_i965g : 1;
203 u8 is_i965gm : 1;
204 u8 is_g33 : 1;
205 u8 need_gfx_hws : 1;
206 u8 is_g4x : 1;
207 u8 is_pineview : 1;
208 u8 is_ironlake : 1;
209 u8 is_gen6 : 1;
210 u8 has_fbc : 1;
211 u8 has_rc6 : 1;
212 u8 has_pipe_cxsr : 1;
213 u8 has_hotplug : 1;
214 u8 cursor_needs_physical : 1;
215};
216
217enum no_fbc_reason {
218 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
219 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
220 FBC_MODE_TOO_LARGE, /* mode too large for compression */
221 FBC_BAD_PLANE, /* fbc not supported on plane */
222 FBC_NOT_TILED, /* buffer not tiled */
223};
224
173typedef struct drm_i915_private { 225typedef struct drm_i915_private {
174 struct drm_device *dev; 226 struct drm_device *dev;
175 227
228 const struct intel_device_info *info;
229
176 int has_gem; 230 int has_gem;
177 231
178 void __iomem *regs; 232 void __iomem *regs;
@@ -182,11 +236,15 @@ typedef struct drm_i915_private {
182 236
183 drm_dma_handle_t *status_page_dmah; 237 drm_dma_handle_t *status_page_dmah;
184 void *hw_status_page; 238 void *hw_status_page;
239 void *seqno_page;
185 dma_addr_t dma_status_page; 240 dma_addr_t dma_status_page;
186 uint32_t counter; 241 uint32_t counter;
187 unsigned int status_gfx_addr; 242 unsigned int status_gfx_addr;
243 unsigned int seqno_gfx_addr;
188 drm_local_map_t hws_map; 244 drm_local_map_t hws_map;
189 struct drm_gem_object *hws_obj; 245 struct drm_gem_object *hws_obj;
246 struct drm_gem_object *seqno_obj;
247 struct drm_gem_object *pwrctx;
190 248
191 struct resource mch_res; 249 struct resource mch_res;
192 250
@@ -206,11 +264,13 @@ typedef struct drm_i915_private {
206 /** Cached value of IMR to avoid reads in updating the bitfield */ 264 /** Cached value of IMR to avoid reads in updating the bitfield */
207 u32 irq_mask_reg; 265 u32 irq_mask_reg;
208 u32 pipestat[2]; 266 u32 pipestat[2];
209 /** splitted irq regs for graphics and display engine on IGDNG, 267 /** splitted irq regs for graphics and display engine on Ironlake,
210 irq_mask_reg is still used for display irq. */ 268 irq_mask_reg is still used for display irq. */
211 u32 gt_irq_mask_reg; 269 u32 gt_irq_mask_reg;
212 u32 gt_irq_enable_reg; 270 u32 gt_irq_enable_reg;
213 u32 de_irq_enable_reg; 271 u32 de_irq_enable_reg;
272 u32 pch_irq_mask_reg;
273 u32 pch_irq_enable_reg;
214 274
215 u32 hotplug_supported_mask; 275 u32 hotplug_supported_mask;
216 struct work_struct hotplug_work; 276 struct work_struct hotplug_work;
@@ -227,8 +287,6 @@ typedef struct drm_i915_private {
227 int hangcheck_count; 287 int hangcheck_count;
228 uint32_t last_acthd; 288 uint32_t last_acthd;
229 289
230 bool cursor_needs_physical;
231
232 struct drm_mm vram; 290 struct drm_mm vram;
233 291
234 unsigned long cfb_size; 292 unsigned long cfb_size;
@@ -240,6 +298,9 @@ typedef struct drm_i915_private {
240 298
241 struct intel_opregion opregion; 299 struct intel_opregion opregion;
242 300
301 /* overlay */
302 struct intel_overlay *overlay;
303
243 /* LVDS info */ 304 /* LVDS info */
244 int backlight_duty_cycle; /* restore backlight to this value */ 305 int backlight_duty_cycle; /* restore backlight to this value */
245 bool panel_wants_dither; 306 bool panel_wants_dither;
@@ -255,10 +316,11 @@ typedef struct drm_i915_private {
255 unsigned int lvds_use_ssc:1; 316 unsigned int lvds_use_ssc:1;
256 unsigned int edp_support:1; 317 unsigned int edp_support:1;
257 int lvds_ssc_freq; 318 int lvds_ssc_freq;
319 int edp_bpp;
258 320
259 struct notifier_block lid_notifier; 321 struct notifier_block lid_notifier;
260 322
261 int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ 323 int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
262 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 324 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
263 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 325 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
264 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 326 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -279,7 +341,6 @@ typedef struct drm_i915_private {
279 u32 saveDSPACNTR; 341 u32 saveDSPACNTR;
280 u32 saveDSPBCNTR; 342 u32 saveDSPBCNTR;
281 u32 saveDSPARB; 343 u32 saveDSPARB;
282 u32 saveRENDERSTANDBY;
283 u32 saveHWS; 344 u32 saveHWS;
284 u32 savePIPEACONF; 345 u32 savePIPEACONF;
285 u32 savePIPEBCONF; 346 u32 savePIPEBCONF;
@@ -374,8 +435,6 @@ typedef struct drm_i915_private {
374 u32 saveFDI_RXA_IMR; 435 u32 saveFDI_RXA_IMR;
375 u32 saveFDI_RXB_IMR; 436 u32 saveFDI_RXB_IMR;
376 u32 saveCACHE_MODE_0; 437 u32 saveCACHE_MODE_0;
377 u32 saveD_STATE;
378 u32 saveDSPCLK_GATE_D;
379 u32 saveMI_ARB_STATE; 438 u32 saveMI_ARB_STATE;
380 u32 saveSWF0[16]; 439 u32 saveSWF0[16];
381 u32 saveSWF1[16]; 440 u32 saveSWF1[16];
@@ -426,6 +485,7 @@ typedef struct drm_i915_private {
426 u32 savePIPEB_DATA_N1; 485 u32 savePIPEB_DATA_N1;
427 u32 savePIPEB_LINK_M1; 486 u32 savePIPEB_LINK_M1;
428 u32 savePIPEB_LINK_N1; 487 u32 savePIPEB_LINK_N1;
488 u32 saveMCHBAR_RENDER_STANDBY;
429 489
430 struct { 490 struct {
431 struct drm_mm gtt_space; 491 struct drm_mm gtt_space;
@@ -467,6 +527,15 @@ typedef struct drm_i915_private {
467 struct list_head flushing_list; 527 struct list_head flushing_list;
468 528
469 /** 529 /**
530 * List of objects currently pending a GPU write flush.
531 *
532 * All elements on this list will belong to either the
533 * active_list or flushing_list, last_rendering_seqno can
534 * be used to differentiate between the two elements.
535 */
536 struct list_head gpu_write_list;
537
538 /**
470 * LRU list of objects which are not in the ringbuffer and 539 * LRU list of objects which are not in the ringbuffer and
471 * are ready to unbind, but are still in the GTT. 540 * are ready to unbind, but are still in the GTT.
472 * 541 *
@@ -539,13 +608,35 @@ typedef struct drm_i915_private {
539 /* indicate whether the LVDS_BORDER should be enabled or not */ 608 /* indicate whether the LVDS_BORDER should be enabled or not */
540 unsigned int lvds_border_bits; 609 unsigned int lvds_border_bits;
541 610
611 struct drm_crtc *plane_to_crtc_mapping[2];
612 struct drm_crtc *pipe_to_crtc_mapping[2];
613 wait_queue_head_t pending_flip_queue;
614
542 /* Reclocking support */ 615 /* Reclocking support */
543 bool render_reclock_avail; 616 bool render_reclock_avail;
544 bool lvds_downclock_avail; 617 bool lvds_downclock_avail;
618 /* indicate whether the LVDS EDID is OK */
619 bool lvds_edid_good;
620 /* indicates the reduced downclock for LVDS*/
621 int lvds_downclock;
545 struct work_struct idle_work; 622 struct work_struct idle_work;
546 struct timer_list idle_timer; 623 struct timer_list idle_timer;
547 bool busy; 624 bool busy;
548 u16 orig_clock; 625 u16 orig_clock;
626 int child_dev_num;
627 struct child_device_config *child_dev;
628 struct drm_connector *int_lvds_connector;
629
630 bool mchbar_need_disable;
631
632 u8 cur_delay;
633 u8 min_delay;
634 u8 max_delay;
635
636 enum no_fbc_reason no_fbc_reason;
637
638 struct drm_mm_node *compressed_fb;
639 struct drm_mm_node *compressed_llb;
549} drm_i915_private_t; 640} drm_i915_private_t;
550 641
551/** driver private structure attached to each drm_gem_object */ 642/** driver private structure attached to each drm_gem_object */
@@ -557,6 +648,8 @@ struct drm_i915_gem_object {
557 648
558 /** This object's place on the active/flushing/inactive lists */ 649 /** This object's place on the active/flushing/inactive lists */
559 struct list_head list; 650 struct list_head list;
651 /** This object's place on GPU write list */
652 struct list_head gpu_write_list;
560 653
561 /** This object's place on the fenced object LRU */ 654 /** This object's place on the fenced object LRU */
562 struct list_head fence_list; 655 struct list_head fence_list;
@@ -638,8 +731,17 @@ struct drm_i915_gem_object {
638 * Advice: are the backing pages purgeable? 731 * Advice: are the backing pages purgeable?
639 */ 732 */
640 int madv; 733 int madv;
734
735 /**
736 * Number of crtcs where this object is currently the fb, but
737 * will be page flipped away on the next vblank. When it
738 * reaches 0, dev_priv->pending_flip_queue will be woken up.
739 */
740 atomic_t pending_flip;
641}; 741};
642 742
743#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
744
643/** 745/**
644 * Request queue structure. 746 * Request queue structure.
645 * 747 *
@@ -681,7 +783,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
681extern int i915_max_ioctl; 783extern int i915_max_ioctl;
682extern unsigned int i915_fbpercrtc; 784extern unsigned int i915_fbpercrtc;
683extern unsigned int i915_powersave; 785extern unsigned int i915_powersave;
786extern unsigned int i915_lvds_downclock;
684 787
788extern int i915_suspend(struct drm_device *dev, pm_message_t state);
789extern int i915_resume(struct drm_device *dev);
685extern void i915_save_display(struct drm_device *dev); 790extern void i915_save_display(struct drm_device *dev);
686extern void i915_restore_display(struct drm_device *dev); 791extern void i915_restore_display(struct drm_device *dev);
687extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 792extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -707,6 +812,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
707 812
708/* i915_irq.c */ 813/* i915_irq.c */
709void i915_hangcheck_elapsed(unsigned long data); 814void i915_hangcheck_elapsed(unsigned long data);
815void i915_destroy_error_state(struct drm_device *dev);
710extern int i915_irq_emit(struct drm_device *dev, void *data, 816extern int i915_irq_emit(struct drm_device *dev, void *data,
711 struct drm_file *file_priv); 817 struct drm_file *file_priv);
712extern int i915_irq_wait(struct drm_device *dev, void *data, 818extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -738,6 +844,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
738void 844void
739i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 845i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
740 846
847void intel_enable_asle (struct drm_device *dev);
848
741 849
742/* i915_mem.c */ 850/* i915_mem.c */
743extern int i915_mem_alloc(struct drm_device *dev, void *data, 851extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -770,6 +878,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
770 struct drm_file *file_priv); 878 struct drm_file *file_priv);
771int i915_gem_execbuffer(struct drm_device *dev, void *data, 879int i915_gem_execbuffer(struct drm_device *dev, void *data,
772 struct drm_file *file_priv); 880 struct drm_file *file_priv);
881int i915_gem_execbuffer2(struct drm_device *dev, void *data,
882 struct drm_file *file_priv);
773int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 883int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
774 struct drm_file *file_priv); 884 struct drm_file *file_priv);
775int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 885int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -813,17 +923,22 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
813int i915_gem_do_init(struct drm_device *dev, unsigned long start, 923int i915_gem_do_init(struct drm_device *dev, unsigned long start,
814 unsigned long end); 924 unsigned long end);
815int i915_gem_idle(struct drm_device *dev); 925int i915_gem_idle(struct drm_device *dev);
926uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
927 uint32_t flush_domains);
928int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
816int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 929int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
817int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 930int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
818 int write); 931 int write);
932int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
819int i915_gem_attach_phys_object(struct drm_device *dev, 933int i915_gem_attach_phys_object(struct drm_device *dev,
820 struct drm_gem_object *obj, int id); 934 struct drm_gem_object *obj, int id);
821void i915_gem_detach_phys_object(struct drm_device *dev, 935void i915_gem_detach_phys_object(struct drm_device *dev,
822 struct drm_gem_object *obj); 936 struct drm_gem_object *obj);
823void i915_gem_free_all_phys_object(struct drm_device *dev); 937void i915_gem_free_all_phys_object(struct drm_device *dev);
824int i915_gem_object_get_pages(struct drm_gem_object *obj); 938int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
825void i915_gem_object_put_pages(struct drm_gem_object *obj); 939void i915_gem_object_put_pages(struct drm_gem_object *obj);
826void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 940void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
941void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
827 942
828void i915_gem_shrinker_init(void); 943void i915_gem_shrinker_init(void);
829void i915_gem_shrinker_exit(void); 944void i915_gem_shrinker_exit(void);
@@ -832,6 +947,10 @@ void i915_gem_shrinker_exit(void);
832void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 947void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
833void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 948void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
834void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 949void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
950bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
951 int tiling_mode);
952bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
953 int tiling_mode);
835 954
836/* i915_gem_debug.c */ 955/* i915_gem_debug.c */
837void i915_gem_dump_object(struct drm_gem_object *obj, int len, 956void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -863,11 +982,13 @@ extern int i915_restore_state(struct drm_device *dev);
863extern int intel_opregion_init(struct drm_device *dev, int resume); 982extern int intel_opregion_init(struct drm_device *dev, int resume);
864extern void intel_opregion_free(struct drm_device *dev, int suspend); 983extern void intel_opregion_free(struct drm_device *dev, int suspend);
865extern void opregion_asle_intr(struct drm_device *dev); 984extern void opregion_asle_intr(struct drm_device *dev);
985extern void ironlake_opregion_gse_intr(struct drm_device *dev);
866extern void opregion_enable_asle(struct drm_device *dev); 986extern void opregion_enable_asle(struct drm_device *dev);
867#else 987#else
868static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } 988static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
869static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } 989static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
870static inline void opregion_asle_intr(struct drm_device *dev) { return; } 990static inline void opregion_asle_intr(struct drm_device *dev) { return; }
991static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
871static inline void opregion_enable_asle(struct drm_device *dev) { return; } 992static inline void opregion_enable_asle(struct drm_device *dev) { return; }
872#endif 993#endif
873 994
@@ -952,85 +1073,76 @@ extern void g4x_disable_fbc(struct drm_device *dev);
952extern int i915_wrap_ring(struct drm_device * dev); 1073extern int i915_wrap_ring(struct drm_device * dev);
953extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 1074extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
954 1075
955#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1076#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
956#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1077
957#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1078#define IS_I830(dev) ((dev)->pci_device == 0x3577)
958#define IS_I855(dev) ((dev)->pci_device == 0x3582) 1079#define IS_845G(dev) ((dev)->pci_device == 0x2562)
959#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1080#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
960 1081#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
961#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 1082#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
962#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1083#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
963#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1084#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
964#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 1085#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
965 (dev)->pci_device == 0x27AE) 1086#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
966#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 1087#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
967 (dev)->pci_device == 0x2982 || \ 1088#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
968 (dev)->pci_device == 0x2992 || \ 1089#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
969 (dev)->pci_device == 0x29A2 || \ 1090#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
970 (dev)->pci_device == 0x2A02 || \ 1091#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
971 (dev)->pci_device == 0x2A12 || \ 1092#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
972 (dev)->pci_device == 0x2A42 || \ 1093#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
973 (dev)->pci_device == 0x2E02 || \ 1094#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
974 (dev)->pci_device == 0x2E12 || \ 1095#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
975 (dev)->pci_device == 0x2E22 || \ 1096#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
976 (dev)->pci_device == 0x2E32 || \ 1097#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
977 (dev)->pci_device == 0x2E42 || \ 1098#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
978 (dev)->pci_device == 0x0042 || \ 1099#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
979 (dev)->pci_device == 0x0046) 1100#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
980 1101
981#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ 1102#define IS_GEN3(dev) (IS_I915G(dev) || \
982 (dev)->pci_device == 0x2A12) 1103 IS_I915GM(dev) || \
983 1104 IS_I945G(dev) || \
984#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1105 IS_I945GM(dev) || \
985 1106 IS_G33(dev) || \
986#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 1107 IS_PINEVIEW(dev))
987 (dev)->pci_device == 0x2E12 || \ 1108#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
988 (dev)->pci_device == 0x2E22 || \ 1109 (dev)->pci_device == 0x2982 || \
989 (dev)->pci_device == 0x2E32 || \ 1110 (dev)->pci_device == 0x2992 || \
990 (dev)->pci_device == 0x2E42 || \ 1111 (dev)->pci_device == 0x29A2 || \
991 IS_GM45(dev)) 1112 (dev)->pci_device == 0x2A02 || \
992 1113 (dev)->pci_device == 0x2A12 || \
993#define IS_IGDG(dev) ((dev)->pci_device == 0xa001) 1114 (dev)->pci_device == 0x2E02 || \
994#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) 1115 (dev)->pci_device == 0x2E12 || \
995#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) 1116 (dev)->pci_device == 0x2E22 || \
996 1117 (dev)->pci_device == 0x2E32 || \
997#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 1118 (dev)->pci_device == 0x2A42 || \
998 (dev)->pci_device == 0x29B2 || \ 1119 (dev)->pci_device == 0x2E42)
999 (dev)->pci_device == 0x29D2 || \ 1120
1000 (IS_IGD(dev))) 1121#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1001 1122
1002#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
1003#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
1004#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
1005
1006#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
1007 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
1008 IS_IGDNG(dev))
1009
1010#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1011 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
1012 IS_IGD(dev) || IS_IGDNG_M(dev))
1013
1014#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
1015 IS_IGDNG(dev))
1016/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1123/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1017 * rows, which changed the alignment requirements and fence programming. 1124 * rows, which changed the alignment requirements and fence programming.
1018 */ 1125 */
1019#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 1126#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
1020 IS_I915GM(dev))) 1127 IS_I915GM(dev)))
1021#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 1128#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
1022#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 1129#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1023#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) 1130#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1024#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) 1131#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1132#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
1133 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
1134#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1025/* dsparb controlled by hw only */ 1135/* dsparb controlled by hw only */
1026#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 1136#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1027 1137
1028#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) 1138#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
1029#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 1139#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1030#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ 1140#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1031 (IS_I9XX(dev) || IS_GM45(dev)) && \ 1141#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1032 !IS_IGD(dev) && \ 1142
1033 !IS_IGDNG(dev)) 1143#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1144 IS_GEN6(dev))
1145#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
1034 1146
1035#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1147#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1036 1148
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abfc27b0c2ea..ef3d91dda71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_trace.h" 32#include "i915_trace.h"
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/slab.h>
34#include <linux/swap.h> 35#include <linux/swap.h>
35#include <linux/pci.h> 36#include <linux/pci.h>
36 37
@@ -128,9 +129,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
128 return -ENOMEM; 129 return -ENOMEM;
129 130
130 ret = drm_gem_handle_create(file_priv, obj, &handle); 131 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 mutex_lock(&dev->struct_mutex); 132 drm_gem_object_handle_unreference_unlocked(obj);
132 drm_gem_object_handle_unreference(obj);
133 mutex_unlock(&dev->struct_mutex);
134 133
135 if (ret) 134 if (ret)
136 return ret; 135 return ret;
@@ -164,7 +163,7 @@ fast_shmem_read(struct page **pages,
164static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) 163static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
165{ 164{
166 drm_i915_private_t *dev_priv = obj->dev->dev_private; 165 drm_i915_private_t *dev_priv = obj->dev->dev_private;
167 struct drm_i915_gem_object *obj_priv = obj->driver_private; 166 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
168 167
169 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 168 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
170 obj_priv->tiling_mode != I915_TILING_NONE; 169 obj_priv->tiling_mode != I915_TILING_NONE;
@@ -265,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
265 struct drm_i915_gem_pread *args, 264 struct drm_i915_gem_pread *args,
266 struct drm_file *file_priv) 265 struct drm_file *file_priv)
267{ 266{
268 struct drm_i915_gem_object *obj_priv = obj->driver_private; 267 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
269 ssize_t remain; 268 ssize_t remain;
270 loff_t offset, page_base; 269 loff_t offset, page_base;
271 char __user *user_data; 270 char __user *user_data;
@@ -277,7 +276,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
277 276
278 mutex_lock(&dev->struct_mutex); 277 mutex_lock(&dev->struct_mutex);
279 278
280 ret = i915_gem_object_get_pages(obj); 279 ret = i915_gem_object_get_pages(obj, 0);
281 if (ret != 0) 280 if (ret != 0)
282 goto fail_unlock; 281 goto fail_unlock;
283 282
@@ -286,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
286 if (ret != 0) 285 if (ret != 0)
287 goto fail_put_pages; 286 goto fail_put_pages;
288 287
289 obj_priv = obj->driver_private; 288 obj_priv = to_intel_bo(obj);
290 offset = args->offset; 289 offset = args->offset;
291 290
292 while (remain > 0) { 291 while (remain > 0) {
@@ -321,40 +320,24 @@ fail_unlock:
321 return ret; 320 return ret;
322} 321}
323 322
324static inline gfp_t
325i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
326{
327 return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
328}
329
330static inline void
331i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
332{
333 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
334}
335
336static int 323static int
337i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) 324i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
338{ 325{
339 int ret; 326 int ret;
340 327
341 ret = i915_gem_object_get_pages(obj); 328 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
342 329
343 /* If we've insufficient memory to map in the pages, attempt 330 /* If we've insufficient memory to map in the pages, attempt
344 * to make some space by throwing out some old buffers. 331 * to make some space by throwing out some old buffers.
345 */ 332 */
346 if (ret == -ENOMEM) { 333 if (ret == -ENOMEM) {
347 struct drm_device *dev = obj->dev; 334 struct drm_device *dev = obj->dev;
348 gfp_t gfp;
349 335
350 ret = i915_gem_evict_something(dev, obj->size); 336 ret = i915_gem_evict_something(dev, obj->size);
351 if (ret) 337 if (ret)
352 return ret; 338 return ret;
353 339
354 gfp = i915_gem_object_get_page_gfp_mask(obj); 340 ret = i915_gem_object_get_pages(obj, 0);
355 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356 ret = i915_gem_object_get_pages(obj);
357 i915_gem_object_set_page_gfp_mask (obj, gfp);
358 } 341 }
359 342
360 return ret; 343 return ret;
@@ -371,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
371 struct drm_i915_gem_pread *args, 354 struct drm_i915_gem_pread *args,
372 struct drm_file *file_priv) 355 struct drm_file *file_priv)
373{ 356{
374 struct drm_i915_gem_object *obj_priv = obj->driver_private; 357 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
375 struct mm_struct *mm = current->mm; 358 struct mm_struct *mm = current->mm;
376 struct page **user_pages; 359 struct page **user_pages;
377 ssize_t remain; 360 ssize_t remain;
@@ -420,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
420 if (ret != 0) 403 if (ret != 0)
421 goto fail_put_pages; 404 goto fail_put_pages;
422 405
423 obj_priv = obj->driver_private; 406 obj_priv = to_intel_bo(obj);
424 offset = args->offset; 407 offset = args->offset;
425 408
426 while (remain > 0) { 409 while (remain > 0) {
@@ -496,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
496 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 479 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
497 if (obj == NULL) 480 if (obj == NULL)
498 return -EBADF; 481 return -EBADF;
499 obj_priv = obj->driver_private; 482 obj_priv = to_intel_bo(obj);
500 483
501 /* Bounds check source. 484 /* Bounds check source.
502 * 485 *
@@ -504,7 +487,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
504 */ 487 */
505 if (args->offset > obj->size || args->size > obj->size || 488 if (args->offset > obj->size || args->size > obj->size ||
506 args->offset + args->size > obj->size) { 489 args->offset + args->size > obj->size) {
507 drm_gem_object_unreference(obj); 490 drm_gem_object_unreference_unlocked(obj);
508 return -EINVAL; 491 return -EINVAL;
509 } 492 }
510 493
@@ -517,7 +500,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
517 file_priv); 500 file_priv);
518 } 501 }
519 502
520 drm_gem_object_unreference(obj); 503 drm_gem_object_unreference_unlocked(obj);
521 504
522 return ret; 505 return ret;
523} 506}
@@ -598,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
598 struct drm_i915_gem_pwrite *args, 581 struct drm_i915_gem_pwrite *args,
599 struct drm_file *file_priv) 582 struct drm_file *file_priv)
600{ 583{
601 struct drm_i915_gem_object *obj_priv = obj->driver_private; 584 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
602 drm_i915_private_t *dev_priv = dev->dev_private; 585 drm_i915_private_t *dev_priv = dev->dev_private;
603 ssize_t remain; 586 ssize_t remain;
604 loff_t offset, page_base; 587 loff_t offset, page_base;
@@ -622,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
622 if (ret) 605 if (ret)
623 goto fail; 606 goto fail;
624 607
625 obj_priv = obj->driver_private; 608 obj_priv = to_intel_bo(obj);
626 offset = obj_priv->gtt_offset + args->offset; 609 offset = obj_priv->gtt_offset + args->offset;
627 610
628 while (remain > 0) { 611 while (remain > 0) {
@@ -672,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
672 struct drm_i915_gem_pwrite *args, 655 struct drm_i915_gem_pwrite *args,
673 struct drm_file *file_priv) 656 struct drm_file *file_priv)
674{ 657{
675 struct drm_i915_gem_object *obj_priv = obj->driver_private; 658 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
676 drm_i915_private_t *dev_priv = dev->dev_private; 659 drm_i915_private_t *dev_priv = dev->dev_private;
677 ssize_t remain; 660 ssize_t remain;
678 loff_t gtt_page_base, offset; 661 loff_t gtt_page_base, offset;
@@ -716,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
716 if (ret) 699 if (ret)
717 goto out_unpin_object; 700 goto out_unpin_object;
718 701
719 obj_priv = obj->driver_private; 702 obj_priv = to_intel_bo(obj);
720 offset = obj_priv->gtt_offset + args->offset; 703 offset = obj_priv->gtt_offset + args->offset;
721 704
722 while (remain > 0) { 705 while (remain > 0) {
@@ -778,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
778 struct drm_i915_gem_pwrite *args, 761 struct drm_i915_gem_pwrite *args,
779 struct drm_file *file_priv) 762 struct drm_file *file_priv)
780{ 763{
781 struct drm_i915_gem_object *obj_priv = obj->driver_private; 764 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
782 ssize_t remain; 765 ssize_t remain;
783 loff_t offset, page_base; 766 loff_t offset, page_base;
784 char __user *user_data; 767 char __user *user_data;
@@ -790,7 +773,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
790 773
791 mutex_lock(&dev->struct_mutex); 774 mutex_lock(&dev->struct_mutex);
792 775
793 ret = i915_gem_object_get_pages(obj); 776 ret = i915_gem_object_get_pages(obj, 0);
794 if (ret != 0) 777 if (ret != 0)
795 goto fail_unlock; 778 goto fail_unlock;
796 779
@@ -798,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
798 if (ret != 0) 781 if (ret != 0)
799 goto fail_put_pages; 782 goto fail_put_pages;
800 783
801 obj_priv = obj->driver_private; 784 obj_priv = to_intel_bo(obj);
802 offset = args->offset; 785 offset = args->offset;
803 obj_priv->dirty = 1; 786 obj_priv->dirty = 1;
804 787
@@ -846,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
846 struct drm_i915_gem_pwrite *args, 829 struct drm_i915_gem_pwrite *args,
847 struct drm_file *file_priv) 830 struct drm_file *file_priv)
848{ 831{
849 struct drm_i915_gem_object *obj_priv = obj->driver_private; 832 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
850 struct mm_struct *mm = current->mm; 833 struct mm_struct *mm = current->mm;
851 struct page **user_pages; 834 struct page **user_pages;
852 ssize_t remain; 835 ssize_t remain;
@@ -894,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
894 if (ret != 0) 877 if (ret != 0)
895 goto fail_put_pages; 878 goto fail_put_pages;
896 879
897 obj_priv = obj->driver_private; 880 obj_priv = to_intel_bo(obj);
898 offset = args->offset; 881 offset = args->offset;
899 obj_priv->dirty = 1; 882 obj_priv->dirty = 1;
900 883
@@ -969,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
969 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 952 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
970 if (obj == NULL) 953 if (obj == NULL)
971 return -EBADF; 954 return -EBADF;
972 obj_priv = obj->driver_private; 955 obj_priv = to_intel_bo(obj);
973 956
974 /* Bounds check destination. 957 /* Bounds check destination.
975 * 958 *
@@ -977,7 +960,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
977 */ 960 */
978 if (args->offset > obj->size || args->size > obj->size || 961 if (args->offset > obj->size || args->size > obj->size ||
979 args->offset + args->size > obj->size) { 962 args->offset + args->size > obj->size) {
980 drm_gem_object_unreference(obj); 963 drm_gem_object_unreference_unlocked(obj);
981 return -EINVAL; 964 return -EINVAL;
982 } 965 }
983 966
@@ -1011,7 +994,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1011 DRM_INFO("pwrite failed %d\n", ret); 994 DRM_INFO("pwrite failed %d\n", ret);
1012#endif 995#endif
1013 996
1014 drm_gem_object_unreference(obj); 997 drm_gem_object_unreference_unlocked(obj);
1015 998
1016 return ret; 999 return ret;
1017} 1000}
@@ -1051,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1051 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1034 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1052 if (obj == NULL) 1035 if (obj == NULL)
1053 return -EBADF; 1036 return -EBADF;
1054 obj_priv = obj->driver_private; 1037 obj_priv = to_intel_bo(obj);
1055 1038
1056 mutex_lock(&dev->struct_mutex); 1039 mutex_lock(&dev->struct_mutex);
1057 1040
@@ -1113,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1113 DRM_INFO("%s: sw_finish %d (%p %zd)\n", 1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1114 __func__, args->handle, obj, obj->size); 1097 __func__, args->handle, obj, obj->size);
1115#endif 1098#endif
1116 obj_priv = obj->driver_private; 1099 obj_priv = to_intel_bo(obj);
1117 1100
1118 /* Pinned buffers may be scanout, so flush the cache */ 1101 /* Pinned buffers may be scanout, so flush the cache */
1119 if (obj_priv->pin_count) 1102 if (obj_priv->pin_count)
@@ -1154,9 +1137,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1154 PROT_READ | PROT_WRITE, MAP_SHARED, 1137 PROT_READ | PROT_WRITE, MAP_SHARED,
1155 args->offset); 1138 args->offset);
1156 up_write(&current->mm->mmap_sem); 1139 up_write(&current->mm->mmap_sem);
1157 mutex_lock(&dev->struct_mutex); 1140 drm_gem_object_unreference_unlocked(obj);
1158 drm_gem_object_unreference(obj);
1159 mutex_unlock(&dev->struct_mutex);
1160 if (IS_ERR((void *)addr)) 1141 if (IS_ERR((void *)addr))
1161 return addr; 1142 return addr;
1162 1143
@@ -1186,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1186 struct drm_gem_object *obj = vma->vm_private_data; 1167 struct drm_gem_object *obj = vma->vm_private_data;
1187 struct drm_device *dev = obj->dev; 1168 struct drm_device *dev = obj->dev;
1188 struct drm_i915_private *dev_priv = dev->dev_private; 1169 struct drm_i915_private *dev_priv = dev->dev_private;
1189 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1170 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1190 pgoff_t page_offset; 1171 pgoff_t page_offset;
1191 unsigned long pfn; 1172 unsigned long pfn;
1192 int ret = 0; 1173 int ret = 0;
@@ -1253,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1253{ 1234{
1254 struct drm_device *dev = obj->dev; 1235 struct drm_device *dev = obj->dev;
1255 struct drm_gem_mm *mm = dev->mm_private; 1236 struct drm_gem_mm *mm = dev->mm_private;
1256 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1237 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1257 struct drm_map_list *list; 1238 struct drm_map_list *list;
1258 struct drm_local_map *map; 1239 struct drm_local_map *map;
1259 int ret = 0; 1240 int ret = 0;
@@ -1288,6 +1269,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1288 list->hash.key = list->file_offset_node->start; 1269 list->hash.key = list->file_offset_node->start;
1289 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { 1270 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1290 DRM_ERROR("failed to add to map hash\n"); 1271 DRM_ERROR("failed to add to map hash\n");
1272 ret = -ENOMEM;
1291 goto out_free_mm; 1273 goto out_free_mm;
1292 } 1274 }
1293 1275
@@ -1309,7 +1291,7 @@ out_free_list:
1309 * i915_gem_release_mmap - remove physical page mappings 1291 * i915_gem_release_mmap - remove physical page mappings
1310 * @obj: obj in question 1292 * @obj: obj in question
1311 * 1293 *
1312 * Preserve the reservation of the mmaping with the DRM core code, but 1294 * Preserve the reservation of the mmapping with the DRM core code, but
1313 * relinquish ownership of the pages back to the system. 1295 * relinquish ownership of the pages back to the system.
1314 * 1296 *
1315 * It is vital that we remove the page mapping if we have mapped a tiled 1297 * It is vital that we remove the page mapping if we have mapped a tiled
@@ -1323,7 +1305,7 @@ void
1323i915_gem_release_mmap(struct drm_gem_object *obj) 1305i915_gem_release_mmap(struct drm_gem_object *obj)
1324{ 1306{
1325 struct drm_device *dev = obj->dev; 1307 struct drm_device *dev = obj->dev;
1326 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1308 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1327 1309
1328 if (dev->dev_mapping) 1310 if (dev->dev_mapping)
1329 unmap_mapping_range(dev->dev_mapping, 1311 unmap_mapping_range(dev->dev_mapping,
@@ -1334,7 +1316,7 @@ static void
1334i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1316i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1335{ 1317{
1336 struct drm_device *dev = obj->dev; 1318 struct drm_device *dev = obj->dev;
1337 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1319 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1338 struct drm_gem_mm *mm = dev->mm_private; 1320 struct drm_gem_mm *mm = dev->mm_private;
1339 struct drm_map_list *list; 1321 struct drm_map_list *list;
1340 1322
@@ -1365,7 +1347,7 @@ static uint32_t
1365i915_gem_get_gtt_alignment(struct drm_gem_object *obj) 1347i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1366{ 1348{
1367 struct drm_device *dev = obj->dev; 1349 struct drm_device *dev = obj->dev;
1368 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1350 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1369 int start, i; 1351 int start, i;
1370 1352
1371 /* 1353 /*
@@ -1424,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1424 1406
1425 mutex_lock(&dev->struct_mutex); 1407 mutex_lock(&dev->struct_mutex);
1426 1408
1427 obj_priv = obj->driver_private; 1409 obj_priv = to_intel_bo(obj);
1428 1410
1429 if (obj_priv->madv != I915_MADV_WILLNEED) { 1411 if (obj_priv->madv != I915_MADV_WILLNEED) {
1430 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1468,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1468void 1450void
1469i915_gem_object_put_pages(struct drm_gem_object *obj) 1451i915_gem_object_put_pages(struct drm_gem_object *obj)
1470{ 1452{
1471 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1453 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1472 int page_count = obj->size / PAGE_SIZE; 1454 int page_count = obj->size / PAGE_SIZE;
1473 int i; 1455 int i;
1474 1456
@@ -1485,9 +1467,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1485 obj_priv->dirty = 0; 1467 obj_priv->dirty = 0;
1486 1468
1487 for (i = 0; i < page_count; i++) { 1469 for (i = 0; i < page_count; i++) {
1488 if (obj_priv->pages[i] == NULL)
1489 break;
1490
1491 if (obj_priv->dirty) 1470 if (obj_priv->dirty)
1492 set_page_dirty(obj_priv->pages[i]); 1471 set_page_dirty(obj_priv->pages[i]);
1493 1472
@@ -1507,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1507{ 1486{
1508 struct drm_device *dev = obj->dev; 1487 struct drm_device *dev = obj->dev;
1509 drm_i915_private_t *dev_priv = dev->dev_private; 1488 drm_i915_private_t *dev_priv = dev->dev_private;
1510 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1511 1490
1512 /* Add a reference if we're newly entering the active list. */ 1491 /* Add a reference if we're newly entering the active list. */
1513 if (!obj_priv->active) { 1492 if (!obj_priv->active) {
@@ -1527,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1527{ 1506{
1528 struct drm_device *dev = obj->dev; 1507 struct drm_device *dev = obj->dev;
1529 drm_i915_private_t *dev_priv = dev->dev_private; 1508 drm_i915_private_t *dev_priv = dev->dev_private;
1530 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1531 1510
1532 BUG_ON(!obj_priv->active); 1511 BUG_ON(!obj_priv->active);
1533 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 1512 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1538,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1538static void 1517static void
1539i915_gem_object_truncate(struct drm_gem_object *obj) 1518i915_gem_object_truncate(struct drm_gem_object *obj)
1540{ 1519{
1541 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1520 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1542 struct inode *inode; 1521 struct inode *inode;
1543 1522
1544 inode = obj->filp->f_path.dentry->d_inode; 1523 inode = obj->filp->f_path.dentry->d_inode;
@@ -1559,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1559{ 1538{
1560 struct drm_device *dev = obj->dev; 1539 struct drm_device *dev = obj->dev;
1561 drm_i915_private_t *dev_priv = dev->dev_private; 1540 drm_i915_private_t *dev_priv = dev->dev_private;
1562 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1541 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1563 1542
1564 i915_verify_inactive(dev, __FILE__, __LINE__); 1543 i915_verify_inactive(dev, __FILE__, __LINE__);
1565 if (obj_priv->pin_count != 0) 1544 if (obj_priv->pin_count != 0)
@@ -1567,6 +1546,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1567 else 1546 else
1568 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1547 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1569 1548
1549 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1550
1570 obj_priv->last_rendering_seqno = 0; 1551 obj_priv->last_rendering_seqno = 0;
1571 if (obj_priv->active) { 1552 if (obj_priv->active) {
1572 obj_priv->active = 0; 1553 obj_priv->active = 0;
@@ -1575,6 +1556,45 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1575 i915_verify_inactive(dev, __FILE__, __LINE__); 1556 i915_verify_inactive(dev, __FILE__, __LINE__);
1576} 1557}
1577 1558
1559static void
1560i915_gem_process_flushing_list(struct drm_device *dev,
1561 uint32_t flush_domains, uint32_t seqno)
1562{
1563 drm_i915_private_t *dev_priv = dev->dev_private;
1564 struct drm_i915_gem_object *obj_priv, *next;
1565
1566 list_for_each_entry_safe(obj_priv, next,
1567 &dev_priv->mm.gpu_write_list,
1568 gpu_write_list) {
1569 struct drm_gem_object *obj = obj_priv->obj;
1570
1571 if ((obj->write_domain & flush_domains) ==
1572 obj->write_domain) {
1573 uint32_t old_write_domain = obj->write_domain;
1574
1575 obj->write_domain = 0;
1576 list_del_init(&obj_priv->gpu_write_list);
1577 i915_gem_object_move_to_active(obj, seqno);
1578
1579 /* update the fence lru list */
1580 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1581 list_move_tail(&obj_priv->fence_list,
1582 &dev_priv->mm.fence_list);
1583
1584 trace_i915_gem_object_change_domain(obj,
1585 obj->read_domains,
1586 old_write_domain);
1587 }
1588 }
1589}
1590
1591#define PIPE_CONTROL_FLUSH(addr) \
1592 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1593 PIPE_CONTROL_DEPTH_STALL); \
1594 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1595 OUT_RING(0); \
1596 OUT_RING(0); \
1597
1578/** 1598/**
1579 * Creates a new sequence number, emitting a write of it to the status page 1599 * Creates a new sequence number, emitting a write of it to the status page
1580 * plus an interrupt, which will trigger i915_user_interrupt_handler. 1600 * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1583,7 +1603,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1583 * 1603 *
1584 * Returned sequence numbers are nonzero on success. 1604 * Returned sequence numbers are nonzero on success.
1585 */ 1605 */
1586static uint32_t 1606uint32_t
1587i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1607i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1588 uint32_t flush_domains) 1608 uint32_t flush_domains)
1589{ 1609{
@@ -1609,15 +1629,49 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1609 if (dev_priv->mm.next_gem_seqno == 0) 1629 if (dev_priv->mm.next_gem_seqno == 0)
1610 dev_priv->mm.next_gem_seqno++; 1630 dev_priv->mm.next_gem_seqno++;
1611 1631
1612 BEGIN_LP_RING(4); 1632 if (HAS_PIPE_CONTROL(dev)) {
1613 OUT_RING(MI_STORE_DWORD_INDEX); 1633 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1614 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1615 OUT_RING(seqno);
1616 1634
1617 OUT_RING(MI_USER_INTERRUPT); 1635 /*
1618 ADVANCE_LP_RING(); 1636 * Workaround qword write incoherence by flushing the
1637 * PIPE_NOTIFY buffers out to memory before requesting
1638 * an interrupt.
1639 */
1640 BEGIN_LP_RING(32);
1641 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1642 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1643 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1644 OUT_RING(seqno);
1645 OUT_RING(0);
1646 PIPE_CONTROL_FLUSH(scratch_addr);
1647 scratch_addr += 128; /* write to separate cachelines */
1648 PIPE_CONTROL_FLUSH(scratch_addr);
1649 scratch_addr += 128;
1650 PIPE_CONTROL_FLUSH(scratch_addr);
1651 scratch_addr += 128;
1652 PIPE_CONTROL_FLUSH(scratch_addr);
1653 scratch_addr += 128;
1654 PIPE_CONTROL_FLUSH(scratch_addr);
1655 scratch_addr += 128;
1656 PIPE_CONTROL_FLUSH(scratch_addr);
1657 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1658 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1659 PIPE_CONTROL_NOTIFY);
1660 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1661 OUT_RING(seqno);
1662 OUT_RING(0);
1663 ADVANCE_LP_RING();
1664 } else {
1665 BEGIN_LP_RING(4);
1666 OUT_RING(MI_STORE_DWORD_INDEX);
1667 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1668 OUT_RING(seqno);
1669
1670 OUT_RING(MI_USER_INTERRUPT);
1671 ADVANCE_LP_RING();
1672 }
1619 1673
1620 DRM_DEBUG("%d\n", seqno); 1674 DRM_DEBUG_DRIVER("%d\n", seqno);
1621 1675
1622 request->seqno = seqno; 1676 request->seqno = seqno;
1623 request->emitted_jiffies = jiffies; 1677 request->emitted_jiffies = jiffies;
@@ -1633,27 +1687,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1633 /* Associate any objects on the flushing list matching the write 1687 /* Associate any objects on the flushing list matching the write
1634 * domain we're flushing with our flush. 1688 * domain we're flushing with our flush.
1635 */ 1689 */
1636 if (flush_domains != 0) { 1690 if (flush_domains != 0)
1637 struct drm_i915_gem_object *obj_priv, *next; 1691 i915_gem_process_flushing_list(dev, flush_domains, seqno);
1638
1639 list_for_each_entry_safe(obj_priv, next,
1640 &dev_priv->mm.flushing_list, list) {
1641 struct drm_gem_object *obj = obj_priv->obj;
1642
1643 if ((obj->write_domain & flush_domains) ==
1644 obj->write_domain) {
1645 uint32_t old_write_domain = obj->write_domain;
1646
1647 obj->write_domain = 0;
1648 i915_gem_object_move_to_active(obj, seqno);
1649
1650 trace_i915_gem_object_change_domain(obj,
1651 obj->read_domains,
1652 old_write_domain);
1653 }
1654 }
1655
1656 }
1657 1692
1658 if (!dev_priv->mm.suspended) { 1693 if (!dev_priv->mm.suspended) {
1659 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1694 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1758,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
1758{ 1793{
1759 drm_i915_private_t *dev_priv = dev->dev_private; 1794 drm_i915_private_t *dev_priv = dev->dev_private;
1760 1795
1761 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 1796 if (HAS_PIPE_CONTROL(dev))
1797 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1798 else
1799 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1762} 1800}
1763 1801
1764/** 1802/**
@@ -1820,12 +1858,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
1820 mutex_unlock(&dev->struct_mutex); 1858 mutex_unlock(&dev->struct_mutex);
1821} 1859}
1822 1860
1823/** 1861int
1824 * Waits for a sequence number to be signaled, and cleans up the 1862i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1825 * request and object lists appropriately for that event.
1826 */
1827static int
1828i915_wait_request(struct drm_device *dev, uint32_t seqno)
1829{ 1863{
1830 drm_i915_private_t *dev_priv = dev->dev_private; 1864 drm_i915_private_t *dev_priv = dev->dev_private;
1831 u32 ier; 1865 u32 ier;
@@ -1837,7 +1871,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1837 return -EIO; 1871 return -EIO;
1838 1872
1839 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1873 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1840 if (IS_IGDNG(dev)) 1874 if (HAS_PCH_SPLIT(dev))
1841 ier = I915_READ(DEIER) | I915_READ(GTIER); 1875 ier = I915_READ(DEIER) | I915_READ(GTIER);
1842 else 1876 else
1843 ier = I915_READ(IER); 1877 ier = I915_READ(IER);
@@ -1852,10 +1886,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1852 1886
1853 dev_priv->mm.waiting_gem_seqno = seqno; 1887 dev_priv->mm.waiting_gem_seqno = seqno;
1854 i915_user_irq_get(dev); 1888 i915_user_irq_get(dev);
1855 ret = wait_event_interruptible(dev_priv->irq_queue, 1889 if (interruptible)
1856 i915_seqno_passed(i915_get_gem_seqno(dev), 1890 ret = wait_event_interruptible(dev_priv->irq_queue,
1857 seqno) || 1891 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1858 atomic_read(&dev_priv->mm.wedged)); 1892 atomic_read(&dev_priv->mm.wedged));
1893 else
1894 wait_event(dev_priv->irq_queue,
1895 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1896 atomic_read(&dev_priv->mm.wedged));
1897
1859 i915_user_irq_put(dev); 1898 i915_user_irq_put(dev);
1860 dev_priv->mm.waiting_gem_seqno = 0; 1899 dev_priv->mm.waiting_gem_seqno = 0;
1861 1900
@@ -1879,6 +1918,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1879 return ret; 1918 return ret;
1880} 1919}
1881 1920
1921/**
1922 * Waits for a sequence number to be signaled, and cleans up the
1923 * request and object lists appropriately for that event.
1924 */
1925static int
1926i915_wait_request(struct drm_device *dev, uint32_t seqno)
1927{
1928 return i915_do_wait_request(dev, seqno, 1);
1929}
1930
1882static void 1931static void
1883i915_gem_flush(struct drm_device *dev, 1932i915_gem_flush(struct drm_device *dev,
1884 uint32_t invalidate_domains, 1933 uint32_t invalidate_domains,
@@ -1947,7 +1996,7 @@ i915_gem_flush(struct drm_device *dev,
1947#endif 1996#endif
1948 BEGIN_LP_RING(2); 1997 BEGIN_LP_RING(2);
1949 OUT_RING(cmd); 1998 OUT_RING(cmd);
1950 OUT_RING(0); /* noop */ 1999 OUT_RING(MI_NOOP);
1951 ADVANCE_LP_RING(); 2000 ADVANCE_LP_RING();
1952 } 2001 }
1953} 2002}
@@ -1960,7 +2009,7 @@ static int
1960i915_gem_object_wait_rendering(struct drm_gem_object *obj) 2009i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1961{ 2010{
1962 struct drm_device *dev = obj->dev; 2011 struct drm_device *dev = obj->dev;
1963 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2012 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1964 int ret; 2013 int ret;
1965 2014
1966 /* This function only exists to support waiting for existing rendering, 2015 /* This function only exists to support waiting for existing rendering,
@@ -1991,7 +2040,8 @@ int
1991i915_gem_object_unbind(struct drm_gem_object *obj) 2040i915_gem_object_unbind(struct drm_gem_object *obj)
1992{ 2041{
1993 struct drm_device *dev = obj->dev; 2042 struct drm_device *dev = obj->dev;
1994 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2043 drm_i915_private_t *dev_priv = dev->dev_private;
2044 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1995 int ret = 0; 2045 int ret = 0;
1996 2046
1997#if WATCH_BUF 2047#if WATCH_BUF
@@ -2009,9 +2059,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2009 /* blow away mappings if mapped through GTT */ 2059 /* blow away mappings if mapped through GTT */
2010 i915_gem_release_mmap(obj); 2060 i915_gem_release_mmap(obj);
2011 2061
2012 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2013 i915_gem_clear_fence_reg(obj);
2014
2015 /* Move the object to the CPU domain to ensure that 2062 /* Move the object to the CPU domain to ensure that
2016 * any possible CPU writes while it's not in the GTT 2063 * any possible CPU writes while it's not in the GTT
2017 * are flushed when we go to remap it. This will 2064 * are flushed when we go to remap it. This will
@@ -2027,6 +2074,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2027 2074
2028 BUG_ON(obj_priv->active); 2075 BUG_ON(obj_priv->active);
2029 2076
2077 /* release the fence reg _after_ flushing */
2078 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2079 i915_gem_clear_fence_reg(obj);
2080
2030 if (obj_priv->agp_mem != NULL) { 2081 if (obj_priv->agp_mem != NULL) {
2031 drm_unbind_agp(obj_priv->agp_mem); 2082 drm_unbind_agp(obj_priv->agp_mem);
2032 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2083 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2045,8 +2096,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2045 } 2096 }
2046 2097
2047 /* Remove ourselves from the LRU list if present. */ 2098 /* Remove ourselves from the LRU list if present. */
2099 spin_lock(&dev_priv->mm.active_list_lock);
2048 if (!list_empty(&obj_priv->list)) 2100 if (!list_empty(&obj_priv->list))
2049 list_del_init(&obj_priv->list); 2101 list_del_init(&obj_priv->list);
2102 spin_unlock(&dev_priv->mm.active_list_lock);
2050 2103
2051 if (i915_gem_object_is_purgeable(obj_priv)) 2104 if (i915_gem_object_is_purgeable(obj_priv))
2052 i915_gem_object_truncate(obj); 2105 i915_gem_object_truncate(obj);
@@ -2084,10 +2137,33 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2084} 2137}
2085 2138
2086static int 2139static int
2087i915_gem_evict_everything(struct drm_device *dev) 2140i915_gpu_idle(struct drm_device *dev)
2088{ 2141{
2089 drm_i915_private_t *dev_priv = dev->dev_private; 2142 drm_i915_private_t *dev_priv = dev->dev_private;
2143 bool lists_empty;
2090 uint32_t seqno; 2144 uint32_t seqno;
2145
2146 spin_lock(&dev_priv->mm.active_list_lock);
2147 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
2148 list_empty(&dev_priv->mm.active_list);
2149 spin_unlock(&dev_priv->mm.active_list_lock);
2150
2151 if (lists_empty)
2152 return 0;
2153
2154 /* Flush everything onto the inactive list. */
2155 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2156 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2157 if (seqno == 0)
2158 return -ENOMEM;
2159
2160 return i915_wait_request(dev, seqno);
2161}
2162
2163static int
2164i915_gem_evict_everything(struct drm_device *dev)
2165{
2166 drm_i915_private_t *dev_priv = dev->dev_private;
2091 int ret; 2167 int ret;
2092 bool lists_empty; 2168 bool lists_empty;
2093 2169
@@ -2101,15 +2177,12 @@ i915_gem_evict_everything(struct drm_device *dev)
2101 return -ENOSPC; 2177 return -ENOSPC;
2102 2178
2103 /* Flush everything (on to the inactive lists) and evict */ 2179 /* Flush everything (on to the inactive lists) and evict */
2104 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2180 ret = i915_gpu_idle(dev);
2105 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2106 if (seqno == 0)
2107 return -ENOMEM;
2108
2109 ret = i915_wait_request(dev, seqno);
2110 if (ret) 2181 if (ret)
2111 return ret; 2182 return ret;
2112 2183
2184 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2185
2113 ret = i915_gem_evict_from_inactive_list(dev); 2186 ret = i915_gem_evict_from_inactive_list(dev);
2114 if (ret) 2187 if (ret)
2115 return ret; 2188 return ret;
@@ -2144,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2144#if WATCH_LRU 2217#if WATCH_LRU
2145 DRM_INFO("%s: evicting %p\n", __func__, obj); 2218 DRM_INFO("%s: evicting %p\n", __func__, obj);
2146#endif 2219#endif
2147 obj_priv = obj->driver_private; 2220 obj_priv = to_intel_bo(obj);
2148 BUG_ON(obj_priv->pin_count != 0); 2221 BUG_ON(obj_priv->pin_count != 0);
2149 BUG_ON(obj_priv->active); 2222 BUG_ON(obj_priv->active);
2150 2223
@@ -2196,11 +2269,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2196 seqno = i915_add_request(dev, NULL, obj->write_domain); 2269 seqno = i915_add_request(dev, NULL, obj->write_domain);
2197 if (seqno == 0) 2270 if (seqno == 0)
2198 return -ENOMEM; 2271 return -ENOMEM;
2199
2200 ret = i915_wait_request(dev, seqno);
2201 if (ret)
2202 return ret;
2203
2204 continue; 2272 continue;
2205 } 2273 }
2206 } 2274 }
@@ -2217,14 +2285,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2217} 2285}
2218 2286
2219int 2287int
2220i915_gem_object_get_pages(struct drm_gem_object *obj) 2288i915_gem_object_get_pages(struct drm_gem_object *obj,
2289 gfp_t gfpmask)
2221{ 2290{
2222 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2291 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2223 int page_count, i; 2292 int page_count, i;
2224 struct address_space *mapping; 2293 struct address_space *mapping;
2225 struct inode *inode; 2294 struct inode *inode;
2226 struct page *page; 2295 struct page *page;
2227 int ret;
2228 2296
2229 if (obj_priv->pages_refcount++ != 0) 2297 if (obj_priv->pages_refcount++ != 0)
2230 return 0; 2298 return 0;
@@ -2243,12 +2311,13 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2243 inode = obj->filp->f_path.dentry->d_inode; 2311 inode = obj->filp->f_path.dentry->d_inode;
2244 mapping = inode->i_mapping; 2312 mapping = inode->i_mapping;
2245 for (i = 0; i < page_count; i++) { 2313 for (i = 0; i < page_count; i++) {
2246 page = read_mapping_page(mapping, i, NULL); 2314 page = read_cache_page_gfp(mapping, i,
2247 if (IS_ERR(page)) { 2315 mapping_gfp_mask (mapping) |
2248 ret = PTR_ERR(page); 2316 __GFP_COLD |
2249 i915_gem_object_put_pages(obj); 2317 gfpmask);
2250 return ret; 2318 if (IS_ERR(page))
2251 } 2319 goto err_pages;
2320
2252 obj_priv->pages[i] = page; 2321 obj_priv->pages[i] = page;
2253 } 2322 }
2254 2323
@@ -2256,6 +2325,37 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2256 i915_gem_object_do_bit_17_swizzle(obj); 2325 i915_gem_object_do_bit_17_swizzle(obj);
2257 2326
2258 return 0; 2327 return 0;
2328
2329err_pages:
2330 while (i--)
2331 page_cache_release(obj_priv->pages[i]);
2332
2333 drm_free_large(obj_priv->pages);
2334 obj_priv->pages = NULL;
2335 obj_priv->pages_refcount--;
2336 return PTR_ERR(page);
2337}
2338
2339static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2340{
2341 struct drm_gem_object *obj = reg->obj;
2342 struct drm_device *dev = obj->dev;
2343 drm_i915_private_t *dev_priv = dev->dev_private;
2344 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2345 int regnum = obj_priv->fence_reg;
2346 uint64_t val;
2347
2348 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2349 0xfffff000) << 32;
2350 val |= obj_priv->gtt_offset & 0xfffff000;
2351 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2352 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2353
2354 if (obj_priv->tiling_mode == I915_TILING_Y)
2355 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2356 val |= I965_FENCE_REG_VALID;
2357
2358 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2259} 2359}
2260 2360
2261static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) 2361static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2263,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2263 struct drm_gem_object *obj = reg->obj; 2363 struct drm_gem_object *obj = reg->obj;
2264 struct drm_device *dev = obj->dev; 2364 struct drm_device *dev = obj->dev;
2265 drm_i915_private_t *dev_priv = dev->dev_private; 2365 drm_i915_private_t *dev_priv = dev->dev_private;
2266 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2366 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2267 int regnum = obj_priv->fence_reg; 2367 int regnum = obj_priv->fence_reg;
2268 uint64_t val; 2368 uint64_t val;
2269 2369
@@ -2283,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2283 struct drm_gem_object *obj = reg->obj; 2383 struct drm_gem_object *obj = reg->obj;
2284 struct drm_device *dev = obj->dev; 2384 struct drm_device *dev = obj->dev;
2285 drm_i915_private_t *dev_priv = dev->dev_private; 2385 drm_i915_private_t *dev_priv = dev->dev_private;
2286 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2386 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2287 int regnum = obj_priv->fence_reg; 2387 int regnum = obj_priv->fence_reg;
2288 int tile_width; 2388 int tile_width;
2289 uint32_t fence_reg, val; 2389 uint32_t fence_reg, val;
@@ -2306,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2306 pitch_val = obj_priv->stride / tile_width; 2406 pitch_val = obj_priv->stride / tile_width;
2307 pitch_val = ffs(pitch_val) - 1; 2407 pitch_val = ffs(pitch_val) - 1;
2308 2408
2409 if (obj_priv->tiling_mode == I915_TILING_Y &&
2410 HAS_128_BYTE_Y_TILING(dev))
2411 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2412 else
2413 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2414
2309 val = obj_priv->gtt_offset; 2415 val = obj_priv->gtt_offset;
2310 if (obj_priv->tiling_mode == I915_TILING_Y) 2416 if (obj_priv->tiling_mode == I915_TILING_Y)
2311 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2417 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2325,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2325 struct drm_gem_object *obj = reg->obj; 2431 struct drm_gem_object *obj = reg->obj;
2326 struct drm_device *dev = obj->dev; 2432 struct drm_device *dev = obj->dev;
2327 drm_i915_private_t *dev_priv = dev->dev_private; 2433 drm_i915_private_t *dev_priv = dev->dev_private;
2328 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2434 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2329 int regnum = obj_priv->fence_reg; 2435 int regnum = obj_priv->fence_reg;
2330 uint32_t val; 2436 uint32_t val;
2331 uint32_t pitch_val; 2437 uint32_t pitch_val;
@@ -2354,6 +2460,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2354 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2460 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2355} 2461}
2356 2462
2463static int i915_find_fence_reg(struct drm_device *dev)
2464{
2465 struct drm_i915_fence_reg *reg = NULL;
2466 struct drm_i915_gem_object *obj_priv = NULL;
2467 struct drm_i915_private *dev_priv = dev->dev_private;
2468 struct drm_gem_object *obj = NULL;
2469 int i, avail, ret;
2470
2471 /* First try to find a free reg */
2472 avail = 0;
2473 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2474 reg = &dev_priv->fence_regs[i];
2475 if (!reg->obj)
2476 return i;
2477
2478 obj_priv = to_intel_bo(reg->obj);
2479 if (!obj_priv->pin_count)
2480 avail++;
2481 }
2482
2483 if (avail == 0)
2484 return -ENOSPC;
2485
2486 /* None available, try to steal one or wait for a user to finish */
2487 i = I915_FENCE_REG_NONE;
2488 list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
2489 fence_list) {
2490 obj = obj_priv->obj;
2491
2492 if (obj_priv->pin_count)
2493 continue;
2494
2495 /* found one! */
2496 i = obj_priv->fence_reg;
2497 break;
2498 }
2499
2500 BUG_ON(i == I915_FENCE_REG_NONE);
2501
2502 /* We only have a reference on obj from the active list. put_fence_reg
2503 * might drop that one, causing a use-after-free in it. So hold a
2504 * private reference to obj like the other callers of put_fence_reg
2505 * (set_tiling ioctl) do. */
2506 drm_gem_object_reference(obj);
2507 ret = i915_gem_object_put_fence_reg(obj);
2508 drm_gem_object_unreference(obj);
2509 if (ret != 0)
2510 return ret;
2511
2512 return i;
2513}
2514
2357/** 2515/**
2358 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2516 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2359 * @obj: object to map through a fence reg 2517 * @obj: object to map through a fence reg
@@ -2372,10 +2530,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2372{ 2530{
2373 struct drm_device *dev = obj->dev; 2531 struct drm_device *dev = obj->dev;
2374 struct drm_i915_private *dev_priv = dev->dev_private; 2532 struct drm_i915_private *dev_priv = dev->dev_private;
2375 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2533 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2376 struct drm_i915_fence_reg *reg = NULL; 2534 struct drm_i915_fence_reg *reg = NULL;
2377 struct drm_i915_gem_object *old_obj_priv = NULL; 2535 int ret;
2378 int i, ret, avail;
2379 2536
2380 /* Just update our place in the LRU if our fence is getting used. */ 2537 /* Just update our place in the LRU if our fence is getting used. */
2381 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 2538 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2403,86 +2560,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2403 break; 2560 break;
2404 } 2561 }
2405 2562
2406 /* First try to find a free reg */ 2563 ret = i915_find_fence_reg(dev);
2407 avail = 0; 2564 if (ret < 0)
2408 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2565 return ret;
2409 reg = &dev_priv->fence_regs[i];
2410 if (!reg->obj)
2411 break;
2412
2413 old_obj_priv = reg->obj->driver_private;
2414 if (!old_obj_priv->pin_count)
2415 avail++;
2416 }
2417
2418 /* None available, try to steal one or wait for a user to finish */
2419 if (i == dev_priv->num_fence_regs) {
2420 struct drm_gem_object *old_obj = NULL;
2421
2422 if (avail == 0)
2423 return -ENOSPC;
2424
2425 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2426 fence_list) {
2427 old_obj = old_obj_priv->obj;
2428
2429 if (old_obj_priv->pin_count)
2430 continue;
2431
2432 /* Take a reference, as otherwise the wait_rendering
2433 * below may cause the object to get freed out from
2434 * under us.
2435 */
2436 drm_gem_object_reference(old_obj);
2437
2438 /* i915 uses fences for GPU access to tiled buffers */
2439 if (IS_I965G(dev) || !old_obj_priv->active)
2440 break;
2441
2442 /* This brings the object to the head of the LRU if it
2443 * had been written to. The only way this should
2444 * result in us waiting longer than the expected
2445 * optimal amount of time is if there was a
2446 * fence-using buffer later that was read-only.
2447 */
2448 i915_gem_object_flush_gpu_write_domain(old_obj);
2449 ret = i915_gem_object_wait_rendering(old_obj);
2450 if (ret != 0) {
2451 drm_gem_object_unreference(old_obj);
2452 return ret;
2453 }
2454
2455 break;
2456 }
2457
2458 /*
2459 * Zap this virtual mapping so we can set up a fence again
2460 * for this object next time we need it.
2461 */
2462 i915_gem_release_mmap(old_obj);
2463
2464 i = old_obj_priv->fence_reg;
2465 reg = &dev_priv->fence_regs[i];
2466
2467 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2468 list_del_init(&old_obj_priv->fence_list);
2469
2470 drm_gem_object_unreference(old_obj);
2471 }
2472 2566
2473 obj_priv->fence_reg = i; 2567 obj_priv->fence_reg = ret;
2568 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2474 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); 2569 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2475 2570
2476 reg->obj = obj; 2571 reg->obj = obj;
2477 2572
2478 if (IS_I965G(dev)) 2573 if (IS_GEN6(dev))
2574 sandybridge_write_fence_reg(reg);
2575 else if (IS_I965G(dev))
2479 i965_write_fence_reg(reg); 2576 i965_write_fence_reg(reg);
2480 else if (IS_I9XX(dev)) 2577 else if (IS_I9XX(dev))
2481 i915_write_fence_reg(reg); 2578 i915_write_fence_reg(reg);
2482 else 2579 else
2483 i830_write_fence_reg(reg); 2580 i830_write_fence_reg(reg);
2484 2581
2485 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); 2582 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2583 obj_priv->tiling_mode);
2486 2584
2487 return 0; 2585 return 0;
2488} 2586}
@@ -2499,11 +2597,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2499{ 2597{
2500 struct drm_device *dev = obj->dev; 2598 struct drm_device *dev = obj->dev;
2501 drm_i915_private_t *dev_priv = dev->dev_private; 2599 drm_i915_private_t *dev_priv = dev->dev_private;
2502 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2600 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2503 2601
2504 if (IS_I965G(dev)) 2602 if (IS_GEN6(dev)) {
2603 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2604 (obj_priv->fence_reg * 8), 0);
2605 } else if (IS_I965G(dev)) {
2505 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2606 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2506 else { 2607 } else {
2507 uint32_t fence_reg; 2608 uint32_t fence_reg;
2508 2609
2509 if (obj_priv->fence_reg < 8) 2610 if (obj_priv->fence_reg < 8)
@@ -2532,11 +2633,17 @@ int
2532i915_gem_object_put_fence_reg(struct drm_gem_object *obj) 2633i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2533{ 2634{
2534 struct drm_device *dev = obj->dev; 2635 struct drm_device *dev = obj->dev;
2535 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2636 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2536 2637
2537 if (obj_priv->fence_reg == I915_FENCE_REG_NONE) 2638 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2538 return 0; 2639 return 0;
2539 2640
2641 /* If we've changed tiling, GTT-mappings of the object
2642 * need to re-fault to ensure that the correct fence register
2643 * setup is in place.
2644 */
2645 i915_gem_release_mmap(obj);
2646
2540 /* On the i915, GPU access to tiled buffers is via a fence, 2647 /* On the i915, GPU access to tiled buffers is via a fence,
2541 * therefore we must wait for any outstanding access to complete 2648 * therefore we must wait for any outstanding access to complete
2542 * before clearing the fence. 2649 * before clearing the fence.
@@ -2545,12 +2652,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2545 int ret; 2652 int ret;
2546 2653
2547 i915_gem_object_flush_gpu_write_domain(obj); 2654 i915_gem_object_flush_gpu_write_domain(obj);
2548 i915_gem_object_flush_gtt_write_domain(obj);
2549 ret = i915_gem_object_wait_rendering(obj); 2655 ret = i915_gem_object_wait_rendering(obj);
2550 if (ret != 0) 2656 if (ret != 0)
2551 return ret; 2657 return ret;
2552 } 2658 }
2553 2659
2660 i915_gem_object_flush_gtt_write_domain(obj);
2554 i915_gem_clear_fence_reg (obj); 2661 i915_gem_clear_fence_reg (obj);
2555 2662
2556 return 0; 2663 return 0;
@@ -2564,14 +2671,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2564{ 2671{
2565 struct drm_device *dev = obj->dev; 2672 struct drm_device *dev = obj->dev;
2566 drm_i915_private_t *dev_priv = dev->dev_private; 2673 drm_i915_private_t *dev_priv = dev->dev_private;
2567 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2674 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2568 struct drm_mm_node *free_space; 2675 struct drm_mm_node *free_space;
2569 bool retry_alloc = false; 2676 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2570 int ret; 2677 int ret;
2571 2678
2572 if (dev_priv->mm.suspended)
2573 return -EBUSY;
2574
2575 if (obj_priv->madv != I915_MADV_WILLNEED) { 2679 if (obj_priv->madv != I915_MADV_WILLNEED) {
2576 DRM_ERROR("Attempting to bind a purgeable object\n"); 2680 DRM_ERROR("Attempting to bind a purgeable object\n");
2577 return -EINVAL; 2681 return -EINVAL;
@@ -2613,15 +2717,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2613 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2717 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2614 obj->size, obj_priv->gtt_offset); 2718 obj->size, obj_priv->gtt_offset);
2615#endif 2719#endif
2616 if (retry_alloc) { 2720 ret = i915_gem_object_get_pages(obj, gfpmask);
2617 i915_gem_object_set_page_gfp_mask (obj,
2618 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2619 }
2620 ret = i915_gem_object_get_pages(obj);
2621 if (retry_alloc) {
2622 i915_gem_object_set_page_gfp_mask (obj,
2623 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2624 }
2625 if (ret) { 2721 if (ret) {
2626 drm_mm_put_block(obj_priv->gtt_space); 2722 drm_mm_put_block(obj_priv->gtt_space);
2627 obj_priv->gtt_space = NULL; 2723 obj_priv->gtt_space = NULL;
@@ -2631,9 +2727,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2631 ret = i915_gem_evict_something(dev, obj->size); 2727 ret = i915_gem_evict_something(dev, obj->size);
2632 if (ret) { 2728 if (ret) {
2633 /* now try to shrink everyone else */ 2729 /* now try to shrink everyone else */
2634 if (! retry_alloc) { 2730 if (gfpmask) {
2635 retry_alloc = true; 2731 gfpmask = 0;
2636 goto search_free; 2732 goto search_free;
2637 } 2733 }
2638 2734
2639 return ret; 2735 return ret;
@@ -2682,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2682void 2778void
2683i915_gem_clflush_object(struct drm_gem_object *obj) 2779i915_gem_clflush_object(struct drm_gem_object *obj)
2684{ 2780{
2685 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2781 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2686 2782
2687 /* If we don't have a page list set up, then we're not pinned 2783 /* If we don't have a page list set up, then we're not pinned
2688 * to GPU, and we can ignore the cache flush because it'll happen 2784 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2701,7 +2797,6 @@ static void
2701i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 2797i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2702{ 2798{
2703 struct drm_device *dev = obj->dev; 2799 struct drm_device *dev = obj->dev;
2704 uint32_t seqno;
2705 uint32_t old_write_domain; 2800 uint32_t old_write_domain;
2706 2801
2707 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2802 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2710,9 +2805,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2710 /* Queue the GPU write cache flushing we need. */ 2805 /* Queue the GPU write cache flushing we need. */
2711 old_write_domain = obj->write_domain; 2806 old_write_domain = obj->write_domain;
2712 i915_gem_flush(dev, 0, obj->write_domain); 2807 i915_gem_flush(dev, 0, obj->write_domain);
2713 seqno = i915_add_request(dev, NULL, obj->write_domain); 2808 (void) i915_add_request(dev, NULL, obj->write_domain);
2714 obj->write_domain = 0; 2809 BUG_ON(obj->write_domain);
2715 i915_gem_object_move_to_active(obj, seqno);
2716 2810
2717 trace_i915_gem_object_change_domain(obj, 2811 trace_i915_gem_object_change_domain(obj,
2718 obj->read_domains, 2812 obj->read_domains,
@@ -2760,6 +2854,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2760 old_write_domain); 2854 old_write_domain);
2761} 2855}
2762 2856
2857void
2858i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2859{
2860 switch (obj->write_domain) {
2861 case I915_GEM_DOMAIN_GTT:
2862 i915_gem_object_flush_gtt_write_domain(obj);
2863 break;
2864 case I915_GEM_DOMAIN_CPU:
2865 i915_gem_object_flush_cpu_write_domain(obj);
2866 break;
2867 default:
2868 i915_gem_object_flush_gpu_write_domain(obj);
2869 break;
2870 }
2871}
2872
2763/** 2873/**
2764 * Moves a single object to the GTT read, and possibly write domain. 2874 * Moves a single object to the GTT read, and possibly write domain.
2765 * 2875 *
@@ -2769,7 +2879,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2769int 2879int
2770i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2880i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2771{ 2881{
2772 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2882 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2773 uint32_t old_write_domain, old_read_domains; 2883 uint32_t old_write_domain, old_read_domains;
2774 int ret; 2884 int ret;
2775 2885
@@ -2811,6 +2921,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2811 return 0; 2921 return 0;
2812} 2922}
2813 2923
2924/*
2925 * Prepare buffer for display plane. Use uninterruptible for possible flush
2926 * wait, as in modesetting process we're not supposed to be interrupted.
2927 */
2928int
2929i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2930{
2931 struct drm_device *dev = obj->dev;
2932 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2933 uint32_t old_write_domain, old_read_domains;
2934 int ret;
2935
2936 /* Not valid to be called on unbound objects. */
2937 if (obj_priv->gtt_space == NULL)
2938 return -EINVAL;
2939
2940 i915_gem_object_flush_gpu_write_domain(obj);
2941
2942 /* Wait on any GPU rendering and flushing to occur. */
2943 if (obj_priv->active) {
2944#if WATCH_BUF
2945 DRM_INFO("%s: object %p wait for seqno %08x\n",
2946 __func__, obj, obj_priv->last_rendering_seqno);
2947#endif
2948 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2949 if (ret != 0)
2950 return ret;
2951 }
2952
2953 old_write_domain = obj->write_domain;
2954 old_read_domains = obj->read_domains;
2955
2956 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2957
2958 i915_gem_object_flush_cpu_write_domain(obj);
2959
2960 /* It should now be out of any other write domains, and we can update
2961 * the domain values for our changes.
2962 */
2963 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2964 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2965 obj->write_domain = I915_GEM_DOMAIN_GTT;
2966 obj_priv->dirty = 1;
2967
2968 trace_i915_gem_object_change_domain(obj,
2969 old_read_domains,
2970 old_write_domain);
2971
2972 return 0;
2973}
2974
2814/** 2975/**
2815 * Moves a single object to the CPU read, and possibly write domain. 2976 * Moves a single object to the CPU read, and possibly write domain.
2816 * 2977 *
@@ -2981,7 +3142,7 @@ static void
2981i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 3142i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2982{ 3143{
2983 struct drm_device *dev = obj->dev; 3144 struct drm_device *dev = obj->dev;
2984 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3145 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2985 uint32_t invalidate_domains = 0; 3146 uint32_t invalidate_domains = 0;
2986 uint32_t flush_domains = 0; 3147 uint32_t flush_domains = 0;
2987 uint32_t old_read_domains; 3148 uint32_t old_read_domains;
@@ -3066,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3066static void 3227static void
3067i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 3228i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3068{ 3229{
3069 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3070 3231
3071 if (!obj_priv->page_cpu_valid) 3232 if (!obj_priv->page_cpu_valid)
3072 return; 3233 return;
@@ -3106,7 +3267,7 @@ static int
3106i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 3267i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3107 uint64_t offset, uint64_t size) 3268 uint64_t offset, uint64_t size)
3108{ 3269{
3109 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3270 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3110 uint32_t old_read_domains; 3271 uint32_t old_read_domains;
3111 int i, ret; 3272 int i, ret;
3112 3273
@@ -3170,20 +3331,44 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3170static int 3331static int
3171i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 3332i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3172 struct drm_file *file_priv, 3333 struct drm_file *file_priv,
3173 struct drm_i915_gem_exec_object *entry, 3334 struct drm_i915_gem_exec_object2 *entry,
3174 struct drm_i915_gem_relocation_entry *relocs) 3335 struct drm_i915_gem_relocation_entry *relocs)
3175{ 3336{
3176 struct drm_device *dev = obj->dev; 3337 struct drm_device *dev = obj->dev;
3177 drm_i915_private_t *dev_priv = dev->dev_private; 3338 drm_i915_private_t *dev_priv = dev->dev_private;
3178 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3339 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3179 int i, ret; 3340 int i, ret;
3180 void __iomem *reloc_page; 3341 void __iomem *reloc_page;
3342 bool need_fence;
3343
3344 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3345 obj_priv->tiling_mode != I915_TILING_NONE;
3346
3347 /* Check fence reg constraints and rebind if necessary */
3348 if (need_fence && !i915_gem_object_fence_offset_ok(obj,
3349 obj_priv->tiling_mode))
3350 i915_gem_object_unbind(obj);
3181 3351
3182 /* Choose the GTT offset for our buffer and put it there. */ 3352 /* Choose the GTT offset for our buffer and put it there. */
3183 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3353 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3184 if (ret) 3354 if (ret)
3185 return ret; 3355 return ret;
3186 3356
3357 /*
3358 * Pre-965 chips need a fence register set up in order to
3359 * properly handle blits to/from tiled surfaces.
3360 */
3361 if (need_fence) {
3362 ret = i915_gem_object_get_fence_reg(obj);
3363 if (ret != 0) {
3364 if (ret != -EBUSY && ret != -ERESTARTSYS)
3365 DRM_ERROR("Failure to install fence: %d\n",
3366 ret);
3367 i915_gem_object_unpin(obj);
3368 return ret;
3369 }
3370 }
3371
3187 entry->offset = obj_priv->gtt_offset; 3372 entry->offset = obj_priv->gtt_offset;
3188 3373
3189 /* Apply the relocations, using the GTT aperture to avoid cache 3374 /* Apply the relocations, using the GTT aperture to avoid cache
@@ -3202,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3202 i915_gem_object_unpin(obj); 3387 i915_gem_object_unpin(obj);
3203 return -EBADF; 3388 return -EBADF;
3204 } 3389 }
3205 target_obj_priv = target_obj->driver_private; 3390 target_obj_priv = to_intel_bo(target_obj);
3206 3391
3207#if WATCH_RELOC 3392#if WATCH_RELOC
3208 DRM_INFO("%s: obj %p offset %08x target %d " 3393 DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3231,6 +3416,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3231 } 3416 }
3232 3417
3233 /* Validate that the target is in a valid r/w GPU domain */ 3418 /* Validate that the target is in a valid r/w GPU domain */
3419 if (reloc->write_domain & (reloc->write_domain - 1)) {
3420 DRM_ERROR("reloc with multiple write domains: "
3421 "obj %p target %d offset %d "
3422 "read %08x write %08x",
3423 obj, reloc->target_handle,
3424 (int) reloc->offset,
3425 reloc->read_domains,
3426 reloc->write_domain);
3427 return -EINVAL;
3428 }
3234 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3429 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3235 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3430 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3236 DRM_ERROR("reloc with read/write CPU domains: " 3431 DRM_ERROR("reloc with read/write CPU domains: "
@@ -3345,7 +3540,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3345 */ 3540 */
3346static int 3541static int
3347i915_dispatch_gem_execbuffer(struct drm_device *dev, 3542i915_dispatch_gem_execbuffer(struct drm_device *dev,
3348 struct drm_i915_gem_execbuffer *exec, 3543 struct drm_i915_gem_execbuffer2 *exec,
3349 struct drm_clip_rect *cliprects, 3544 struct drm_clip_rect *cliprects,
3350 uint64_t exec_offset) 3545 uint64_t exec_offset)
3351{ 3546{
@@ -3435,7 +3630,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3435} 3630}
3436 3631
3437static int 3632static int
3438i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, 3633i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3439 uint32_t buffer_count, 3634 uint32_t buffer_count,
3440 struct drm_i915_gem_relocation_entry **relocs) 3635 struct drm_i915_gem_relocation_entry **relocs)
3441{ 3636{
@@ -3450,8 +3645,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3450 } 3645 }
3451 3646
3452 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); 3647 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3453 if (*relocs == NULL) 3648 if (*relocs == NULL) {
3649 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3454 return -ENOMEM; 3650 return -ENOMEM;
3651 }
3455 3652
3456 for (i = 0; i < buffer_count; i++) { 3653 for (i = 0; i < buffer_count; i++) {
3457 struct drm_i915_gem_relocation_entry __user *user_relocs; 3654 struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3475,13 +3672,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3475} 3672}
3476 3673
3477static int 3674static int
3478i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, 3675i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3479 uint32_t buffer_count, 3676 uint32_t buffer_count,
3480 struct drm_i915_gem_relocation_entry *relocs) 3677 struct drm_i915_gem_relocation_entry *relocs)
3481{ 3678{
3482 uint32_t reloc_count = 0, i; 3679 uint32_t reloc_count = 0, i;
3483 int ret = 0; 3680 int ret = 0;
3484 3681
3682 if (relocs == NULL)
3683 return 0;
3684
3485 for (i = 0; i < buffer_count; i++) { 3685 for (i = 0; i < buffer_count; i++) {
3486 struct drm_i915_gem_relocation_entry __user *user_relocs; 3686 struct drm_i915_gem_relocation_entry __user *user_relocs;
3487 int unwritten; 3687 int unwritten;
@@ -3508,7 +3708,7 @@ err:
3508} 3708}
3509 3709
3510static int 3710static int
3511i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, 3711i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
3512 uint64_t exec_offset) 3712 uint64_t exec_offset)
3513{ 3713{
3514 uint32_t exec_start, exec_len; 3714 uint32_t exec_start, exec_len;
@@ -3525,22 +3725,57 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3525 return 0; 3725 return 0;
3526} 3726}
3527 3727
3728static int
3729i915_gem_wait_for_pending_flip(struct drm_device *dev,
3730 struct drm_gem_object **object_list,
3731 int count)
3732{
3733 drm_i915_private_t *dev_priv = dev->dev_private;
3734 struct drm_i915_gem_object *obj_priv;
3735 DEFINE_WAIT(wait);
3736 int i, ret = 0;
3737
3738 for (;;) {
3739 prepare_to_wait(&dev_priv->pending_flip_queue,
3740 &wait, TASK_INTERRUPTIBLE);
3741 for (i = 0; i < count; i++) {
3742 obj_priv = to_intel_bo(object_list[i]);
3743 if (atomic_read(&obj_priv->pending_flip) > 0)
3744 break;
3745 }
3746 if (i == count)
3747 break;
3748
3749 if (!signal_pending(current)) {
3750 mutex_unlock(&dev->struct_mutex);
3751 schedule();
3752 mutex_lock(&dev->struct_mutex);
3753 continue;
3754 }
3755 ret = -ERESTARTSYS;
3756 break;
3757 }
3758 finish_wait(&dev_priv->pending_flip_queue, &wait);
3759
3760 return ret;
3761}
3762
3528int 3763int
3529i915_gem_execbuffer(struct drm_device *dev, void *data, 3764i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3530 struct drm_file *file_priv) 3765 struct drm_file *file_priv,
3766 struct drm_i915_gem_execbuffer2 *args,
3767 struct drm_i915_gem_exec_object2 *exec_list)
3531{ 3768{
3532 drm_i915_private_t *dev_priv = dev->dev_private; 3769 drm_i915_private_t *dev_priv = dev->dev_private;
3533 struct drm_i915_gem_execbuffer *args = data;
3534 struct drm_i915_gem_exec_object *exec_list = NULL;
3535 struct drm_gem_object **object_list = NULL; 3770 struct drm_gem_object **object_list = NULL;
3536 struct drm_gem_object *batch_obj; 3771 struct drm_gem_object *batch_obj;
3537 struct drm_i915_gem_object *obj_priv; 3772 struct drm_i915_gem_object *obj_priv;
3538 struct drm_clip_rect *cliprects = NULL; 3773 struct drm_clip_rect *cliprects = NULL;
3539 struct drm_i915_gem_relocation_entry *relocs; 3774 struct drm_i915_gem_relocation_entry *relocs = NULL;
3540 int ret, ret2, i, pinned = 0; 3775 int ret = 0, ret2, i, pinned = 0;
3541 uint64_t exec_offset; 3776 uint64_t exec_offset;
3542 uint32_t seqno, flush_domains, reloc_index; 3777 uint32_t seqno, flush_domains, reloc_index;
3543 int pin_tries; 3778 int pin_tries, flips;
3544 3779
3545#if WATCH_EXEC 3780#if WATCH_EXEC
3546 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3781 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3551,31 +3786,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3551 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3786 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3552 return -EINVAL; 3787 return -EINVAL;
3553 } 3788 }
3554 /* Copy in the exec list from userland */ 3789 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3555 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); 3790 if (object_list == NULL) {
3556 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); 3791 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3557 if (exec_list == NULL || object_list == NULL) {
3558 DRM_ERROR("Failed to allocate exec or object list "
3559 "for %d buffers\n",
3560 args->buffer_count); 3792 args->buffer_count);
3561 ret = -ENOMEM; 3793 ret = -ENOMEM;
3562 goto pre_mutex_err; 3794 goto pre_mutex_err;
3563 } 3795 }
3564 ret = copy_from_user(exec_list,
3565 (struct drm_i915_relocation_entry __user *)
3566 (uintptr_t) args->buffers_ptr,
3567 sizeof(*exec_list) * args->buffer_count);
3568 if (ret != 0) {
3569 DRM_ERROR("copy %d exec entries failed %d\n",
3570 args->buffer_count, ret);
3571 goto pre_mutex_err;
3572 }
3573 3796
3574 if (args->num_cliprects != 0) { 3797 if (args->num_cliprects != 0) {
3575 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), 3798 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3576 GFP_KERNEL); 3799 GFP_KERNEL);
3577 if (cliprects == NULL) 3800 if (cliprects == NULL) {
3801 ret = -ENOMEM;
3578 goto pre_mutex_err; 3802 goto pre_mutex_err;
3803 }
3579 3804
3580 ret = copy_from_user(cliprects, 3805 ret = copy_from_user(cliprects,
3581 (struct drm_clip_rect __user *) 3806 (struct drm_clip_rect __user *)
@@ -3598,38 +3823,49 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3598 i915_verify_inactive(dev, __FILE__, __LINE__); 3823 i915_verify_inactive(dev, __FILE__, __LINE__);
3599 3824
3600 if (atomic_read(&dev_priv->mm.wedged)) { 3825 if (atomic_read(&dev_priv->mm.wedged)) {
3601 DRM_ERROR("Execbuf while wedged\n");
3602 mutex_unlock(&dev->struct_mutex); 3826 mutex_unlock(&dev->struct_mutex);
3603 ret = -EIO; 3827 ret = -EIO;
3604 goto pre_mutex_err; 3828 goto pre_mutex_err;
3605 } 3829 }
3606 3830
3607 if (dev_priv->mm.suspended) { 3831 if (dev_priv->mm.suspended) {
3608 DRM_ERROR("Execbuf while VT-switched.\n");
3609 mutex_unlock(&dev->struct_mutex); 3832 mutex_unlock(&dev->struct_mutex);
3610 ret = -EBUSY; 3833 ret = -EBUSY;
3611 goto pre_mutex_err; 3834 goto pre_mutex_err;
3612 } 3835 }
3613 3836
3614 /* Look up object handles */ 3837 /* Look up object handles */
3838 flips = 0;
3615 for (i = 0; i < args->buffer_count; i++) { 3839 for (i = 0; i < args->buffer_count; i++) {
3616 object_list[i] = drm_gem_object_lookup(dev, file_priv, 3840 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3617 exec_list[i].handle); 3841 exec_list[i].handle);
3618 if (object_list[i] == NULL) { 3842 if (object_list[i] == NULL) {
3619 DRM_ERROR("Invalid object handle %d at index %d\n", 3843 DRM_ERROR("Invalid object handle %d at index %d\n",
3620 exec_list[i].handle, i); 3844 exec_list[i].handle, i);
3845 /* prevent error path from reading uninitialized data */
3846 args->buffer_count = i + 1;
3621 ret = -EBADF; 3847 ret = -EBADF;
3622 goto err; 3848 goto err;
3623 } 3849 }
3624 3850
3625 obj_priv = object_list[i]->driver_private; 3851 obj_priv = to_intel_bo(object_list[i]);
3626 if (obj_priv->in_execbuffer) { 3852 if (obj_priv->in_execbuffer) {
3627 DRM_ERROR("Object %p appears more than once in object list\n", 3853 DRM_ERROR("Object %p appears more than once in object list\n",
3628 object_list[i]); 3854 object_list[i]);
3855 /* prevent error path from reading uninitialized data */
3856 args->buffer_count = i + 1;
3629 ret = -EBADF; 3857 ret = -EBADF;
3630 goto err; 3858 goto err;
3631 } 3859 }
3632 obj_priv->in_execbuffer = true; 3860 obj_priv->in_execbuffer = true;
3861 flips += atomic_read(&obj_priv->pending_flip);
3862 }
3863
3864 if (flips > 0) {
3865 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3866 args->buffer_count);
3867 if (ret)
3868 goto err;
3633 } 3869 }
3634 3870
3635 /* Pin and relocate */ 3871 /* Pin and relocate */
@@ -3731,16 +3967,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3731 i915_gem_flush(dev, 3967 i915_gem_flush(dev,
3732 dev->invalidate_domains, 3968 dev->invalidate_domains,
3733 dev->flush_domains); 3969 dev->flush_domains);
3734 if (dev->flush_domains) 3970 if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
3735 (void)i915_add_request(dev, file_priv, 3971 (void)i915_add_request(dev, file_priv,
3736 dev->flush_domains); 3972 dev->flush_domains);
3737 } 3973 }
3738 3974
3739 for (i = 0; i < args->buffer_count; i++) { 3975 for (i = 0; i < args->buffer_count; i++) {
3740 struct drm_gem_object *obj = object_list[i]; 3976 struct drm_gem_object *obj = object_list[i];
3977 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3741 uint32_t old_write_domain = obj->write_domain; 3978 uint32_t old_write_domain = obj->write_domain;
3742 3979
3743 obj->write_domain = obj->pending_write_domain; 3980 obj->write_domain = obj->pending_write_domain;
3981 if (obj->write_domain)
3982 list_move_tail(&obj_priv->gpu_write_list,
3983 &dev_priv->mm.gpu_write_list);
3984 else
3985 list_del_init(&obj_priv->gpu_write_list);
3986
3744 trace_i915_gem_object_change_domain(obj, 3987 trace_i915_gem_object_change_domain(obj,
3745 obj->read_domains, 3988 obj->read_domains,
3746 old_write_domain); 3989 old_write_domain);
@@ -3806,7 +4049,7 @@ err:
3806 4049
3807 for (i = 0; i < args->buffer_count; i++) { 4050 for (i = 0; i < args->buffer_count; i++) {
3808 if (object_list[i]) { 4051 if (object_list[i]) {
3809 obj_priv = object_list[i]->driver_private; 4052 obj_priv = to_intel_bo(object_list[i]);
3810 obj_priv->in_execbuffer = false; 4053 obj_priv->in_execbuffer = false;
3811 } 4054 }
3812 drm_gem_object_unreference(object_list[i]); 4055 drm_gem_object_unreference(object_list[i]);
@@ -3814,8 +4057,101 @@ err:
3814 4057
3815 mutex_unlock(&dev->struct_mutex); 4058 mutex_unlock(&dev->struct_mutex);
3816 4059
4060pre_mutex_err:
4061 /* Copy the updated relocations out regardless of current error
4062 * state. Failure to update the relocs would mean that the next
4063 * time userland calls execbuf, it would do so with presumed offset
4064 * state that didn't match the actual object state.
4065 */
4066 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
4067 relocs);
4068 if (ret2 != 0) {
4069 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
4070
4071 if (ret == 0)
4072 ret = ret2;
4073 }
4074
4075 drm_free_large(object_list);
4076 kfree(cliprects);
4077
4078 return ret;
4079}
4080
4081/*
4082 * Legacy execbuffer just creates an exec2 list from the original exec object
4083 * list array and passes it to the real function.
4084 */
4085int
4086i915_gem_execbuffer(struct drm_device *dev, void *data,
4087 struct drm_file *file_priv)
4088{
4089 struct drm_i915_gem_execbuffer *args = data;
4090 struct drm_i915_gem_execbuffer2 exec2;
4091 struct drm_i915_gem_exec_object *exec_list = NULL;
4092 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4093 int ret, i;
4094
4095#if WATCH_EXEC
4096 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4097 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4098#endif
4099
4100 if (args->buffer_count < 1) {
4101 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4102 return -EINVAL;
4103 }
4104
4105 /* Copy in the exec list from userland */
4106 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4107 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4108 if (exec_list == NULL || exec2_list == NULL) {
4109 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4110 args->buffer_count);
4111 drm_free_large(exec_list);
4112 drm_free_large(exec2_list);
4113 return -ENOMEM;
4114 }
4115 ret = copy_from_user(exec_list,
4116 (struct drm_i915_relocation_entry __user *)
4117 (uintptr_t) args->buffers_ptr,
4118 sizeof(*exec_list) * args->buffer_count);
4119 if (ret != 0) {
4120 DRM_ERROR("copy %d exec entries failed %d\n",
4121 args->buffer_count, ret);
4122 drm_free_large(exec_list);
4123 drm_free_large(exec2_list);
4124 return -EFAULT;
4125 }
4126
4127 for (i = 0; i < args->buffer_count; i++) {
4128 exec2_list[i].handle = exec_list[i].handle;
4129 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4130 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4131 exec2_list[i].alignment = exec_list[i].alignment;
4132 exec2_list[i].offset = exec_list[i].offset;
4133 if (!IS_I965G(dev))
4134 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4135 else
4136 exec2_list[i].flags = 0;
4137 }
4138
4139 exec2.buffers_ptr = args->buffers_ptr;
4140 exec2.buffer_count = args->buffer_count;
4141 exec2.batch_start_offset = args->batch_start_offset;
4142 exec2.batch_len = args->batch_len;
4143 exec2.DR1 = args->DR1;
4144 exec2.DR4 = args->DR4;
4145 exec2.num_cliprects = args->num_cliprects;
4146 exec2.cliprects_ptr = args->cliprects_ptr;
4147 exec2.flags = 0;
4148
4149 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3817 if (!ret) { 4150 if (!ret) {
3818 /* Copy the new buffer offsets back to the user's exec list. */ 4151 /* Copy the new buffer offsets back to the user's exec list. */
4152 for (i = 0; i < args->buffer_count; i++)
4153 exec_list[i].offset = exec2_list[i].offset;
4154 /* ... and back out to userspace */
3819 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 4155 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3820 (uintptr_t) args->buffers_ptr, 4156 (uintptr_t) args->buffers_ptr,
3821 exec_list, 4157 exec_list,
@@ -3828,25 +4164,62 @@ err:
3828 } 4164 }
3829 } 4165 }
3830 4166
3831 /* Copy the updated relocations out regardless of current error 4167 drm_free_large(exec_list);
3832 * state. Failure to update the relocs would mean that the next 4168 drm_free_large(exec2_list);
3833 * time userland calls execbuf, it would do so with presumed offset 4169 return ret;
3834 * state that didn't match the actual object state. 4170}
3835 */
3836 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3837 relocs);
3838 if (ret2 != 0) {
3839 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3840 4171
3841 if (ret == 0) 4172int
3842 ret = ret2; 4173i915_gem_execbuffer2(struct drm_device *dev, void *data,
4174 struct drm_file *file_priv)
4175{
4176 struct drm_i915_gem_execbuffer2 *args = data;
4177 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4178 int ret;
4179
4180#if WATCH_EXEC
4181 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4182 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4183#endif
4184
4185 if (args->buffer_count < 1) {
4186 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4187 return -EINVAL;
3843 } 4188 }
3844 4189
3845pre_mutex_err: 4190 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3846 drm_free_large(object_list); 4191 if (exec2_list == NULL) {
3847 drm_free_large(exec_list); 4192 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3848 kfree(cliprects); 4193 args->buffer_count);
4194 return -ENOMEM;
4195 }
4196 ret = copy_from_user(exec2_list,
4197 (struct drm_i915_relocation_entry __user *)
4198 (uintptr_t) args->buffers_ptr,
4199 sizeof(*exec2_list) * args->buffer_count);
4200 if (ret != 0) {
4201 DRM_ERROR("copy %d exec entries failed %d\n",
4202 args->buffer_count, ret);
4203 drm_free_large(exec2_list);
4204 return -EFAULT;
4205 }
3849 4206
4207 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4208 if (!ret) {
4209 /* Copy the new buffer offsets back to the user's exec list. */
4210 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4211 (uintptr_t) args->buffers_ptr,
4212 exec2_list,
4213 sizeof(*exec2_list) * args->buffer_count);
4214 if (ret) {
4215 ret = -EFAULT;
4216 DRM_ERROR("failed to copy %d exec entries "
4217 "back to user (%d)\n",
4218 args->buffer_count, ret);
4219 }
4220 }
4221
4222 drm_free_large(exec2_list);
3850 return ret; 4223 return ret;
3851} 4224}
3852 4225
@@ -3854,7 +4227,7 @@ int
3854i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 4227i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3855{ 4228{
3856 struct drm_device *dev = obj->dev; 4229 struct drm_device *dev = obj->dev;
3857 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3858 int ret; 4231 int ret;
3859 4232
3860 i915_verify_inactive(dev, __FILE__, __LINE__); 4233 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3863,19 +4236,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3863 if (ret) 4236 if (ret)
3864 return ret; 4237 return ret;
3865 } 4238 }
3866 /* 4239
3867 * Pre-965 chips need a fence register set up in order to
3868 * properly handle tiled surfaces.
3869 */
3870 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3871 ret = i915_gem_object_get_fence_reg(obj);
3872 if (ret != 0) {
3873 if (ret != -EBUSY && ret != -ERESTARTSYS)
3874 DRM_ERROR("Failure to install fence: %d\n",
3875 ret);
3876 return ret;
3877 }
3878 }
3879 obj_priv->pin_count++; 4240 obj_priv->pin_count++;
3880 4241
3881 /* If the object is not active and not pending a flush, 4242 /* If the object is not active and not pending a flush,
@@ -3899,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
3899{ 4260{
3900 struct drm_device *dev = obj->dev; 4261 struct drm_device *dev = obj->dev;
3901 drm_i915_private_t *dev_priv = dev->dev_private; 4262 drm_i915_private_t *dev_priv = dev->dev_private;
3902 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4263 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3903 4264
3904 i915_verify_inactive(dev, __FILE__, __LINE__); 4265 i915_verify_inactive(dev, __FILE__, __LINE__);
3905 obj_priv->pin_count--; 4266 obj_priv->pin_count--;
@@ -3939,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3939 mutex_unlock(&dev->struct_mutex); 4300 mutex_unlock(&dev->struct_mutex);
3940 return -EBADF; 4301 return -EBADF;
3941 } 4302 }
3942 obj_priv = obj->driver_private; 4303 obj_priv = to_intel_bo(obj);
3943 4304
3944 if (obj_priv->madv != I915_MADV_WILLNEED) { 4305 if (obj_priv->madv != I915_MADV_WILLNEED) {
3945 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 4306 DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -3996,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3996 return -EBADF; 4357 return -EBADF;
3997 } 4358 }
3998 4359
3999 obj_priv = obj->driver_private; 4360 obj_priv = to_intel_bo(obj);
4000 if (obj_priv->pin_filp != file_priv) { 4361 if (obj_priv->pin_filp != file_priv) {
4001 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 4362 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4002 args->handle); 4363 args->handle);
@@ -4038,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4038 */ 4399 */
4039 i915_gem_retire_requests(dev); 4400 i915_gem_retire_requests(dev);
4040 4401
4041 obj_priv = obj->driver_private; 4402 obj_priv = to_intel_bo(obj);
4042 /* Don't count being on the flushing list against the object being 4403 /* Don't count being on the flushing list against the object being
4043 * done. Otherwise, a buffer left on the flushing list but not getting 4404 * done. Otherwise, a buffer left on the flushing list but not getting
4044 * flushed (because nobody's flushing that domain) won't ever return 4405 * flushed (because nobody's flushing that domain) won't ever return
@@ -4084,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4084 } 4445 }
4085 4446
4086 mutex_lock(&dev->struct_mutex); 4447 mutex_lock(&dev->struct_mutex);
4087 obj_priv = obj->driver_private; 4448 obj_priv = to_intel_bo(obj);
4088 4449
4089 if (obj_priv->pin_count) { 4450 if (obj_priv->pin_count) {
4090 drm_gem_object_unreference(obj); 4451 drm_gem_object_unreference(obj);
@@ -4133,6 +4494,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4133 obj_priv->obj = obj; 4494 obj_priv->obj = obj;
4134 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4495 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4135 INIT_LIST_HEAD(&obj_priv->list); 4496 INIT_LIST_HEAD(&obj_priv->list);
4497 INIT_LIST_HEAD(&obj_priv->gpu_write_list);
4136 INIT_LIST_HEAD(&obj_priv->fence_list); 4498 INIT_LIST_HEAD(&obj_priv->fence_list);
4137 obj_priv->madv = I915_MADV_WILLNEED; 4499 obj_priv->madv = I915_MADV_WILLNEED;
4138 4500
@@ -4144,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4144void i915_gem_free_object(struct drm_gem_object *obj) 4506void i915_gem_free_object(struct drm_gem_object *obj)
4145{ 4507{
4146 struct drm_device *dev = obj->dev; 4508 struct drm_device *dev = obj->dev;
4147 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4148 4510
4149 trace_i915_gem_object_destroy(obj); 4511 trace_i915_gem_object_destroy(obj);
4150 4512
@@ -4192,8 +4554,7 @@ int
4192i915_gem_idle(struct drm_device *dev) 4554i915_gem_idle(struct drm_device *dev)
4193{ 4555{
4194 drm_i915_private_t *dev_priv = dev->dev_private; 4556 drm_i915_private_t *dev_priv = dev->dev_private;
4195 uint32_t seqno, cur_seqno, last_seqno; 4557 int ret;
4196 int stuck, ret;
4197 4558
4198 mutex_lock(&dev->struct_mutex); 4559 mutex_lock(&dev->struct_mutex);
4199 4560
@@ -4202,116 +4563,80 @@ i915_gem_idle(struct drm_device *dev)
4202 return 0; 4563 return 0;
4203 } 4564 }
4204 4565
4205 /* Hack! Don't let anybody do execbuf while we don't control the chip. 4566 ret = i915_gpu_idle(dev);
4206 * We need to replace this with a semaphore, or something. 4567 if (ret) {
4207 */
4208 dev_priv->mm.suspended = 1;
4209 del_timer(&dev_priv->hangcheck_timer);
4210
4211 /* Cancel the retire work handler, wait for it to finish if running
4212 */
4213 mutex_unlock(&dev->struct_mutex);
4214 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4215 mutex_lock(&dev->struct_mutex);
4216
4217 i915_kernel_lost_context(dev);
4218
4219 /* Flush the GPU along with all non-CPU write domains
4220 */
4221 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4222 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4223
4224 if (seqno == 0) {
4225 mutex_unlock(&dev->struct_mutex); 4568 mutex_unlock(&dev->struct_mutex);
4226 return -ENOMEM; 4569 return ret;
4227 } 4570 }
4228 4571
4229 dev_priv->mm.waiting_gem_seqno = seqno; 4572 /* Under UMS, be paranoid and evict. */
4230 last_seqno = 0; 4573 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4231 stuck = 0; 4574 ret = i915_gem_evict_from_inactive_list(dev);
4232 for (;;) { 4575 if (ret) {
4233 cur_seqno = i915_get_gem_seqno(dev); 4576 mutex_unlock(&dev->struct_mutex);
4234 if (i915_seqno_passed(cur_seqno, seqno)) 4577 return ret;
4235 break;
4236 if (last_seqno == cur_seqno) {
4237 if (stuck++ > 100) {
4238 DRM_ERROR("hardware wedged\n");
4239 atomic_set(&dev_priv->mm.wedged, 1);
4240 DRM_WAKEUP(&dev_priv->irq_queue);
4241 break;
4242 }
4243 } 4578 }
4244 msleep(10);
4245 last_seqno = cur_seqno;
4246 }
4247 dev_priv->mm.waiting_gem_seqno = 0;
4248
4249 i915_gem_retire_requests(dev);
4250
4251 spin_lock(&dev_priv->mm.active_list_lock);
4252 if (!atomic_read(&dev_priv->mm.wedged)) {
4253 /* Active and flushing should now be empty as we've
4254 * waited for a sequence higher than any pending execbuffer
4255 */
4256 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4257 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4258 /* Request should now be empty as we've also waited
4259 * for the last request in the list
4260 */
4261 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4262 } 4579 }
4263 4580
4264 /* Empty the active and flushing lists to inactive. If there's 4581 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4265 * anything left at this point, it means that we're wedged and 4582 * We need to replace this with a semaphore, or something.
4266 * nothing good's going to happen by leaving them there. So strip 4583 * And not confound mm.suspended!
4267 * the GPU domains and just stuff them onto inactive.
4268 */ 4584 */
4269 while (!list_empty(&dev_priv->mm.active_list)) { 4585 dev_priv->mm.suspended = 1;
4270 struct drm_gem_object *obj; 4586 del_timer(&dev_priv->hangcheck_timer);
4271 uint32_t old_write_domain;
4272 4587
4273 obj = list_first_entry(&dev_priv->mm.active_list, 4588 i915_kernel_lost_context(dev);
4274 struct drm_i915_gem_object, 4589 i915_gem_cleanup_ringbuffer(dev);
4275 list)->obj;
4276 old_write_domain = obj->write_domain;
4277 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4278 i915_gem_object_move_to_inactive(obj);
4279 4590
4280 trace_i915_gem_object_change_domain(obj, 4591 mutex_unlock(&dev->struct_mutex);
4281 obj->read_domains,
4282 old_write_domain);
4283 }
4284 spin_unlock(&dev_priv->mm.active_list_lock);
4285 4592
4286 while (!list_empty(&dev_priv->mm.flushing_list)) { 4593 /* Cancel the retire work handler, which should be idle now. */
4287 struct drm_gem_object *obj; 4594 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4288 uint32_t old_write_domain;
4289 4595
4290 obj = list_first_entry(&dev_priv->mm.flushing_list, 4596 return 0;
4291 struct drm_i915_gem_object, 4597}
4292 list)->obj;
4293 old_write_domain = obj->write_domain;
4294 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4295 i915_gem_object_move_to_inactive(obj);
4296 4598
4297 trace_i915_gem_object_change_domain(obj, 4599/*
4298 obj->read_domains, 4600 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4299 old_write_domain); 4601 * over cache flushing.
4602 */
4603static int
4604i915_gem_init_pipe_control(struct drm_device *dev)
4605{
4606 drm_i915_private_t *dev_priv = dev->dev_private;
4607 struct drm_gem_object *obj;
4608 struct drm_i915_gem_object *obj_priv;
4609 int ret;
4610
4611 obj = drm_gem_object_alloc(dev, 4096);
4612 if (obj == NULL) {
4613 DRM_ERROR("Failed to allocate seqno page\n");
4614 ret = -ENOMEM;
4615 goto err;
4300 } 4616 }
4617 obj_priv = to_intel_bo(obj);
4618 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4301 4619
4620 ret = i915_gem_object_pin(obj, 4096);
4621 if (ret)
4622 goto err_unref;
4302 4623
4303 /* Move all inactive buffers out of the GTT. */ 4624 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4304 ret = i915_gem_evict_from_inactive_list(dev); 4625 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4305 WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); 4626 if (dev_priv->seqno_page == NULL)
4306 if (ret) { 4627 goto err_unpin;
4307 mutex_unlock(&dev->struct_mutex);
4308 return ret;
4309 }
4310 4628
4311 i915_gem_cleanup_ringbuffer(dev); 4629 dev_priv->seqno_obj = obj;
4312 mutex_unlock(&dev->struct_mutex); 4630 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4313 4631
4314 return 0; 4632 return 0;
4633
4634err_unpin:
4635 i915_gem_object_unpin(obj);
4636err_unref:
4637 drm_gem_object_unreference(obj);
4638err:
4639 return ret;
4315} 4640}
4316 4641
4317static int 4642static int
@@ -4331,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
4331 obj = drm_gem_object_alloc(dev, 4096); 4656 obj = drm_gem_object_alloc(dev, 4096);
4332 if (obj == NULL) { 4657 if (obj == NULL) {
4333 DRM_ERROR("Failed to allocate status page\n"); 4658 DRM_ERROR("Failed to allocate status page\n");
4334 return -ENOMEM; 4659 ret = -ENOMEM;
4660 goto err;
4335 } 4661 }
4336 obj_priv = obj->driver_private; 4662 obj_priv = to_intel_bo(obj);
4337 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4663 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4338 4664
4339 ret = i915_gem_object_pin(obj, 4096); 4665 ret = i915_gem_object_pin(obj, 4096);
4340 if (ret != 0) { 4666 if (ret != 0) {
4341 drm_gem_object_unreference(obj); 4667 drm_gem_object_unreference(obj);
4342 return ret; 4668 goto err_unref;
4343 } 4669 }
4344 4670
4345 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 4671 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4348,17 +4674,52 @@ i915_gem_init_hws(struct drm_device *dev)
4348 if (dev_priv->hw_status_page == NULL) { 4674 if (dev_priv->hw_status_page == NULL) {
4349 DRM_ERROR("Failed to map status page.\n"); 4675 DRM_ERROR("Failed to map status page.\n");
4350 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4676 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4351 i915_gem_object_unpin(obj); 4677 ret = -EINVAL;
4352 drm_gem_object_unreference(obj); 4678 goto err_unpin;
4353 return -EINVAL; 4679 }
4680
4681 if (HAS_PIPE_CONTROL(dev)) {
4682 ret = i915_gem_init_pipe_control(dev);
4683 if (ret)
4684 goto err_unpin;
4354 } 4685 }
4686
4355 dev_priv->hws_obj = obj; 4687 dev_priv->hws_obj = obj;
4356 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4688 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4357 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4689 if (IS_GEN6(dev)) {
4358 I915_READ(HWS_PGA); /* posting read */ 4690 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4359 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4691 I915_READ(HWS_PGA_GEN6); /* posting read */
4692 } else {
4693 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4694 I915_READ(HWS_PGA); /* posting read */
4695 }
4696 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4360 4697
4361 return 0; 4698 return 0;
4699
4700err_unpin:
4701 i915_gem_object_unpin(obj);
4702err_unref:
4703 drm_gem_object_unreference(obj);
4704err:
4705 return 0;
4706}
4707
4708static void
4709i915_gem_cleanup_pipe_control(struct drm_device *dev)
4710{
4711 drm_i915_private_t *dev_priv = dev->dev_private;
4712 struct drm_gem_object *obj;
4713 struct drm_i915_gem_object *obj_priv;
4714
4715 obj = dev_priv->seqno_obj;
4716 obj_priv = to_intel_bo(obj);
4717 kunmap(obj_priv->pages[0]);
4718 i915_gem_object_unpin(obj);
4719 drm_gem_object_unreference(obj);
4720 dev_priv->seqno_obj = NULL;
4721
4722 dev_priv->seqno_page = NULL;
4362} 4723}
4363 4724
4364static void 4725static void
@@ -4372,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4372 return; 4733 return;
4373 4734
4374 obj = dev_priv->hws_obj; 4735 obj = dev_priv->hws_obj;
4375 obj_priv = obj->driver_private; 4736 obj_priv = to_intel_bo(obj);
4376 4737
4377 kunmap(obj_priv->pages[0]); 4738 kunmap(obj_priv->pages[0]);
4378 i915_gem_object_unpin(obj); 4739 i915_gem_object_unpin(obj);
@@ -4382,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4382 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4743 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4383 dev_priv->hw_status_page = NULL; 4744 dev_priv->hw_status_page = NULL;
4384 4745
4746 if (HAS_PIPE_CONTROL(dev))
4747 i915_gem_cleanup_pipe_control(dev);
4748
4385 /* Write high address into HWS_PGA when disabling. */ 4749 /* Write high address into HWS_PGA when disabling. */
4386 I915_WRITE(HWS_PGA, 0x1ffff000); 4750 I915_WRITE(HWS_PGA, 0x1ffff000);
4387} 4751}
@@ -4406,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4406 i915_gem_cleanup_hws(dev); 4770 i915_gem_cleanup_hws(dev);
4407 return -ENOMEM; 4771 return -ENOMEM;
4408 } 4772 }
4409 obj_priv = obj->driver_private; 4773 obj_priv = to_intel_bo(obj);
4410 4774
4411 ret = i915_gem_object_pin(obj, 4096); 4775 ret = i915_gem_object_pin(obj, 4096);
4412 if (ret != 0) { 4776 if (ret != 0) {
@@ -4492,6 +4856,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4492 ring->space += ring->Size; 4856 ring->space += ring->Size;
4493 } 4857 }
4494 4858
4859 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
4860 I915_WRITE(MI_MODE,
4861 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
4862 }
4863
4495 return 0; 4864 return 0;
4496} 4865}
4497 4866
@@ -4584,6 +4953,7 @@ i915_gem_load(struct drm_device *dev)
4584 spin_lock_init(&dev_priv->mm.active_list_lock); 4953 spin_lock_init(&dev_priv->mm.active_list_lock);
4585 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4954 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4586 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4955 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4956 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4587 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4957 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4588 INIT_LIST_HEAD(&dev_priv->mm.request_list); 4958 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4589 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4959 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4596,7 +4966,8 @@ i915_gem_load(struct drm_device *dev)
4596 spin_unlock(&shrink_list_lock); 4966 spin_unlock(&shrink_list_lock);
4597 4967
4598 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4968 /* Old X drivers will take 0-2 for front, back, depth buffers */
4599 dev_priv->fence_reg_start = 3; 4969 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4970 dev_priv->fence_reg_start = 3;
4600 4971
4601 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4972 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4602 dev_priv->num_fence_regs = 16; 4973 dev_priv->num_fence_regs = 16;
@@ -4614,8 +4985,8 @@ i915_gem_load(struct drm_device *dev)
4614 for (i = 0; i < 8; i++) 4985 for (i = 0; i < 8; i++)
4615 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); 4986 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4616 } 4987 }
4617
4618 i915_gem_detect_bit_6_swizzle(dev); 4988 i915_gem_detect_bit_6_swizzle(dev);
4989 init_waitqueue_head(&dev_priv->pending_flip_queue);
4619} 4990}
4620 4991
4621/* 4992/*
@@ -4638,7 +5009,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4638 5009
4639 phys_obj->id = id; 5010 phys_obj->id = id;
4640 5011
4641 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); 5012 phys_obj->handle = drm_pci_alloc(dev, size, 0);
4642 if (!phys_obj->handle) { 5013 if (!phys_obj->handle) {
4643 ret = -ENOMEM; 5014 ret = -ENOMEM;
4644 goto kfree_obj; 5015 goto kfree_obj;
@@ -4692,11 +5063,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4692 int ret; 5063 int ret;
4693 int page_count; 5064 int page_count;
4694 5065
4695 obj_priv = obj->driver_private; 5066 obj_priv = to_intel_bo(obj);
4696 if (!obj_priv->phys_obj) 5067 if (!obj_priv->phys_obj)
4697 return; 5068 return;
4698 5069
4699 ret = i915_gem_object_get_pages(obj); 5070 ret = i915_gem_object_get_pages(obj, 0);
4700 if (ret) 5071 if (ret)
4701 goto out; 5072 goto out;
4702 5073
@@ -4731,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4731 if (id > I915_MAX_PHYS_OBJECT) 5102 if (id > I915_MAX_PHYS_OBJECT)
4732 return -EINVAL; 5103 return -EINVAL;
4733 5104
4734 obj_priv = obj->driver_private; 5105 obj_priv = to_intel_bo(obj);
4735 5106
4736 if (obj_priv->phys_obj) { 5107 if (obj_priv->phys_obj) {
4737 if (obj_priv->phys_obj->id == id) 5108 if (obj_priv->phys_obj->id == id)
@@ -4754,7 +5125,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4754 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 5125 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4755 obj_priv->phys_obj->cur_obj = obj; 5126 obj_priv->phys_obj->cur_obj = obj;
4756 5127
4757 ret = i915_gem_object_get_pages(obj); 5128 ret = i915_gem_object_get_pages(obj, 0);
4758 if (ret) { 5129 if (ret) {
4759 DRM_ERROR("failed to get page list\n"); 5130 DRM_ERROR("failed to get page list\n");
4760 goto out; 5131 goto out;
@@ -4782,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4782 struct drm_i915_gem_pwrite *args, 5153 struct drm_i915_gem_pwrite *args,
4783 struct drm_file *file_priv) 5154 struct drm_file *file_priv)
4784{ 5155{
4785 struct drm_i915_gem_object *obj_priv = obj->driver_private; 5156 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4786 void *obj_addr; 5157 void *obj_addr;
4787 int ret; 5158 int ret;
4788 char __user *user_data; 5159 char __user *user_data;
@@ -4790,7 +5161,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4790 user_data = (char __user *) (uintptr_t) args->data_ptr; 5161 user_data = (char __user *) (uintptr_t) args->data_ptr;
4791 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; 5162 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4792 5163
4793 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); 5164 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4794 ret = copy_from_user(obj_addr, user_data, args->size); 5165 ret = copy_from_user(obj_addr, user_data, args->size);
4795 if (ret) 5166 if (ret)
4796 return -EFAULT; 5167 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f8..35507cf53fa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
72i915_gem_dump_object(struct drm_gem_object *obj, int len, 72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark) 73 const char *where, uint32_t mark)
74{ 74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private; 75 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
76 int page; 76 int page;
77 77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
138{ 138{
139 struct drm_device *dev = obj->dev; 139 struct drm_device *dev = obj->dev;
140 struct drm_i915_gem_object *obj_priv = obj->driver_private; 140 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
141 int page; 141 int page;
142 uint32_t *gtt_mapping; 142 uint32_t *gtt_mapping;
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453ca..4bdccefcf2cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
30#include "linux/string.h" 28#include "linux/string.h"
31#include "linux/bitops.h" 29#include "linux/bitops.h"
32#include "drmP.h" 30#include "drmP.h"
@@ -83,120 +81,6 @@
83 * to match what the GPU expects. 81 * to match what the GPU expects.
84 */ 82 */
85 83
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
99 u32 temp_lo, temp_hi = 0;
100 u64 mchbar_addr;
101 int ret = 0;
102
103 if (IS_I965G(dev))
104 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
105 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
106 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
107
108 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
109#ifdef CONFIG_PNP
110 if (mchbar_addr &&
111 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
112 ret = 0;
113 goto out;
114 }
115#endif
116
117 /* Get some space for it */
118 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
119 MCHBAR_SIZE, MCHBAR_SIZE,
120 PCIBIOS_MIN_MEM,
121 0, pcibios_align_resource,
122 dev_priv->bridge_dev);
123 if (ret) {
124 DRM_DEBUG("failed bus alloc: %d\n", ret);
125 dev_priv->mch_res.start = 0;
126 goto out;
127 }
128
129 if (IS_I965G(dev))
130 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
131 upper_32_bits(dev_priv->mch_res.start));
132
133 pci_write_config_dword(dev_priv->bridge_dev, reg,
134 lower_32_bits(dev_priv->mch_res.start));
135out:
136 return ret;
137}
138
139/* Setup MCHBAR if possible, return true if we should disable it again */
140static bool
141intel_setup_mchbar(struct drm_device *dev)
142{
143 drm_i915_private_t *dev_priv = dev->dev_private;
144 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
145 u32 temp;
146 bool need_disable = false, enabled;
147
148 if (IS_I915G(dev) || IS_I915GM(dev)) {
149 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
150 enabled = !!(temp & DEVEN_MCHBAR_EN);
151 } else {
152 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
153 enabled = temp & 1;
154 }
155
156 /* If it's already enabled, don't have to do anything */
157 if (enabled)
158 goto out;
159
160 if (intel_alloc_mchbar_resource(dev))
161 goto out;
162
163 need_disable = true;
164
165 /* Space is allocated or reserved, so enable it. */
166 if (IS_I915G(dev) || IS_I915GM(dev)) {
167 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
168 temp | DEVEN_MCHBAR_EN);
169 } else {
170 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
171 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
172 }
173out:
174 return need_disable;
175}
176
177static void
178intel_teardown_mchbar(struct drm_device *dev, bool disable)
179{
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
182 u32 temp;
183
184 if (disable) {
185 if (IS_I915G(dev) || IS_I915GM(dev)) {
186 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
187 temp &= ~DEVEN_MCHBAR_EN;
188 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
189 } else {
190 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
191 temp &= ~1;
192 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
193 }
194 }
195
196 if (dev_priv->mch_res.start)
197 release_resource(&dev_priv->mch_res);
198}
199
200/** 84/**
201 * Detects bit 6 swizzling of address lookup between IGD access and CPU 85 * Detects bit 6 swizzling of address lookup between IGD access and CPU
202 * access through main memory. 86 * access through main memory.
@@ -207,10 +91,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
207 drm_i915_private_t *dev_priv = dev->dev_private; 91 drm_i915_private_t *dev_priv = dev->dev_private;
208 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
210 bool need_disable;
211 94
212 if (IS_IGDNG(dev)) { 95 if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
213 /* On IGDNG whatever DRAM config, GPU always do 96 /* On Ironlake whatever DRAM config, GPU always do
214 * same swizzling setup. 97 * same swizzling setup.
215 */ 98 */
216 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 99 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
224 } else if (IS_MOBILE(dev)) { 107 } else if (IS_MOBILE(dev)) {
225 uint32_t dcc; 108 uint32_t dcc;
226 109
227 /* Try to make sure MCHBAR is enabled before poking at it */
228 need_disable = intel_setup_mchbar(dev);
229
230 /* On mobile 9xx chipsets, channel interleave by the CPU is 110 /* On mobile 9xx chipsets, channel interleave by the CPU is
231 * determined by DCC. For single-channel, neither the CPU 111 * determined by DCC. For single-channel, neither the CPU
232 * nor the GPU do swizzling. For dual channel interleaved, 112 * nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
266 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 146 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
267 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 147 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
268 } 148 }
269
270 intel_teardown_mchbar(dev, need_disable);
271 } else { 149 } else {
272 /* The 965, G33, and newer, have a very flexible memory 150 /* The 965, G33, and newer, have a very flexible memory
273 * configuration. It will enable dual-channel mode 151 * configuration. It will enable dual-channel mode
@@ -302,37 +180,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
302 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 180 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
303} 181}
304 182
305
306/**
307 * Returns the size of the fence for a tiled object of the given size.
308 */
309static int
310i915_get_fence_size(struct drm_device *dev, int size)
311{
312 int i;
313 int start;
314
315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */
317 return ALIGN(size, 4096);
318 } else {
319 /* Align the size to a power of two greater than the smallest
320 * fence size.
321 */
322 if (IS_I9XX(dev))
323 start = 1024 * 1024;
324 else
325 start = 512 * 1024;
326
327 for (i = start; i < size; i <<= 1)
328 ;
329
330 return i;
331 }
332}
333
334/* Check pitch constriants for all chips & tiling formats */ 183/* Check pitch constriants for all chips & tiling formats */
335static bool 184bool
336i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
337{ 186{
338 int tile_width; 187 int tile_width;
@@ -353,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
353 * reg, so dont bother to check the size */ 202 * reg, so dont bother to check the size */
354 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
355 return false; 204 return false;
356 } else if (IS_I9XX(dev)) { 205 } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
357 uint32_t pitch_val = ffs(stride / tile_width) - 1; 206 if (stride > 8192)
358
359 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
360 * instead of 4 (2KB) on 945s.
361 */
362 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
363 size > (I830_FENCE_MAX_SIZE_VAL << 20))
364 return false; 207 return false;
365 } else {
366 uint32_t pitch_val = ffs(stride / tile_width) - 1;
367 208
368 if (pitch_val > I830_FENCE_MAX_PITCH_VAL || 209 if (IS_GEN3(dev)) {
369 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 210 if (size > I830_FENCE_MAX_SIZE_VAL << 20)
370 return false; 211 return false;
212 } else {
213 if (size > I830_FENCE_MAX_SIZE_VAL << 19)
214 return false;
215 }
371 } 216 }
372 217
373 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
@@ -384,20 +229,14 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
384 if (stride & (stride - 1)) 229 if (stride & (stride - 1))
385 return false; 230 return false;
386 231
387 /* We don't 0handle the aperture area covered by the fence being bigger
388 * than the object size.
389 */
390 if (i915_get_fence_size(dev, size) != size)
391 return false;
392
393 return true; 232 return true;
394} 233}
395 234
396static bool 235bool
397i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 236i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
398{ 237{
399 struct drm_device *dev = obj->dev; 238 struct drm_device *dev = obj->dev;
400 struct drm_i915_gem_object *obj_priv = obj->driver_private; 239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
401 240
402 if (obj_priv->gtt_space == NULL) 241 if (obj_priv->gtt_space == NULL)
403 return true; 242 return true;
@@ -437,12 +276,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
437 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 276 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
438 if (obj == NULL) 277 if (obj == NULL)
439 return -EINVAL; 278 return -EINVAL;
440 obj_priv = obj->driver_private; 279 obj_priv = to_intel_bo(obj);
441 280
442 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 281 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
443 mutex_lock(&dev->struct_mutex); 282 drm_gem_object_unreference_unlocked(obj);
444 drm_gem_object_unreference(obj);
445 mutex_unlock(&dev->struct_mutex);
446 return -EINVAL; 283 return -EINVAL;
447 } 284 }
448 285
@@ -484,9 +321,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
484 * need to ensure that any fence register is cleared. 321 * need to ensure that any fence register is cleared.
485 */ 322 */
486 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 323 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
487 ret = i915_gem_object_unbind(obj); 324 ret = i915_gem_object_unbind(obj);
325 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
326 ret = i915_gem_object_put_fence_reg(obj);
488 else 327 else
489 ret = i915_gem_object_put_fence_reg(obj); 328 i915_gem_release_mmap(obj);
329
490 if (ret != 0) { 330 if (ret != 0) {
491 WARN(ret != -ERESTARTSYS, 331 WARN(ret != -ERESTARTSYS,
492 "failed to reset object for tiling switch"); 332 "failed to reset object for tiling switch");
@@ -495,12 +335,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
495 goto err; 335 goto err;
496 } 336 }
497 337
498 /* If we've changed tiling, GTT-mappings of the object
499 * need to re-fault to ensure that the correct fence register
500 * setup is in place.
501 */
502 i915_gem_release_mmap(obj);
503
504 obj_priv->tiling_mode = args->tiling_mode; 338 obj_priv->tiling_mode = args->tiling_mode;
505 obj_priv->stride = args->stride; 339 obj_priv->stride = args->stride;
506 } 340 }
@@ -526,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
526 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 360 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
527 if (obj == NULL) 361 if (obj == NULL)
528 return -EINVAL; 362 return -EINVAL;
529 obj_priv = obj->driver_private; 363 obj_priv = to_intel_bo(obj);
530 364
531 mutex_lock(&dev->struct_mutex); 365 mutex_lock(&dev->struct_mutex);
532 366
@@ -589,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
589{ 423{
590 struct drm_device *dev = obj->dev; 424 struct drm_device *dev = obj->dev;
591 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
592 struct drm_i915_gem_object *obj_priv = obj->driver_private; 426 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
593 int page_count = obj->size >> PAGE_SHIFT; 427 int page_count = obj->size >> PAGE_SHIFT;
594 int i; 428 int i;
595 429
@@ -618,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
618{ 452{
619 struct drm_device *dev = obj->dev; 453 struct drm_device *dev = obj->dev;
620 drm_i915_private_t *dev_priv = dev->dev_private; 454 drm_i915_private_t *dev_priv = dev->dev_private;
621 struct drm_i915_gem_object *obj_priv = obj->driver_private; 455 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
622 int page_count = obj->size >> PAGE_SHIFT; 456 int page_count = obj->size >> PAGE_SHIFT;
623 int i; 457 int i;
624 458
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b75..13b028994b2b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
66 &batchbuffer->cliprects)) 66 &batchbuffer->cliprects))
67 return -EFAULT; 67 return -EFAULT;
68 68
69 return drm_ioctl(file->f_path.dentry->d_inode, file, 69 return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
70 DRM_IOCTL_I915_BATCHBUFFER,
71 (unsigned long)batchbuffer); 70 (unsigned long)batchbuffer);
72} 71}
73 72
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
102 &cmdbuffer->cliprects)) 101 &cmdbuffer->cliprects))
103 return -EFAULT; 102 return -EFAULT;
104 103
105 return drm_ioctl(file->f_path.dentry->d_inode, file, 104 return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
106 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer); 105 (unsigned long)cmdbuffer);
107} 106}
108 107
109typedef struct drm_i915_irq_emit32 { 108typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
125 &request->irq_seq)) 124 &request->irq_seq))
126 return -EFAULT; 125 return -EFAULT;
127 126
128 return drm_ioctl(file->f_path.dentry->d_inode, file, 127 return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
129 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request); 128 (unsigned long)request);
130} 129}
131typedef struct drm_i915_getparam32 { 130typedef struct drm_i915_getparam32 {
132 int param; 131 int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
149 &request->value)) 148 &request->value))
150 return -EFAULT; 149 return -EFAULT;
151 150
152 return drm_ioctl(file->f_path.dentry->d_inode, file, 151 return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
153 DRM_IOCTL_I915_GETPARAM, (unsigned long)request); 152 (unsigned long)request);
154} 153}
155 154
156typedef struct drm_i915_mem_alloc32 { 155typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
178 &request->region_offset)) 177 &request->region_offset))
179 return -EFAULT; 178 return -EFAULT;
180 179
181 return drm_ioctl(file->f_path.dentry->d_inode, file, 180 return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
182 DRM_IOCTL_I915_ALLOC, (unsigned long)request); 181 (unsigned long)request);
183} 182}
184 183
185drm_ioctl_compat_t *i915_compat_ioctls[] = { 184drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
211 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) 210 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
212 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; 211 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
213 212
214 lock_kernel(); /* XXX for now */
215 if (fn != NULL) 213 if (fn != NULL)
216 ret = (*fn) (filp, cmd, arg); 214 ret = (*fn) (filp, cmd, arg);
217 else 215 else
218 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 216 ret = drm_ioctl(filp, cmd, arg);
219 unlock_kernel();
220 217
221 return ret; 218 return ret;
222} 219}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6eb..df6a9cd82c4d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/sysrq.h> 29#include <linux/sysrq.h>
30#include <linux/slab.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
32#include "i915_drm.h" 33#include "i915_drm.h"
@@ -43,10 +44,13 @@
43 * we leave them always unmasked in IMR and then control enabling them through 44 * we leave them always unmasked in IMR and then control enabling them through
44 * PIPESTAT alone. 45 * PIPESTAT alone.
45 */ 46 */
46#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ 47#define I915_INTERRUPT_ENABLE_FIX \
47 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 48 (I915_ASLE_INTERRUPT | \
48 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ 49 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
49 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 50 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
51 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
52 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
50 54
51/** Interrupts that we mask and unmask at runtime. */ 55/** Interrupts that we mask and unmask at runtime. */
52#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +65,7 @@
61 DRM_I915_VBLANK_PIPE_B) 65 DRM_I915_VBLANK_PIPE_B)
62 66
63void 67void
64igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
65{ 69{
66 if ((dev_priv->gt_irq_mask_reg & mask) != 0) { 70 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
67 dev_priv->gt_irq_mask_reg &= ~mask; 71 dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +75,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
71} 75}
72 76
73static inline void 77static inline void
74igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
75{ 79{
76 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
77 dev_priv->gt_irq_mask_reg |= mask; 81 dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +86,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
82 86
83/* For display hotplug interrupt */ 87/* For display hotplug interrupt */
84void 88void
85igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86{ 90{
87 if ((dev_priv->irq_mask_reg & mask) != 0) { 91 if ((dev_priv->irq_mask_reg & mask) != 0) {
88 dev_priv->irq_mask_reg &= ~mask; 92 dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +96,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
92} 96}
93 97
94static inline void 98static inline void
95igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 99ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96{ 100{
97 if ((dev_priv->irq_mask_reg & mask) != mask) { 101 if ((dev_priv->irq_mask_reg & mask) != mask) {
98 dev_priv->irq_mask_reg |= mask; 102 dev_priv->irq_mask_reg |= mask;
@@ -157,6 +161,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
157} 161}
158 162
159/** 163/**
164 * intel_enable_asle - enable ASLE interrupt for OpRegion
165 */
166void intel_enable_asle (struct drm_device *dev)
167{
168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
169
170 if (HAS_PCH_SPLIT(dev))
171 ironlake_enable_display_irq(dev_priv, DE_GSE);
172 else
173 i915_enable_pipestat(dev_priv, 1,
174 I915_LEGACY_BLC_EVENT_ENABLE);
175}
176
177/**
160 * i915_pipe_enabled - check if a pipe is enabled 178 * i915_pipe_enabled - check if a pipe is enabled
161 * @dev: DRM device 179 * @dev: DRM device
162 * @pipe: pipe to check 180 * @pipe: pipe to check
@@ -191,7 +209,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
191 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 209 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
192 210
193 if (!i915_pipe_enabled(dev, pipe)) { 211 if (!i915_pipe_enabled(dev, pipe)) {
194 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); 212 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
213 "pipe %d\n", pipe);
195 return 0; 214 return 0;
196 } 215 }
197 216
@@ -220,7 +239,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
220 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; 239 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
221 240
222 if (!i915_pipe_enabled(dev, pipe)) { 241 if (!i915_pipe_enabled(dev, pipe)) {
223 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); 242 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
243 "pipe %d\n", pipe);
224 return 0; 244 return 0;
225 } 245 }
226 246
@@ -240,22 +260,72 @@ static void i915_hotplug_work_func(struct work_struct *work)
240 260
241 if (mode_config->num_connector) { 261 if (mode_config->num_connector) {
242 list_for_each_entry(connector, &mode_config->connector_list, head) { 262 list_for_each_entry(connector, &mode_config->connector_list, head) {
243 struct intel_output *intel_output = to_intel_output(connector); 263 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
244 264
245 if (intel_output->hot_plug) 265 if (intel_encoder->hot_plug)
246 (*intel_output->hot_plug) (intel_output); 266 (*intel_encoder->hot_plug) (intel_encoder);
247 } 267 }
248 } 268 }
249 /* Just fire off a uevent and let userspace tell us what to do */ 269 /* Just fire off a uevent and let userspace tell us what to do */
250 drm_sysfs_hotplug_event(dev); 270 drm_sysfs_hotplug_event(dev);
251} 271}
252 272
253irqreturn_t igdng_irq_handler(struct drm_device *dev) 273static void i915_handle_rps_change(struct drm_device *dev)
274{
275 drm_i915_private_t *dev_priv = dev->dev_private;
276 u32 busy_up, busy_down, max_avg, min_avg;
277 u16 rgvswctl;
278 u8 new_delay = dev_priv->cur_delay;
279
280 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
281 busy_up = I915_READ(RCPREVBSYTUPAVG);
282 busy_down = I915_READ(RCPREVBSYTDNAVG);
283 max_avg = I915_READ(RCBMAXAVG);
284 min_avg = I915_READ(RCBMINAVG);
285
286 /* Handle RCS change request from hw */
287 if (busy_up > max_avg) {
288 if (dev_priv->cur_delay != dev_priv->max_delay)
289 new_delay = dev_priv->cur_delay - 1;
290 if (new_delay < dev_priv->max_delay)
291 new_delay = dev_priv->max_delay;
292 } else if (busy_down < min_avg) {
293 if (dev_priv->cur_delay != dev_priv->min_delay)
294 new_delay = dev_priv->cur_delay + 1;
295 if (new_delay > dev_priv->min_delay)
296 new_delay = dev_priv->min_delay;
297 }
298
299 DRM_DEBUG("rps change requested: %d -> %d\n",
300 dev_priv->cur_delay, new_delay);
301
302 rgvswctl = I915_READ(MEMSWCTL);
303 if (rgvswctl & MEMCTL_CMD_STS) {
304 DRM_ERROR("gpu busy, RCS change rejected\n");
305 return; /* still busy with another command */
306 }
307
308 /* Program the new state */
309 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
310 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
311 I915_WRITE(MEMSWCTL, rgvswctl);
312 POSTING_READ(MEMSWCTL);
313
314 rgvswctl |= MEMCTL_CMD_STS;
315 I915_WRITE(MEMSWCTL, rgvswctl);
316
317 dev_priv->cur_delay = new_delay;
318
319 DRM_DEBUG("rps changed\n");
320
321 return;
322}
323
324irqreturn_t ironlake_irq_handler(struct drm_device *dev)
254{ 325{
255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 326 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 int ret = IRQ_NONE; 327 int ret = IRQ_NONE;
257 u32 de_iir, gt_iir, de_ier; 328 u32 de_iir, gt_iir, de_ier, pch_iir;
258 u32 new_de_iir, new_gt_iir;
259 struct drm_i915_master_private *master_priv; 329 struct drm_i915_master_private *master_priv;
260 330
261 /* disable master interrupt before clearing iir */ 331 /* disable master interrupt before clearing iir */
@@ -265,36 +335,65 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
265 335
266 de_iir = I915_READ(DEIIR); 336 de_iir = I915_READ(DEIIR);
267 gt_iir = I915_READ(GTIIR); 337 gt_iir = I915_READ(GTIIR);
338 pch_iir = I915_READ(SDEIIR);
268 339
269 for (;;) { 340 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
270 if (de_iir == 0 && gt_iir == 0) 341 goto done;
271 break;
272 342
273 ret = IRQ_HANDLED; 343 ret = IRQ_HANDLED;
274 344
275 I915_WRITE(DEIIR, de_iir); 345 if (dev->primary->master) {
276 new_de_iir = I915_READ(DEIIR); 346 master_priv = dev->primary->master->driver_priv;
277 I915_WRITE(GTIIR, gt_iir); 347 if (master_priv->sarea_priv)
278 new_gt_iir = I915_READ(GTIIR); 348 master_priv->sarea_priv->last_dispatch =
349 READ_BREADCRUMB(dev_priv);
350 }
279 351
280 if (dev->primary->master) { 352 if (gt_iir & GT_PIPE_NOTIFY) {
281 master_priv = dev->primary->master->driver_priv; 353 u32 seqno = i915_get_gem_seqno(dev);
282 if (master_priv->sarea_priv) 354 dev_priv->mm.irq_gem_seqno = seqno;
283 master_priv->sarea_priv->last_dispatch = 355 trace_i915_gem_request_complete(dev, seqno);
284 READ_BREADCRUMB(dev_priv); 356 DRM_WAKEUP(&dev_priv->irq_queue);
285 } 357 dev_priv->hangcheck_count = 0;
358 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
359 }
286 360
287 if (gt_iir & GT_USER_INTERRUPT) { 361 if (de_iir & DE_GSE)
288 u32 seqno = i915_get_gem_seqno(dev); 362 ironlake_opregion_gse_intr(dev);
289 dev_priv->mm.irq_gem_seqno = seqno; 363
290 trace_i915_gem_request_complete(dev, seqno); 364 if (de_iir & DE_PLANEA_FLIP_DONE) {
291 DRM_WAKEUP(&dev_priv->irq_queue); 365 intel_prepare_page_flip(dev, 0);
292 } 366 intel_finish_page_flip(dev, 0);
367 }
368
369 if (de_iir & DE_PLANEB_FLIP_DONE) {
370 intel_prepare_page_flip(dev, 1);
371 intel_finish_page_flip(dev, 1);
372 }
373
374 if (de_iir & DE_PIPEA_VBLANK)
375 drm_handle_vblank(dev, 0);
293 376
294 de_iir = new_de_iir; 377 if (de_iir & DE_PIPEB_VBLANK)
295 gt_iir = new_gt_iir; 378 drm_handle_vblank(dev, 1);
379
380 /* check event from PCH */
381 if ((de_iir & DE_PCH_EVENT) &&
382 (pch_iir & SDE_HOTPLUG_MASK)) {
383 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
384 }
385
386 if (de_iir & DE_PCU_EVENT) {
387 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
388 i915_handle_rps_change(dev);
296 } 389 }
297 390
391 /* should clear PCH hotplug event before clear CPU irq */
392 I915_WRITE(SDEIIR, pch_iir);
393 I915_WRITE(GTIIR, gt_iir);
394 I915_WRITE(DEIIR, de_iir);
395
396done:
298 I915_WRITE(DEIER, de_ier); 397 I915_WRITE(DEIER, de_ier);
299 (void)I915_READ(DEIER); 398 (void)I915_READ(DEIER);
300 399
@@ -317,23 +416,142 @@ static void i915_error_work_func(struct work_struct *work)
317 char *reset_event[] = { "RESET=1", NULL }; 416 char *reset_event[] = { "RESET=1", NULL };
318 char *reset_done_event[] = { "ERROR=0", NULL }; 417 char *reset_done_event[] = { "ERROR=0", NULL };
319 418
320 DRM_DEBUG("generating error event\n"); 419 DRM_DEBUG_DRIVER("generating error event\n");
321 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 420 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
322 421
323 if (atomic_read(&dev_priv->mm.wedged)) { 422 if (atomic_read(&dev_priv->mm.wedged)) {
324 if (IS_I965G(dev)) { 423 if (IS_I965G(dev)) {
325 DRM_DEBUG("resetting chip\n"); 424 DRM_DEBUG_DRIVER("resetting chip\n");
326 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 425 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
327 if (!i965_reset(dev, GDRST_RENDER)) { 426 if (!i965_reset(dev, GDRST_RENDER)) {
328 atomic_set(&dev_priv->mm.wedged, 0); 427 atomic_set(&dev_priv->mm.wedged, 0);
329 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 428 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
330 } 429 }
331 } else { 430 } else {
332 printk("reboot required\n"); 431 DRM_DEBUG_DRIVER("reboot required\n");
333 } 432 }
334 } 433 }
335} 434}
336 435
436static struct drm_i915_error_object *
437i915_error_object_create(struct drm_device *dev,
438 struct drm_gem_object *src)
439{
440 struct drm_i915_error_object *dst;
441 struct drm_i915_gem_object *src_priv;
442 int page, page_count;
443
444 if (src == NULL)
445 return NULL;
446
447 src_priv = to_intel_bo(src);
448 if (src_priv->pages == NULL)
449 return NULL;
450
451 page_count = src->size / PAGE_SIZE;
452
453 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
454 if (dst == NULL)
455 return NULL;
456
457 for (page = 0; page < page_count; page++) {
458 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
459 unsigned long flags;
460
461 if (d == NULL)
462 goto unwind;
463 local_irq_save(flags);
464 s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
465 memcpy(d, s, PAGE_SIZE);
466 kunmap_atomic(s, KM_IRQ0);
467 local_irq_restore(flags);
468 dst->pages[page] = d;
469 }
470 dst->page_count = page_count;
471 dst->gtt_offset = src_priv->gtt_offset;
472
473 return dst;
474
475unwind:
476 while (page--)
477 kfree(dst->pages[page]);
478 kfree(dst);
479 return NULL;
480}
481
482static void
483i915_error_object_free(struct drm_i915_error_object *obj)
484{
485 int page;
486
487 if (obj == NULL)
488 return;
489
490 for (page = 0; page < obj->page_count; page++)
491 kfree(obj->pages[page]);
492
493 kfree(obj);
494}
495
496static void
497i915_error_state_free(struct drm_device *dev,
498 struct drm_i915_error_state *error)
499{
500 i915_error_object_free(error->batchbuffer[0]);
501 i915_error_object_free(error->batchbuffer[1]);
502 i915_error_object_free(error->ringbuffer);
503 kfree(error->active_bo);
504 kfree(error);
505}
506
507static u32
508i915_get_bbaddr(struct drm_device *dev, u32 *ring)
509{
510 u32 cmd;
511
512 if (IS_I830(dev) || IS_845G(dev))
513 cmd = MI_BATCH_BUFFER;
514 else if (IS_I965G(dev))
515 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
516 MI_BATCH_NON_SECURE_I965);
517 else
518 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
519
520 return ring[0] == cmd ? ring[1] : 0;
521}
522
523static u32
524i915_ringbuffer_last_batch(struct drm_device *dev)
525{
526 struct drm_i915_private *dev_priv = dev->dev_private;
527 u32 head, bbaddr;
528 u32 *ring;
529
530 /* Locate the current position in the ringbuffer and walk back
531 * to find the most recently dispatched batch buffer.
532 */
533 bbaddr = 0;
534 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
535 ring = (u32 *)(dev_priv->ring.virtual_start + head);
536
537 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
538 bbaddr = i915_get_bbaddr(dev, ring);
539 if (bbaddr)
540 break;
541 }
542
543 if (bbaddr == 0) {
544 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
545 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
546 bbaddr = i915_get_bbaddr(dev, ring);
547 if (bbaddr)
548 break;
549 }
550 }
551
552 return bbaddr;
553}
554
337/** 555/**
338 * i915_capture_error_state - capture an error record for later analysis 556 * i915_capture_error_state - capture an error record for later analysis
339 * @dev: drm device 557 * @dev: drm device
@@ -346,19 +564,26 @@ static void i915_error_work_func(struct work_struct *work)
346static void i915_capture_error_state(struct drm_device *dev) 564static void i915_capture_error_state(struct drm_device *dev)
347{ 565{
348 struct drm_i915_private *dev_priv = dev->dev_private; 566 struct drm_i915_private *dev_priv = dev->dev_private;
567 struct drm_i915_gem_object *obj_priv;
349 struct drm_i915_error_state *error; 568 struct drm_i915_error_state *error;
569 struct drm_gem_object *batchbuffer[2];
350 unsigned long flags; 570 unsigned long flags;
571 u32 bbaddr;
572 int count;
351 573
352 spin_lock_irqsave(&dev_priv->error_lock, flags); 574 spin_lock_irqsave(&dev_priv->error_lock, flags);
353 if (dev_priv->first_error) 575 error = dev_priv->first_error;
354 goto out; 576 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
577 if (error)
578 return;
355 579
356 error = kmalloc(sizeof(*error), GFP_ATOMIC); 580 error = kmalloc(sizeof(*error), GFP_ATOMIC);
357 if (!error) { 581 if (!error) {
358 DRM_DEBUG("out ot memory, not capturing error state\n"); 582 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
359 goto out; 583 return;
360 } 584 }
361 585
586 error->seqno = i915_get_gem_seqno(dev);
362 error->eir = I915_READ(EIR); 587 error->eir = I915_READ(EIR);
363 error->pgtbl_er = I915_READ(PGTBL_ER); 588 error->pgtbl_er = I915_READ(PGTBL_ER);
364 error->pipeastat = I915_READ(PIPEASTAT); 589 error->pipeastat = I915_READ(PIPEASTAT);
@@ -369,6 +594,7 @@ static void i915_capture_error_state(struct drm_device *dev)
369 error->ipehr = I915_READ(IPEHR); 594 error->ipehr = I915_READ(IPEHR);
370 error->instdone = I915_READ(INSTDONE); 595 error->instdone = I915_READ(INSTDONE);
371 error->acthd = I915_READ(ACTHD); 596 error->acthd = I915_READ(ACTHD);
597 error->bbaddr = 0;
372 } else { 598 } else {
373 error->ipeir = I915_READ(IPEIR_I965); 599 error->ipeir = I915_READ(IPEIR_I965);
374 error->ipehr = I915_READ(IPEHR_I965); 600 error->ipehr = I915_READ(IPEHR_I965);
@@ -376,14 +602,101 @@ static void i915_capture_error_state(struct drm_device *dev)
376 error->instps = I915_READ(INSTPS); 602 error->instps = I915_READ(INSTPS);
377 error->instdone1 = I915_READ(INSTDONE1); 603 error->instdone1 = I915_READ(INSTDONE1);
378 error->acthd = I915_READ(ACTHD_I965); 604 error->acthd = I915_READ(ACTHD_I965);
605 error->bbaddr = I915_READ64(BB_ADDR);
379 } 606 }
380 607
381 do_gettimeofday(&error->time); 608 bbaddr = i915_ringbuffer_last_batch(dev);
609
610 /* Grab the current batchbuffer, most likely to have crashed. */
611 batchbuffer[0] = NULL;
612 batchbuffer[1] = NULL;
613 count = 0;
614 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
615 struct drm_gem_object *obj = obj_priv->obj;
616
617 if (batchbuffer[0] == NULL &&
618 bbaddr >= obj_priv->gtt_offset &&
619 bbaddr < obj_priv->gtt_offset + obj->size)
620 batchbuffer[0] = obj;
621
622 if (batchbuffer[1] == NULL &&
623 error->acthd >= obj_priv->gtt_offset &&
624 error->acthd < obj_priv->gtt_offset + obj->size &&
625 batchbuffer[0] != obj)
626 batchbuffer[1] = obj;
627
628 count++;
629 }
630
631 /* We need to copy these to an anonymous buffer as the simplest
632 * method to avoid being overwritten by userpace.
633 */
634 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
635 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
636
637 /* Record the ringbuffer */
638 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
639
640 /* Record buffers on the active list. */
641 error->active_bo = NULL;
642 error->active_bo_count = 0;
643
644 if (count)
645 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
646 GFP_ATOMIC);
647
648 if (error->active_bo) {
649 int i = 0;
650 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
651 struct drm_gem_object *obj = obj_priv->obj;
652
653 error->active_bo[i].size = obj->size;
654 error->active_bo[i].name = obj->name;
655 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
656 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
657 error->active_bo[i].read_domains = obj->read_domains;
658 error->active_bo[i].write_domain = obj->write_domain;
659 error->active_bo[i].fence_reg = obj_priv->fence_reg;
660 error->active_bo[i].pinned = 0;
661 if (obj_priv->pin_count > 0)
662 error->active_bo[i].pinned = 1;
663 if (obj_priv->user_pin_count > 0)
664 error->active_bo[i].pinned = -1;
665 error->active_bo[i].tiling = obj_priv->tiling_mode;
666 error->active_bo[i].dirty = obj_priv->dirty;
667 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
668
669 if (++i == count)
670 break;
671 }
672 error->active_bo_count = i;
673 }
382 674
383 dev_priv->first_error = error; 675 do_gettimeofday(&error->time);
384 676
385out: 677 spin_lock_irqsave(&dev_priv->error_lock, flags);
678 if (dev_priv->first_error == NULL) {
679 dev_priv->first_error = error;
680 error = NULL;
681 }
386 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 682 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
683
684 if (error)
685 i915_error_state_free(dev, error);
686}
687
688void i915_destroy_error_state(struct drm_device *dev)
689{
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct drm_i915_error_state *error;
692
693 spin_lock(&dev_priv->error_lock);
694 error = dev_priv->first_error;
695 dev_priv->first_error = NULL;
696 spin_unlock(&dev_priv->error_lock);
697
698 if (error)
699 i915_error_state_free(dev, error);
387} 700}
388 701
389/** 702/**
@@ -512,7 +825,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
512 /* 825 /*
513 * Wakeup waiting processes so they don't hang 826 * Wakeup waiting processes so they don't hang
514 */ 827 */
515 printk("i915: Waking up sleeping processes\n");
516 DRM_WAKEUP(&dev_priv->irq_queue); 828 DRM_WAKEUP(&dev_priv->irq_queue);
517 } 829 }
518 830
@@ -535,8 +847,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
535 847
536 atomic_inc(&dev_priv->irq_received); 848 atomic_inc(&dev_priv->irq_received);
537 849
538 if (IS_IGDNG(dev)) 850 if (HAS_PCH_SPLIT(dev))
539 return igdng_irq_handler(dev); 851 return ironlake_irq_handler(dev);
540 852
541 iir = I915_READ(IIR); 853 iir = I915_READ(IIR);
542 854
@@ -568,14 +880,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
568 */ 880 */
569 if (pipea_stats & 0x8000ffff) { 881 if (pipea_stats & 0x8000ffff) {
570 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) 882 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
571 DRM_DEBUG("pipe a underrun\n"); 883 DRM_DEBUG_DRIVER("pipe a underrun\n");
572 I915_WRITE(PIPEASTAT, pipea_stats); 884 I915_WRITE(PIPEASTAT, pipea_stats);
573 irq_received = 1; 885 irq_received = 1;
574 } 886 }
575 887
576 if (pipeb_stats & 0x8000ffff) { 888 if (pipeb_stats & 0x8000ffff) {
577 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) 889 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
578 DRM_DEBUG("pipe b underrun\n"); 890 DRM_DEBUG_DRIVER("pipe b underrun\n");
579 I915_WRITE(PIPEBSTAT, pipeb_stats); 891 I915_WRITE(PIPEBSTAT, pipeb_stats);
580 irq_received = 1; 892 irq_received = 1;
581 } 893 }
@@ -591,7 +903,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
591 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 903 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
592 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 904 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
593 905
594 DRM_DEBUG("hotplug event received, stat 0x%08x\n", 906 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
595 hotplug_status); 907 hotplug_status);
596 if (hotplug_status & dev_priv->hotplug_supported_mask) 908 if (hotplug_status & dev_priv->hotplug_supported_mask)
597 queue_work(dev_priv->wq, 909 queue_work(dev_priv->wq,
@@ -599,27 +911,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
599 911
600 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 912 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
601 I915_READ(PORT_HOTPLUG_STAT); 913 I915_READ(PORT_HOTPLUG_STAT);
602
603 /* EOS interrupts occurs */
604 if (IS_IGD(dev) &&
605 (hotplug_status & CRT_EOS_INT_STATUS)) {
606 u32 temp;
607
608 DRM_DEBUG("EOS interrupt occurs\n");
609 /* status is already cleared */
610 temp = I915_READ(ADPA);
611 temp &= ~ADPA_DAC_ENABLE;
612 I915_WRITE(ADPA, temp);
613
614 temp = I915_READ(PORT_HOTPLUG_EN);
615 temp &= ~CRT_EOS_INT_EN;
616 I915_WRITE(PORT_HOTPLUG_EN, temp);
617
618 temp = I915_READ(PORT_HOTPLUG_STAT);
619 if (temp & CRT_EOS_INT_STATUS)
620 I915_WRITE(PORT_HOTPLUG_STAT,
621 CRT_EOS_INT_STATUS);
622 }
623 } 914 }
624 915
625 I915_WRITE(IIR, iir); 916 I915_WRITE(IIR, iir);
@@ -641,14 +932,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
641 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 932 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
642 } 933 }
643 934
935 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
936 intel_prepare_page_flip(dev, 0);
937
938 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
939 intel_prepare_page_flip(dev, 1);
940
644 if (pipea_stats & vblank_status) { 941 if (pipea_stats & vblank_status) {
645 vblank++; 942 vblank++;
646 drm_handle_vblank(dev, 0); 943 drm_handle_vblank(dev, 0);
944 intel_finish_page_flip(dev, 0);
647 } 945 }
648 946
649 if (pipeb_stats & vblank_status) { 947 if (pipeb_stats & vblank_status) {
650 vblank++; 948 vblank++;
651 drm_handle_vblank(dev, 1); 949 drm_handle_vblank(dev, 1);
950 intel_finish_page_flip(dev, 1);
652 } 951 }
653 952
654 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || 953 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +983,7 @@ static int i915_emit_irq(struct drm_device * dev)
684 983
685 i915_kernel_lost_context(dev); 984 i915_kernel_lost_context(dev);
686 985
687 DRM_DEBUG("\n"); 986 DRM_DEBUG_DRIVER("\n");
688 987
689 dev_priv->counter++; 988 dev_priv->counter++;
690 if (dev_priv->counter > 0x7FFFFFFFUL) 989 if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +1008,8 @@ void i915_user_irq_get(struct drm_device *dev)
709 1008
710 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1009 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
711 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1010 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
712 if (IS_IGDNG(dev)) 1011 if (HAS_PCH_SPLIT(dev))
713 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1012 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
714 else 1013 else
715 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1014 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
716 } 1015 }
@@ -725,8 +1024,8 @@ void i915_user_irq_put(struct drm_device *dev)
725 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1024 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
726 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1025 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
727 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1026 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
728 if (IS_IGDNG(dev)) 1027 if (HAS_PCH_SPLIT(dev))
729 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1028 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
730 else 1029 else
731 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1030 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
732 } 1031 }
@@ -749,7 +1048,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
749 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1048 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
750 int ret = 0; 1049 int ret = 0;
751 1050
752 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 1051 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
753 READ_BREADCRUMB(dev_priv)); 1052 READ_BREADCRUMB(dev_priv));
754 1053
755 if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 1054 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,11 +1131,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
832 if (!(pipeconf & PIPEACONF_ENABLE)) 1131 if (!(pipeconf & PIPEACONF_ENABLE))
833 return -EINVAL; 1132 return -EINVAL;
834 1133
835 if (IS_IGDNG(dev))
836 return 0;
837
838 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1134 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
839 if (IS_I965G(dev)) 1135 if (HAS_PCH_SPLIT(dev))
1136 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1137 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1138 else if (IS_I965G(dev))
840 i915_enable_pipestat(dev_priv, pipe, 1139 i915_enable_pipestat(dev_priv, pipe,
841 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1140 PIPE_START_VBLANK_INTERRUPT_ENABLE);
842 else 1141 else
@@ -854,13 +1153,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1153 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
855 unsigned long irqflags; 1154 unsigned long irqflags;
856 1155
857 if (IS_IGDNG(dev))
858 return;
859
860 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1156 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
861 i915_disable_pipestat(dev_priv, pipe, 1157 if (HAS_PCH_SPLIT(dev))
862 PIPE_VBLANK_INTERRUPT_ENABLE | 1158 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
863 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1159 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1160 else
1161 i915_disable_pipestat(dev_priv, pipe,
1162 PIPE_VBLANK_INTERRUPT_ENABLE |
1163 PIPE_START_VBLANK_INTERRUPT_ENABLE);
864 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1164 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
865} 1165}
866 1166
@@ -868,7 +1168,7 @@ void i915_enable_interrupt (struct drm_device *dev)
868{ 1168{
869 struct drm_i915_private *dev_priv = dev->dev_private; 1169 struct drm_i915_private *dev_priv = dev->dev_private;
870 1170
871 if (!IS_IGDNG(dev)) 1171 if (!HAS_PCH_SPLIT(dev))
872 opregion_enable_asle(dev); 1172 opregion_enable_asle(dev);
873 dev_priv->irq_enabled = 1; 1173 dev_priv->irq_enabled = 1;
874} 1174}
@@ -944,7 +1244,11 @@ void i915_hangcheck_elapsed(unsigned long data)
944 struct drm_device *dev = (struct drm_device *)data; 1244 struct drm_device *dev = (struct drm_device *)data;
945 drm_i915_private_t *dev_priv = dev->dev_private; 1245 drm_i915_private_t *dev_priv = dev->dev_private;
946 uint32_t acthd; 1246 uint32_t acthd;
947 1247
1248 /* No reset support on this chip yet. */
1249 if (IS_GEN6(dev))
1250 return;
1251
948 if (!IS_I965G(dev)) 1252 if (!IS_I965G(dev))
949 acthd = I915_READ(ACTHD); 1253 acthd = I915_READ(ACTHD);
950 else 1254 else
@@ -976,7 +1280,7 @@ void i915_hangcheck_elapsed(unsigned long data)
976 1280
977/* drm_dma.h hooks 1281/* drm_dma.h hooks
978*/ 1282*/
979static void igdng_irq_preinstall(struct drm_device *dev) 1283static void ironlake_irq_preinstall(struct drm_device *dev)
980{ 1284{
981 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1285 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
982 1286
@@ -992,17 +1296,25 @@ static void igdng_irq_preinstall(struct drm_device *dev)
992 I915_WRITE(GTIMR, 0xffffffff); 1296 I915_WRITE(GTIMR, 0xffffffff);
993 I915_WRITE(GTIER, 0x0); 1297 I915_WRITE(GTIER, 0x0);
994 (void) I915_READ(GTIER); 1298 (void) I915_READ(GTIER);
1299
1300 /* south display irq */
1301 I915_WRITE(SDEIMR, 0xffffffff);
1302 I915_WRITE(SDEIER, 0x0);
1303 (void) I915_READ(SDEIER);
995} 1304}
996 1305
997static int igdng_irq_postinstall(struct drm_device *dev) 1306static int ironlake_irq_postinstall(struct drm_device *dev)
998{ 1307{
999 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1308 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1000 /* enable kind of interrupts always enabled */ 1309 /* enable kind of interrupts always enabled */
1001 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */; 1310 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1002 u32 render_mask = GT_USER_INTERRUPT; 1311 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1312 u32 render_mask = GT_PIPE_NOTIFY;
1313 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1314 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1003 1315
1004 dev_priv->irq_mask_reg = ~display_mask; 1316 dev_priv->irq_mask_reg = ~display_mask;
1005 dev_priv->de_irq_enable_reg = display_mask; 1317 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1006 1318
1007 /* should always can generate irq */ 1319 /* should always can generate irq */
1008 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1320 I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1019,6 +1331,21 @@ static int igdng_irq_postinstall(struct drm_device *dev)
1019 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1331 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1020 (void) I915_READ(GTIER); 1332 (void) I915_READ(GTIER);
1021 1333
1334 dev_priv->pch_irq_mask_reg = ~hotplug_mask;
1335 dev_priv->pch_irq_enable_reg = hotplug_mask;
1336
1337 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1338 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
1339 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
1340 (void) I915_READ(SDEIER);
1341
1342 if (IS_IRONLAKE_M(dev)) {
1343 /* Clear & enable PCU event interrupts */
1344 I915_WRITE(DEIIR, DE_PCU_EVENT);
1345 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1346 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1347 }
1348
1022 return 0; 1349 return 0;
1023} 1350}
1024 1351
@@ -1031,8 +1358,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1031 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1358 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1032 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1359 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1033 1360
1034 if (IS_IGDNG(dev)) { 1361 if (HAS_PCH_SPLIT(dev)) {
1035 igdng_irq_preinstall(dev); 1362 ironlake_irq_preinstall(dev);
1036 return; 1363 return;
1037 } 1364 }
1038 1365
@@ -1049,6 +1376,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1049 (void) I915_READ(IER); 1376 (void) I915_READ(IER);
1050} 1377}
1051 1378
1379/*
1380 * Must be called after intel_modeset_init or hotplug interrupts won't be
1381 * enabled correctly.
1382 */
1052int i915_driver_irq_postinstall(struct drm_device *dev) 1383int i915_driver_irq_postinstall(struct drm_device *dev)
1053{ 1384{
1054 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1385 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1059,8 +1390,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1059 1390
1060 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1391 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1061 1392
1062 if (IS_IGDNG(dev)) 1393 if (HAS_PCH_SPLIT(dev))
1063 return igdng_irq_postinstall(dev); 1394 return ironlake_irq_postinstall(dev);
1064 1395
1065 /* Unmask the interrupts that we always want on. */ 1396 /* Unmask the interrupts that we always want on. */
1066 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 1397 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1071,19 +1402,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1071 if (I915_HAS_HOTPLUG(dev)) { 1402 if (I915_HAS_HOTPLUG(dev)) {
1072 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1403 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1073 1404
1074 /* Leave other bits alone */ 1405 /* Note HDMI and DP share bits */
1075 hotplug_en |= HOTPLUG_EN_MASK; 1406 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1407 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1408 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1409 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1410 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1411 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1412 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1413 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1414 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1415 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1416 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
1417 hotplug_en |= CRT_HOTPLUG_INT_EN;
1418 /* Ignore TV since it's buggy */
1419
1076 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1420 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1077 1421
1078 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1079 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1080 SDVOB_HOTPLUG_INT_STATUS;
1081 if (IS_G4X(dev)) {
1082 dev_priv->hotplug_supported_mask |=
1083 HDMIB_HOTPLUG_INT_STATUS |
1084 HDMIC_HOTPLUG_INT_STATUS |
1085 HDMID_HOTPLUG_INT_STATUS;
1086 }
1087 /* Enable in IER... */ 1422 /* Enable in IER... */
1088 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1423 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1089 /* and unmask in IMR */ 1424 /* and unmask in IMR */
@@ -1120,7 +1455,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1120 return 0; 1455 return 0;
1121} 1456}
1122 1457
1123static void igdng_irq_uninstall(struct drm_device *dev) 1458static void ironlake_irq_uninstall(struct drm_device *dev)
1124{ 1459{
1125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1460 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1126 I915_WRITE(HWSTAM, 0xffffffff); 1461 I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1478,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1143 1478
1144 dev_priv->vblank_pipe = 0; 1479 dev_priv->vblank_pipe = 0;
1145 1480
1146 if (IS_IGDNG(dev)) { 1481 if (HAS_PCH_SPLIT(dev)) {
1147 igdng_irq_uninstall(dev); 1482 ironlake_irq_uninstall(dev);
1148 return; 1483 return;
1149 } 1484 }
1150 1485
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3f..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
118#define ASLE_BACKLIGHT_FAIL (2<<12) 118#define ASLE_BACKLIGHT_FAIL (2<<12)
119#define ASLE_PFIT_FAIL (2<<14) 119#define ASLE_PFIT_FAIL (2<<14)
120#define ASLE_PWM_FREQ_FAIL (2<<16) 120#define ASLE_PWM_FREQ_FAIL (2<<16)
121#define ASLE_ALS_ILLUM_FAILED (1<<10)
122#define ASLE_BACKLIGHT_FAILED (1<<12)
123#define ASLE_PFIT_FAILED (1<<14)
124#define ASLE_PWM_FREQ_FAILED (1<<16)
121 125
122/* ASLE backlight brightness to set */ 126/* ASLE backlight brightness to set */
123#define ASLE_BCLP_VALID (1<<31) 127#define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) 167 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 168 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else { 169 else {
166 if (IS_IGD(dev)) { 170 if (IS_PINEVIEW(dev)) {
167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 171 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 172 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT; 173 BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
224 asle_req = asle->aslc & ASLE_REQ_MSK; 228 asle_req = asle->aslc & ASLE_REQ_MSK;
225 229
226 if (!asle_req) { 230 if (!asle_req) {
227 DRM_DEBUG("non asle set request??\n"); 231 DRM_DEBUG_DRIVER("non asle set request??\n");
228 return; 232 return;
229 } 233 }
230 234
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
243 asle->aslc = asle_stat; 247 asle->aslc = asle_stat;
244} 248}
245 249
250static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 struct opregion_asle *asle = dev_priv->opregion.asle;
254 u32 cpu_pwm_ctl, pch_pwm_ctl2;
255 u32 max_backlight, level;
256
257 if (!(bclp & ASLE_BCLP_VALID))
258 return ASLE_BACKLIGHT_FAILED;
259
260 bclp &= ASLE_BCLP_MSK;
261 if (bclp < 0 || bclp > 255)
262 return ASLE_BACKLIGHT_FAILED;
263
264 cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
265 pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
266 /* get the max PWM frequency */
267 max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
268 /* calculate the expected PMW frequency */
269 level = (bclp * max_backlight) / 255;
270 /* reserve the high 16 bits */
271 cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
272 /* write the updated PWM frequency */
273 I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
274
275 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
276
277 return 0;
278}
279
280void ironlake_opregion_gse_intr(struct drm_device *dev)
281{
282 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct opregion_asle *asle = dev_priv->opregion.asle;
284 u32 asle_stat = 0;
285 u32 asle_req;
286
287 if (!asle)
288 return;
289
290 asle_req = asle->aslc & ASLE_REQ_MSK;
291
292 if (!asle_req) {
293 DRM_DEBUG_DRIVER("non asle set request??\n");
294 return;
295 }
296
297 if (asle_req & ASLE_SET_ALS_ILLUM) {
298 DRM_DEBUG_DRIVER("Illum is not supported\n");
299 asle_stat |= ASLE_ALS_ILLUM_FAILED;
300 }
301
302 if (asle_req & ASLE_SET_BACKLIGHT)
303 asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
304
305 if (asle_req & ASLE_SET_PFIT) {
306 DRM_DEBUG_DRIVER("Pfit is not supported\n");
307 asle_stat |= ASLE_PFIT_FAILED;
308 }
309
310 if (asle_req & ASLE_SET_PWM_FREQ) {
311 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
312 asle_stat |= ASLE_PWM_FREQ_FAILED;
313 }
314
315 asle->aslc = asle_stat;
316}
246#define ASLE_ALS_EN (1<<0) 317#define ASLE_ALS_EN (1<<0)
247#define ASLE_BLC_EN (1<<1) 318#define ASLE_BLC_EN (1<<1)
248#define ASLE_PFIT_EN (1<<2) 319#define ASLE_PFIT_EN (1<<2)
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
258 unsigned long irqflags; 329 unsigned long irqflags;
259 330
260 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 331 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
261 i915_enable_pipestat(dev_priv, 1, 332 intel_enable_asle(dev);
262 I915_LEGACY_BLC_EVENT_ENABLE);
263 spin_unlock_irqrestore(&dev_priv->user_irq_lock, 333 spin_unlock_irqrestore(&dev_priv->user_irq_lock,
264 irqflags); 334 irqflags);
265 } 335 }
@@ -312,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
312 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
313 struct intel_opregion *opregion = &dev_priv->opregion; 383 struct intel_opregion *opregion = &dev_priv->opregion;
314 struct drm_connector *connector; 384 struct drm_connector *connector;
385 acpi_handle handle;
386 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
387 unsigned long long device_id;
388 acpi_status status;
315 int i = 0; 389 int i = 0;
316 390
391 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
392 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
393 return;
394
395 if (acpi_is_video_device(acpi_dev))
396 acpi_video_bus = acpi_dev;
397 else {
398 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
399 if (acpi_is_video_device(acpi_cdev)) {
400 acpi_video_bus = acpi_cdev;
401 break;
402 }
403 }
404 }
405
406 if (!acpi_video_bus) {
407 printk(KERN_WARNING "No ACPI video bus found\n");
408 return;
409 }
410
411 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
412 if (i >= 8) {
413 dev_printk (KERN_ERR, &dev->pdev->dev,
414 "More than 8 outputs detected\n");
415 return;
416 }
417 status =
418 acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
419 NULL, &device_id);
420 if (ACPI_SUCCESS(status)) {
421 if (!device_id)
422 goto blind_set;
423 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
424 i++;
425 }
426 }
427
428end:
429 /* If fewer than 8 outputs, the list must be null terminated */
430 if (i < 8)
431 opregion->acpi->didl[i] = 0;
432 return;
433
434blind_set:
435 i = 0;
317 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 436 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
318 int output_type = ACPI_OTHER_OUTPUT; 437 int output_type = ACPI_OTHER_OUTPUT;
319 if (i >= 8) { 438 if (i >= 8) {
@@ -346,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
346 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 465 opregion->acpi->didl[i] |= (1<<31) | output_type | i;
347 i++; 466 i++;
348 } 467 }
349 468 goto end;
350 /* If fewer than 8 outputs, the list must be null terminated */
351 if (i < 8)
352 opregion->acpi->didl[i] = 0;
353} 469}
354 470
355int intel_opregion_init(struct drm_device *dev, int resume) 471int intel_opregion_init(struct drm_device *dev, int resume)
@@ -361,9 +477,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
361 int err = 0; 477 int err = 0;
362 478
363 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 479 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
364 DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); 480 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
365 if (asls == 0) { 481 if (asls == 0) {
366 DRM_DEBUG("ACPI OpRegion not supported!\n"); 482 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
367 return -ENOTSUPP; 483 return -ENOTSUPP;
368 } 484 }
369 485
@@ -373,30 +489,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
373 489
374 opregion->header = base; 490 opregion->header = base;
375 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { 491 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
376 DRM_DEBUG("opregion signature mismatch\n"); 492 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
377 err = -EINVAL; 493 err = -EINVAL;
378 goto err_out; 494 goto err_out;
379 } 495 }
380 496
381 mboxes = opregion->header->mboxes; 497 mboxes = opregion->header->mboxes;
382 if (mboxes & MBOX_ACPI) { 498 if (mboxes & MBOX_ACPI) {
383 DRM_DEBUG("Public ACPI methods supported\n"); 499 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
384 opregion->acpi = base + OPREGION_ACPI_OFFSET; 500 opregion->acpi = base + OPREGION_ACPI_OFFSET;
385 if (drm_core_check_feature(dev, DRIVER_MODESET)) 501 if (drm_core_check_feature(dev, DRIVER_MODESET))
386 intel_didl_outputs(dev); 502 intel_didl_outputs(dev);
387 } else { 503 } else {
388 DRM_DEBUG("Public ACPI methods not supported\n"); 504 DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
389 err = -ENOTSUPP; 505 err = -ENOTSUPP;
390 goto err_out; 506 goto err_out;
391 } 507 }
392 opregion->enabled = 1; 508 opregion->enabled = 1;
393 509
394 if (mboxes & MBOX_SWSCI) { 510 if (mboxes & MBOX_SWSCI) {
395 DRM_DEBUG("SWSCI supported\n"); 511 DRM_DEBUG_DRIVER("SWSCI supported\n");
396 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 512 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
397 } 513 }
398 if (mboxes & MBOX_ASLE) { 514 if (mboxes & MBOX_ASLE) {
399 DRM_DEBUG("ASLE supported\n"); 515 DRM_DEBUG_DRIVER("ASLE supported\n");
400 opregion->asle = base + OPREGION_ASLE_OFFSET; 516 opregion->asle = base + OPREGION_ASLE_OFFSET;
401 opregion_enable_asle(dev); 517 opregion_enable_asle(dev);
402 } 518 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf68795..4cbc5210fd30 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55 55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74
56/* PCI config space */ 75/* PCI config space */
57 76
58#define HPLLCC 0xc0 /* 855 only */ 77#define HPLLCC 0xc0 /* 855 only */
@@ -61,6 +80,7 @@
61#define GC_CLOCK_100_200 (1 << 0) 80#define GC_CLOCK_100_200 (1 << 0)
62#define GC_CLOCK_100_133 (2 << 0) 81#define GC_CLOCK_100_133 (2 << 0)
63#define GC_CLOCK_166_250 (3 << 0) 82#define GC_CLOCK_166_250 (3 << 0)
83#define GCFGC2 0xda
64#define GCFGC 0xf0 /* 915+ only */ 84#define GCFGC 0xf0 /* 915+ only */
65#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 85#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
66#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 86#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -140,6 +160,7 @@
140#define MI_NOOP MI_INSTR(0, 0) 160#define MI_NOOP MI_INSTR(0, 0)
141#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) 161#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
142#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) 162#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
163#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
143#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) 164#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
144#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) 165#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
145#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) 166#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +172,13 @@
151#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 172#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
152#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 173#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
153#define MI_REPORT_HEAD MI_INSTR(0x07, 0) 174#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
175#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
176#define MI_OVERLAY_CONTINUE (0x0<<21)
177#define MI_OVERLAY_ON (0x1<<21)
178#define MI_OVERLAY_OFF (0x2<<21)
154#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) 179#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
180#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
181#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
155#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 182#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
156#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 183#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
157#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 184#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -203,6 +230,16 @@
203#define ASYNC_FLIP (1<<22) 230#define ASYNC_FLIP (1<<22)
204#define DISPLAY_PLANE_A (0<<20) 231#define DISPLAY_PLANE_A (0<<20)
205#define DISPLAY_PLANE_B (1<<20) 232#define DISPLAY_PLANE_B (1<<20)
233#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
234#define PIPE_CONTROL_QW_WRITE (1<<14)
235#define PIPE_CONTROL_DEPTH_STALL (1<<13)
236#define PIPE_CONTROL_WC_FLUSH (1<<12)
237#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
238#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
239#define PIPE_CONTROL_ISP_DIS (1<<9)
240#define PIPE_CONTROL_NOTIFY (1<<8)
241#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
242#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
206 243
207/* 244/*
208 * Fence registers 245 * Fence registers
@@ -214,7 +251,7 @@
214#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 251#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
215#define I830_FENCE_PITCH_SHIFT 4 252#define I830_FENCE_PITCH_SHIFT 4
216#define I830_FENCE_REG_VALID (1<<0) 253#define I830_FENCE_REG_VALID (1<<0)
217#define I915_FENCE_MAX_PITCH_VAL 0x10 254#define I915_FENCE_MAX_PITCH_VAL 4
218#define I830_FENCE_MAX_PITCH_VAL 6 255#define I830_FENCE_MAX_PITCH_VAL 6
219#define I830_FENCE_MAX_SIZE_VAL (1<<8) 256#define I830_FENCE_MAX_SIZE_VAL (1<<8)
220 257
@@ -227,6 +264,9 @@
227#define I965_FENCE_REG_VALID (1<<0) 264#define I965_FENCE_REG_VALID (1<<0)
228#define I965_FENCE_MAX_PITCH_VAL 0x0400 265#define I965_FENCE_MAX_PITCH_VAL 0x0400
229 266
267#define FENCE_REG_SANDYBRIDGE_0 0x100000
268#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
269
230/* 270/*
231 * Instruction and interrupt control regs 271 * Instruction and interrupt control regs
232 */ 272 */
@@ -258,13 +298,20 @@
258#define INSTDONE1 0x0207c /* 965+ only */ 298#define INSTDONE1 0x0207c /* 965+ only */
259#define ACTHD_I965 0x02074 299#define ACTHD_I965 0x02074
260#define HWS_PGA 0x02080 300#define HWS_PGA 0x02080
301#define HWS_PGA_GEN6 0x04080
261#define HWS_ADDRESS_MASK 0xfffff000 302#define HWS_ADDRESS_MASK 0xfffff000
262#define HWS_START_ADDRESS_SHIFT 4 303#define HWS_START_ADDRESS_SHIFT 4
304#define PWRCTXA 0x2088 /* 965GM+ only */
305#define PWRCTX_EN (1<<0)
263#define IPEIR 0x02088 306#define IPEIR 0x02088
264#define IPEHR 0x0208c 307#define IPEHR 0x0208c
265#define INSTDONE 0x02090 308#define INSTDONE 0x02090
266#define NOPID 0x02094 309#define NOPID 0x02094
267#define HWSTAM 0x02098 310#define HWSTAM 0x02098
311
312#define MI_MODE 0x0209c
313# define VS_TIMER_DISPATCH (1 << 6)
314
268#define SCPD0 0x0209c /* 915+ only */ 315#define SCPD0 0x0209c /* 915+ only */
269#define IER 0x020a0 316#define IER 0x020a0
270#define IIR 0x020a4 317#define IIR 0x020a4
@@ -273,7 +320,7 @@
273#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 320#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
274#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 321#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
275#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 322#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
276#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 323#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
277#define I915_HWB_OOM_INTERRUPT (1<<13) 324#define I915_HWB_OOM_INTERRUPT (1<<13)
278#define I915_SYNC_STATUS_INTERRUPT (1<<12) 325#define I915_SYNC_STATUS_INTERRUPT (1<<12)
279#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 326#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -297,11 +344,14 @@
297#define I915_ERROR_MEMORY_REFRESH (1<<1) 344#define I915_ERROR_MEMORY_REFRESH (1<<1)
298#define I915_ERROR_INSTRUCTION (1<<0) 345#define I915_ERROR_INSTRUCTION (1<<0)
299#define INSTPM 0x020c0 346#define INSTPM 0x020c0
347#define INSTPM_SELF_EN (1<<12) /* 915GM only */
300#define ACTHD 0x020c8 348#define ACTHD 0x020c8
301#define FW_BLC 0x020d8 349#define FW_BLC 0x020d8
302#define FW_BLC2 0x020dc 350#define FW_BLC2 0x020dc
303#define FW_BLC_SELF 0x020e0 /* 915+ only */ 351#define FW_BLC_SELF 0x020e0 /* 915+ only */
304#define FW_BLC_SELF_EN (1<<15) 352#define FW_BLC_SELF_EN_MASK (1<<31)
353#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
354#define FW_BLC_SELF_EN (1<<15) /* 945 only */
305#define MM_BURST_LENGTH 0x00700000 355#define MM_BURST_LENGTH 0x00700000
306#define MM_FIFO_WATERMARK 0x0001F000 356#define MM_FIFO_WATERMARK 0x0001F000
307#define LM_BURST_LENGTH 0x00000700 357#define LM_BURST_LENGTH 0x00000700
@@ -315,6 +365,7 @@
315#define CM0_COLOR_EVICT_DISABLE (1<<3) 365#define CM0_COLOR_EVICT_DISABLE (1<<3)
316#define CM0_DEPTH_WRITE_DISABLE (1<<1) 366#define CM0_DEPTH_WRITE_DISABLE (1<<1)
317#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 367#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
368#define BB_ADDR 0x02140 /* 8 bytes */
318#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 369#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
319 370
320 371
@@ -329,6 +380,7 @@
329#define FBC_CTL_PERIODIC (1<<30) 380#define FBC_CTL_PERIODIC (1<<30)
330#define FBC_CTL_INTERVAL_SHIFT (16) 381#define FBC_CTL_INTERVAL_SHIFT (16)
331#define FBC_CTL_UNCOMPRESSIBLE (1<<14) 382#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
383#define FBC_CTL_C3_IDLE (1<<13)
332#define FBC_CTL_STRIDE_SHIFT (5) 384#define FBC_CTL_STRIDE_SHIFT (5)
333#define FBC_CTL_FENCENO (1<<0) 385#define FBC_CTL_FENCENO (1<<0)
334#define FBC_COMMAND 0x0320c 386#define FBC_COMMAND 0x0320c
@@ -405,6 +457,13 @@
405# define GPIO_DATA_VAL_IN (1 << 12) 457# define GPIO_DATA_VAL_IN (1 << 12)
406# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 458# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
407 459
460#define GMBUS0 0x5100
461#define GMBUS1 0x5104
462#define GMBUS2 0x5108
463#define GMBUS3 0x510c
464#define GMBUS4 0x5110
465#define GMBUS5 0x5120
466
408/* 467/*
409 * Clock control & power management 468 * Clock control & power management
410 */ 469 */
@@ -435,7 +494,7 @@
435#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 494#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
436#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 495#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
437#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 496#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
438#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ 497#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
439 498
440#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 499#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
441#define I915_CRC_ERROR_ENABLE (1UL<<29) 500#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +571,7 @@
512 */ 571 */
513#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 572#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
514#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 573#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
515#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 574#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
516/* i830, required in DVO non-gang */ 575/* i830, required in DVO non-gang */
517#define PLL_P2_DIVIDE_BY_4 (1 << 23) 576#define PLL_P2_DIVIDE_BY_4 (1 << 23)
518#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 577#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +581,7 @@
522#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 581#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
523#define PLL_REF_INPUT_MASK (3 << 13) 582#define PLL_REF_INPUT_MASK (3 << 13)
524#define PLL_LOAD_PULSE_PHASE_SHIFT 9 583#define PLL_LOAD_PULSE_PHASE_SHIFT 9
525/* IGDNG */ 584/* Ironlake */
526# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 585# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
527# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) 586# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
528# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) 587# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +645,12 @@
586#define FPB0 0x06048 645#define FPB0 0x06048
587#define FPB1 0x0604c 646#define FPB1 0x0604c
588#define FP_N_DIV_MASK 0x003f0000 647#define FP_N_DIV_MASK 0x003f0000
589#define FP_N_IGD_DIV_MASK 0x00ff0000 648#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
590#define FP_N_DIV_SHIFT 16 649#define FP_N_DIV_SHIFT 16
591#define FP_M1_DIV_MASK 0x00003f00 650#define FP_M1_DIV_MASK 0x00003f00
592#define FP_M1_DIV_SHIFT 8 651#define FP_M1_DIV_SHIFT 8
593#define FP_M2_DIV_MASK 0x0000003f 652#define FP_M2_DIV_MASK 0x0000003f
594#define FP_M2_IGD_DIV_MASK 0x000000ff 653#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
595#define FP_M2_DIV_SHIFT 0 654#define FP_M2_DIV_SHIFT 0
596#define DPLL_TEST 0x606c 655#define DPLL_TEST 0x606c
597#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 656#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -767,9 +826,144 @@
767#define CLKCFG_MEM_800 (3 << 4) 826#define CLKCFG_MEM_800 (3 << 4)
768#define CLKCFG_MEM_MASK (7 << 4) 827#define CLKCFG_MEM_MASK (7 << 4)
769 828
770/** GM965 GM45 render standby register */ 829#define CRSTANDVID 0x11100
771#define MCHBAR_RENDER_STANDBY 0x111B8 830#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
772 831#define PXVFREQ_PX_MASK 0x7f000000
832#define PXVFREQ_PX_SHIFT 24
833#define VIDFREQ_BASE 0x11110
834#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
835#define VIDFREQ2 0x11114
836#define VIDFREQ3 0x11118
837#define VIDFREQ4 0x1111c
838#define VIDFREQ_P0_MASK 0x1f000000
839#define VIDFREQ_P0_SHIFT 24
840#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
841#define VIDFREQ_P0_CSCLK_SHIFT 20
842#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
843#define VIDFREQ_P0_CRCLK_SHIFT 16
844#define VIDFREQ_P1_MASK 0x00001f00
845#define VIDFREQ_P1_SHIFT 8
846#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
847#define VIDFREQ_P1_CSCLK_SHIFT 4
848#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
849#define INTTOEXT_BASE_ILK 0x11300
850#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
851#define INTTOEXT_MAP3_SHIFT 24
852#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
853#define INTTOEXT_MAP2_SHIFT 16
854#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
855#define INTTOEXT_MAP1_SHIFT 8
856#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
857#define INTTOEXT_MAP0_SHIFT 0
858#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
859#define MEMSWCTL 0x11170 /* Ironlake only */
860#define MEMCTL_CMD_MASK 0xe000
861#define MEMCTL_CMD_SHIFT 13
862#define MEMCTL_CMD_RCLK_OFF 0
863#define MEMCTL_CMD_RCLK_ON 1
864#define MEMCTL_CMD_CHFREQ 2
865#define MEMCTL_CMD_CHVID 3
866#define MEMCTL_CMD_VMMOFF 4
867#define MEMCTL_CMD_VMMON 5
868#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
869 when command complete */
870#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
871#define MEMCTL_FREQ_SHIFT 8
872#define MEMCTL_SFCAVM (1<<7)
873#define MEMCTL_TGT_VID_MASK 0x007f
874#define MEMIHYST 0x1117c
875#define MEMINTREN 0x11180 /* 16 bits */
876#define MEMINT_RSEXIT_EN (1<<8)
877#define MEMINT_CX_SUPR_EN (1<<7)
878#define MEMINT_CONT_BUSY_EN (1<<6)
879#define MEMINT_AVG_BUSY_EN (1<<5)
880#define MEMINT_EVAL_CHG_EN (1<<4)
881#define MEMINT_MON_IDLE_EN (1<<3)
882#define MEMINT_UP_EVAL_EN (1<<2)
883#define MEMINT_DOWN_EVAL_EN (1<<1)
884#define MEMINT_SW_CMD_EN (1<<0)
885#define MEMINTRSTR 0x11182 /* 16 bits */
886#define MEM_RSEXIT_MASK 0xc000
887#define MEM_RSEXIT_SHIFT 14
888#define MEM_CONT_BUSY_MASK 0x3000
889#define MEM_CONT_BUSY_SHIFT 12
890#define MEM_AVG_BUSY_MASK 0x0c00
891#define MEM_AVG_BUSY_SHIFT 10
892#define MEM_EVAL_CHG_MASK 0x0300
893#define MEM_EVAL_BUSY_SHIFT 8
894#define MEM_MON_IDLE_MASK 0x00c0
895#define MEM_MON_IDLE_SHIFT 6
896#define MEM_UP_EVAL_MASK 0x0030
897#define MEM_UP_EVAL_SHIFT 4
898#define MEM_DOWN_EVAL_MASK 0x000c
899#define MEM_DOWN_EVAL_SHIFT 2
900#define MEM_SW_CMD_MASK 0x0003
901#define MEM_INT_STEER_GFX 0
902#define MEM_INT_STEER_CMR 1
903#define MEM_INT_STEER_SMI 2
904#define MEM_INT_STEER_SCI 3
905#define MEMINTRSTS 0x11184
906#define MEMINT_RSEXIT (1<<7)
907#define MEMINT_CONT_BUSY (1<<6)
908#define MEMINT_AVG_BUSY (1<<5)
909#define MEMINT_EVAL_CHG (1<<4)
910#define MEMINT_MON_IDLE (1<<3)
911#define MEMINT_UP_EVAL (1<<2)
912#define MEMINT_DOWN_EVAL (1<<1)
913#define MEMINT_SW_CMD (1<<0)
914#define MEMMODECTL 0x11190
915#define MEMMODE_BOOST_EN (1<<31)
916#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
917#define MEMMODE_BOOST_FREQ_SHIFT 24
918#define MEMMODE_IDLE_MODE_MASK 0x00030000
919#define MEMMODE_IDLE_MODE_SHIFT 16
920#define MEMMODE_IDLE_MODE_EVAL 0
921#define MEMMODE_IDLE_MODE_CONT 1
922#define MEMMODE_HWIDLE_EN (1<<15)
923#define MEMMODE_SWMODE_EN (1<<14)
924#define MEMMODE_RCLK_GATE (1<<13)
925#define MEMMODE_HW_UPDATE (1<<12)
926#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
927#define MEMMODE_FSTART_SHIFT 8
928#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
929#define MEMMODE_FMAX_SHIFT 4
930#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
931#define RCBMAXAVG 0x1119c
932#define MEMSWCTL2 0x1119e /* Cantiga only */
933#define SWMEMCMD_RENDER_OFF (0 << 13)
934#define SWMEMCMD_RENDER_ON (1 << 13)
935#define SWMEMCMD_SWFREQ (2 << 13)
936#define SWMEMCMD_TARVID (3 << 13)
937#define SWMEMCMD_VRM_OFF (4 << 13)
938#define SWMEMCMD_VRM_ON (5 << 13)
939#define CMDSTS (1<<12)
940#define SFCAVM (1<<11)
941#define SWFREQ_MASK 0x0380 /* P0-7 */
942#define SWFREQ_SHIFT 7
943#define TARVID_MASK 0x001f
944#define MEMSTAT_CTG 0x111a0
945#define RCBMINAVG 0x111a0
946#define RCUPEI 0x111b0
947#define RCDNEI 0x111b4
948#define MCHBAR_RENDER_STANDBY 0x111b8
949#define RCX_SW_EXIT (1<<23)
950#define RSX_STATUS_MASK 0x00700000
951#define VIDCTL 0x111c0
952#define VIDSTS 0x111c8
953#define VIDSTART 0x111cc /* 8 bits */
954#define MEMSTAT_ILK 0x111f8
955#define MEMSTAT_VID_MASK 0x7f00
956#define MEMSTAT_VID_SHIFT 8
957#define MEMSTAT_PSTATE_MASK 0x00f8
958#define MEMSTAT_PSTATE_SHIFT 3
959#define MEMSTAT_MON_ACTV (1<<2)
960#define MEMSTAT_SRC_CTL_MASK 0x0003
961#define MEMSTAT_SRC_CTL_CORE 0
962#define MEMSTAT_SRC_CTL_TRB 1
963#define MEMSTAT_SRC_CTL_THM 2
964#define MEMSTAT_SRC_CTL_STDBY 3
965#define RCPREVBSYTUPAVG 0x113b8
966#define RCPREVBSYTDNAVG 0x113bc
773#define PEG_BAND_GAP_DATA 0x14d68 967#define PEG_BAND_GAP_DATA 0x14d68
774 968
775/* 969/*
@@ -844,7 +1038,6 @@
844#define SDVOB_HOTPLUG_INT_EN (1 << 26) 1038#define SDVOB_HOTPLUG_INT_EN (1 << 26)
845#define SDVOC_HOTPLUG_INT_EN (1 << 25) 1039#define SDVOC_HOTPLUG_INT_EN (1 << 25)
846#define TV_HOTPLUG_INT_EN (1 << 18) 1040#define TV_HOTPLUG_INT_EN (1 << 18)
847#define CRT_EOS_INT_EN (1 << 10)
848#define CRT_HOTPLUG_INT_EN (1 << 9) 1041#define CRT_HOTPLUG_INT_EN (1 << 9)
849#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 1042#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
850#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) 1043#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -863,14 +1056,6 @@
863#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1056#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
864#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 1057#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
865#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f 1058#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
866#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
867 HDMIC_HOTPLUG_INT_EN | \
868 HDMID_HOTPLUG_INT_EN | \
869 SDVOB_HOTPLUG_INT_EN | \
870 SDVOC_HOTPLUG_INT_EN | \
871 TV_HOTPLUG_INT_EN | \
872 CRT_HOTPLUG_INT_EN)
873
874 1059
875#define PORT_HOTPLUG_STAT 0x61114 1060#define PORT_HOTPLUG_STAT 0x61114
876#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 1061#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -879,7 +1064,6 @@
879#define DPC_HOTPLUG_INT_STATUS (1 << 28) 1064#define DPC_HOTPLUG_INT_STATUS (1 << 28)
880#define HDMID_HOTPLUG_INT_STATUS (1 << 27) 1065#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
881#define DPD_HOTPLUG_INT_STATUS (1 << 27) 1066#define DPD_HOTPLUG_INT_STATUS (1 << 27)
882#define CRT_EOS_INT_STATUS (1 << 12)
883#define CRT_HOTPLUG_INT_STATUS (1 << 11) 1067#define CRT_HOTPLUG_INT_STATUS (1 << 11)
884#define TV_HOTPLUG_INT_STATUS (1 << 10) 1068#define TV_HOTPLUG_INT_STATUS (1 << 10)
885#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 1069#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -968,6 +1152,8 @@
968#define LVDS_PORT_EN (1 << 31) 1152#define LVDS_PORT_EN (1 << 31)
969/* Selects pipe B for LVDS data. Must be set on pre-965. */ 1153/* Selects pipe B for LVDS data. Must be set on pre-965. */
970#define LVDS_PIPEB_SELECT (1 << 30) 1154#define LVDS_PIPEB_SELECT (1 << 30)
1155/* LVDS dithering flag on 965/g4x platform */
1156#define LVDS_ENABLE_DITHER (1 << 25)
971/* Enable border for unscaled (or aspect-scaled) display */ 1157/* Enable border for unscaled (or aspect-scaled) display */
972#define LVDS_BORDER_ENABLE (1 << 15) 1158#define LVDS_BORDER_ENABLE (1 << 15)
973/* 1159/*
@@ -1620,7 +1806,7 @@
1620#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) 1806#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1621 1807
1622#define DP_SCRAMBLING_DISABLE (1 << 12) 1808#define DP_SCRAMBLING_DISABLE (1 << 12)
1623#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) 1809#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
1624 1810
1625/** limit RGB values to avoid confusing TVs */ 1811/** limit RGB values to avoid confusing TVs */
1626#define DP_COLOR_RANGE_16_235 (1 << 8) 1812#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1737,6 +1923,8 @@
1737 1923
1738/* Display & cursor control */ 1924/* Display & cursor control */
1739 1925
1926/* dithering flag on Ironlake */
1927#define PIPE_ENABLE_DITHER (1 << 4)
1740/* Pipe A */ 1928/* Pipe A */
1741#define PIPEADSL 0x70000 1929#define PIPEADSL 0x70000
1742#define PIPEACONF 0x70008 1930#define PIPEACONF 0x70008
@@ -1804,11 +1992,11 @@
1804#define DSPFW_PLANEB_SHIFT 8 1992#define DSPFW_PLANEB_SHIFT 8
1805#define DSPFW2 0x70038 1993#define DSPFW2 0x70038
1806#define DSPFW_CURSORA_MASK 0x00003f00 1994#define DSPFW_CURSORA_MASK 0x00003f00
1807#define DSPFW_CURSORA_SHIFT 16 1995#define DSPFW_CURSORA_SHIFT 8
1808#define DSPFW3 0x7003c 1996#define DSPFW3 0x7003c
1809#define DSPFW_HPLL_SR_EN (1<<31) 1997#define DSPFW_HPLL_SR_EN (1<<31)
1810#define DSPFW_CURSOR_SR_SHIFT 24 1998#define DSPFW_CURSOR_SR_SHIFT 24
1811#define IGD_SELF_REFRESH_EN (1<<30) 1999#define PINEVIEW_SELF_REFRESH_EN (1<<30)
1812 2000
1813/* FIFO watermark sizes etc */ 2001/* FIFO watermark sizes etc */
1814#define G4X_FIFO_LINE_SIZE 64 2002#define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +2012,16 @@
1824#define G4X_MAX_WM 0x3f 2012#define G4X_MAX_WM 0x3f
1825#define I915_MAX_WM 0x3f 2013#define I915_MAX_WM 0x3f
1826 2014
1827#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ 2015#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
1828#define IGD_FIFO_LINE_SIZE 64 2016#define PINEVIEW_FIFO_LINE_SIZE 64
1829#define IGD_MAX_WM 0x1ff 2017#define PINEVIEW_MAX_WM 0x1ff
1830#define IGD_DFT_WM 0x3f 2018#define PINEVIEW_DFT_WM 0x3f
1831#define IGD_DFT_HPLLOFF_WM 0 2019#define PINEVIEW_DFT_HPLLOFF_WM 0
1832#define IGD_GUARD_WM 10 2020#define PINEVIEW_GUARD_WM 10
1833#define IGD_CURSOR_FIFO 64 2021#define PINEVIEW_CURSOR_FIFO 64
1834#define IGD_CURSOR_MAX_WM 0x3f 2022#define PINEVIEW_CURSOR_MAX_WM 0x3f
1835#define IGD_CURSOR_DFT_WM 0 2023#define PINEVIEW_CURSOR_DFT_WM 0
1836#define IGD_CURSOR_GUARD_WM 5 2024#define PINEVIEW_CURSOR_GUARD_WM 5
1837 2025
1838/* 2026/*
1839 * The two pipe frame counter registers are not synchronized, so 2027 * The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +2095,7 @@
1907#define DISPPLANE_16BPP (0x5<<26) 2095#define DISPPLANE_16BPP (0x5<<26)
1908#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 2096#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1909#define DISPPLANE_32BPP (0x7<<26) 2097#define DISPPLANE_32BPP (0x7<<26)
2098#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
1910#define DISPPLANE_STEREO_ENABLE (1<<25) 2099#define DISPPLANE_STEREO_ENABLE (1<<25)
1911#define DISPPLANE_STEREO_DISABLE 0 2100#define DISPPLANE_STEREO_DISABLE 0
1912#define DISPPLANE_SEL_PIPE_MASK (1<<24) 2101#define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +2107,7 @@
1918#define DISPPLANE_NO_LINE_DOUBLE 0 2107#define DISPPLANE_NO_LINE_DOUBLE 0
1919#define DISPPLANE_STEREO_POLARITY_FIRST 0 2108#define DISPPLANE_STEREO_POLARITY_FIRST 0
1920#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 2109#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1921#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */ 2110#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
1922#define DISPPLANE_TILED (1<<10) 2111#define DISPPLANE_TILED (1<<10)
1923#define DSPAADDR 0x70184 2112#define DSPAADDR 0x70184
1924#define DSPASTRIDE 0x70188 2113#define DSPASTRIDE 0x70188
@@ -1971,7 +2160,7 @@
1971# define VGA_2X_MODE (1 << 30) 2160# define VGA_2X_MODE (1 << 30)
1972# define VGA_PIPE_B_SELECT (1 << 29) 2161# define VGA_PIPE_B_SELECT (1 << 29)
1973 2162
1974/* IGDNG */ 2163/* Ironlake */
1975 2164
1976#define CPU_VGACNTRL 0x41000 2165#define CPU_VGACNTRL 0x41000
1977 2166
@@ -1997,6 +2186,14 @@
1997#define DISPLAY_PORT_PLL_BIOS_1 0x46010 2186#define DISPLAY_PORT_PLL_BIOS_1 0x46010
1998#define DISPLAY_PORT_PLL_BIOS_2 0x46014 2187#define DISPLAY_PORT_PLL_BIOS_2 0x46014
1999 2188
2189#define PCH_DSPCLK_GATE_D 0x42020
2190# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
2191# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
2192
2193#define PCH_3DCGDIS0 0x46020
2194# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
2195# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
2196
2000#define FDI_PLL_FREQ_CTL 0x46030 2197#define FDI_PLL_FREQ_CTL 0x46030
2001#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) 2198#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
2002#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 2199#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
@@ -2098,6 +2295,7 @@
2098#define DEIER 0x4400c 2295#define DEIER 0x4400c
2099 2296
2100/* GT interrupt */ 2297/* GT interrupt */
2298#define GT_PIPE_NOTIFY (1 << 4)
2101#define GT_SYNC_STATUS (1 << 2) 2299#define GT_SYNC_STATUS (1 << 2)
2102#define GT_USER_INTERRUPT (1 << 0) 2300#define GT_USER_INTERRUPT (1 << 0)
2103 2301
@@ -2117,6 +2315,7 @@
2117#define SDE_PORTC_HOTPLUG (1 << 9) 2315#define SDE_PORTC_HOTPLUG (1 << 9)
2118#define SDE_PORTB_HOTPLUG (1 << 8) 2316#define SDE_PORTB_HOTPLUG (1 << 8)
2119#define SDE_SDVOB_HOTPLUG (1 << 6) 2317#define SDE_SDVOB_HOTPLUG (1 << 6)
2318#define SDE_HOTPLUG_MASK (0xf << 8)
2120 2319
2121#define SDEISR 0xc4000 2320#define SDEISR 0xc4000
2122#define SDEIMR 0xc4004 2321#define SDEIMR 0xc4004
@@ -2157,6 +2356,13 @@
2157#define PCH_GPIOE 0xc5020 2356#define PCH_GPIOE 0xc5020
2158#define PCH_GPIOF 0xc5024 2357#define PCH_GPIOF 0xc5024
2159 2358
2359#define PCH_GMBUS0 0xc5100
2360#define PCH_GMBUS1 0xc5104
2361#define PCH_GMBUS2 0xc5108
2362#define PCH_GMBUS3 0xc510c
2363#define PCH_GMBUS4 0xc5110
2364#define PCH_GMBUS5 0xc5120
2365
2160#define PCH_DPLL_A 0xc6014 2366#define PCH_DPLL_A 0xc6014
2161#define PCH_DPLL_B 0xc6018 2367#define PCH_DPLL_B 0xc6018
2162 2368
@@ -2292,7 +2498,7 @@
2292#define FDI_DP_PORT_WIDTH_X3 (2<<19) 2498#define FDI_DP_PORT_WIDTH_X3 (2<<19)
2293#define FDI_DP_PORT_WIDTH_X4 (3<<19) 2499#define FDI_DP_PORT_WIDTH_X4 (3<<19)
2294#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 2500#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
2295/* IGDNG: hardwired to 1 */ 2501/* Ironlake: hardwired to 1 */
2296#define FDI_TX_PLL_ENABLE (1<<14) 2502#define FDI_TX_PLL_ENABLE (1<<14)
2297/* both Tx and Rx */ 2503/* both Tx and Rx */
2298#define FDI_SCRAMBLING_ENABLE (0<<7) 2504#define FDI_SCRAMBLING_ENABLE (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44e..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "i915_drm.h" 29#include "i915_drm.h"
30#include "i915_drv.h" 30#include "intel_drv.h"
31 31
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg; 35 u32 dpll_reg;
36 36
37 if (IS_IGDNG(dev)) { 37 if (IS_IRONLAKE(dev)) {
38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; 38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
39 } else { 39 } else {
40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; 40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
53 if (!i915_pipe_enabled(dev, pipe)) 53 if (!i915_pipe_enabled(dev, pipe))
54 return; 54 return;
55 55
56 if (IS_IGDNG(dev)) 56 if (IS_IRONLAKE(dev))
57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
58 58
59 if (pipe == PIPE_A) 59 if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
75 if (!i915_pipe_enabled(dev, pipe)) 75 if (!i915_pipe_enabled(dev, pipe))
76 return; 76 return;
77 77
78 if (IS_IGDNG(dev)) 78 if (IS_IRONLAKE(dev))
79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
80 80
81 if (pipe == PIPE_A) 81 if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 240 return;
241 241
242 if (IS_IGDNG(dev)) { 242 if (IS_IRONLAKE(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
245 } 245 }
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
247 /* Pipe & plane A info */ 247 /* Pipe & plane A info */
248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
250 if (IS_IGDNG(dev)) { 250 if (IS_IRONLAKE(dev)) {
251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0); 251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1); 252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); 253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
256 dev_priv->saveFPA1 = I915_READ(FPA1); 256 dev_priv->saveFPA1 = I915_READ(FPA1);
257 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 257 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
258 } 258 }
259 if (IS_I965G(dev) && !IS_IGDNG(dev)) 259 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
267 if (!IS_IGDNG(dev)) 267 if (!IS_IRONLAKE(dev))
268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
269 269
270 if (IS_IGDNG(dev)) { 270 if (IS_IRONLAKE(dev)) {
271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); 271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); 272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); 273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
304 /* Pipe & plane B info */ 304 /* Pipe & plane B info */
305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
307 if (IS_IGDNG(dev)) { 307 if (IS_IRONLAKE(dev)) {
308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0); 308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1); 309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); 310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
313 dev_priv->saveFPB1 = I915_READ(FPB1); 313 dev_priv->saveFPB1 = I915_READ(FPB1);
314 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 314 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
315 } 315 }
316 if (IS_I965G(dev) && !IS_IGDNG(dev)) 316 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
324 if (!IS_IGDNG(dev)) 324 if (!IS_IRONLAKE(dev))
325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); 325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
326 326
327 if (IS_IGDNG(dev)) { 327 if (IS_IRONLAKE(dev)) {
328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); 328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); 329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); 330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
369 if (drm_core_check_feature(dev, DRIVER_MODESET)) 369 if (drm_core_check_feature(dev, DRIVER_MODESET))
370 return; 370 return;
371 371
372 if (IS_IGDNG(dev)) { 372 if (IS_IRONLAKE(dev)) {
373 dpll_a_reg = PCH_DPLL_A; 373 dpll_a_reg = PCH_DPLL_A;
374 dpll_b_reg = PCH_DPLL_B; 374 dpll_b_reg = PCH_DPLL_B;
375 fpa0_reg = PCH_FPA0; 375 fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
385 fpb1_reg = FPB1; 385 fpb1_reg = FPB1;
386 } 386 }
387 387
388 if (IS_IGDNG(dev)) { 388 if (IS_IRONLAKE(dev)) {
389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); 389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); 390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
391 } 391 }
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
402 /* Actually enable it */ 402 /* Actually enable it */
403 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 403 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
404 DRM_UDELAY(150); 404 DRM_UDELAY(150);
405 if (IS_I965G(dev) && !IS_IGDNG(dev)) 405 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
406 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 406 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
407 DRM_UDELAY(150); 407 DRM_UDELAY(150);
408 408
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
413 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 413 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
414 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 414 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
415 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 415 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
416 if (!IS_IGDNG(dev)) 416 if (!IS_IRONLAKE(dev))
417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
418 418
419 if (IS_IGDNG(dev)) { 419 if (IS_IRONLAKE(dev)) {
420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); 420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); 421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); 422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
467 /* Actually enable it */ 467 /* Actually enable it */
468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
469 DRM_UDELAY(150); 469 DRM_UDELAY(150);
470 if (IS_I965G(dev) && !IS_IGDNG(dev)) 470 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
472 DRM_UDELAY(150); 472 DRM_UDELAY(150);
473 473
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
478 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 478 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
479 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 479 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
480 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 480 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
481 if (!IS_IGDNG(dev)) 481 if (!IS_IRONLAKE(dev))
482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
483 483
484 if (IS_IGDNG(dev)) { 484 if (IS_IRONLAKE(dev)) {
485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); 485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); 486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); 487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
546 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 546 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
547 547
548 /* CRT state */ 548 /* CRT state */
549 if (IS_IGDNG(dev)) { 549 if (IS_IRONLAKE(dev)) {
550 dev_priv->saveADPA = I915_READ(PCH_ADPA); 550 dev_priv->saveADPA = I915_READ(PCH_ADPA);
551 } else { 551 } else {
552 dev_priv->saveADPA = I915_READ(ADPA); 552 dev_priv->saveADPA = I915_READ(ADPA);
553 } 553 }
554 554
555 /* LVDS state */ 555 /* LVDS state */
556 if (IS_IGDNG(dev)) { 556 if (IS_IRONLAKE(dev)) {
557 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 557 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
558 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 558 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
559 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 559 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
571 dev_priv->saveLVDS = I915_READ(LVDS); 571 dev_priv->saveLVDS = I915_READ(LVDS);
572 } 572 }
573 573
574 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) 574 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
575 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 575 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
576 576
577 if (IS_IGDNG(dev)) { 577 if (IS_IRONLAKE(dev)) {
578 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 578 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
579 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 579 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
580 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 580 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
614 dev_priv->saveVGA0 = I915_READ(VGA0); 614 dev_priv->saveVGA0 = I915_READ(VGA0);
615 dev_priv->saveVGA1 = I915_READ(VGA1); 615 dev_priv->saveVGA1 = I915_READ(VGA1);
616 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 616 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
617 if (IS_IGDNG(dev)) 617 if (IS_IRONLAKE(dev))
618 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); 618 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
619 else 619 else
620 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 620 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
656 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 656 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
657 657
658 /* CRT state */ 658 /* CRT state */
659 if (IS_IGDNG(dev)) 659 if (IS_IRONLAKE(dev))
660 I915_WRITE(PCH_ADPA, dev_priv->saveADPA); 660 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
661 else 661 else
662 I915_WRITE(ADPA, dev_priv->saveADPA); 662 I915_WRITE(ADPA, dev_priv->saveADPA);
663 663
664 /* LVDS state */ 664 /* LVDS state */
665 if (IS_I965G(dev) && !IS_IGDNG(dev)) 665 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
666 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 666 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
667 667
668 if (IS_IGDNG(dev)) { 668 if (IS_IRONLAKE(dev)) {
669 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); 669 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
670 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 670 } else if (IS_MOBILE(dev) && !IS_I830(dev))
671 I915_WRITE(LVDS, dev_priv->saveLVDS); 671 I915_WRITE(LVDS, dev_priv->saveLVDS);
672 672
673 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) 673 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
674 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 674 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
675 675
676 if (IS_IGDNG(dev)) { 676 if (IS_IRONLAKE(dev)) {
677 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); 677 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
678 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); 678 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
679 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); 679 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
685 I915_WRITE(MCHBAR_RENDER_STANDBY,
686 dev_priv->saveMCHBAR_RENDER_STANDBY);
685 } else { 687 } else {
686 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 688 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
687 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -713,7 +715,7 @@ void i915_restore_display(struct drm_device *dev)
713 } 715 }
714 716
715 /* VGA state */ 717 /* VGA state */
716 if (IS_IGDNG(dev)) 718 if (IS_IRONLAKE(dev))
717 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 719 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
718 else 720 else
719 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 721 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -732,31 +734,28 @@ int i915_save_state(struct drm_device *dev)
732 734
733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 735 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
734 736
735 /* Render Standby */
736 if (IS_I965G(dev) && IS_MOBILE(dev))
737 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
738
739 /* Hardware status page */ 737 /* Hardware status page */
740 dev_priv->saveHWS = I915_READ(HWS_PGA); 738 dev_priv->saveHWS = I915_READ(HWS_PGA);
741 739
742 i915_save_display(dev); 740 i915_save_display(dev);
743 741
744 /* Interrupt state */ 742 /* Interrupt state */
745 if (IS_IGDNG(dev)) { 743 if (IS_IRONLAKE(dev)) {
746 dev_priv->saveDEIER = I915_READ(DEIER); 744 dev_priv->saveDEIER = I915_READ(DEIER);
747 dev_priv->saveDEIMR = I915_READ(DEIMR); 745 dev_priv->saveDEIMR = I915_READ(DEIMR);
748 dev_priv->saveGTIER = I915_READ(GTIER); 746 dev_priv->saveGTIER = I915_READ(GTIER);
749 dev_priv->saveGTIMR = I915_READ(GTIMR); 747 dev_priv->saveGTIMR = I915_READ(GTIMR);
750 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 748 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
751 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 749 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
750 dev_priv->saveMCHBAR_RENDER_STANDBY =
751 I915_READ(MCHBAR_RENDER_STANDBY);
752 } else { 752 } else {
753 dev_priv->saveIER = I915_READ(IER); 753 dev_priv->saveIER = I915_READ(IER);
754 dev_priv->saveIMR = I915_READ(IMR); 754 dev_priv->saveIMR = I915_READ(IMR);
755 } 755 }
756 756
757 /* Clock gating state */ 757 if (IS_IRONLAKE_M(dev))
758 dev_priv->saveD_STATE = I915_READ(D_STATE); 758 ironlake_disable_drps(dev);
759 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
760 759
761 /* Cache mode state */ 760 /* Cache mode state */
762 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 761 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -795,10 +794,6 @@ int i915_restore_state(struct drm_device *dev)
795 794
796 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 795 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
797 796
798 /* Render Standby */
799 if (IS_I965G(dev) && IS_MOBILE(dev))
800 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
801
802 /* Hardware status page */ 797 /* Hardware status page */
803 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 798 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
804 799
@@ -817,7 +812,7 @@ int i915_restore_state(struct drm_device *dev)
817 i915_restore_display(dev); 812 i915_restore_display(dev);
818 813
819 /* Interrupt state */ 814 /* Interrupt state */
820 if (IS_IGDNG(dev)) { 815 if (IS_IRONLAKE(dev)) {
821 I915_WRITE(DEIER, dev_priv->saveDEIER); 816 I915_WRITE(DEIER, dev_priv->saveDEIER);
822 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 817 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
823 I915_WRITE(GTIER, dev_priv->saveGTIER); 818 I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +825,10 @@ int i915_restore_state(struct drm_device *dev)
830 } 825 }
831 826
832 /* Clock gating state */ 827 /* Clock gating state */
833 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 828 intel_init_clock_gating(dev);
834 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); 829
830 if (IS_IRONLAKE_M(dev))
831 ironlake_enable_drps(dev);
835 832
836 /* Cache mode state */ 833 /* Cache mode state */
837 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 834 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +843,9 @@ int i915_restore_state(struct drm_device *dev)
846 for (i = 0; i < 3; i++) 843 for (i = 0; i < 3; i++)
847 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 844 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
848 845
846 /* I2C state */
847 intel_i2c_reset_gmbus(dev);
848
849 return 0; 849 return 0;
850} 850}
851 851
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e6..f9ba452f0cbf 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
33#define SLAVE_ADDR1 0x70 33#define SLAVE_ADDR1 0x70
34#define SLAVE_ADDR2 0x72 34#define SLAVE_ADDR2 0x72
35 35
36static int panel_type;
37
36static void * 38static void *
37find_section(struct bdb_header *bdb, int section_id) 39find_section(struct bdb_header *bdb, int section_id)
38{ 40{
@@ -114,6 +116,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
114 struct lvds_dvo_timing *dvo_timing; 116 struct lvds_dvo_timing *dvo_timing;
115 struct drm_display_mode *panel_fixed_mode; 117 struct drm_display_mode *panel_fixed_mode;
116 int lfp_data_size, dvo_timing_offset; 118 int lfp_data_size, dvo_timing_offset;
119 int i, temp_downclock;
120 struct drm_display_mode *temp_mode;
117 121
118 /* Defaults if we can't find VBT info */ 122 /* Defaults if we can't find VBT info */
119 dev_priv->lvds_dither = 0; 123 dev_priv->lvds_dither = 0;
@@ -126,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
126 dev_priv->lvds_dither = lvds_options->pixel_dither; 130 dev_priv->lvds_dither = lvds_options->pixel_dither;
127 if (lvds_options->panel_type == 0xff) 131 if (lvds_options->panel_type == 0xff)
128 return; 132 return;
133 panel_type = lvds_options->panel_type;
129 134
130 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 135 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
131 if (!lvds_lfp_data) 136 if (!lvds_lfp_data)
@@ -159,9 +164,50 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
159 164
160 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 165 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
161 166
162 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 167 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
163 drm_mode_debug_printmodeline(panel_fixed_mode); 168 drm_mode_debug_printmodeline(panel_fixed_mode);
164 169
170 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
171 temp_downclock = panel_fixed_mode->clock;
172 /*
173 * enumerate the LVDS panel timing info entry in VBT to check whether
174 * the LVDS downclock is found.
175 */
176 for (i = 0; i < 16; i++) {
177 entry = (struct bdb_lvds_lfp_data_entry *)
178 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
179 dvo_timing = (struct lvds_dvo_timing *)
180 ((unsigned char *)entry + dvo_timing_offset);
181
182 fill_detail_timing_data(temp_mode, dvo_timing);
183
184 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
185 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
186 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
187 temp_mode->htotal == panel_fixed_mode->htotal &&
188 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
189 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
190 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
191 temp_mode->vtotal == panel_fixed_mode->vtotal &&
192 temp_mode->clock < temp_downclock) {
193 /*
194 * downclock is already found. But we expect
195 * to find the lower downclock.
196 */
197 temp_downclock = temp_mode->clock;
198 }
199 /* clear it to zero */
200 memset(temp_mode, 0, sizeof(*temp_mode));
201 }
202 kfree(temp_mode);
203 if (temp_downclock < panel_fixed_mode->clock &&
204 i915_lvds_downclock) {
205 dev_priv->lvds_downclock_avail = 1;
206 dev_priv->lvds_downclock = temp_downclock;
207 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
208 "Normal Clock %dKHz, downclock %dKHz\n",
209 temp_downclock, panel_fixed_mode->clock);
210 }
165 return; 211 return;
166} 212}
167 213
@@ -201,6 +247,7 @@ static void
201parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
202 struct bdb_header *bdb) 248 struct bdb_header *bdb)
203{ 249{
250 struct drm_device *dev = dev_priv->dev;
204 struct bdb_general_features *general; 251 struct bdb_general_features *general;
205 252
206 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -217,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
217 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
218 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
219 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
220 else if (IS_IGDNG(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
221 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
223 else 270 else
@@ -241,22 +288,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
241 GPIOF, 288 GPIOF,
242 }; 289 };
243 290
244 /* Set sensible defaults in case we can't find the general block
245 or it is the wrong chipset */
246 dev_priv->crt_ddc_bus = -1;
247
248 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 291 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
249 if (general) { 292 if (general) {
250 u16 block_size = get_blocksize(general); 293 u16 block_size = get_blocksize(general);
251 if (block_size >= sizeof(*general)) { 294 if (block_size >= sizeof(*general)) {
252 int bus_pin = general->crt_ddc_gmbus_pin; 295 int bus_pin = general->crt_ddc_gmbus_pin;
253 DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); 296 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
254 if ((bus_pin >= 1) && (bus_pin <= 6)) { 297 if ((bus_pin >= 1) && (bus_pin <= 6)) {
255 dev_priv->crt_ddc_bus = 298 dev_priv->crt_ddc_bus =
256 crt_bus_map_table[bus_pin-1]; 299 crt_bus_map_table[bus_pin-1];
257 } 300 }
258 } else { 301 } else {
259 DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", 302 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
260 block_size); 303 block_size);
261 } 304 }
262 } 305 }
@@ -274,7 +317,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
274 317
275 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 318 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
276 if (!p_defs) { 319 if (!p_defs) {
277 DRM_DEBUG("No general definition block is found\n"); 320 DRM_DEBUG_KMS("No general definition block is found\n");
278 return; 321 return;
279 } 322 }
280 /* judge whether the size of child device meets the requirements. 323 /* judge whether the size of child device meets the requirements.
@@ -284,7 +327,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
284 */ 327 */
285 if (p_defs->child_dev_size != sizeof(*p_child)) { 328 if (p_defs->child_dev_size != sizeof(*p_child)) {
286 /* different child dev size . Ignore it */ 329 /* different child dev size . Ignore it */
287 DRM_DEBUG("different child size is found. Invalid.\n"); 330 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
288 return; 331 return;
289 } 332 }
290 /* get the block size of general definitions */ 333 /* get the block size of general definitions */
@@ -310,11 +353,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
310 if (p_child->dvo_port != DEVICE_PORT_DVOB && 353 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
311 p_child->dvo_port != DEVICE_PORT_DVOC) { 354 p_child->dvo_port != DEVICE_PORT_DVOC) {
312 /* skip the incorrect SDVO port */ 355 /* skip the incorrect SDVO port */
313 DRM_DEBUG("Incorrect SDVO port. Skip it \n"); 356 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
314 continue; 357 continue;
315 } 358 }
316 DRM_DEBUG("the SDVO device with slave addr %2x is found on " 359 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
317 "%s port\n", 360 " %s port\n",
318 p_child->slave_addr, 361 p_child->slave_addr,
319 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 362 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
320 "SDVOB" : "SDVOC"); 363 "SDVOB" : "SDVOC");
@@ -325,21 +368,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
325 p_mapping->dvo_wiring = p_child->dvo_wiring; 368 p_mapping->dvo_wiring = p_child->dvo_wiring;
326 p_mapping->initialized = 1; 369 p_mapping->initialized = 1;
327 } else { 370 } else {
328 DRM_DEBUG("Maybe one SDVO port is shared by " 371 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
329 "two SDVO device.\n"); 372 "two SDVO device.\n");
330 } 373 }
331 if (p_child->slave2_addr) { 374 if (p_child->slave2_addr) {
332 /* Maybe this is a SDVO device with multiple inputs */ 375 /* Maybe this is a SDVO device with multiple inputs */
333 /* And the mapping info is not added */ 376 /* And the mapping info is not added */
334 DRM_DEBUG("there exists the slave2_addr. Maybe this " 377 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
335 "is a SDVO device with multiple inputs.\n"); 378 " is a SDVO device with multiple inputs.\n");
336 } 379 }
337 count++; 380 count++;
338 } 381 }
339 382
340 if (!count) { 383 if (!count) {
341 /* No SDVO device info is found */ 384 /* No SDVO device info is found */
342 DRM_DEBUG("No SDVO device info is found in VBT\n"); 385 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
343 } 386 }
344 return; 387 return;
345} 388}
@@ -366,6 +409,99 @@ parse_driver_features(struct drm_i915_private *dev_priv,
366 dev_priv->render_reclock_avail = true; 409 dev_priv->render_reclock_avail = true;
367} 410}
368 411
412static void
413parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
414{
415 struct bdb_edp *edp;
416
417 edp = find_section(bdb, BDB_EDP);
418 if (!edp) {
419 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
420 DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
421 "supported, assume 18bpp panel color "
422 "depth.\n");
423 dev_priv->edp_bpp = 18;
424 }
425 return;
426 }
427
428 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
429 case EDP_18BPP:
430 dev_priv->edp_bpp = 18;
431 break;
432 case EDP_24BPP:
433 dev_priv->edp_bpp = 24;
434 break;
435 case EDP_30BPP:
436 dev_priv->edp_bpp = 30;
437 break;
438 }
439}
440
441static void
442parse_device_mapping(struct drm_i915_private *dev_priv,
443 struct bdb_header *bdb)
444{
445 struct bdb_general_definitions *p_defs;
446 struct child_device_config *p_child, *child_dev_ptr;
447 int i, child_device_num, count;
448 u16 block_size;
449
450 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
451 if (!p_defs) {
452 DRM_DEBUG_KMS("No general definition block is found\n");
453 return;
454 }
455 /* judge whether the size of child device meets the requirements.
456 * If the child device size obtained from general definition block
457 * is different with sizeof(struct child_device_config), skip the
458 * parsing of sdvo device info
459 */
460 if (p_defs->child_dev_size != sizeof(*p_child)) {
461 /* different child dev size . Ignore it */
462 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
463 return;
464 }
465 /* get the block size of general definitions */
466 block_size = get_blocksize(p_defs);
467 /* get the number of child device */
468 child_device_num = (block_size - sizeof(*p_defs)) /
469 sizeof(*p_child);
470 count = 0;
471 /* get the number of child device that is present */
472 for (i = 0; i < child_device_num; i++) {
473 p_child = &(p_defs->devices[i]);
474 if (!p_child->device_type) {
475 /* skip the device block if device type is invalid */
476 continue;
477 }
478 count++;
479 }
480 if (!count) {
481 DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
482 return;
483 }
484 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
485 if (!dev_priv->child_dev) {
486 DRM_DEBUG_KMS("No memory space for child device\n");
487 return;
488 }
489
490 dev_priv->child_dev_num = count;
491 count = 0;
492 for (i = 0; i < child_device_num; i++) {
493 p_child = &(p_defs->devices[i]);
494 if (!p_child->device_type) {
495 /* skip the device block if device type is invalid */
496 continue;
497 }
498 child_dev_ptr = dev_priv->child_dev + count;
499 count++;
500 memcpy((void *)child_dev_ptr, (void *)p_child,
501 sizeof(*p_child));
502 }
503 return;
504}
369/** 505/**
370 * intel_init_bios - initialize VBIOS settings & find VBT 506 * intel_init_bios - initialize VBIOS settings & find VBT
371 * @dev: DRM device 507 * @dev: DRM device
@@ -417,7 +553,9 @@ intel_init_bios(struct drm_device *dev)
417 parse_lfp_panel_data(dev_priv, bdb); 553 parse_lfp_panel_data(dev_priv, bdb);
418 parse_sdvo_panel_data(dev_priv, bdb); 554 parse_sdvo_panel_data(dev_priv, bdb);
419 parse_sdvo_device_mapping(dev_priv, bdb); 555 parse_sdvo_device_mapping(dev_priv, bdb);
556 parse_device_mapping(dev_priv, bdb);
420 parse_driver_features(dev_priv, bdb); 557 parse_driver_features(dev_priv, bdb);
558 parse_edp(dev_priv, bdb);
421 559
422 pci_unmap_rom(pdev, bios); 560 pci_unmap_rom(pdev, bios);
423 561
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7a..4c18514f6f80 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
98#define BDB_SDVO_LVDS_PNP_IDS 24 98#define BDB_SDVO_LVDS_PNP_IDS 24
99#define BDB_SDVO_LVDS_POWER_SEQ 25 99#define BDB_SDVO_LVDS_POWER_SEQ 25
100#define BDB_TV_OPTIONS 26 100#define BDB_TV_OPTIONS 26
101#define BDB_EDP 27
101#define BDB_LVDS_OPTIONS 40 102#define BDB_LVDS_OPTIONS 40
102#define BDB_LVDS_LFP_DATA_PTRS 41 103#define BDB_LVDS_LFP_DATA_PTRS 41
103#define BDB_LVDS_LFP_DATA 42 104#define BDB_LVDS_LFP_DATA 42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
426 u8 custom_vbt_version; 427 u8 custom_vbt_version;
427} __attribute__((packed)); 428} __attribute__((packed));
428 429
430#define EDP_18BPP 0
431#define EDP_24BPP 1
432#define EDP_30BPP 2
433#define EDP_RATE_1_62 0
434#define EDP_RATE_2_7 1
435#define EDP_LANE_1 0
436#define EDP_LANE_2 1
437#define EDP_LANE_4 3
438#define EDP_PREEMPHASIS_NONE 0
439#define EDP_PREEMPHASIS_3_5dB 1
440#define EDP_PREEMPHASIS_6dB 2
441#define EDP_PREEMPHASIS_9_5dB 3
442#define EDP_VSWING_0_4V 0
443#define EDP_VSWING_0_6V 1
444#define EDP_VSWING_0_8V 2
445#define EDP_VSWING_1_2V 3
446
447struct edp_power_seq {
448 u16 t3;
449 u16 t7;
450 u16 t9;
451 u16 t10;
452 u16 t12;
453} __attribute__ ((packed));
454
455struct edp_link_params {
456 u8 rate:4;
457 u8 lanes:4;
458 u8 preemphasis:4;
459 u8 vswing:4;
460} __attribute__ ((packed));
461
462struct bdb_edp {
463 struct edp_power_seq power_seqs[16];
464 u32 color_depth;
465 u32 sdrrs_msa_timing_delay;
466 struct edp_link_params link_params[16];
467} __attribute__ ((packed));
468
429bool intel_init_bios(struct drm_device *dev); 469bool intel_init_bios(struct drm_device *dev);
430 470
431/* 471/*
@@ -549,4 +589,21 @@ bool intel_init_bios(struct drm_device *dev);
549#define SWF14_APM_STANDBY 0x1 589#define SWF14_APM_STANDBY 0x1
550#define SWF14_APM_RESTORE 0x0 590#define SWF14_APM_RESTORE 0x0
551 591
592/* Add the device class for LFP, TV, HDMI */
593#define DEVICE_TYPE_INT_LFP 0x1022
594#define DEVICE_TYPE_INT_TV 0x1009
595#define DEVICE_TYPE_HDMI 0x60D2
596#define DEVICE_TYPE_DP 0x68C6
597#define DEVICE_TYPE_eDP 0x78C6
598
599/* define the DVO port for HDMI output type */
600#define DVO_B 1
601#define DVO_C 2
602#define DVO_D 3
603
604/* define the PORT for DP output type */
605#define PORT_IDPB 7
606#define PORT_IDPC 8
607#define PORT_IDPD 9
608
552#endif /* _I830_BIOS_H_ */ 609#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48e..759c2ef72eff 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "drm.h" 30#include "drm.h"
30#include "drm_crtc.h" 31#include "drm_crtc.h"
@@ -39,7 +40,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 40 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 41 u32 temp, reg;
41 42
42 if (IS_IGDNG(dev)) 43 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 44 reg = PCH_ADPA;
44 else 45 else
45 reg = ADPA; 46 reg = ADPA;
@@ -64,34 +65,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64 } 65 }
65 66
66 I915_WRITE(reg, temp); 67 I915_WRITE(reg, temp);
67
68 if (IS_IGD(dev)) {
69 if (mode == DRM_MODE_DPMS_OFF) {
70 /* turn off DAC */
71 temp = I915_READ(PORT_HOTPLUG_EN);
72 temp &= ~CRT_EOS_INT_EN;
73 I915_WRITE(PORT_HOTPLUG_EN, temp);
74
75 temp = I915_READ(PORT_HOTPLUG_STAT);
76 if (temp & CRT_EOS_INT_STATUS)
77 I915_WRITE(PORT_HOTPLUG_STAT,
78 CRT_EOS_INT_STATUS);
79 } else {
80 /* turn on DAC. EOS interrupt must be enabled after DAC
81 * is enabled, so it sounds not good to enable it in
82 * i915_driver_irq_postinstall()
83 * wait 12.5ms after DAC is enabled
84 */
85 msleep(13);
86 temp = I915_READ(PORT_HOTPLUG_STAT);
87 if (temp & CRT_EOS_INT_STATUS)
88 I915_WRITE(PORT_HOTPLUG_STAT,
89 CRT_EOS_INT_STATUS);
90 temp = I915_READ(PORT_HOTPLUG_EN);
91 temp |= CRT_EOS_INT_EN;
92 I915_WRITE(PORT_HOTPLUG_EN, temp);
93 }
94 }
95} 68}
96 69
97static int intel_crt_mode_valid(struct drm_connector *connector, 70static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +114,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
141 else 114 else
142 dpll_md_reg = DPLL_B_MD; 115 dpll_md_reg = DPLL_B_MD;
143 116
144 if (IS_IGDNG(dev)) 117 if (HAS_PCH_SPLIT(dev))
145 adpa_reg = PCH_ADPA; 118 adpa_reg = PCH_ADPA;
146 else 119 else
147 adpa_reg = ADPA; 120 adpa_reg = ADPA;
@@ -150,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
150 * Disable separate mode multiplier used when cloning SDVO to CRT 123 * Disable separate mode multiplier used when cloning SDVO to CRT
151 * XXX this needs to be adjusted when we really are cloning 124 * XXX this needs to be adjusted when we really are cloning
152 */ 125 */
153 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 126 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
154 dpll_md = I915_READ(dpll_md_reg); 127 dpll_md = I915_READ(dpll_md_reg);
155 I915_WRITE(dpll_md_reg, 128 I915_WRITE(dpll_md_reg,
156 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 129 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +137,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
164 137
165 if (intel_crtc->pipe == 0) { 138 if (intel_crtc->pipe == 0) {
166 adpa |= ADPA_PIPE_A_SELECT; 139 adpa |= ADPA_PIPE_A_SELECT;
167 if (!IS_IGDNG(dev)) 140 if (!HAS_PCH_SPLIT(dev))
168 I915_WRITE(BCLRPAT_A, 0); 141 I915_WRITE(BCLRPAT_A, 0);
169 } else { 142 } else {
170 adpa |= ADPA_PIPE_B_SELECT; 143 adpa |= ADPA_PIPE_B_SELECT;
171 if (!IS_IGDNG(dev)) 144 if (!HAS_PCH_SPLIT(dev))
172 I915_WRITE(BCLRPAT_B, 0); 145 I915_WRITE(BCLRPAT_B, 0);
173 } 146 }
174 147
175 I915_WRITE(adpa_reg, adpa); 148 I915_WRITE(adpa_reg, adpa);
176} 149}
177 150
178static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) 151static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
179{ 152{
180 struct drm_device *dev = connector->dev; 153 struct drm_device *dev = connector->dev;
181 struct drm_i915_private *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -185,6 +158,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
185 adpa = I915_READ(PCH_ADPA); 158 adpa = I915_READ(PCH_ADPA);
186 159
187 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 160 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
161 /* disable HPD first */
162 I915_WRITE(PCH_ADPA, adpa);
163 (void)I915_READ(PCH_ADPA);
188 164
189 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 165 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
190 ADPA_CRT_HOTPLUG_WARMUP_10MS | 166 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -194,7 +170,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
194 ADPA_CRT_HOTPLUG_ENABLE | 170 ADPA_CRT_HOTPLUG_ENABLE |
195 ADPA_CRT_HOTPLUG_FORCE_TRIGGER); 171 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
196 172
197 DRM_DEBUG("pch crt adpa 0x%x", adpa); 173 DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
198 I915_WRITE(PCH_ADPA, adpa); 174 I915_WRITE(PCH_ADPA, adpa);
199 175
200 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) 176 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +203,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
227 u32 hotplug_en; 203 u32 hotplug_en;
228 int i, tries = 0; 204 int i, tries = 0;
229 205
230 if (IS_IGDNG(dev)) 206 if (HAS_PCH_SPLIT(dev))
231 return intel_igdng_crt_detect_hotplug(connector); 207 return intel_ironlake_crt_detect_hotplug(connector);
232 208
233 /* 209 /*
234 * On 4 series desktop, CRT detect sequence need to be done twice 210 * On 4 series desktop, CRT detect sequence need to be done twice
@@ -271,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
271 247
272static bool intel_crt_detect_ddc(struct drm_connector *connector) 248static bool intel_crt_detect_ddc(struct drm_connector *connector)
273{ 249{
274 struct intel_output *intel_output = to_intel_output(connector); 250 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
275 251
276 /* CRT should always be at 0, but check anyway */ 252 /* CRT should always be at 0, but check anyway */
277 if (intel_output->type != INTEL_OUTPUT_ANALOG) 253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
278 return false; 254 return false;
279 255
280 return intel_ddc_probe(intel_output); 256 return intel_ddc_probe(intel_encoder);
281} 257}
282 258
283static enum drm_connector_status 259static enum drm_connector_status
284intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) 260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
285{ 261{
286 struct drm_encoder *encoder = &intel_output->enc; 262 struct drm_encoder *encoder = &intel_encoder->enc;
287 struct drm_device *dev = encoder->dev; 263 struct drm_device *dev = encoder->dev;
288 struct drm_i915_private *dev_priv = dev->dev_private; 264 struct drm_i915_private *dev_priv = dev->dev_private;
289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -411,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
411static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
412{ 388{
413 struct drm_device *dev = connector->dev; 389 struct drm_device *dev = connector->dev;
414 struct intel_output *intel_output = to_intel_output(connector); 390 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
415 struct drm_encoder *encoder = &intel_output->enc; 391 struct drm_encoder *encoder = &intel_encoder->enc;
416 struct drm_crtc *crtc; 392 struct drm_crtc *crtc;
417 int dpms_mode; 393 int dpms_mode;
418 enum drm_connector_status status; 394 enum drm_connector_status status;
@@ -429,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
429 405
430 /* for pre-945g platforms use load detect */ 406 /* for pre-945g platforms use load detect */
431 if (encoder->crtc && encoder->crtc->enabled) { 407 if (encoder->crtc && encoder->crtc->enabled) {
432 status = intel_crt_load_detect(encoder->crtc, intel_output); 408 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
433 } else { 409 } else {
434 crtc = intel_get_load_detect_pipe(intel_output, 410 crtc = intel_get_load_detect_pipe(intel_encoder,
435 NULL, &dpms_mode); 411 NULL, &dpms_mode);
436 if (crtc) { 412 if (crtc) {
437 status = intel_crt_load_detect(crtc, intel_output); 413 status = intel_crt_load_detect(crtc, intel_encoder);
438 intel_release_load_detect_pipe(intel_output, dpms_mode); 414 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
439 } else 415 } else
440 status = connector_status_unknown; 416 status = connector_status_unknown;
441 } 417 }
@@ -445,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
445 421
446static void intel_crt_destroy(struct drm_connector *connector) 422static void intel_crt_destroy(struct drm_connector *connector)
447{ 423{
448 struct intel_output *intel_output = to_intel_output(connector); 424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
449 425
450 intel_i2c_destroy(intel_output->ddc_bus); 426 intel_i2c_destroy(intel_encoder->ddc_bus);
451 drm_sysfs_connector_remove(connector); 427 drm_sysfs_connector_remove(connector);
452 drm_connector_cleanup(connector); 428 drm_connector_cleanup(connector);
453 kfree(connector); 429 kfree(connector);
@@ -456,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
456static int intel_crt_get_modes(struct drm_connector *connector) 432static int intel_crt_get_modes(struct drm_connector *connector)
457{ 433{
458 int ret; 434 int ret;
459 struct intel_output *intel_output = to_intel_output(connector); 435 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
460 struct i2c_adapter *ddcbus; 436 struct i2c_adapter *ddcbus;
461 struct drm_device *dev = connector->dev; 437 struct drm_device *dev = connector->dev;
462 438
463 439
464 ret = intel_ddc_get_modes(intel_output); 440 ret = intel_ddc_get_modes(intel_encoder);
465 if (ret || !IS_G4X(dev)) 441 if (ret || !IS_G4X(dev))
466 goto end; 442 goto end;
467 443
468 ddcbus = intel_output->ddc_bus; 444 ddcbus = intel_encoder->ddc_bus;
469 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 445 /* Try to probe digital port for output in DVI-I -> VGA mode. */
470 intel_output->ddc_bus = 446 intel_encoder->ddc_bus =
471 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); 447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
472 448
473 if (!intel_output->ddc_bus) { 449 if (!intel_encoder->ddc_bus) {
474 intel_output->ddc_bus = ddcbus; 450 intel_encoder->ddc_bus = ddcbus;
475 dev_printk(KERN_ERR, &connector->dev->pdev->dev, 451 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
476 "DDC bus registration failed for CRTDDC_D.\n"); 452 "DDC bus registration failed for CRTDDC_D.\n");
477 goto end; 453 goto end;
478 } 454 }
479 /* Try to get modes by GPIOD port */ 455 /* Try to get modes by GPIOD port */
480 ret = intel_ddc_get_modes(intel_output); 456 ret = intel_ddc_get_modes(intel_encoder);
481 intel_i2c_destroy(ddcbus); 457 intel_i2c_destroy(ddcbus);
482 458
483end: 459end:
@@ -530,50 +506,52 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
530void intel_crt_init(struct drm_device *dev) 506void intel_crt_init(struct drm_device *dev)
531{ 507{
532 struct drm_connector *connector; 508 struct drm_connector *connector;
533 struct intel_output *intel_output; 509 struct intel_encoder *intel_encoder;
534 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = dev->dev_private;
535 u32 i2c_reg; 511 u32 i2c_reg;
536 512
537 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 513 intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
538 if (!intel_output) 514 if (!intel_encoder)
539 return; 515 return;
540 516
541 connector = &intel_output->base; 517 connector = &intel_encoder->base;
542 drm_connector_init(dev, &intel_output->base, 518 drm_connector_init(dev, &intel_encoder->base,
543 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
544 520
545 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, 521 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
546 DRM_MODE_ENCODER_DAC); 522 DRM_MODE_ENCODER_DAC);
547 523
548 drm_mode_connector_attach_encoder(&intel_output->base, 524 drm_mode_connector_attach_encoder(&intel_encoder->base,
549 &intel_output->enc); 525 &intel_encoder->enc);
550 526
551 /* Set up the DDC bus. */ 527 /* Set up the DDC bus. */
552 if (IS_IGDNG(dev)) 528 if (HAS_PCH_SPLIT(dev))
553 i2c_reg = PCH_GPIOA; 529 i2c_reg = PCH_GPIOA;
554 else { 530 else {
555 i2c_reg = GPIOA; 531 i2c_reg = GPIOA;
556 /* Use VBT information for CRT DDC if available */ 532 /* Use VBT information for CRT DDC if available */
557 if (dev_priv->crt_ddc_bus != -1) 533 if (dev_priv->crt_ddc_bus != 0)
558 i2c_reg = dev_priv->crt_ddc_bus; 534 i2c_reg = dev_priv->crt_ddc_bus;
559 } 535 }
560 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); 536 intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
561 if (!intel_output->ddc_bus) { 537 if (!intel_encoder->ddc_bus) {
562 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 538 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
563 "failed.\n"); 539 "failed.\n");
564 return; 540 return;
565 } 541 }
566 542
567 intel_output->type = INTEL_OUTPUT_ANALOG; 543 intel_encoder->type = INTEL_OUTPUT_ANALOG;
568 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 544 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
569 (1 << INTEL_ANALOG_CLONE_BIT) | 545 (1 << INTEL_ANALOG_CLONE_BIT) |
570 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 546 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
571 intel_output->crtc_mask = (1 << 0) | (1 << 1); 547 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
572 connector->interlace_allowed = 0; 548 connector->interlace_allowed = 0;
573 connector->doublescan_allowed = 0; 549 connector->doublescan_allowed = 0;
574 550
575 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); 551 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
576 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 552 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
577 553
578 drm_sysfs_connector_add(connector); 554 drm_sysfs_connector_add(connector);
555
556 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
579} 557}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57a..c7502b6b1600 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -28,11 +28,12 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/slab.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
33#include "i915_drm.h" 34#include "i915_drm.h"
34#include "i915_drv.h" 35#include "i915_drv.h"
35#include "intel_dp.h" 36#include "drm_dp_helper.h"
36 37
37#include "drm_crtc_helper.h" 38#include "drm_crtc_helper.h"
38 39
@@ -70,8 +71,6 @@ struct intel_limit {
70 intel_p2_t p2; 71 intel_p2_t p2;
71 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 72 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
72 int, int, intel_clock_t *); 73 int, int, intel_clock_t *);
73 bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
74 int, int, intel_clock_t *);
75}; 74};
76 75
77#define I8XX_DOT_MIN 25000 76#define I8XX_DOT_MIN 25000
@@ -102,32 +101,32 @@ struct intel_limit {
102#define I9XX_DOT_MAX 400000 101#define I9XX_DOT_MAX 400000
103#define I9XX_VCO_MIN 1400000 102#define I9XX_VCO_MIN 1400000
104#define I9XX_VCO_MAX 2800000 103#define I9XX_VCO_MAX 2800000
105#define IGD_VCO_MIN 1700000 104#define PINEVIEW_VCO_MIN 1700000
106#define IGD_VCO_MAX 3500000 105#define PINEVIEW_VCO_MAX 3500000
107#define I9XX_N_MIN 1 106#define I9XX_N_MIN 1
108#define I9XX_N_MAX 6 107#define I9XX_N_MAX 6
109/* IGD's Ncounter is a ring counter */ 108/* Pineview's Ncounter is a ring counter */
110#define IGD_N_MIN 3 109#define PINEVIEW_N_MIN 3
111#define IGD_N_MAX 6 110#define PINEVIEW_N_MAX 6
112#define I9XX_M_MIN 70 111#define I9XX_M_MIN 70
113#define I9XX_M_MAX 120 112#define I9XX_M_MAX 120
114#define IGD_M_MIN 2 113#define PINEVIEW_M_MIN 2
115#define IGD_M_MAX 256 114#define PINEVIEW_M_MAX 256
116#define I9XX_M1_MIN 10 115#define I9XX_M1_MIN 10
117#define I9XX_M1_MAX 22 116#define I9XX_M1_MAX 22
118#define I9XX_M2_MIN 5 117#define I9XX_M2_MIN 5
119#define I9XX_M2_MAX 9 118#define I9XX_M2_MAX 9
120/* IGD M1 is reserved, and must be 0 */ 119/* Pineview M1 is reserved, and must be 0 */
121#define IGD_M1_MIN 0 120#define PINEVIEW_M1_MIN 0
122#define IGD_M1_MAX 0 121#define PINEVIEW_M1_MAX 0
123#define IGD_M2_MIN 0 122#define PINEVIEW_M2_MIN 0
124#define IGD_M2_MAX 254 123#define PINEVIEW_M2_MAX 254
125#define I9XX_P_SDVO_DAC_MIN 5 124#define I9XX_P_SDVO_DAC_MIN 5
126#define I9XX_P_SDVO_DAC_MAX 80 125#define I9XX_P_SDVO_DAC_MAX 80
127#define I9XX_P_LVDS_MIN 7 126#define I9XX_P_LVDS_MIN 7
128#define I9XX_P_LVDS_MAX 98 127#define I9XX_P_LVDS_MAX 98
129#define IGD_P_LVDS_MIN 7 128#define PINEVIEW_P_LVDS_MIN 7
130#define IGD_P_LVDS_MAX 112 129#define PINEVIEW_P_LVDS_MAX 112
131#define I9XX_P1_MIN 1 130#define I9XX_P1_MIN 1
132#define I9XX_P1_MAX 8 131#define I9XX_P1_MAX 8
133#define I9XX_P2_SDVO_DAC_SLOW 10 132#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,53 +233,108 @@ struct intel_limit {
234#define G4X_P2_DISPLAY_PORT_FAST 10 233#define G4X_P2_DISPLAY_PORT_FAST 10
235#define G4X_P2_DISPLAY_PORT_LIMIT 0 234#define G4X_P2_DISPLAY_PORT_LIMIT 0
236 235
237/* IGDNG */ 236/* Ironlake / Sandybridge */
238/* as we calculate clock using (register_value + 2) for 237/* as we calculate clock using (register_value + 2) for
239 N/M1/M2, so here the range value for them is (actual_value-2). 238 N/M1/M2, so here the range value for them is (actual_value-2).
240 */ 239 */
241#define IGDNG_DOT_MIN 25000 240#define IRONLAKE_DOT_MIN 25000
242#define IGDNG_DOT_MAX 350000 241#define IRONLAKE_DOT_MAX 350000
243#define IGDNG_VCO_MIN 1760000 242#define IRONLAKE_VCO_MIN 1760000
244#define IGDNG_VCO_MAX 3510000 243#define IRONLAKE_VCO_MAX 3510000
245#define IGDNG_N_MIN 1 244#define IRONLAKE_M1_MIN 12
246#define IGDNG_N_MAX 5 245#define IRONLAKE_M1_MAX 22
247#define IGDNG_M_MIN 79 246#define IRONLAKE_M2_MIN 5
248#define IGDNG_M_MAX 118 247#define IRONLAKE_M2_MAX 9
249#define IGDNG_M1_MIN 12 248#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
250#define IGDNG_M1_MAX 23 249
251#define IGDNG_M2_MIN 5 250/* We have parameter ranges for different type of outputs. */
252#define IGDNG_M2_MAX 9 251
253#define IGDNG_P_SDVO_DAC_MIN 5 252/* DAC & HDMI Refclk 120Mhz */
254#define IGDNG_P_SDVO_DAC_MAX 80 253#define IRONLAKE_DAC_N_MIN 1
255#define IGDNG_P_LVDS_MIN 28 254#define IRONLAKE_DAC_N_MAX 5
256#define IGDNG_P_LVDS_MAX 112 255#define IRONLAKE_DAC_M_MIN 79
257#define IGDNG_P1_MIN 1 256#define IRONLAKE_DAC_M_MAX 127
258#define IGDNG_P1_MAX 8 257#define IRONLAKE_DAC_P_MIN 5
259#define IGDNG_P2_SDVO_DAC_SLOW 10 258#define IRONLAKE_DAC_P_MAX 80
260#define IGDNG_P2_SDVO_DAC_FAST 5 259#define IRONLAKE_DAC_P1_MIN 1
261#define IGDNG_P2_LVDS_SLOW 14 /* single channel */ 260#define IRONLAKE_DAC_P1_MAX 8
262#define IGDNG_P2_LVDS_FAST 7 /* double channel */ 261#define IRONLAKE_DAC_P2_SLOW 10
263#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */ 262#define IRONLAKE_DAC_P2_FAST 5
263
264/* LVDS single-channel 120Mhz refclk */
265#define IRONLAKE_LVDS_S_N_MIN 1
266#define IRONLAKE_LVDS_S_N_MAX 3
267#define IRONLAKE_LVDS_S_M_MIN 79
268#define IRONLAKE_LVDS_S_M_MAX 118
269#define IRONLAKE_LVDS_S_P_MIN 28
270#define IRONLAKE_LVDS_S_P_MAX 112
271#define IRONLAKE_LVDS_S_P1_MIN 2
272#define IRONLAKE_LVDS_S_P1_MAX 8
273#define IRONLAKE_LVDS_S_P2_SLOW 14
274#define IRONLAKE_LVDS_S_P2_FAST 14
275
276/* LVDS dual-channel 120Mhz refclk */
277#define IRONLAKE_LVDS_D_N_MIN 1
278#define IRONLAKE_LVDS_D_N_MAX 3
279#define IRONLAKE_LVDS_D_M_MIN 79
280#define IRONLAKE_LVDS_D_M_MAX 127
281#define IRONLAKE_LVDS_D_P_MIN 14
282#define IRONLAKE_LVDS_D_P_MAX 56
283#define IRONLAKE_LVDS_D_P1_MIN 2
284#define IRONLAKE_LVDS_D_P1_MAX 8
285#define IRONLAKE_LVDS_D_P2_SLOW 7
286#define IRONLAKE_LVDS_D_P2_FAST 7
287
288/* LVDS single-channel 100Mhz refclk */
289#define IRONLAKE_LVDS_S_SSC_N_MIN 1
290#define IRONLAKE_LVDS_S_SSC_N_MAX 2
291#define IRONLAKE_LVDS_S_SSC_M_MIN 79
292#define IRONLAKE_LVDS_S_SSC_M_MAX 126
293#define IRONLAKE_LVDS_S_SSC_P_MIN 28
294#define IRONLAKE_LVDS_S_SSC_P_MAX 112
295#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
296#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
297#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
298#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
299
300/* LVDS dual-channel 100Mhz refclk */
301#define IRONLAKE_LVDS_D_SSC_N_MIN 1
302#define IRONLAKE_LVDS_D_SSC_N_MAX 3
303#define IRONLAKE_LVDS_D_SSC_M_MIN 79
304#define IRONLAKE_LVDS_D_SSC_M_MAX 126
305#define IRONLAKE_LVDS_D_SSC_P_MIN 14
306#define IRONLAKE_LVDS_D_SSC_P_MAX 42
307#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
308#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
309#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
310#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
311
312/* DisplayPort */
313#define IRONLAKE_DP_N_MIN 1
314#define IRONLAKE_DP_N_MAX 2
315#define IRONLAKE_DP_M_MIN 81
316#define IRONLAKE_DP_M_MAX 90
317#define IRONLAKE_DP_P_MIN 10
318#define IRONLAKE_DP_P_MAX 20
319#define IRONLAKE_DP_P2_FAST 10
320#define IRONLAKE_DP_P2_SLOW 10
321#define IRONLAKE_DP_P2_LIMIT 0
322#define IRONLAKE_DP_P1_MIN 1
323#define IRONLAKE_DP_P1_MAX 2
264 324
265static bool 325static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 326intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
267 int target, int refclk, intel_clock_t *best_clock); 327 int target, int refclk, intel_clock_t *best_clock);
268static bool 328static bool
269intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
270 int target, int refclk, intel_clock_t *best_clock);
271static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 329intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 330 int target, int refclk, intel_clock_t *best_clock);
274static bool
275intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock);
277 331
278static bool 332static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 333intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
280 int target, int refclk, intel_clock_t *best_clock); 334 int target, int refclk, intel_clock_t *best_clock);
281static bool 335static bool
282intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, 336intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
283 int target, int refclk, intel_clock_t *best_clock); 337 int target, int refclk, intel_clock_t *best_clock);
284 338
285static const intel_limit_t intel_limits_i8xx_dvo = { 339static const intel_limit_t intel_limits_i8xx_dvo = {
286 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 340 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -294,7 +348,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
294 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 348 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
295 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 349 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
296 .find_pll = intel_find_best_PLL, 350 .find_pll = intel_find_best_PLL,
297 .find_reduced_pll = intel_find_best_reduced_PLL,
298}; 351};
299 352
300static const intel_limit_t intel_limits_i8xx_lvds = { 353static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +362,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
309 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 362 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
310 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 363 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
311 .find_pll = intel_find_best_PLL, 364 .find_pll = intel_find_best_PLL,
312 .find_reduced_pll = intel_find_best_reduced_PLL,
313}; 365};
314 366
315static const intel_limit_t intel_limits_i9xx_sdvo = { 367static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +376,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
324 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 376 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
325 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 377 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
326 .find_pll = intel_find_best_PLL, 378 .find_pll = intel_find_best_PLL,
327 .find_reduced_pll = intel_find_best_reduced_PLL,
328}; 379};
329 380
330static const intel_limit_t intel_limits_i9xx_lvds = { 381static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +393,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
342 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 393 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
343 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 394 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
344 .find_pll = intel_find_best_PLL, 395 .find_pll = intel_find_best_PLL,
345 .find_reduced_pll = intel_find_best_reduced_PLL,
346}; 396};
347 397
348 /* below parameter and function is for G4X Chipset Family*/ 398 /* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +410,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
360 .p2_fast = G4X_P2_SDVO_FAST 410 .p2_fast = G4X_P2_SDVO_FAST
361 }, 411 },
362 .find_pll = intel_g4x_find_best_PLL, 412 .find_pll = intel_g4x_find_best_PLL,
363 .find_reduced_pll = intel_g4x_find_best_PLL,
364}; 413};
365 414
366static const intel_limit_t intel_limits_g4x_hdmi = { 415static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +426,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
377 .p2_fast = G4X_P2_HDMI_DAC_FAST 426 .p2_fast = G4X_P2_HDMI_DAC_FAST
378 }, 427 },
379 .find_pll = intel_g4x_find_best_PLL, 428 .find_pll = intel_g4x_find_best_PLL,
380 .find_reduced_pll = intel_g4x_find_best_PLL,
381}; 429};
382 430
383static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 431static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +450,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
402 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 450 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
403 }, 451 },
404 .find_pll = intel_g4x_find_best_PLL, 452 .find_pll = intel_g4x_find_best_PLL,
405 .find_reduced_pll = intel_g4x_find_best_PLL,
406}; 453};
407 454
408static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 455static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +474,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
427 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 474 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
428 }, 475 },
429 .find_pll = intel_g4x_find_best_PLL, 476 .find_pll = intel_g4x_find_best_PLL,
430 .find_reduced_pll = intel_g4x_find_best_PLL,
431}; 477};
432 478
433static const intel_limit_t intel_limits_g4x_display_port = { 479static const intel_limit_t intel_limits_g4x_display_port = {
@@ -453,74 +499,162 @@ static const intel_limit_t intel_limits_g4x_display_port = {
453 .find_pll = intel_find_pll_g4x_dp, 499 .find_pll = intel_find_pll_g4x_dp,
454}; 500};
455 501
456static const intel_limit_t intel_limits_igd_sdvo = { 502static const intel_limit_t intel_limits_pineview_sdvo = {
457 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 503 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
458 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 504 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
459 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 505 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
460 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 506 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
461 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 507 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
462 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 508 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
463 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 509 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
464 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 510 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 511 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
466 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 512 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
467 .find_pll = intel_find_best_PLL, 513 .find_pll = intel_find_best_PLL,
468 .find_reduced_pll = intel_find_best_reduced_PLL,
469}; 514};
470 515
471static const intel_limit_t intel_limits_igd_lvds = { 516static const intel_limit_t intel_limits_pineview_lvds = {
472 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 517 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
473 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 518 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
474 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 519 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
475 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 520 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
476 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 521 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
477 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 522 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
478 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, 523 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
479 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 524 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
480 /* IGD only supports single-channel mode. */ 525 /* Pineview only supports single-channel mode. */
481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 526 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 527 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
483 .find_pll = intel_find_best_PLL, 528 .find_pll = intel_find_best_PLL,
484 .find_reduced_pll = intel_find_best_reduced_PLL,
485}; 529};
486 530
487static const intel_limit_t intel_limits_igdng_sdvo = { 531static const intel_limit_t intel_limits_ironlake_dac = {
488 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 532 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
489 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 533 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
490 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 534 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
491 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 535 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
492 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 536 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
493 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 537 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
494 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX }, 538 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
495 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 539 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
496 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 540 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, 541 .p2_slow = IRONLAKE_DAC_P2_SLOW,
498 .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, 542 .p2_fast = IRONLAKE_DAC_P2_FAST },
499 .find_pll = intel_igdng_find_best_PLL, 543 .find_pll = intel_g4x_find_best_PLL,
544};
545
546static const intel_limit_t intel_limits_ironlake_single_lvds = {
547 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
548 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
549 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
550 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
551 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
552 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
553 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
554 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
555 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
556 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
557 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
558 .find_pll = intel_g4x_find_best_PLL,
559};
560
561static const intel_limit_t intel_limits_ironlake_dual_lvds = {
562 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
563 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
564 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
565 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
566 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
567 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
568 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
569 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
570 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
571 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
572 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
573 .find_pll = intel_g4x_find_best_PLL,
574};
575
576static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
577 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
578 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
579 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
580 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
581 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
582 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
583 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
584 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
585 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
586 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
587 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
588 .find_pll = intel_g4x_find_best_PLL,
500}; 589};
501 590
502static const intel_limit_t intel_limits_igdng_lvds = { 591static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
503 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 592 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
504 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 593 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
505 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 594 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
506 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 595 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
507 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 596 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
508 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 597 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
509 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX }, 598 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
510 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 599 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
511 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 600 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IGDNG_P2_LVDS_SLOW, 601 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
513 .p2_fast = IGDNG_P2_LVDS_FAST }, 602 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
514 .find_pll = intel_igdng_find_best_PLL, 603 .find_pll = intel_g4x_find_best_PLL,
515}; 604};
516 605
517static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) 606static const intel_limit_t intel_limits_ironlake_display_port = {
607 .dot = { .min = IRONLAKE_DOT_MIN,
608 .max = IRONLAKE_DOT_MAX },
609 .vco = { .min = IRONLAKE_VCO_MIN,
610 .max = IRONLAKE_VCO_MAX},
611 .n = { .min = IRONLAKE_DP_N_MIN,
612 .max = IRONLAKE_DP_N_MAX },
613 .m = { .min = IRONLAKE_DP_M_MIN,
614 .max = IRONLAKE_DP_M_MAX },
615 .m1 = { .min = IRONLAKE_M1_MIN,
616 .max = IRONLAKE_M1_MAX },
617 .m2 = { .min = IRONLAKE_M2_MIN,
618 .max = IRONLAKE_M2_MAX },
619 .p = { .min = IRONLAKE_DP_P_MIN,
620 .max = IRONLAKE_DP_P_MAX },
621 .p1 = { .min = IRONLAKE_DP_P1_MIN,
622 .max = IRONLAKE_DP_P1_MAX},
623 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
624 .p2_slow = IRONLAKE_DP_P2_SLOW,
625 .p2_fast = IRONLAKE_DP_P2_FAST },
626 .find_pll = intel_find_pll_ironlake_dp,
627};
628
629static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
518{ 630{
631 struct drm_device *dev = crtc->dev;
632 struct drm_i915_private *dev_priv = dev->dev_private;
519 const intel_limit_t *limit; 633 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 634 int refclk = 120;
521 limit = &intel_limits_igdng_lvds; 635
636 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
637 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
638 refclk = 100;
639
640 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
641 LVDS_CLKB_POWER_UP) {
642 /* LVDS dual channel */
643 if (refclk == 100)
644 limit = &intel_limits_ironlake_dual_lvds_100m;
645 else
646 limit = &intel_limits_ironlake_dual_lvds;
647 } else {
648 if (refclk == 100)
649 limit = &intel_limits_ironlake_single_lvds_100m;
650 else
651 limit = &intel_limits_ironlake_single_lvds;
652 }
653 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
654 HAS_eDP)
655 limit = &intel_limits_ironlake_display_port;
522 else 656 else
523 limit = &intel_limits_igdng_sdvo; 657 limit = &intel_limits_ironlake_dac;
524 658
525 return limit; 659 return limit;
526} 660}
@@ -557,20 +691,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
557 struct drm_device *dev = crtc->dev; 691 struct drm_device *dev = crtc->dev;
558 const intel_limit_t *limit; 692 const intel_limit_t *limit;
559 693
560 if (IS_IGDNG(dev)) 694 if (HAS_PCH_SPLIT(dev))
561 limit = intel_igdng_limit(crtc); 695 limit = intel_ironlake_limit(crtc);
562 else if (IS_G4X(dev)) { 696 else if (IS_G4X(dev)) {
563 limit = intel_g4x_limit(crtc); 697 limit = intel_g4x_limit(crtc);
564 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 698 } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 699 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
566 limit = &intel_limits_i9xx_lvds; 700 limit = &intel_limits_i9xx_lvds;
567 else 701 else
568 limit = &intel_limits_i9xx_sdvo; 702 limit = &intel_limits_i9xx_sdvo;
569 } else if (IS_IGD(dev)) { 703 } else if (IS_PINEVIEW(dev)) {
570 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 704 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
571 limit = &intel_limits_igd_lvds; 705 limit = &intel_limits_pineview_lvds;
572 else 706 else
573 limit = &intel_limits_igd_sdvo; 707 limit = &intel_limits_pineview_sdvo;
574 } else { 708 } else {
575 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 709 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
576 limit = &intel_limits_i8xx_lvds; 710 limit = &intel_limits_i8xx_lvds;
@@ -580,8 +714,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
580 return limit; 714 return limit;
581} 715}
582 716
583/* m1 is reserved as 0 in IGD, n is a ring counter */ 717/* m1 is reserved as 0 in Pineview, n is a ring counter */
584static void igd_clock(int refclk, intel_clock_t *clock) 718static void pineview_clock(int refclk, intel_clock_t *clock)
585{ 719{
586 clock->m = clock->m2 + 2; 720 clock->m = clock->m2 + 2;
587 clock->p = clock->p1 * clock->p2; 721 clock->p = clock->p1 * clock->p2;
@@ -591,8 +725,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
591 725
592static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 726static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
593{ 727{
594 if (IS_IGD(dev)) { 728 if (IS_PINEVIEW(dev)) {
595 igd_clock(refclk, clock); 729 pineview_clock(refclk, clock);
596 return; 730 return;
597 } 731 }
598 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 732 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
@@ -613,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
613 list_for_each_entry(l_entry, &mode_config->connector_list, head) { 747 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
614 if (l_entry->encoder && 748 if (l_entry->encoder &&
615 l_entry->encoder->crtc == crtc) { 749 l_entry->encoder->crtc == crtc) {
616 struct intel_output *intel_output = to_intel_output(l_entry); 750 struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
617 if (intel_output->type == type) 751 if (intel_encoder->type == type)
618 return true; 752 return true;
619 } 753 }
620 } 754 }
621 return false; 755 return false;
622} 756}
623 757
624struct drm_connector * 758static struct drm_connector *
625intel_pipe_get_output (struct drm_crtc *crtc) 759intel_pipe_get_connector (struct drm_crtc *crtc)
626{ 760{
627 struct drm_device *dev = crtc->dev; 761 struct drm_device *dev = crtc->dev;
628 struct drm_mode_config *mode_config = &dev->mode_config; 762 struct drm_mode_config *mode_config = &dev->mode_config;
@@ -657,7 +791,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
657 INTELPllInvalid ("m2 out of range\n"); 791 INTELPllInvalid ("m2 out of range\n");
658 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 792 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
659 INTELPllInvalid ("m1 out of range\n"); 793 INTELPllInvalid ("m1 out of range\n");
660 if (clock->m1 <= clock->m2 && !IS_IGD(dev)) 794 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
661 INTELPllInvalid ("m1 <= m2\n"); 795 INTELPllInvalid ("m1 <= m2\n");
662 if (clock->m < limit->m.min || limit->m.max < clock->m) 796 if (clock->m < limit->m.min || limit->m.max < clock->m)
663 INTELPllInvalid ("m out of range\n"); 797 INTELPllInvalid ("m out of range\n");
@@ -706,16 +840,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
706 840
707 memset (best_clock, 0, sizeof (*best_clock)); 841 memset (best_clock, 0, sizeof (*best_clock));
708 842
709 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 843 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
710 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 844 clock.m1++) {
711 clock.m1++) { 845 for (clock.m2 = limit->m2.min;
712 for (clock.m2 = limit->m2.min; 846 clock.m2 <= limit->m2.max; clock.m2++) {
713 clock.m2 <= limit->m2.max; clock.m2++) { 847 /* m1 is always 0 in Pineview */
714 /* m1 is always 0 in IGD */ 848 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
715 if (clock.m2 >= clock.m1 && !IS_IGD(dev)) 849 break;
716 break; 850 for (clock.n = limit->n.min;
717 for (clock.n = limit->n.min; 851 clock.n <= limit->n.max; clock.n++) {
718 clock.n <= limit->n.max; clock.n++) { 852 for (clock.p1 = limit->p1.min;
853 clock.p1 <= limit->p1.max; clock.p1++) {
719 int this_err; 854 int this_err;
720 855
721 intel_clock(dev, refclk, &clock); 856 intel_clock(dev, refclk, &clock);
@@ -736,46 +871,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
736 return (err != target); 871 return (err != target);
737} 872}
738 873
739
740static bool
741intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
742 int target, int refclk, intel_clock_t *best_clock)
743
744{
745 struct drm_device *dev = crtc->dev;
746 intel_clock_t clock;
747 int err = target;
748 bool found = false;
749
750 memcpy(&clock, best_clock, sizeof(intel_clock_t));
751
752 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
753 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
754 /* m1 is always 0 in IGD */
755 if (clock.m2 >= clock.m1 && !IS_IGD(dev))
756 break;
757 for (clock.n = limit->n.min; clock.n <= limit->n.max;
758 clock.n++) {
759 int this_err;
760
761 intel_clock(dev, refclk, &clock);
762
763 if (!intel_PLL_is_valid(crtc, &clock))
764 continue;
765
766 this_err = abs(clock.dot - target);
767 if (this_err < err) {
768 *best_clock = clock;
769 err = this_err;
770 found = true;
771 }
772 }
773 }
774 }
775
776 return found;
777}
778
779static bool 874static bool
780intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 875intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
781 int target, int refclk, intel_clock_t *best_clock) 876 int target, int refclk, intel_clock_t *best_clock)
@@ -790,7 +885,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
790 found = false; 885 found = false;
791 886
792 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 887 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
793 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 888 int lvds_reg;
889
890 if (HAS_PCH_SPLIT(dev))
891 lvds_reg = PCH_LVDS;
892 else
893 lvds_reg = LVDS;
894 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
794 LVDS_CLKB_POWER_UP) 895 LVDS_CLKB_POWER_UP)
795 clock.p2 = limit->p2.p2_fast; 896 clock.p2 = limit->p2.p2_fast;
796 else 897 else
@@ -833,11 +934,16 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
833} 934}
834 935
835static bool 936static bool
836intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 937intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
837 int target, int refclk, intel_clock_t *best_clock) 938 int target, int refclk, intel_clock_t *best_clock)
838{ 939{
839 struct drm_device *dev = crtc->dev; 940 struct drm_device *dev = crtc->dev;
840 intel_clock_t clock; 941 intel_clock_t clock;
942
943 /* return directly when it is eDP */
944 if (HAS_eDP)
945 return true;
946
841 if (target < 200000) { 947 if (target < 200000) {
842 clock.n = 1; 948 clock.n = 1;
843 clock.p1 = 2; 949 clock.p1 = 2;
@@ -856,68 +962,6 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
856 return true; 962 return true;
857} 963}
858 964
859static bool
860intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
861 int target, int refclk, intel_clock_t *best_clock)
862{
863 struct drm_device *dev = crtc->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private;
865 intel_clock_t clock;
866 int err_most = 47;
867 int err_min = 10000;
868
869 /* eDP has only 2 clock choice, no n/m/p setting */
870 if (HAS_eDP)
871 return true;
872
873 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
874 return intel_find_pll_igdng_dp(limit, crtc, target,
875 refclk, best_clock);
876
877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
878 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
879 LVDS_CLKB_POWER_UP)
880 clock.p2 = limit->p2.p2_fast;
881 else
882 clock.p2 = limit->p2.p2_slow;
883 } else {
884 if (target < limit->p2.dot_limit)
885 clock.p2 = limit->p2.p2_slow;
886 else
887 clock.p2 = limit->p2.p2_fast;
888 }
889
890 memset(best_clock, 0, sizeof(*best_clock));
891 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
892 /* based on hardware requriment prefer smaller n to precision */
893 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
894 /* based on hardware requirment prefere larger m1,m2 */
895 for (clock.m1 = limit->m1.max;
896 clock.m1 >= limit->m1.min; clock.m1--) {
897 for (clock.m2 = limit->m2.max;
898 clock.m2 >= limit->m2.min; clock.m2--) {
899 int this_err;
900
901 intel_clock(dev, refclk, &clock);
902 if (!intel_PLL_is_valid(crtc, &clock))
903 continue;
904 this_err = abs((10000 - (target*10000/clock.dot)));
905 if (this_err < err_most) {
906 *best_clock = clock;
907 /* found on first matching */
908 goto out;
909 } else if (this_err < err_min) {
910 *best_clock = clock;
911 err_min = this_err;
912 }
913 }
914 }
915 }
916 }
917out:
918 return true;
919}
920
921/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 965/* DisplayPort has only two frequencies, 162MHz and 270MHz */
922static bool 966static bool
923intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 967intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -949,7 +993,7 @@ void
949intel_wait_for_vblank(struct drm_device *dev) 993intel_wait_for_vblank(struct drm_device *dev)
950{ 994{
951 /* Wait for 20ms, i.e. one cycle at 50hz. */ 995 /* Wait for 20ms, i.e. one cycle at 50hz. */
952 mdelay(20); 996 msleep(20);
953} 997}
954 998
955/* Parameters have changed, update FBC info */ 999/* Parameters have changed, update FBC info */
@@ -959,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
959 struct drm_i915_private *dev_priv = dev->dev_private; 1003 struct drm_i915_private *dev_priv = dev->dev_private;
960 struct drm_framebuffer *fb = crtc->fb; 1004 struct drm_framebuffer *fb = crtc->fb;
961 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1005 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
962 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1006 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
963 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
964 int plane, i; 1008 int plane, i;
965 u32 fbc_ctl, fbc_ctl2; 1009 u32 fbc_ctl, fbc_ctl2;
@@ -988,13 +1032,15 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
988 1032
989 /* enable it... */ 1033 /* enable it... */
990 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1034 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1035 if (IS_I945GM(dev))
1036 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
991 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1037 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
992 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1038 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
993 if (obj_priv->tiling_mode != I915_TILING_NONE) 1039 if (obj_priv->tiling_mode != I915_TILING_NONE)
994 fbc_ctl |= dev_priv->cfb_fence; 1040 fbc_ctl |= dev_priv->cfb_fence;
995 I915_WRITE(FBC_CONTROL, fbc_ctl); 1041 I915_WRITE(FBC_CONTROL, fbc_ctl);
996 1042
997 DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", 1043 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
998 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 1044 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
999} 1045}
1000 1046
@@ -1017,7 +1063,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
1017 1063
1018 intel_wait_for_vblank(dev); 1064 intel_wait_for_vblank(dev);
1019 1065
1020 DRM_DEBUG("disabled FBC\n"); 1066 DRM_DEBUG_KMS("disabled FBC\n");
1021} 1067}
1022 1068
1023static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1069static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1034,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1034 struct drm_i915_private *dev_priv = dev->dev_private; 1080 struct drm_i915_private *dev_priv = dev->dev_private;
1035 struct drm_framebuffer *fb = crtc->fb; 1081 struct drm_framebuffer *fb = crtc->fb;
1036 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1082 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1037 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1083 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1038 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1039 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : 1085 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
1040 DPFC_CTL_PLANEB); 1086 DPFC_CTL_PLANEB);
@@ -1062,7 +1108,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1062 /* enable it... */ 1108 /* enable it... */
1063 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 1109 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1064 1110
1065 DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); 1111 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1066} 1112}
1067 1113
1068void g4x_disable_fbc(struct drm_device *dev) 1114void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1122,7 @@ void g4x_disable_fbc(struct drm_device *dev)
1076 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1122 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1077 intel_wait_for_vblank(dev); 1123 intel_wait_for_vblank(dev);
1078 1124
1079 DRM_DEBUG("disabled FBC\n"); 1125 DRM_DEBUG_KMS("disabled FBC\n");
1080} 1126}
1081 1127
1082static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1128static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1130,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1130 return; 1176 return;
1131 1177
1132 intel_fb = to_intel_framebuffer(fb); 1178 intel_fb = to_intel_framebuffer(fb);
1133 obj_priv = intel_fb->obj->driver_private; 1179 obj_priv = to_intel_bo(intel_fb->obj);
1134 1180
1135 /* 1181 /*
1136 * If FBC is already on, we just have to verify that we can 1182 * If FBC is already on, we just have to verify that we can
@@ -1141,25 +1187,32 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1141 * - going to an unsupported config (interlace, pixel multiply, etc.) 1187 * - going to an unsupported config (interlace, pixel multiply, etc.)
1142 */ 1188 */
1143 if (intel_fb->obj->size > dev_priv->cfb_size) { 1189 if (intel_fb->obj->size > dev_priv->cfb_size) {
1144 DRM_DEBUG("framebuffer too large, disabling compression\n"); 1190 DRM_DEBUG_KMS("framebuffer too large, disabling "
1191 "compression\n");
1192 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1145 goto out_disable; 1193 goto out_disable;
1146 } 1194 }
1147 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1195 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1148 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1196 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1149 DRM_DEBUG("mode incompatible with compression, disabling\n"); 1197 DRM_DEBUG_KMS("mode incompatible with compression, "
1198 "disabling\n");
1199 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1150 goto out_disable; 1200 goto out_disable;
1151 } 1201 }
1152 if ((mode->hdisplay > 2048) || 1202 if ((mode->hdisplay > 2048) ||
1153 (mode->vdisplay > 1536)) { 1203 (mode->vdisplay > 1536)) {
1154 DRM_DEBUG("mode too large for compression, disabling\n"); 1204 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1205 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1155 goto out_disable; 1206 goto out_disable;
1156 } 1207 }
1157 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1208 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1158 DRM_DEBUG("plane not 0, disabling compression\n"); 1209 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1210 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1159 goto out_disable; 1211 goto out_disable;
1160 } 1212 }
1161 if (obj_priv->tiling_mode != I915_TILING_X) { 1213 if (obj_priv->tiling_mode != I915_TILING_X) {
1162 DRM_DEBUG("framebuffer not tiled, disabling compression\n"); 1214 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1215 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1163 goto out_disable; 1216 goto out_disable;
1164 } 1217 }
1165 1218
@@ -1181,13 +1234,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1181 return; 1234 return;
1182 1235
1183out_disable: 1236out_disable:
1184 DRM_DEBUG("unsupported config, disabling FBC\n"); 1237 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1185 /* Multiple disables should be harmless */ 1238 /* Multiple disables should be harmless */
1186 if (dev_priv->display.fbc_enabled(crtc)) 1239 if (dev_priv->display.fbc_enabled(crtc))
1187 dev_priv->display.disable_fbc(dev); 1240 dev_priv->display.disable_fbc(dev);
1188} 1241}
1189 1242
1190static int 1243static int
1244intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1245{
1246 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1247 u32 alignment;
1248 int ret;
1249
1250 switch (obj_priv->tiling_mode) {
1251 case I915_TILING_NONE:
1252 alignment = 64 * 1024;
1253 break;
1254 case I915_TILING_X:
1255 /* pin() will align the object as required by fence */
1256 alignment = 0;
1257 break;
1258 case I915_TILING_Y:
1259 /* FIXME: Is this true? */
1260 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1261 return -EINVAL;
1262 default:
1263 BUG();
1264 }
1265
1266 ret = i915_gem_object_pin(obj, alignment);
1267 if (ret != 0)
1268 return ret;
1269
1270 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1271 * fence, whereas 965+ only requires a fence if using
1272 * framebuffer compression. For simplicity, we always install
1273 * a fence as the cost is not that onerous.
1274 */
1275 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1276 obj_priv->tiling_mode != I915_TILING_NONE) {
1277 ret = i915_gem_object_get_fence_reg(obj);
1278 if (ret != 0) {
1279 i915_gem_object_unpin(obj);
1280 return ret;
1281 }
1282 }
1283
1284 return 0;
1285}
1286
1287static int
1191intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1288intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1192 struct drm_framebuffer *old_fb) 1289 struct drm_framebuffer *old_fb)
1193{ 1290{
@@ -1206,12 +1303,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1206 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; 1303 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1207 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); 1304 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1208 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1305 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1209 u32 dspcntr, alignment; 1306 u32 dspcntr;
1210 int ret; 1307 int ret;
1211 1308
1212 /* no fb bound */ 1309 /* no fb bound */
1213 if (!crtc->fb) { 1310 if (!crtc->fb) {
1214 DRM_DEBUG("No FB bound\n"); 1311 DRM_DEBUG_KMS("No FB bound\n");
1215 return 0; 1312 return 0;
1216 } 1313 }
1217 1314
@@ -1226,52 +1323,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1226 1323
1227 intel_fb = to_intel_framebuffer(crtc->fb); 1324 intel_fb = to_intel_framebuffer(crtc->fb);
1228 obj = intel_fb->obj; 1325 obj = intel_fb->obj;
1229 obj_priv = obj->driver_private; 1326 obj_priv = to_intel_bo(obj);
1230
1231 switch (obj_priv->tiling_mode) {
1232 case I915_TILING_NONE:
1233 alignment = 64 * 1024;
1234 break;
1235 case I915_TILING_X:
1236 /* pin() will align the object as required by fence */
1237 alignment = 0;
1238 break;
1239 case I915_TILING_Y:
1240 /* FIXME: Is this true? */
1241 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1242 return -EINVAL;
1243 default:
1244 BUG();
1245 }
1246 1327
1247 mutex_lock(&dev->struct_mutex); 1328 mutex_lock(&dev->struct_mutex);
1248 ret = i915_gem_object_pin(obj, alignment); 1329 ret = intel_pin_and_fence_fb_obj(dev, obj);
1249 if (ret != 0) { 1330 if (ret != 0) {
1250 mutex_unlock(&dev->struct_mutex); 1331 mutex_unlock(&dev->struct_mutex);
1251 return ret; 1332 return ret;
1252 } 1333 }
1253 1334
1254 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 1335 ret = i915_gem_object_set_to_display_plane(obj);
1255 if (ret != 0) { 1336 if (ret != 0) {
1256 i915_gem_object_unpin(obj); 1337 i915_gem_object_unpin(obj);
1257 mutex_unlock(&dev->struct_mutex); 1338 mutex_unlock(&dev->struct_mutex);
1258 return ret; 1339 return ret;
1259 } 1340 }
1260 1341
1261 /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
1262 * whereas 965+ only requires a fence if using framebuffer compression.
1263 * For simplicity, we always install a fence as the cost is not that onerous.
1264 */
1265 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1266 obj_priv->tiling_mode != I915_TILING_NONE) {
1267 ret = i915_gem_object_get_fence_reg(obj);
1268 if (ret != 0) {
1269 i915_gem_object_unpin(obj);
1270 mutex_unlock(&dev->struct_mutex);
1271 return ret;
1272 }
1273 }
1274
1275 dspcntr = I915_READ(dspcntr_reg); 1342 dspcntr = I915_READ(dspcntr_reg);
1276 /* Mask out pixel format bits in case we change it */ 1343 /* Mask out pixel format bits in case we change it */
1277 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1344 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1354,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1287 break; 1354 break;
1288 case 24: 1355 case 24:
1289 case 32: 1356 case 32:
1290 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1357 if (crtc->fb->depth == 30)
1358 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1359 else
1360 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1291 break; 1361 break;
1292 default: 1362 default:
1293 DRM_ERROR("Unknown color depth\n"); 1363 DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1372,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1302 dspcntr &= ~DISPPLANE_TILED; 1372 dspcntr &= ~DISPPLANE_TILED;
1303 } 1373 }
1304 1374
1305 if (IS_IGDNG(dev)) 1375 if (HAS_PCH_SPLIT(dev))
1306 /* must disable */ 1376 /* must disable */
1307 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1377 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1308 1378
@@ -1311,7 +1381,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1311 Start = obj_priv->gtt_offset; 1381 Start = obj_priv->gtt_offset;
1312 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1382 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1313 1383
1314 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1384 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
1315 I915_WRITE(dspstride, crtc->fb->pitch); 1385 I915_WRITE(dspstride, crtc->fb->pitch);
1316 if (IS_I965G(dev)) { 1386 if (IS_I965G(dev)) {
1317 I915_WRITE(dspbase, Offset); 1387 I915_WRITE(dspbase, Offset);
@@ -1331,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1331 1401
1332 if (old_fb) { 1402 if (old_fb) {
1333 intel_fb = to_intel_framebuffer(old_fb); 1403 intel_fb = to_intel_framebuffer(old_fb);
1334 obj_priv = intel_fb->obj->driver_private; 1404 obj_priv = to_intel_bo(intel_fb->obj);
1335 i915_gem_object_unpin(intel_fb->obj); 1405 i915_gem_object_unpin(intel_fb->obj);
1336 } 1406 }
1337 intel_increase_pllclock(crtc, true); 1407 intel_increase_pllclock(crtc, true);
@@ -1363,7 +1433,7 @@ static void i915_disable_vga (struct drm_device *dev)
1363 u8 sr1; 1433 u8 sr1;
1364 u32 vga_reg; 1434 u32 vga_reg;
1365 1435
1366 if (IS_IGDNG(dev)) 1436 if (HAS_PCH_SPLIT(dev))
1367 vga_reg = CPU_VGACNTRL; 1437 vga_reg = CPU_VGACNTRL;
1368 else 1438 else
1369 vga_reg = VGACNTRL; 1439 vga_reg = VGACNTRL;
@@ -1379,19 +1449,19 @@ static void i915_disable_vga (struct drm_device *dev)
1379 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 1449 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1380} 1450}
1381 1451
1382static void igdng_disable_pll_edp (struct drm_crtc *crtc) 1452static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
1383{ 1453{
1384 struct drm_device *dev = crtc->dev; 1454 struct drm_device *dev = crtc->dev;
1385 struct drm_i915_private *dev_priv = dev->dev_private; 1455 struct drm_i915_private *dev_priv = dev->dev_private;
1386 u32 dpa_ctl; 1456 u32 dpa_ctl;
1387 1457
1388 DRM_DEBUG("\n"); 1458 DRM_DEBUG_KMS("\n");
1389 dpa_ctl = I915_READ(DP_A); 1459 dpa_ctl = I915_READ(DP_A);
1390 dpa_ctl &= ~DP_PLL_ENABLE; 1460 dpa_ctl &= ~DP_PLL_ENABLE;
1391 I915_WRITE(DP_A, dpa_ctl); 1461 I915_WRITE(DP_A, dpa_ctl);
1392} 1462}
1393 1463
1394static void igdng_enable_pll_edp (struct drm_crtc *crtc) 1464static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
1395{ 1465{
1396 struct drm_device *dev = crtc->dev; 1466 struct drm_device *dev = crtc->dev;
1397 struct drm_i915_private *dev_priv = dev->dev_private; 1467 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1474,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
1404} 1474}
1405 1475
1406 1476
1407static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) 1477static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1408{ 1478{
1409 struct drm_device *dev = crtc->dev; 1479 struct drm_device *dev = crtc->dev;
1410 struct drm_i915_private *dev_priv = dev->dev_private; 1480 struct drm_i915_private *dev_priv = dev->dev_private;
1411 u32 dpa_ctl; 1481 u32 dpa_ctl;
1412 1482
1413 DRM_DEBUG("eDP PLL enable for clock %d\n", clock); 1483 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
1414 dpa_ctl = I915_READ(DP_A); 1484 dpa_ctl = I915_READ(DP_A);
1415 dpa_ctl &= ~DP_PLL_FREQ_MASK; 1485 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1416 1486
@@ -1440,7 +1510,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
1440 udelay(500); 1510 udelay(500);
1441} 1511}
1442 1512
1443static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) 1513static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1444{ 1514{
1445 struct drm_device *dev = crtc->dev; 1515 struct drm_device *dev = crtc->dev;
1446 struct drm_i915_private *dev_priv = dev->dev_private; 1516 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1473,6 +1543,10 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1473 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1543 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1474 u32 temp; 1544 u32 temp;
1475 int tries = 5, j, n; 1545 int tries = 5, j, n;
1546 u32 pipe_bpc;
1547
1548 temp = I915_READ(pipeconf_reg);
1549 pipe_bpc = temp & PIPE_BPC_MASK;
1476 1550
1477 /* XXX: When our outputs are all unaware of DPMS modes other than off 1551 /* XXX: When our outputs are all unaware of DPMS modes other than off
1478 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1552 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1481,10 +1555,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1481 case DRM_MODE_DPMS_ON: 1555 case DRM_MODE_DPMS_ON:
1482 case DRM_MODE_DPMS_STANDBY: 1556 case DRM_MODE_DPMS_STANDBY:
1483 case DRM_MODE_DPMS_SUSPEND: 1557 case DRM_MODE_DPMS_SUSPEND:
1484 DRM_DEBUG("crtc %d dpms on\n", pipe); 1558 DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
1559
1560 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1561 temp = I915_READ(PCH_LVDS);
1562 if ((temp & LVDS_PORT_EN) == 0) {
1563 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
1564 POSTING_READ(PCH_LVDS);
1565 }
1566 }
1567
1485 if (HAS_eDP) { 1568 if (HAS_eDP) {
1486 /* enable eDP PLL */ 1569 /* enable eDP PLL */
1487 igdng_enable_pll_edp(crtc); 1570 ironlake_enable_pll_edp(crtc);
1488 } else { 1571 } else {
1489 /* enable PCH DPLL */ 1572 /* enable PCH DPLL */
1490 temp = I915_READ(pch_dpll_reg); 1573 temp = I915_READ(pch_dpll_reg);
@@ -1495,13 +1578,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1495 1578
1496 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1579 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1497 temp = I915_READ(fdi_rx_reg); 1580 temp = I915_READ(fdi_rx_reg);
1581 /*
1582 * make the BPC in FDI Rx be consistent with that in
1583 * pipeconf reg.
1584 */
1585 temp &= ~(0x7 << 16);
1586 temp |= (pipe_bpc << 11);
1498 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1587 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
1499 FDI_SEL_PCDCLK | 1588 FDI_SEL_PCDCLK |
1500 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1589 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
1501 I915_READ(fdi_rx_reg); 1590 I915_READ(fdi_rx_reg);
1502 udelay(200); 1591 udelay(200);
1503 1592
1504 /* Enable CPU FDI TX PLL, always on for IGDNG */ 1593 /* Enable CPU FDI TX PLL, always on for Ironlake */
1505 temp = I915_READ(fdi_tx_reg); 1594 temp = I915_READ(fdi_tx_reg);
1506 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 1595 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1507 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 1596 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1657,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1568 udelay(150); 1657 udelay(150);
1569 1658
1570 temp = I915_READ(fdi_rx_iir_reg); 1659 temp = I915_READ(fdi_rx_iir_reg);
1571 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1660 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1572 1661
1573 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1662 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1574 for (j = 0; j < tries; j++) { 1663 for (j = 0; j < tries; j++) {
1575 temp = I915_READ(fdi_rx_iir_reg); 1664 temp = I915_READ(fdi_rx_iir_reg);
1576 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1665 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1666 temp);
1577 if (temp & FDI_RX_BIT_LOCK) 1667 if (temp & FDI_RX_BIT_LOCK)
1578 break; 1668 break;
1579 udelay(200); 1669 udelay(200);
@@ -1582,11 +1672,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1582 I915_WRITE(fdi_rx_iir_reg, 1672 I915_WRITE(fdi_rx_iir_reg,
1583 temp | FDI_RX_BIT_LOCK); 1673 temp | FDI_RX_BIT_LOCK);
1584 else 1674 else
1585 DRM_DEBUG("train 1 fail\n"); 1675 DRM_DEBUG_KMS("train 1 fail\n");
1586 } else { 1676 } else {
1587 I915_WRITE(fdi_rx_iir_reg, 1677 I915_WRITE(fdi_rx_iir_reg,
1588 temp | FDI_RX_BIT_LOCK); 1678 temp | FDI_RX_BIT_LOCK);
1589 DRM_DEBUG("train 1 ok 2!\n"); 1679 DRM_DEBUG_KMS("train 1 ok 2!\n");
1590 } 1680 }
1591 temp = I915_READ(fdi_tx_reg); 1681 temp = I915_READ(fdi_tx_reg);
1592 temp &= ~FDI_LINK_TRAIN_NONE; 1682 temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1691,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1601 udelay(150); 1691 udelay(150);
1602 1692
1603 temp = I915_READ(fdi_rx_iir_reg); 1693 temp = I915_READ(fdi_rx_iir_reg);
1604 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1694 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1605 1695
1606 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1696 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1607 for (j = 0; j < tries; j++) { 1697 for (j = 0; j < tries; j++) {
1608 temp = I915_READ(fdi_rx_iir_reg); 1698 temp = I915_READ(fdi_rx_iir_reg);
1609 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1699 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1700 temp);
1610 if (temp & FDI_RX_SYMBOL_LOCK) 1701 if (temp & FDI_RX_SYMBOL_LOCK)
1611 break; 1702 break;
1612 udelay(200); 1703 udelay(200);
@@ -1614,15 +1705,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1614 if (j != tries) { 1705 if (j != tries) {
1615 I915_WRITE(fdi_rx_iir_reg, 1706 I915_WRITE(fdi_rx_iir_reg,
1616 temp | FDI_RX_SYMBOL_LOCK); 1707 temp | FDI_RX_SYMBOL_LOCK);
1617 DRM_DEBUG("train 2 ok 1!\n"); 1708 DRM_DEBUG_KMS("train 2 ok 1!\n");
1618 } else 1709 } else
1619 DRM_DEBUG("train 2 fail\n"); 1710 DRM_DEBUG_KMS("train 2 fail\n");
1620 } else { 1711 } else {
1621 I915_WRITE(fdi_rx_iir_reg, 1712 I915_WRITE(fdi_rx_iir_reg,
1622 temp | FDI_RX_SYMBOL_LOCK); 1713 temp | FDI_RX_SYMBOL_LOCK);
1623 DRM_DEBUG("train 2 ok 2!\n"); 1714 DRM_DEBUG_KMS("train 2 ok 2!\n");
1624 } 1715 }
1625 DRM_DEBUG("train done\n"); 1716 DRM_DEBUG_KMS("train done\n");
1626 1717
1627 /* set transcoder timing */ 1718 /* set transcoder timing */
1628 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1719 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1635,6 +1726,12 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1635 1726
1636 /* enable PCH transcoder */ 1727 /* enable PCH transcoder */
1637 temp = I915_READ(transconf_reg); 1728 temp = I915_READ(transconf_reg);
1729 /*
1730 * make the BPC in transcoder be consistent with
1731 * that in pipeconf reg.
1732 */
1733 temp &= ~PIPE_BPC_MASK;
1734 temp |= pipe_bpc;
1638 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1735 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1639 I915_READ(transconf_reg); 1736 I915_READ(transconf_reg);
1640 1737
@@ -1664,10 +1761,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1664 1761
1665 break; 1762 break;
1666 case DRM_MODE_DPMS_OFF: 1763 case DRM_MODE_DPMS_OFF:
1667 DRM_DEBUG("crtc %d dpms off\n", pipe); 1764 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1668
1669 i915_disable_vga(dev);
1670 1765
1766 drm_vblank_off(dev, pipe);
1671 /* Disable display plane */ 1767 /* Disable display plane */
1672 temp = I915_READ(dspcntr_reg); 1768 temp = I915_READ(dspcntr_reg);
1673 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1769 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1677,6 +1773,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1677 I915_READ(dspbase_reg); 1773 I915_READ(dspbase_reg);
1678 } 1774 }
1679 1775
1776 i915_disable_vga(dev);
1777
1680 /* disable cpu pipe, disable after all planes disabled */ 1778 /* disable cpu pipe, disable after all planes disabled */
1681 temp = I915_READ(pipeconf_reg); 1779 temp = I915_READ(pipeconf_reg);
1682 if ((temp & PIPEACONF_ENABLE) != 0) { 1780 if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1788,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1690 udelay(500); 1788 udelay(500);
1691 continue; 1789 continue;
1692 } else { 1790 } else {
1693 DRM_DEBUG("pipe %d off delay\n", pipe); 1791 DRM_DEBUG_KMS("pipe %d off delay\n",
1792 pipe);
1694 break; 1793 break;
1695 } 1794 }
1696 } 1795 }
1697 } else 1796 } else
1698 DRM_DEBUG("crtc %d is disabled\n", pipe); 1797 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1699 1798
1700 if (HAS_eDP) { 1799 udelay(100);
1701 igdng_disable_pll_edp(crtc); 1800
1801 /* Disable PF */
1802 temp = I915_READ(pf_ctl_reg);
1803 if ((temp & PF_ENABLE) != 0) {
1804 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1805 I915_READ(pf_ctl_reg);
1702 } 1806 }
1807 I915_WRITE(pf_win_size, 0);
1703 1808
1704 /* disable CPU FDI tx and PCH FDI rx */ 1809 /* disable CPU FDI tx and PCH FDI rx */
1705 temp = I915_READ(fdi_tx_reg); 1810 temp = I915_READ(fdi_tx_reg);
@@ -1707,6 +1812,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1707 I915_READ(fdi_tx_reg); 1812 I915_READ(fdi_tx_reg);
1708 1813
1709 temp = I915_READ(fdi_rx_reg); 1814 temp = I915_READ(fdi_rx_reg);
1815 /* BPC in FDI rx is consistent with that in pipeconf */
1816 temp &= ~(0x07 << 16);
1817 temp |= (pipe_bpc << 11);
1710 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1818 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1711 I915_READ(fdi_rx_reg); 1819 I915_READ(fdi_rx_reg);
1712 1820
@@ -1725,6 +1833,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1725 1833
1726 udelay(100); 1834 udelay(100);
1727 1835
1836 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1837 temp = I915_READ(PCH_LVDS);
1838 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
1839 I915_READ(PCH_LVDS);
1840 udelay(100);
1841 }
1842
1728 /* disable PCH transcoder */ 1843 /* disable PCH transcoder */
1729 temp = I915_READ(transconf_reg); 1844 temp = I915_READ(transconf_reg);
1730 if ((temp & TRANS_ENABLE) != 0) { 1845 if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,11 +1853,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1738 udelay(500); 1853 udelay(500);
1739 continue; 1854 continue;
1740 } else { 1855 } else {
1741 DRM_DEBUG("transcoder %d off delay\n", pipe); 1856 DRM_DEBUG_KMS("transcoder %d off "
1857 "delay\n", pipe);
1742 break; 1858 break;
1743 } 1859 }
1744 } 1860 }
1745 } 1861 }
1862 temp = I915_READ(transconf_reg);
1863 /* BPC in transcoder is consistent with that in pipeconf */
1864 temp &= ~PIPE_BPC_MASK;
1865 temp |= pipe_bpc;
1866 I915_WRITE(transconf_reg, temp);
1867 I915_READ(transconf_reg);
1868 udelay(100);
1746 1869
1747 /* disable PCH DPLL */ 1870 /* disable PCH DPLL */
1748 temp = I915_READ(pch_dpll_reg); 1871 temp = I915_READ(pch_dpll_reg);
@@ -1751,14 +1874,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1751 I915_READ(pch_dpll_reg); 1874 I915_READ(pch_dpll_reg);
1752 } 1875 }
1753 1876
1754 temp = I915_READ(fdi_rx_reg); 1877 if (HAS_eDP) {
1755 if ((temp & FDI_RX_PLL_ENABLE) != 0) { 1878 ironlake_disable_pll_edp(crtc);
1756 temp &= ~FDI_SEL_PCDCLK;
1757 temp &= ~FDI_RX_PLL_ENABLE;
1758 I915_WRITE(fdi_rx_reg, temp);
1759 I915_READ(fdi_rx_reg);
1760 } 1879 }
1761 1880
1881 temp = I915_READ(fdi_rx_reg);
1882 temp &= ~FDI_SEL_PCDCLK;
1883 I915_WRITE(fdi_rx_reg, temp);
1884 I915_READ(fdi_rx_reg);
1885
1886 temp = I915_READ(fdi_rx_reg);
1887 temp &= ~FDI_RX_PLL_ENABLE;
1888 I915_WRITE(fdi_rx_reg, temp);
1889 I915_READ(fdi_rx_reg);
1890
1762 /* Disable CPU FDI TX PLL */ 1891 /* Disable CPU FDI TX PLL */
1763 temp = I915_READ(fdi_tx_reg); 1892 temp = I915_READ(fdi_tx_reg);
1764 if ((temp & FDI_TX_PLL_ENABLE) != 0) { 1893 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1896,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1767 udelay(100); 1896 udelay(100);
1768 } 1897 }
1769 1898
1770 /* Disable PF */
1771 temp = I915_READ(pf_ctl_reg);
1772 if ((temp & PF_ENABLE) != 0) {
1773 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1774 I915_READ(pf_ctl_reg);
1775 }
1776 I915_WRITE(pf_win_size, 0);
1777
1778 /* Wait for the clocks to turn off. */ 1899 /* Wait for the clocks to turn off. */
1779 udelay(150); 1900 udelay(100);
1780 break; 1901 break;
1781 } 1902 }
1782} 1903}
1783 1904
1905static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
1906{
1907 struct intel_overlay *overlay;
1908 int ret;
1909
1910 if (!enable && intel_crtc->overlay) {
1911 overlay = intel_crtc->overlay;
1912 mutex_lock(&overlay->dev->struct_mutex);
1913 for (;;) {
1914 ret = intel_overlay_switch_off(overlay);
1915 if (ret == 0)
1916 break;
1917
1918 ret = intel_overlay_recover_from_interrupt(overlay, 0);
1919 if (ret != 0) {
1920 /* overlay doesn't react anymore. Usually
1921 * results in a black screen and an unkillable
1922 * X server. */
1923 BUG();
1924 overlay->hw_wedged = HW_WEDGED;
1925 break;
1926 }
1927 }
1928 mutex_unlock(&overlay->dev->struct_mutex);
1929 }
1930 /* Let userspace switch the overlay on again. In most cases userspace
1931 * has to recompute where to put it anyway. */
1932
1933 return;
1934}
1935
1784static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 1936static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1785{ 1937{
1786 struct drm_device *dev = crtc->dev; 1938 struct drm_device *dev = crtc->dev;
@@ -1839,12 +1991,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1839 intel_update_fbc(crtc, &crtc->mode); 1991 intel_update_fbc(crtc, &crtc->mode);
1840 1992
1841 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1993 /* Give the overlay scaler a chance to enable if it's on this pipe */
1842 //intel_crtc_dpms_video(crtc, true); TODO 1994 intel_crtc_dpms_overlay(intel_crtc, true);
1843 break; 1995 break;
1844 case DRM_MODE_DPMS_OFF: 1996 case DRM_MODE_DPMS_OFF:
1845 intel_update_watermarks(dev); 1997 intel_update_watermarks(dev);
1998
1846 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1999 /* Give the overlay scaler a chance to disable if it's on this pipe */
1847 //intel_crtc_dpms_video(crtc, FALSE); TODO 2000 intel_crtc_dpms_overlay(intel_crtc, false);
2001 drm_vblank_off(dev, pipe);
1848 2002
1849 if (dev_priv->cfb_plane == plane && 2003 if (dev_priv->cfb_plane == plane &&
1850 dev_priv->display.disable_fbc) 2004 dev_priv->display.disable_fbc)
@@ -1963,7 +2117,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1963 struct drm_display_mode *adjusted_mode) 2117 struct drm_display_mode *adjusted_mode)
1964{ 2118{
1965 struct drm_device *dev = crtc->dev; 2119 struct drm_device *dev = crtc->dev;
1966 if (IS_IGDNG(dev)) { 2120 if (HAS_PCH_SPLIT(dev)) {
1967 /* FDI link clock is fixed at 2.7G */ 2121 /* FDI link clock is fixed at 2.7G */
1968 if (mode->clock * 3 > 27000 * 4) 2122 if (mode->clock * 3 > 27000 * 4)
1969 return MODE_CLOCK_HIGH; 2123 return MODE_CLOCK_HIGH;
@@ -2039,7 +2193,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
2039 * Return the pipe currently connected to the panel fitter, 2193 * Return the pipe currently connected to the panel fitter,
2040 * or -1 if the panel fitter is not present or not in use 2194 * or -1 if the panel fitter is not present or not in use
2041 */ 2195 */
2042static int intel_panel_fitter_pipe (struct drm_device *dev) 2196int intel_panel_fitter_pipe (struct drm_device *dev)
2043{ 2197{
2044 struct drm_i915_private *dev_priv = dev->dev_private; 2198 struct drm_i915_private *dev_priv = dev->dev_private;
2045 u32 pfit_control; 2199 u32 pfit_control;
@@ -2083,9 +2237,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2083#define LINK_N 0x80000 2237#define LINK_N 0x80000
2084 2238
2085static void 2239static void
2086igdng_compute_m_n(int bits_per_pixel, int nlanes, 2240ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2087 int pixel_clock, int link_clock, 2241 int link_clock, struct fdi_m_n *m_n)
2088 struct fdi_m_n *m_n)
2089{ 2242{
2090 u64 temp; 2243 u64 temp;
2091 2244
@@ -2113,34 +2266,34 @@ struct intel_watermark_params {
2113 unsigned long cacheline_size; 2266 unsigned long cacheline_size;
2114}; 2267};
2115 2268
2116/* IGD has different values for various configs */ 2269/* Pineview has different values for various configs */
2117static struct intel_watermark_params igd_display_wm = { 2270static struct intel_watermark_params pineview_display_wm = {
2118 IGD_DISPLAY_FIFO, 2271 PINEVIEW_DISPLAY_FIFO,
2119 IGD_MAX_WM, 2272 PINEVIEW_MAX_WM,
2120 IGD_DFT_WM, 2273 PINEVIEW_DFT_WM,
2121 IGD_GUARD_WM, 2274 PINEVIEW_GUARD_WM,
2122 IGD_FIFO_LINE_SIZE 2275 PINEVIEW_FIFO_LINE_SIZE
2123}; 2276};
2124static struct intel_watermark_params igd_display_hplloff_wm = { 2277static struct intel_watermark_params pineview_display_hplloff_wm = {
2125 IGD_DISPLAY_FIFO, 2278 PINEVIEW_DISPLAY_FIFO,
2126 IGD_MAX_WM, 2279 PINEVIEW_MAX_WM,
2127 IGD_DFT_HPLLOFF_WM, 2280 PINEVIEW_DFT_HPLLOFF_WM,
2128 IGD_GUARD_WM, 2281 PINEVIEW_GUARD_WM,
2129 IGD_FIFO_LINE_SIZE 2282 PINEVIEW_FIFO_LINE_SIZE
2130}; 2283};
2131static struct intel_watermark_params igd_cursor_wm = { 2284static struct intel_watermark_params pineview_cursor_wm = {
2132 IGD_CURSOR_FIFO, 2285 PINEVIEW_CURSOR_FIFO,
2133 IGD_CURSOR_MAX_WM, 2286 PINEVIEW_CURSOR_MAX_WM,
2134 IGD_CURSOR_DFT_WM, 2287 PINEVIEW_CURSOR_DFT_WM,
2135 IGD_CURSOR_GUARD_WM, 2288 PINEVIEW_CURSOR_GUARD_WM,
2136 IGD_FIFO_LINE_SIZE, 2289 PINEVIEW_FIFO_LINE_SIZE,
2137}; 2290};
2138static struct intel_watermark_params igd_cursor_hplloff_wm = { 2291static struct intel_watermark_params pineview_cursor_hplloff_wm = {
2139 IGD_CURSOR_FIFO, 2292 PINEVIEW_CURSOR_FIFO,
2140 IGD_CURSOR_MAX_WM, 2293 PINEVIEW_CURSOR_MAX_WM,
2141 IGD_CURSOR_DFT_WM, 2294 PINEVIEW_CURSOR_DFT_WM,
2142 IGD_CURSOR_GUARD_WM, 2295 PINEVIEW_CURSOR_GUARD_WM,
2143 IGD_FIFO_LINE_SIZE 2296 PINEVIEW_FIFO_LINE_SIZE
2144}; 2297};
2145static struct intel_watermark_params g4x_wm_info = { 2298static struct intel_watermark_params g4x_wm_info = {
2146 G4X_FIFO_SIZE, 2299 G4X_FIFO_SIZE,
@@ -2213,11 +2366,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2213 1000; 2366 1000;
2214 entries_required /= wm->cacheline_size; 2367 entries_required /= wm->cacheline_size;
2215 2368
2216 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); 2369 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2217 2370
2218 wm_size = wm->fifo_size - (entries_required + wm->guard_size); 2371 wm_size = wm->fifo_size - (entries_required + wm->guard_size);
2219 2372
2220 DRM_DEBUG("FIFO watermark level: %d\n", wm_size); 2373 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
2221 2374
2222 /* Don't promote wm_size to unsigned... */ 2375 /* Don't promote wm_size to unsigned... */
2223 if (wm_size > (long)wm->max_wm) 2376 if (wm_size > (long)wm->max_wm)
@@ -2279,50 +2432,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2279 return latency; 2432 return latency;
2280 } 2433 }
2281 2434
2282 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2435 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2283 2436
2284 return NULL; 2437 return NULL;
2285} 2438}
2286 2439
2287static void igd_disable_cxsr(struct drm_device *dev) 2440static void pineview_disable_cxsr(struct drm_device *dev)
2288{ 2441{
2289 struct drm_i915_private *dev_priv = dev->dev_private; 2442 struct drm_i915_private *dev_priv = dev->dev_private;
2290 u32 reg; 2443 u32 reg;
2291 2444
2292 /* deactivate cxsr */ 2445 /* deactivate cxsr */
2293 reg = I915_READ(DSPFW3); 2446 reg = I915_READ(DSPFW3);
2294 reg &= ~(IGD_SELF_REFRESH_EN); 2447 reg &= ~(PINEVIEW_SELF_REFRESH_EN);
2295 I915_WRITE(DSPFW3, reg); 2448 I915_WRITE(DSPFW3, reg);
2296 DRM_INFO("Big FIFO is disabled\n"); 2449 DRM_INFO("Big FIFO is disabled\n");
2297} 2450}
2298 2451
2299static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, 2452static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2300 int pixel_size) 2453 int pixel_size)
2301{ 2454{
2302 struct drm_i915_private *dev_priv = dev->dev_private; 2455 struct drm_i915_private *dev_priv = dev->dev_private;
2303 u32 reg; 2456 u32 reg;
2304 unsigned long wm; 2457 unsigned long wm;
2305 struct cxsr_latency *latency; 2458 struct cxsr_latency *latency;
2306 2459
2307 latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, 2460 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2308 dev_priv->mem_freq); 2461 dev_priv->mem_freq);
2309 if (!latency) { 2462 if (!latency) {
2310 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2463 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2311 igd_disable_cxsr(dev); 2464 pineview_disable_cxsr(dev);
2312 return; 2465 return;
2313 } 2466 }
2314 2467
2315 /* Display SR */ 2468 /* Display SR */
2316 wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, 2469 wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
2317 latency->display_sr); 2470 latency->display_sr);
2318 reg = I915_READ(DSPFW1); 2471 reg = I915_READ(DSPFW1);
2319 reg &= 0x7fffff; 2472 reg &= 0x7fffff;
2320 reg |= wm << 23; 2473 reg |= wm << 23;
2321 I915_WRITE(DSPFW1, reg); 2474 I915_WRITE(DSPFW1, reg);
2322 DRM_DEBUG("DSPFW1 register is %x\n", reg); 2475 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2323 2476
2324 /* cursor SR */ 2477 /* cursor SR */
2325 wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, 2478 wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
2326 latency->cursor_sr); 2479 latency->cursor_sr);
2327 reg = I915_READ(DSPFW3); 2480 reg = I915_READ(DSPFW3);
2328 reg &= ~(0x3f << 24); 2481 reg &= ~(0x3f << 24);
@@ -2330,7 +2483,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2330 I915_WRITE(DSPFW3, reg); 2483 I915_WRITE(DSPFW3, reg);
2331 2484
2332 /* Display HPLL off SR */ 2485 /* Display HPLL off SR */
2333 wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, 2486 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
2334 latency->display_hpll_disable, I915_FIFO_LINE_SIZE); 2487 latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
2335 reg = I915_READ(DSPFW3); 2488 reg = I915_READ(DSPFW3);
2336 reg &= 0xfffffe00; 2489 reg &= 0xfffffe00;
@@ -2338,17 +2491,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2338 I915_WRITE(DSPFW3, reg); 2491 I915_WRITE(DSPFW3, reg);
2339 2492
2340 /* cursor HPLL off SR */ 2493 /* cursor HPLL off SR */
2341 wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, 2494 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
2342 latency->cursor_hpll_disable); 2495 latency->cursor_hpll_disable);
2343 reg = I915_READ(DSPFW3); 2496 reg = I915_READ(DSPFW3);
2344 reg &= ~(0x3f << 16); 2497 reg &= ~(0x3f << 16);
2345 reg |= (wm & 0x3f) << 16; 2498 reg |= (wm & 0x3f) << 16;
2346 I915_WRITE(DSPFW3, reg); 2499 I915_WRITE(DSPFW3, reg);
2347 DRM_DEBUG("DSPFW3 register is %x\n", reg); 2500 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2348 2501
2349 /* activate cxsr */ 2502 /* activate cxsr */
2350 reg = I915_READ(DSPFW3); 2503 reg = I915_READ(DSPFW3);
2351 reg |= IGD_SELF_REFRESH_EN; 2504 reg |= PINEVIEW_SELF_REFRESH_EN;
2352 I915_WRITE(DSPFW3, reg); 2505 I915_WRITE(DSPFW3, reg);
2353 2506
2354 DRM_INFO("Big FIFO is enabled\n"); 2507 DRM_INFO("Big FIFO is enabled\n");
@@ -2370,7 +2523,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2370 * A value of 5us seems to be a good balance; safe for very low end 2523 * A value of 5us seems to be a good balance; safe for very low end
2371 * platforms but not overly aggressive on lower latency configs. 2524 * platforms but not overly aggressive on lower latency configs.
2372 */ 2525 */
2373const static int latency_ns = 5000; 2526static const int latency_ns = 5000;
2374 2527
2375static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2528static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2376{ 2529{
@@ -2384,8 +2537,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2384 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - 2537 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2385 (dsparb & 0x7f); 2538 (dsparb & 0x7f);
2386 2539
2387 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2540 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2388 size); 2541 plane ? "B" : "A", size);
2389 2542
2390 return size; 2543 return size;
2391} 2544}
@@ -2403,8 +2556,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2403 (dsparb & 0x1ff); 2556 (dsparb & 0x1ff);
2404 size >>= 1; /* Convert to cachelines */ 2557 size >>= 1; /* Convert to cachelines */
2405 2558
2406 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2559 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2407 size); 2560 plane ? "B" : "A", size);
2408 2561
2409 return size; 2562 return size;
2410} 2563}
@@ -2418,7 +2571,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
2418 size = dsparb & 0x7f; 2571 size = dsparb & 0x7f;
2419 size >>= 2; /* Convert to cachelines */ 2572 size >>= 2; /* Convert to cachelines */
2420 2573
2421 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2574 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2575 plane ? "B" : "A",
2422 size); 2576 size);
2423 2577
2424 return size; 2578 return size;
@@ -2433,8 +2587,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2433 size = dsparb & 0x7f; 2587 size = dsparb & 0x7f;
2434 size >>= 1; /* Convert to cachelines */ 2588 size >>= 1; /* Convert to cachelines */
2435 2589
2436 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2590 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2437 size); 2591 plane ? "B" : "A", size);
2438 2592
2439 return size; 2593 return size;
2440} 2594}
@@ -2480,7 +2634,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2480 /* Calc sr entries for one plane configs */ 2634 /* Calc sr entries for one plane configs */
2481 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2635 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2482 /* self-refresh has much higher latency */ 2636 /* self-refresh has much higher latency */
2483 const static int sr_latency_ns = 12000; 2637 static const int sr_latency_ns = 12000;
2484 2638
2485 sr_clock = planea_clock ? planea_clock : planeb_clock; 2639 sr_clock = planea_clock ? planea_clock : planeb_clock;
2486 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2640 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2491,6 +2645,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2491 sr_entries = roundup(sr_entries / cacheline_size, 1); 2645 sr_entries = roundup(sr_entries / cacheline_size, 1);
2492 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2646 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2493 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2647 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2648 } else {
2649 /* Turn off self refresh if both pipes are enabled */
2650 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2651 & ~FW_BLC_SELF_EN);
2494 } 2652 }
2495 2653
2496 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2654 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2509,15 +2667,43 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2509 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 2667 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2510} 2668}
2511 2669
2512static void i965_update_wm(struct drm_device *dev, int unused, int unused2, 2670static void i965_update_wm(struct drm_device *dev, int planea_clock,
2513 int unused3, int unused4) 2671 int planeb_clock, int sr_hdisplay, int pixel_size)
2514{ 2672{
2515 struct drm_i915_private *dev_priv = dev->dev_private; 2673 struct drm_i915_private *dev_priv = dev->dev_private;
2674 unsigned long line_time_us;
2675 int sr_clock, sr_entries, srwm = 1;
2676
2677 /* Calc sr entries for one plane configs */
2678 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2679 /* self-refresh has much higher latency */
2680 static const int sr_latency_ns = 12000;
2516 2681
2517 DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); 2682 sr_clock = planea_clock ? planea_clock : planeb_clock;
2683 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2684
2685 /* Use ns/us then divide to preserve precision */
2686 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2687 pixel_size * sr_hdisplay) / 1000;
2688 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
2689 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2690 srwm = I945_FIFO_SIZE - sr_entries;
2691 if (srwm < 0)
2692 srwm = 1;
2693 srwm &= 0x3f;
2694 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2695 } else {
2696 /* Turn off self refresh if both pipes are enabled */
2697 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2698 & ~FW_BLC_SELF_EN);
2699 }
2700
2701 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2702 srwm);
2518 2703
2519 /* 965 has limitations... */ 2704 /* 965 has limitations... */
2520 I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); 2705 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
2706 (8 << 0));
2521 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 2707 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
2522} 2708}
2523 2709
@@ -2553,7 +2739,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2553 pixel_size, latency_ns); 2739 pixel_size, latency_ns);
2554 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, 2740 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
2555 pixel_size, latency_ns); 2741 pixel_size, latency_ns);
2556 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 2742 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2557 2743
2558 /* 2744 /*
2559 * Overlay gets an aggressive default since video jitter is bad. 2745 * Overlay gets an aggressive default since video jitter is bad.
@@ -2564,7 +2750,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2564 if (HAS_FW_BLC(dev) && sr_hdisplay && 2750 if (HAS_FW_BLC(dev) && sr_hdisplay &&
2565 (!planea_clock || !planeb_clock)) { 2751 (!planea_clock || !planeb_clock)) {
2566 /* self-refresh has much higher latency */ 2752 /* self-refresh has much higher latency */
2567 const static int sr_latency_ns = 6000; 2753 static const int sr_latency_ns = 6000;
2568 2754
2569 sr_clock = planea_clock ? planea_clock : planeb_clock; 2755 sr_clock = planea_clock ? planea_clock : planeb_clock;
2570 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2756 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2573,14 +2759,29 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2573 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 2759 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2574 pixel_size * sr_hdisplay) / 1000; 2760 pixel_size * sr_hdisplay) / 1000;
2575 sr_entries = roundup(sr_entries / cacheline_size, 1); 2761 sr_entries = roundup(sr_entries / cacheline_size, 1);
2576 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2762 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
2577 srwm = total_size - sr_entries; 2763 srwm = total_size - sr_entries;
2578 if (srwm < 0) 2764 if (srwm < 0)
2579 srwm = 1; 2765 srwm = 1;
2580 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2766
2767 if (IS_I945G(dev) || IS_I945GM(dev))
2768 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2769 else if (IS_I915GM(dev)) {
2770 /* 915M has a smaller SRWM field */
2771 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2772 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2773 }
2774 } else {
2775 /* Turn off self refresh if both pipes are enabled */
2776 if (IS_I945G(dev) || IS_I945GM(dev)) {
2777 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2778 & ~FW_BLC_SELF_EN);
2779 } else if (IS_I915GM(dev)) {
2780 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
2781 }
2581 } 2782 }
2582 2783
2583 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2784 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2584 planea_wm, planeb_wm, cwm, srwm); 2785 planea_wm, planeb_wm, cwm, srwm);
2585 2786
2586 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 2787 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2808,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2607 pixel_size, latency_ns); 2808 pixel_size, latency_ns);
2608 fwater_lo |= (3<<8) | planea_wm; 2809 fwater_lo |= (3<<8) | planea_wm;
2609 2810
2610 DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); 2811 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2611 2812
2612 I915_WRITE(FW_BLC, fwater_lo); 2813 I915_WRITE(FW_BLC, fwater_lo);
2613} 2814}
@@ -2661,11 +2862,11 @@ static void intel_update_watermarks(struct drm_device *dev)
2661 if (crtc->enabled) { 2862 if (crtc->enabled) {
2662 enabled++; 2863 enabled++;
2663 if (intel_crtc->plane == 0) { 2864 if (intel_crtc->plane == 0) {
2664 DRM_DEBUG("plane A (pipe %d) clock: %d\n", 2865 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
2665 intel_crtc->pipe, crtc->mode.clock); 2866 intel_crtc->pipe, crtc->mode.clock);
2666 planea_clock = crtc->mode.clock; 2867 planea_clock = crtc->mode.clock;
2667 } else { 2868 } else {
2668 DRM_DEBUG("plane B (pipe %d) clock: %d\n", 2869 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
2669 intel_crtc->pipe, crtc->mode.clock); 2870 intel_crtc->pipe, crtc->mode.clock);
2670 planeb_clock = crtc->mode.clock; 2871 planeb_clock = crtc->mode.clock;
2671 } 2872 }
@@ -2682,10 +2883,10 @@ static void intel_update_watermarks(struct drm_device *dev)
2682 return; 2883 return;
2683 2884
2684 /* Single plane configs can enable self refresh */ 2885 /* Single plane configs can enable self refresh */
2685 if (enabled == 1 && IS_IGD(dev)) 2886 if (enabled == 1 && IS_PINEVIEW(dev))
2686 igd_enable_cxsr(dev, sr_clock, pixel_size); 2887 pineview_enable_cxsr(dev, sr_clock, pixel_size);
2687 else if (IS_IGD(dev)) 2888 else if (IS_PINEVIEW(dev))
2688 igd_disable_cxsr(dev); 2889 pineview_disable_cxsr(dev);
2689 2890
2690 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 2891 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2691 sr_hdisplay, pixel_size); 2892 sr_hdisplay, pixel_size);
@@ -2716,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2716 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; 2917 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
2717 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; 2918 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
2718 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2919 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
2719 int refclk, num_outputs = 0; 2920 int refclk, num_connectors = 0;
2720 intel_clock_t clock, reduced_clock; 2921 intel_clock_t clock, reduced_clock;
2721 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 2922 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
2722 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 2923 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2742,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2742 drm_vblank_pre_modeset(dev, pipe); 2943 drm_vblank_pre_modeset(dev, pipe);
2743 2944
2744 list_for_each_entry(connector, &mode_config->connector_list, head) { 2945 list_for_each_entry(connector, &mode_config->connector_list, head) {
2745 struct intel_output *intel_output = to_intel_output(connector); 2946 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2746 2947
2747 if (!connector->encoder || connector->encoder->crtc != crtc) 2948 if (!connector->encoder || connector->encoder->crtc != crtc)
2748 continue; 2949 continue;
2749 2950
2750 switch (intel_output->type) { 2951 switch (intel_encoder->type) {
2751 case INTEL_OUTPUT_LVDS: 2952 case INTEL_OUTPUT_LVDS:
2752 is_lvds = true; 2953 is_lvds = true;
2753 break; 2954 break;
2754 case INTEL_OUTPUT_SDVO: 2955 case INTEL_OUTPUT_SDVO:
2755 case INTEL_OUTPUT_HDMI: 2956 case INTEL_OUTPUT_HDMI:
2756 is_sdvo = true; 2957 is_sdvo = true;
2757 if (intel_output->needs_tv_clock) 2958 if (intel_encoder->needs_tv_clock)
2758 is_tv = true; 2959 is_tv = true;
2759 break; 2960 break;
2760 case INTEL_OUTPUT_DVO: 2961 case INTEL_OUTPUT_DVO:
@@ -2774,15 +2975,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2774 break; 2975 break;
2775 } 2976 }
2776 2977
2777 num_outputs++; 2978 num_connectors++;
2778 } 2979 }
2779 2980
2780 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { 2981 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
2781 refclk = dev_priv->lvds_ssc_freq * 1000; 2982 refclk = dev_priv->lvds_ssc_freq * 1000;
2782 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 2983 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
2984 refclk / 1000);
2783 } else if (IS_I9XX(dev)) { 2985 } else if (IS_I9XX(dev)) {
2784 refclk = 96000; 2986 refclk = 96000;
2785 if (IS_IGDNG(dev)) 2987 if (HAS_PCH_SPLIT(dev))
2786 refclk = 120000; /* 120Mhz refclk */ 2988 refclk = 120000; /* 120Mhz refclk */
2787 } else { 2989 } else {
2788 refclk = 48000; 2990 refclk = 48000;
@@ -2802,14 +3004,23 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2802 return -EINVAL; 3004 return -EINVAL;
2803 } 3005 }
2804 3006
2805 if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) { 3007 if (is_lvds && dev_priv->lvds_downclock_avail) {
2806 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); 3008 has_reduced_clock = limit->find_pll(limit, crtc,
2807 has_reduced_clock = limit->find_reduced_pll(limit, crtc, 3009 dev_priv->lvds_downclock,
2808 (adjusted_mode->clock*3/4),
2809 refclk, 3010 refclk,
2810 &reduced_clock); 3011 &reduced_clock);
3012 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
3013 /*
3014 * If the different P is found, it means that we can't
3015 * switch the display clock by using the FP0/FP1.
3016 * In such case we will disable the LVDS downclock
3017 * feature.
3018 */
3019 DRM_DEBUG_KMS("Different P is found for "
3020 "LVDS clock/downclock\n");
3021 has_reduced_clock = 0;
3022 }
2811 } 3023 }
2812
2813 /* SDVO TV has fixed PLL values depend on its clock range, 3024 /* SDVO TV has fixed PLL values depend on its clock range,
2814 this mirrors vbios setting. */ 3025 this mirrors vbios setting. */
2815 if (is_sdvo && is_tv) { 3026 if (is_sdvo && is_tv) {
@@ -2831,15 +3042,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2831 } 3042 }
2832 3043
2833 /* FDI link */ 3044 /* FDI link */
2834 if (IS_IGDNG(dev)) { 3045 if (HAS_PCH_SPLIT(dev)) {
2835 int lane, link_bw, bpp; 3046 int lane, link_bw, bpp;
2836 /* eDP doesn't require FDI link, so just set DP M/N 3047 /* eDP doesn't require FDI link, so just set DP M/N
2837 according to current link config */ 3048 according to current link config */
2838 if (is_edp) { 3049 if (is_edp) {
2839 struct drm_connector *edp; 3050 struct drm_connector *edp;
2840 target_clock = mode->clock; 3051 target_clock = mode->clock;
2841 edp = intel_pipe_get_output(crtc); 3052 edp = intel_pipe_get_connector(crtc);
2842 intel_edp_link_config(to_intel_output(edp), 3053 intel_edp_link_config(to_intel_encoder(edp),
2843 &lane, &link_bw); 3054 &lane, &link_bw);
2844 } else { 3055 } else {
2845 /* DP over FDI requires target mode clock 3056 /* DP over FDI requires target mode clock
@@ -2854,6 +3065,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2854 3065
2855 /* determine panel color depth */ 3066 /* determine panel color depth */
2856 temp = I915_READ(pipeconf_reg); 3067 temp = I915_READ(pipeconf_reg);
3068 temp &= ~PIPE_BPC_MASK;
3069 if (is_lvds) {
3070 int lvds_reg = I915_READ(PCH_LVDS);
3071 /* the BPC will be 6 if it is 18-bit LVDS panel */
3072 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3073 temp |= PIPE_8BPC;
3074 else
3075 temp |= PIPE_6BPC;
3076 } else if (is_edp) {
3077 switch (dev_priv->edp_bpp/3) {
3078 case 8:
3079 temp |= PIPE_8BPC;
3080 break;
3081 case 10:
3082 temp |= PIPE_10BPC;
3083 break;
3084 case 6:
3085 temp |= PIPE_6BPC;
3086 break;
3087 case 12:
3088 temp |= PIPE_12BPC;
3089 break;
3090 }
3091 } else
3092 temp |= PIPE_8BPC;
3093 I915_WRITE(pipeconf_reg, temp);
3094 I915_READ(pipeconf_reg);
2857 3095
2858 switch (temp & PIPE_BPC_MASK) { 3096 switch (temp & PIPE_BPC_MASK) {
2859 case PIPE_8BPC: 3097 case PIPE_8BPC:
@@ -2873,8 +3111,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2873 bpp = 24; 3111 bpp = 24;
2874 } 3112 }
2875 3113
2876 igdng_compute_m_n(bpp, lane, target_clock, 3114 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
2877 link_bw, &m_n);
2878 } 3115 }
2879 3116
2880 /* Ironlake: try to setup display ref clock before DPLL 3117 /* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +3119,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2882 * PCH B stepping, previous chipset stepping should be 3119 * PCH B stepping, previous chipset stepping should be
2883 * ignoring this setting. 3120 * ignoring this setting.
2884 */ 3121 */
2885 if (IS_IGDNG(dev)) { 3122 if (HAS_PCH_SPLIT(dev)) {
2886 temp = I915_READ(PCH_DREF_CONTROL); 3123 temp = I915_READ(PCH_DREF_CONTROL);
2887 /* Always enable nonspread source */ 3124 /* Always enable nonspread source */
2888 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3125 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3154,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2917 } 3154 }
2918 } 3155 }
2919 3156
2920 if (IS_IGD(dev)) { 3157 if (IS_PINEVIEW(dev)) {
2921 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 3158 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
2922 if (has_reduced_clock) 3159 if (has_reduced_clock)
2923 fp2 = (1 << reduced_clock.n) << 16 | 3160 fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3166,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2929 reduced_clock.m2; 3166 reduced_clock.m2;
2930 } 3167 }
2931 3168
2932 if (!IS_IGDNG(dev)) 3169 if (!HAS_PCH_SPLIT(dev))
2933 dpll = DPLL_VGA_MODE_DIS; 3170 dpll = DPLL_VGA_MODE_DIS;
2934 3171
2935 if (IS_I9XX(dev)) { 3172 if (IS_I9XX(dev)) {
@@ -2942,19 +3179,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3179 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
2943 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3180 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2944 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3181 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
2945 else if (IS_IGDNG(dev)) 3182 else if (HAS_PCH_SPLIT(dev))
2946 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3183 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2947 } 3184 }
2948 if (is_dp) 3185 if (is_dp)
2949 dpll |= DPLL_DVO_HIGH_SPEED; 3186 dpll |= DPLL_DVO_HIGH_SPEED;
2950 3187
2951 /* compute bitmask from p1 value */ 3188 /* compute bitmask from p1 value */
2952 if (IS_IGD(dev)) 3189 if (IS_PINEVIEW(dev))
2953 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 3190 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
2954 else { 3191 else {
2955 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3192 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2956 /* also FPA1 */ 3193 /* also FPA1 */
2957 if (IS_IGDNG(dev)) 3194 if (HAS_PCH_SPLIT(dev))
2958 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3195 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2959 if (IS_G4X(dev) && has_reduced_clock) 3196 if (IS_G4X(dev) && has_reduced_clock)
2960 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3197 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3210,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2973 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3210 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
2974 break; 3211 break;
2975 } 3212 }
2976 if (IS_I965G(dev) && !IS_IGDNG(dev)) 3213 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
2977 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3214 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2978 } else { 3215 } else {
2979 if (is_lvds) { 3216 if (is_lvds) {
@@ -2994,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2994 /* XXX: just matching BIOS for now */ 3231 /* XXX: just matching BIOS for now */
2995 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 3232 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
2996 dpll |= 3; 3233 dpll |= 3;
2997 else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) 3234 else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
2998 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 3235 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
2999 else 3236 else
3000 dpll |= PLL_REF_INPUT_DREFCLK; 3237 dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3005,9 +3242,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3005 /* Set up the display plane register */ 3242 /* Set up the display plane register */
3006 dspcntr = DISPPLANE_GAMMA_ENABLE; 3243 dspcntr = DISPPLANE_GAMMA_ENABLE;
3007 3244
3008 /* IGDNG's plane is forced to pipe, bit 24 is to 3245 /* Ironlake's plane is forced to pipe, bit 24 is to
3009 enable color space conversion */ 3246 enable color space conversion */
3010 if (!IS_IGDNG(dev)) { 3247 if (!HAS_PCH_SPLIT(dev)) {
3011 if (pipe == 0) 3248 if (pipe == 0)
3012 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3249 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3013 else 3250 else
@@ -3034,20 +3271,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3034 3271
3035 3272
3036 /* Disable the panel fitter if it was on our pipe */ 3273 /* Disable the panel fitter if it was on our pipe */
3037 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe) 3274 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3038 I915_WRITE(PFIT_CONTROL, 0); 3275 I915_WRITE(PFIT_CONTROL, 0);
3039 3276
3040 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3277 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3041 drm_mode_debug_printmodeline(mode); 3278 drm_mode_debug_printmodeline(mode);
3042 3279
3043 /* assign to IGDNG registers */ 3280 /* assign to Ironlake registers */
3044 if (IS_IGDNG(dev)) { 3281 if (HAS_PCH_SPLIT(dev)) {
3045 fp_reg = pch_fp_reg; 3282 fp_reg = pch_fp_reg;
3046 dpll_reg = pch_dpll_reg; 3283 dpll_reg = pch_dpll_reg;
3047 } 3284 }
3048 3285
3049 if (is_edp) { 3286 if (is_edp) {
3050 igdng_disable_pll_edp(crtc); 3287 ironlake_disable_pll_edp(crtc);
3051 } else if ((dpll & DPLL_VCO_ENABLE)) { 3288 } else if ((dpll & DPLL_VCO_ENABLE)) {
3052 I915_WRITE(fp_reg, fp); 3289 I915_WRITE(fp_reg, fp);
3053 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3290 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3299,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3062 if (is_lvds) { 3299 if (is_lvds) {
3063 u32 lvds; 3300 u32 lvds;
3064 3301
3065 if (IS_IGDNG(dev)) 3302 if (HAS_PCH_SPLIT(dev))
3066 lvds_reg = PCH_LVDS; 3303 lvds_reg = PCH_LVDS;
3067 3304
3068 lvds = I915_READ(lvds_reg); 3305 lvds = I915_READ(lvds_reg);
@@ -3081,7 +3318,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3081 * appropriately here, but we need to look more thoroughly into how 3318 * appropriately here, but we need to look more thoroughly into how
3082 * panels behave in the two modes. 3319 * panels behave in the two modes.
3083 */ 3320 */
3084 3321 /* set the dithering flag */
3322 if (IS_I965G(dev)) {
3323 if (dev_priv->lvds_dither) {
3324 if (HAS_PCH_SPLIT(dev))
3325 pipeconf |= PIPE_ENABLE_DITHER;
3326 else
3327 lvds |= LVDS_ENABLE_DITHER;
3328 } else {
3329 if (HAS_PCH_SPLIT(dev))
3330 pipeconf &= ~PIPE_ENABLE_DITHER;
3331 else
3332 lvds &= ~LVDS_ENABLE_DITHER;
3333 }
3334 }
3085 I915_WRITE(lvds_reg, lvds); 3335 I915_WRITE(lvds_reg, lvds);
3086 I915_READ(lvds_reg); 3336 I915_READ(lvds_reg);
3087 } 3337 }
@@ -3095,7 +3345,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3095 /* Wait for the clocks to stabilize. */ 3345 /* Wait for the clocks to stabilize. */
3096 udelay(150); 3346 udelay(150);
3097 3347
3098 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 3348 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3099 if (is_sdvo) { 3349 if (is_sdvo) {
3100 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3350 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3101 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3351 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3365,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3115 I915_WRITE(fp_reg + 4, fp2); 3365 I915_WRITE(fp_reg + 4, fp2);
3116 intel_crtc->lowfreq_avail = true; 3366 intel_crtc->lowfreq_avail = true;
3117 if (HAS_PIPE_CXSR(dev)) { 3367 if (HAS_PIPE_CXSR(dev)) {
3118 DRM_DEBUG("enabling CxSR downclocking\n"); 3368 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
3119 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 3369 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
3120 } 3370 }
3121 } else { 3371 } else {
3122 I915_WRITE(fp_reg + 4, fp); 3372 I915_WRITE(fp_reg + 4, fp);
3123 intel_crtc->lowfreq_avail = false; 3373 intel_crtc->lowfreq_avail = false;
3124 if (HAS_PIPE_CXSR(dev)) { 3374 if (HAS_PIPE_CXSR(dev)) {
3125 DRM_DEBUG("disabling CxSR downclocking\n"); 3375 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
3126 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 3376 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
3127 } 3377 }
3128 } 3378 }
@@ -3142,21 +3392,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3142 /* pipesrc and dspsize control the size that is scaled from, which should 3392 /* pipesrc and dspsize control the size that is scaled from, which should
3143 * always be the user's requested size. 3393 * always be the user's requested size.
3144 */ 3394 */
3145 if (!IS_IGDNG(dev)) { 3395 if (!HAS_PCH_SPLIT(dev)) {
3146 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3396 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3147 (mode->hdisplay - 1)); 3397 (mode->hdisplay - 1));
3148 I915_WRITE(dsppos_reg, 0); 3398 I915_WRITE(dsppos_reg, 0);
3149 } 3399 }
3150 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3400 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3151 3401
3152 if (IS_IGDNG(dev)) { 3402 if (HAS_PCH_SPLIT(dev)) {
3153 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3403 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3154 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3404 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3155 I915_WRITE(link_m1_reg, m_n.link_m); 3405 I915_WRITE(link_m1_reg, m_n.link_m);
3156 I915_WRITE(link_n1_reg, m_n.link_n); 3406 I915_WRITE(link_n1_reg, m_n.link_n);
3157 3407
3158 if (is_edp) { 3408 if (is_edp) {
3159 igdng_set_pll_edp(crtc, adjusted_mode->clock); 3409 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
3160 } else { 3410 } else {
3161 /* enable FDI RX PLL too */ 3411 /* enable FDI RX PLL too */
3162 temp = I915_READ(fdi_rx_reg); 3412 temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3420,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3170 3420
3171 intel_wait_for_vblank(dev); 3421 intel_wait_for_vblank(dev);
3172 3422
3173 if (IS_IGDNG(dev)) { 3423 if (IS_IRONLAKE(dev)) {
3174 /* enable address swizzle for tiling buffer */ 3424 /* enable address swizzle for tiling buffer */
3175 temp = I915_READ(DISP_ARB_CTL); 3425 temp = I915_READ(DISP_ARB_CTL);
3176 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 3426 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3454,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3204 if (!crtc->enabled) 3454 if (!crtc->enabled)
3205 return; 3455 return;
3206 3456
3207 /* use legacy palette for IGDNG */ 3457 /* use legacy palette for Ironlake */
3208 if (IS_IGDNG(dev)) 3458 if (HAS_PCH_SPLIT(dev))
3209 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3459 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3210 LGC_PALETTE_B; 3460 LGC_PALETTE_B;
3211 3461
@@ -3234,11 +3484,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3234 size_t addr; 3484 size_t addr;
3235 int ret; 3485 int ret;
3236 3486
3237 DRM_DEBUG("\n"); 3487 DRM_DEBUG_KMS("\n");
3238 3488
3239 /* if we want to turn off the cursor ignore width and height */ 3489 /* if we want to turn off the cursor ignore width and height */
3240 if (!handle) { 3490 if (!handle) {
3241 DRM_DEBUG("cursor off\n"); 3491 DRM_DEBUG_KMS("cursor off\n");
3242 if (IS_MOBILE(dev) || IS_I9XX(dev)) { 3492 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3243 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 3493 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
3244 temp |= CURSOR_MODE_DISABLE; 3494 temp |= CURSOR_MODE_DISABLE;
@@ -3261,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3261 if (!bo) 3511 if (!bo)
3262 return -ENOENT; 3512 return -ENOENT;
3263 3513
3264 obj_priv = bo->driver_private; 3514 obj_priv = to_intel_bo(bo);
3265 3515
3266 if (bo->size < width * height * 4) { 3516 if (bo->size < width * height * 4) {
3267 DRM_ERROR("buffer is to small\n"); 3517 DRM_ERROR("buffer is to small\n");
@@ -3271,7 +3521,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3271 3521
3272 /* we only need to pin inside GTT if cursor is non-phy */ 3522 /* we only need to pin inside GTT if cursor is non-phy */
3273 mutex_lock(&dev->struct_mutex); 3523 mutex_lock(&dev->struct_mutex);
3274 if (!dev_priv->cursor_needs_physical) { 3524 if (!dev_priv->info->cursor_needs_physical) {
3275 ret = i915_gem_object_pin(bo, PAGE_SIZE); 3525 ret = i915_gem_object_pin(bo, PAGE_SIZE);
3276 if (ret) { 3526 if (ret) {
3277 DRM_ERROR("failed to pin cursor bo\n"); 3527 DRM_ERROR("failed to pin cursor bo\n");
@@ -3306,7 +3556,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3306 I915_WRITE(base, addr); 3556 I915_WRITE(base, addr);
3307 3557
3308 if (intel_crtc->cursor_bo) { 3558 if (intel_crtc->cursor_bo) {
3309 if (dev_priv->cursor_needs_physical) { 3559 if (dev_priv->info->cursor_needs_physical) {
3310 if (intel_crtc->cursor_bo != bo) 3560 if (intel_crtc->cursor_bo != bo)
3311 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 3561 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
3312 } else 3562 } else
@@ -3320,11 +3570,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3320 intel_crtc->cursor_bo = bo; 3570 intel_crtc->cursor_bo = bo;
3321 3571
3322 return 0; 3572 return 0;
3323fail:
3324 mutex_lock(&dev->struct_mutex);
3325fail_locked: 3573fail_locked:
3326 drm_gem_object_unreference(bo);
3327 mutex_unlock(&dev->struct_mutex); 3574 mutex_unlock(&dev->struct_mutex);
3575fail:
3576 drm_gem_object_unreference_unlocked(bo);
3328 return ret; 3577 return ret;
3329} 3578}
3330 3579
@@ -3406,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
3406 * detection. 3655 * detection.
3407 * 3656 *
3408 * It will be up to the load-detect code to adjust the pipe as appropriate for 3657 * It will be up to the load-detect code to adjust the pipe as appropriate for
3409 * its requirements. The pipe will be connected to no other outputs. 3658 * its requirements. The pipe will be connected to no other encoders.
3410 * 3659 *
3411 * Currently this code will only succeed if there is a pipe with no outputs 3660 * Currently this code will only succeed if there is a pipe with no encoders
3412 * configured for it. In the future, it could choose to temporarily disable 3661 * configured for it. In the future, it could choose to temporarily disable
3413 * some outputs to free up a pipe for its use. 3662 * some outputs to free up a pipe for its use.
3414 * 3663 *
@@ -3421,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
3421 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 3670 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
3422}; 3671};
3423 3672
3424struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3425 struct drm_display_mode *mode, 3674 struct drm_display_mode *mode,
3426 int *dpms_mode) 3675 int *dpms_mode)
3427{ 3676{
3428 struct intel_crtc *intel_crtc; 3677 struct intel_crtc *intel_crtc;
3429 struct drm_crtc *possible_crtc; 3678 struct drm_crtc *possible_crtc;
3430 struct drm_crtc *supported_crtc =NULL; 3679 struct drm_crtc *supported_crtc =NULL;
3431 struct drm_encoder *encoder = &intel_output->enc; 3680 struct drm_encoder *encoder = &intel_encoder->enc;
3432 struct drm_crtc *crtc = NULL; 3681 struct drm_crtc *crtc = NULL;
3433 struct drm_device *dev = encoder->dev; 3682 struct drm_device *dev = encoder->dev;
3434 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3683 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3480,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3480 } 3729 }
3481 3730
3482 encoder->crtc = crtc; 3731 encoder->crtc = crtc;
3483 intel_output->base.encoder = encoder; 3732 intel_encoder->base.encoder = encoder;
3484 intel_output->load_detect_temp = true; 3733 intel_encoder->load_detect_temp = true;
3485 3734
3486 intel_crtc = to_intel_crtc(crtc); 3735 intel_crtc = to_intel_crtc(crtc);
3487 *dpms_mode = intel_crtc->dpms_mode; 3736 *dpms_mode = intel_crtc->dpms_mode;
@@ -3506,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3506 return crtc; 3755 return crtc;
3507} 3756}
3508 3757
3509void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) 3758void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
3510{ 3759{
3511 struct drm_encoder *encoder = &intel_output->enc; 3760 struct drm_encoder *encoder = &intel_encoder->enc;
3512 struct drm_device *dev = encoder->dev; 3761 struct drm_device *dev = encoder->dev;
3513 struct drm_crtc *crtc = encoder->crtc; 3762 struct drm_crtc *crtc = encoder->crtc;
3514 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3763 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3515 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3764 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3516 3765
3517 if (intel_output->load_detect_temp) { 3766 if (intel_encoder->load_detect_temp) {
3518 encoder->crtc = NULL; 3767 encoder->crtc = NULL;
3519 intel_output->base.encoder = NULL; 3768 intel_encoder->base.encoder = NULL;
3520 intel_output->load_detect_temp = false; 3769 intel_encoder->load_detect_temp = false;
3521 crtc->enabled = drm_helper_crtc_in_use(crtc); 3770 crtc->enabled = drm_helper_crtc_in_use(crtc);
3522 drm_helper_disable_unused_functions(dev); 3771 drm_helper_disable_unused_functions(dev);
3523 } 3772 }
3524 3773
3525 /* Switch crtc and output back off if necessary */ 3774 /* Switch crtc and encoder back off if necessary */
3526 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 3775 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
3527 if (encoder->crtc == crtc) 3776 if (encoder->crtc == crtc)
3528 encoder_funcs->dpms(encoder, dpms_mode); 3777 encoder_funcs->dpms(encoder, dpms_mode);
@@ -3546,18 +3795,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3546 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 3795 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
3547 3796
3548 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 3797 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
3549 if (IS_IGD(dev)) { 3798 if (IS_PINEVIEW(dev)) {
3550 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 3799 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
3551 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; 3800 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
3552 } else { 3801 } else {
3553 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 3802 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
3554 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 3803 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
3555 } 3804 }
3556 3805
3557 if (IS_I9XX(dev)) { 3806 if (IS_I9XX(dev)) {
3558 if (IS_IGD(dev)) 3807 if (IS_PINEVIEW(dev))
3559 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> 3808 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
3560 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); 3809 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
3561 else 3810 else
3562 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 3811 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
3563 DPLL_FPA01_P1_POST_DIV_SHIFT); 3812 DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3821,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3572 7 : 14; 3821 7 : 14;
3573 break; 3822 break;
3574 default: 3823 default:
3575 DRM_DEBUG("Unknown DPLL mode %08x in programmed " 3824 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
3576 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 3825 "mode\n", (int)(dpll & DPLL_MODE_MASK));
3577 return 0; 3826 return 0;
3578 } 3827 }
@@ -3658,132 +3907,13 @@ static void intel_gpu_idle_timer(unsigned long arg)
3658 struct drm_device *dev = (struct drm_device *)arg; 3907 struct drm_device *dev = (struct drm_device *)arg;
3659 drm_i915_private_t *dev_priv = dev->dev_private; 3908 drm_i915_private_t *dev_priv = dev->dev_private;
3660 3909
3661 DRM_DEBUG("idle timer fired, downclocking\n"); 3910 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3662 3911
3663 dev_priv->busy = false; 3912 dev_priv->busy = false;
3664 3913
3665 queue_work(dev_priv->wq, &dev_priv->idle_work); 3914 queue_work(dev_priv->wq, &dev_priv->idle_work);
3666} 3915}
3667 3916
3668void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3669{
3670 drm_i915_private_t *dev_priv = dev->dev_private;
3671
3672 if (IS_IGDNG(dev))
3673 return;
3674
3675 if (!dev_priv->render_reclock_avail) {
3676 DRM_DEBUG("not reclocking render clock\n");
3677 return;
3678 }
3679
3680 /* Restore render clock frequency to original value */
3681 if (IS_G4X(dev) || IS_I9XX(dev))
3682 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3683 else if (IS_I85X(dev))
3684 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3685 DRM_DEBUG("increasing render clock frequency\n");
3686
3687 /* Schedule downclock */
3688 if (schedule)
3689 mod_timer(&dev_priv->idle_timer, jiffies +
3690 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
3691}
3692
3693void intel_decrease_renderclock(struct drm_device *dev)
3694{
3695 drm_i915_private_t *dev_priv = dev->dev_private;
3696
3697 if (IS_IGDNG(dev))
3698 return;
3699
3700 if (!dev_priv->render_reclock_avail) {
3701 DRM_DEBUG("not reclocking render clock\n");
3702 return;
3703 }
3704
3705 if (IS_G4X(dev)) {
3706 u16 gcfgc;
3707
3708 /* Adjust render clock... */
3709 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3710
3711 /* Down to minimum... */
3712 gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
3713 gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
3714
3715 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3716 } else if (IS_I965G(dev)) {
3717 u16 gcfgc;
3718
3719 /* Adjust render clock... */
3720 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3721
3722 /* Down to minimum... */
3723 gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
3724 gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
3725
3726 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3727 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
3728 u16 gcfgc;
3729
3730 /* Adjust render clock... */
3731 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3732
3733 /* Down to minimum... */
3734 gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
3735 gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
3736
3737 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3738 } else if (IS_I915G(dev)) {
3739 u16 gcfgc;
3740
3741 /* Adjust render clock... */
3742 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3743
3744 /* Down to minimum... */
3745 gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
3746 gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
3747
3748 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3749 } else if (IS_I85X(dev)) {
3750 u16 hpllcc;
3751
3752 /* Adjust render clock... */
3753 pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
3754
3755 /* Up to maximum... */
3756 hpllcc &= ~GC_CLOCK_CONTROL_MASK;
3757 hpllcc |= GC_CLOCK_133_200;
3758
3759 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3760 }
3761 DRM_DEBUG("decreasing render clock frequency\n");
3762}
3763
3764/* Note that no increase function is needed for this - increase_renderclock()
3765 * will also rewrite these bits
3766 */
3767void intel_decrease_displayclock(struct drm_device *dev)
3768{
3769 if (IS_IGDNG(dev))
3770 return;
3771
3772 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
3773 IS_I915GM(dev)) {
3774 u16 gcfgc;
3775
3776 /* Adjust render clock... */
3777 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3778
3779 /* Down to minimum... */
3780 gcfgc &= ~0xf0;
3781 gcfgc |= 0x80;
3782
3783 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3784 }
3785}
3786
3787#define CRTC_IDLE_TIMEOUT 1000 /* ms */ 3917#define CRTC_IDLE_TIMEOUT 1000 /* ms */
3788 3918
3789static void intel_crtc_idle_timer(unsigned long arg) 3919static void intel_crtc_idle_timer(unsigned long arg)
@@ -3792,7 +3922,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
3792 struct drm_crtc *crtc = &intel_crtc->base; 3922 struct drm_crtc *crtc = &intel_crtc->base;
3793 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 3923 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
3794 3924
3795 DRM_DEBUG("idle timer fired, downclocking\n"); 3925 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3796 3926
3797 intel_crtc->busy = false; 3927 intel_crtc->busy = false;
3798 3928
@@ -3808,14 +3938,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3808 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3938 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3809 int dpll = I915_READ(dpll_reg); 3939 int dpll = I915_READ(dpll_reg);
3810 3940
3811 if (IS_IGDNG(dev)) 3941 if (HAS_PCH_SPLIT(dev))
3812 return; 3942 return;
3813 3943
3814 if (!dev_priv->lvds_downclock_avail) 3944 if (!dev_priv->lvds_downclock_avail)
3815 return; 3945 return;
3816 3946
3817 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 3947 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
3818 DRM_DEBUG("upclocking LVDS\n"); 3948 DRM_DEBUG_DRIVER("upclocking LVDS\n");
3819 3949
3820 /* Unlock panel regs */ 3950 /* Unlock panel regs */
3821 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3951 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3956,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3826 intel_wait_for_vblank(dev); 3956 intel_wait_for_vblank(dev);
3827 dpll = I915_READ(dpll_reg); 3957 dpll = I915_READ(dpll_reg);
3828 if (dpll & DISPLAY_RATE_SELECT_FPA1) 3958 if (dpll & DISPLAY_RATE_SELECT_FPA1)
3829 DRM_DEBUG("failed to upclock LVDS!\n"); 3959 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
3830 3960
3831 /* ...and lock them again */ 3961 /* ...and lock them again */
3832 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 3962 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3977,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3847 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3977 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3848 int dpll = I915_READ(dpll_reg); 3978 int dpll = I915_READ(dpll_reg);
3849 3979
3850 if (IS_IGDNG(dev)) 3980 if (HAS_PCH_SPLIT(dev))
3851 return; 3981 return;
3852 3982
3853 if (!dev_priv->lvds_downclock_avail) 3983 if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3988,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3858 * the manual case. 3988 * the manual case.
3859 */ 3989 */
3860 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 3990 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
3861 DRM_DEBUG("downclocking LVDS\n"); 3991 DRM_DEBUG_DRIVER("downclocking LVDS\n");
3862 3992
3863 /* Unlock panel regs */ 3993 /* Unlock panel regs */
3864 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3994 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3999,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3869 intel_wait_for_vblank(dev); 3999 intel_wait_for_vblank(dev);
3870 dpll = I915_READ(dpll_reg); 4000 dpll = I915_READ(dpll_reg);
3871 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 4001 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
3872 DRM_DEBUG("failed to downclock LVDS!\n"); 4002 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
3873 4003
3874 /* ...and lock them again */ 4004 /* ...and lock them again */
3875 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 4005 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3897,10 +4027,9 @@ static void intel_idle_update(struct work_struct *work)
3897 4027
3898 mutex_lock(&dev->struct_mutex); 4028 mutex_lock(&dev->struct_mutex);
3899 4029
3900 /* GPU isn't processing, downclock it. */ 4030 if (IS_I945G(dev) || IS_I945GM(dev)) {
3901 if (!dev_priv->busy) { 4031 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
3902 intel_decrease_renderclock(dev); 4032 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
3903 intel_decrease_displayclock(dev);
3904 } 4033 }
3905 4034
3906 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4035 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3936,8 +4065,19 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
3936 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4065 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3937 return; 4066 return;
3938 4067
3939 dev_priv->busy = true; 4068 if (!dev_priv->busy) {
3940 intel_increase_renderclock(dev, true); 4069 if (IS_I945G(dev) || IS_I945GM(dev)) {
4070 u32 fw_blc_self;
4071
4072 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4073 fw_blc_self = I915_READ(FW_BLC_SELF);
4074 fw_blc_self &= ~FW_BLC_SELF_EN;
4075 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4076 }
4077 dev_priv->busy = true;
4078 } else
4079 mod_timer(&dev_priv->idle_timer, jiffies +
4080 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
3941 4081
3942 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4082 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3943 if (!crtc->fb) 4083 if (!crtc->fb)
@@ -3947,6 +4087,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
3947 intel_fb = to_intel_framebuffer(crtc->fb); 4087 intel_fb = to_intel_framebuffer(crtc->fb);
3948 if (intel_fb->obj == obj) { 4088 if (intel_fb->obj == obj) {
3949 if (!intel_crtc->busy) { 4089 if (!intel_crtc->busy) {
4090 if (IS_I945G(dev) || IS_I945GM(dev)) {
4091 u32 fw_blc_self;
4092
4093 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4094 fw_blc_self = I915_READ(FW_BLC_SELF);
4095 fw_blc_self &= ~FW_BLC_SELF_EN;
4096 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4097 }
3950 /* Non-busy -> busy, upclock */ 4098 /* Non-busy -> busy, upclock */
3951 intel_increase_pllclock(crtc, true); 4099 intel_increase_pllclock(crtc, true);
3952 intel_crtc->busy = true; 4100 intel_crtc->busy = true;
@@ -3967,6 +4115,180 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3967 kfree(intel_crtc); 4115 kfree(intel_crtc);
3968} 4116}
3969 4117
4118struct intel_unpin_work {
4119 struct work_struct work;
4120 struct drm_device *dev;
4121 struct drm_gem_object *old_fb_obj;
4122 struct drm_gem_object *pending_flip_obj;
4123 struct drm_pending_vblank_event *event;
4124 int pending;
4125};
4126
4127static void intel_unpin_work_fn(struct work_struct *__work)
4128{
4129 struct intel_unpin_work *work =
4130 container_of(__work, struct intel_unpin_work, work);
4131
4132 mutex_lock(&work->dev->struct_mutex);
4133 i915_gem_object_unpin(work->old_fb_obj);
4134 drm_gem_object_unreference(work->pending_flip_obj);
4135 drm_gem_object_unreference(work->old_fb_obj);
4136 mutex_unlock(&work->dev->struct_mutex);
4137 kfree(work);
4138}
4139
4140void intel_finish_page_flip(struct drm_device *dev, int pipe)
4141{
4142 drm_i915_private_t *dev_priv = dev->dev_private;
4143 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4144 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4145 struct intel_unpin_work *work;
4146 struct drm_i915_gem_object *obj_priv;
4147 struct drm_pending_vblank_event *e;
4148 struct timeval now;
4149 unsigned long flags;
4150
4151 /* Ignore early vblank irqs */
4152 if (intel_crtc == NULL)
4153 return;
4154
4155 spin_lock_irqsave(&dev->event_lock, flags);
4156 work = intel_crtc->unpin_work;
4157 if (work == NULL || !work->pending) {
4158 if (work && !work->pending) {
4159 obj_priv = to_intel_bo(work->pending_flip_obj);
4160 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4161 obj_priv,
4162 atomic_read(&obj_priv->pending_flip));
4163 }
4164 spin_unlock_irqrestore(&dev->event_lock, flags);
4165 return;
4166 }
4167
4168 intel_crtc->unpin_work = NULL;
4169 drm_vblank_put(dev, intel_crtc->pipe);
4170
4171 if (work->event) {
4172 e = work->event;
4173 do_gettimeofday(&now);
4174 e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
4175 e->event.tv_sec = now.tv_sec;
4176 e->event.tv_usec = now.tv_usec;
4177 list_add_tail(&e->base.link,
4178 &e->base.file_priv->event_list);
4179 wake_up_interruptible(&e->base.file_priv->event_wait);
4180 }
4181
4182 spin_unlock_irqrestore(&dev->event_lock, flags);
4183
4184 obj_priv = to_intel_bo(work->pending_flip_obj);
4185
4186 /* Initial scanout buffer will have a 0 pending flip count */
4187 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4188 atomic_dec_and_test(&obj_priv->pending_flip))
4189 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4190 schedule_work(&work->work);
4191}
4192
4193void intel_prepare_page_flip(struct drm_device *dev, int plane)
4194{
4195 drm_i915_private_t *dev_priv = dev->dev_private;
4196 struct intel_crtc *intel_crtc =
4197 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
4198 unsigned long flags;
4199
4200 spin_lock_irqsave(&dev->event_lock, flags);
4201 if (intel_crtc->unpin_work) {
4202 intel_crtc->unpin_work->pending = 1;
4203 } else {
4204 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4205 }
4206 spin_unlock_irqrestore(&dev->event_lock, flags);
4207}
4208
4209static int intel_crtc_page_flip(struct drm_crtc *crtc,
4210 struct drm_framebuffer *fb,
4211 struct drm_pending_vblank_event *event)
4212{
4213 struct drm_device *dev = crtc->dev;
4214 struct drm_i915_private *dev_priv = dev->dev_private;
4215 struct intel_framebuffer *intel_fb;
4216 struct drm_i915_gem_object *obj_priv;
4217 struct drm_gem_object *obj;
4218 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4219 struct intel_unpin_work *work;
4220 unsigned long flags;
4221 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4222 int ret, pipesrc;
4223 RING_LOCALS;
4224
4225 work = kzalloc(sizeof *work, GFP_KERNEL);
4226 if (work == NULL)
4227 return -ENOMEM;
4228
4229 mutex_lock(&dev->struct_mutex);
4230
4231 work->event = event;
4232 work->dev = crtc->dev;
4233 intel_fb = to_intel_framebuffer(crtc->fb);
4234 work->old_fb_obj = intel_fb->obj;
4235 INIT_WORK(&work->work, intel_unpin_work_fn);
4236
4237 /* We borrow the event spin lock for protecting unpin_work */
4238 spin_lock_irqsave(&dev->event_lock, flags);
4239 if (intel_crtc->unpin_work) {
4240 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4241 spin_unlock_irqrestore(&dev->event_lock, flags);
4242 kfree(work);
4243 mutex_unlock(&dev->struct_mutex);
4244 return -EBUSY;
4245 }
4246 intel_crtc->unpin_work = work;
4247 spin_unlock_irqrestore(&dev->event_lock, flags);
4248
4249 intel_fb = to_intel_framebuffer(fb);
4250 obj = intel_fb->obj;
4251
4252 ret = intel_pin_and_fence_fb_obj(dev, obj);
4253 if (ret != 0) {
4254 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4255 to_intel_bo(obj));
4256 kfree(work);
4257 intel_crtc->unpin_work = NULL;
4258 mutex_unlock(&dev->struct_mutex);
4259 return ret;
4260 }
4261
4262 /* Reference the objects for the scheduled work. */
4263 drm_gem_object_reference(work->old_fb_obj);
4264 drm_gem_object_reference(obj);
4265
4266 crtc->fb = fb;
4267 i915_gem_object_flush_write_domain(obj);
4268 drm_vblank_get(dev, intel_crtc->pipe);
4269 obj_priv = to_intel_bo(obj);
4270 atomic_inc(&obj_priv->pending_flip);
4271 work->pending_flip_obj = obj;
4272
4273 BEGIN_LP_RING(4);
4274 OUT_RING(MI_DISPLAY_FLIP |
4275 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4276 OUT_RING(fb->pitch);
4277 if (IS_I965G(dev)) {
4278 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4279 pipesrc = I915_READ(pipesrc_reg);
4280 OUT_RING(pipesrc & 0x0fff0fff);
4281 } else {
4282 OUT_RING(obj_priv->gtt_offset);
4283 OUT_RING(MI_NOOP);
4284 }
4285 ADVANCE_LP_RING();
4286
4287 mutex_unlock(&dev->struct_mutex);
4288
4289 return 0;
4290}
4291
3970static const struct drm_crtc_helper_funcs intel_helper_funcs = { 4292static const struct drm_crtc_helper_funcs intel_helper_funcs = {
3971 .dpms = intel_crtc_dpms, 4293 .dpms = intel_crtc_dpms,
3972 .mode_fixup = intel_crtc_mode_fixup, 4294 .mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4305,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
3983 .gamma_set = intel_crtc_gamma_set, 4305 .gamma_set = intel_crtc_gamma_set,
3984 .set_config = drm_crtc_helper_set_config, 4306 .set_config = drm_crtc_helper_set_config,
3985 .destroy = intel_crtc_destroy, 4307 .destroy = intel_crtc_destroy,
4308 .page_flip = intel_crtc_page_flip,
3986}; 4309};
3987 4310
3988 4311
3989static void intel_crtc_init(struct drm_device *dev, int pipe) 4312static void intel_crtc_init(struct drm_device *dev, int pipe)
3990{ 4313{
4314 drm_i915_private_t *dev_priv = dev->dev_private;
3991 struct intel_crtc *intel_crtc; 4315 struct intel_crtc *intel_crtc;
3992 int i; 4316 int i;
3993 4317
@@ -4010,10 +4334,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
4010 intel_crtc->pipe = pipe; 4334 intel_crtc->pipe = pipe;
4011 intel_crtc->plane = pipe; 4335 intel_crtc->plane = pipe;
4012 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { 4336 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
4013 DRM_DEBUG("swapping pipes & planes for FBC\n"); 4337 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
4014 intel_crtc->plane = ((pipe == 0) ? 1 : 0); 4338 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
4015 } 4339 }
4016 4340
4341 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
4342 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
4343 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
4344 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
4345
4017 intel_crtc->cursor_addr = 0; 4346 intel_crtc->cursor_addr = 0;
4018 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 4347 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4019 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 4348 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4070,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
4070 int entry = 0; 4399 int entry = 0;
4071 4400
4072 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4073 struct intel_output *intel_output = to_intel_output(connector); 4402 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4074 if (type_mask & intel_output->clone_mask) 4403 if (type_mask & intel_encoder->clone_mask)
4075 index_mask |= (1 << entry); 4404 index_mask |= (1 << entry);
4076 entry++; 4405 entry++;
4077 } 4406 }
@@ -4090,7 +4419,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4090 if (IS_MOBILE(dev) && !IS_I830(dev)) 4419 if (IS_MOBILE(dev) && !IS_I830(dev))
4091 intel_lvds_init(dev); 4420 intel_lvds_init(dev);
4092 4421
4093 if (IS_IGDNG(dev)) { 4422 if (HAS_PCH_SPLIT(dev)) {
4094 int found; 4423 int found;
4095 4424
4096 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4425 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,46 +4447,60 @@ static void intel_setup_outputs(struct drm_device *dev)
4118 if (I915_READ(PCH_DP_D) & DP_DETECTED) 4447 if (I915_READ(PCH_DP_D) & DP_DETECTED)
4119 intel_dp_init(dev, PCH_DP_D); 4448 intel_dp_init(dev, PCH_DP_D);
4120 4449
4121 } else if (IS_I9XX(dev)) { 4450 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
4122 bool found = false; 4451 bool found = false;
4123 4452
4124 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4453 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4454 DRM_DEBUG_KMS("probing SDVOB\n");
4125 found = intel_sdvo_init(dev, SDVOB); 4455 found = intel_sdvo_init(dev, SDVOB);
4126 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 4456 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
4457 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
4127 intel_hdmi_init(dev, SDVOB); 4458 intel_hdmi_init(dev, SDVOB);
4459 }
4128 4460
4129 if (!found && SUPPORTS_INTEGRATED_DP(dev)) 4461 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
4462 DRM_DEBUG_KMS("probing DP_B\n");
4130 intel_dp_init(dev, DP_B); 4463 intel_dp_init(dev, DP_B);
4464 }
4131 } 4465 }
4132 4466
4133 /* Before G4X SDVOC doesn't have its own detect register */ 4467 /* Before G4X SDVOC doesn't have its own detect register */
4134 4468
4135 if (I915_READ(SDVOB) & SDVO_DETECTED) 4469 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4470 DRM_DEBUG_KMS("probing SDVOC\n");
4136 found = intel_sdvo_init(dev, SDVOC); 4471 found = intel_sdvo_init(dev, SDVOC);
4472 }
4137 4473
4138 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 4474 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
4139 4475
4140 if (SUPPORTS_INTEGRATED_HDMI(dev)) 4476 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
4477 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
4141 intel_hdmi_init(dev, SDVOC); 4478 intel_hdmi_init(dev, SDVOC);
4142 if (SUPPORTS_INTEGRATED_DP(dev)) 4479 }
4480 if (SUPPORTS_INTEGRATED_DP(dev)) {
4481 DRM_DEBUG_KMS("probing DP_C\n");
4143 intel_dp_init(dev, DP_C); 4482 intel_dp_init(dev, DP_C);
4483 }
4144 } 4484 }
4145 4485
4146 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4486 if (SUPPORTS_INTEGRATED_DP(dev) &&
4487 (I915_READ(DP_D) & DP_DETECTED)) {
4488 DRM_DEBUG_KMS("probing DP_D\n");
4147 intel_dp_init(dev, DP_D); 4489 intel_dp_init(dev, DP_D);
4148 } else 4490 }
4491 } else if (IS_GEN2(dev))
4149 intel_dvo_init(dev); 4492 intel_dvo_init(dev);
4150 4493
4151 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) 4494 if (SUPPORTS_TV(dev))
4152 intel_tv_init(dev); 4495 intel_tv_init(dev);
4153 4496
4154 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4155 struct intel_output *intel_output = to_intel_output(connector); 4498 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4156 struct drm_encoder *encoder = &intel_output->enc; 4499 struct drm_encoder *encoder = &intel_encoder->enc;
4157 4500
4158 encoder->possible_crtcs = intel_output->crtc_mask; 4501 encoder->possible_crtcs = intel_encoder->crtc_mask;
4159 encoder->possible_clones = intel_connector_clones(dev, 4502 encoder->possible_clones = intel_connector_clones(dev,
4160 intel_output->clone_mask); 4503 intel_encoder->clone_mask);
4161 } 4504 }
4162} 4505}
4163 4506
@@ -4170,9 +4513,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4170 intelfb_remove(dev, fb); 4513 intelfb_remove(dev, fb);
4171 4514
4172 drm_framebuffer_cleanup(fb); 4515 drm_framebuffer_cleanup(fb);
4173 mutex_lock(&dev->struct_mutex); 4516 drm_gem_object_unreference_unlocked(intel_fb->obj);
4174 drm_gem_object_unreference(intel_fb->obj);
4175 mutex_unlock(&dev->struct_mutex);
4176 4517
4177 kfree(intel_fb); 4518 kfree(intel_fb);
4178} 4519}
@@ -4235,9 +4576,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
4235 4576
4236 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4577 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
4237 if (ret) { 4578 if (ret) {
4238 mutex_lock(&dev->struct_mutex); 4579 drm_gem_object_unreference_unlocked(obj);
4239 drm_gem_object_unreference(obj);
4240 mutex_unlock(&dev->struct_mutex);
4241 return NULL; 4580 return NULL;
4242 } 4581 }
4243 4582
@@ -4249,6 +4588,127 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
4249 .fb_changed = intelfb_probe, 4588 .fb_changed = intelfb_probe,
4250}; 4589};
4251 4590
4591static struct drm_gem_object *
4592intel_alloc_power_context(struct drm_device *dev)
4593{
4594 struct drm_gem_object *pwrctx;
4595 int ret;
4596
4597 pwrctx = drm_gem_object_alloc(dev, 4096);
4598 if (!pwrctx) {
4599 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4600 return NULL;
4601 }
4602
4603 mutex_lock(&dev->struct_mutex);
4604 ret = i915_gem_object_pin(pwrctx, 4096);
4605 if (ret) {
4606 DRM_ERROR("failed to pin power context: %d\n", ret);
4607 goto err_unref;
4608 }
4609
4610 ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4611 if (ret) {
4612 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
4613 goto err_unpin;
4614 }
4615 mutex_unlock(&dev->struct_mutex);
4616
4617 return pwrctx;
4618
4619err_unpin:
4620 i915_gem_object_unpin(pwrctx);
4621err_unref:
4622 drm_gem_object_unreference(pwrctx);
4623 mutex_unlock(&dev->struct_mutex);
4624 return NULL;
4625}
4626
4627void ironlake_enable_drps(struct drm_device *dev)
4628{
4629 struct drm_i915_private *dev_priv = dev->dev_private;
4630 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
4631 u8 fmax, fmin, fstart, vstart;
4632 int i = 0;
4633
4634 /* 100ms RC evaluation intervals */
4635 I915_WRITE(RCUPEI, 100000);
4636 I915_WRITE(RCDNEI, 100000);
4637
4638 /* Set max/min thresholds to 90ms and 80ms respectively */
4639 I915_WRITE(RCBMAXAVG, 90000);
4640 I915_WRITE(RCBMINAVG, 80000);
4641
4642 I915_WRITE(MEMIHYST, 1);
4643
4644 /* Set up min, max, and cur for interrupt handling */
4645 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4646 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4647 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4648 MEMMODE_FSTART_SHIFT;
4649 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4650 PXVFREQ_PX_SHIFT;
4651
4652 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
4653 dev_priv->min_delay = fmin;
4654 dev_priv->cur_delay = fstart;
4655
4656 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4657
4658 /*
4659 * Interrupts will be enabled in ironlake_irq_postinstall
4660 */
4661
4662 I915_WRITE(VIDSTART, vstart);
4663 POSTING_READ(VIDSTART);
4664
4665 rgvmodectl |= MEMMODE_SWMODE_EN;
4666 I915_WRITE(MEMMODECTL, rgvmodectl);
4667
4668 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
4669 if (i++ > 100) {
4670 DRM_ERROR("stuck trying to change perf mode\n");
4671 break;
4672 }
4673 msleep(1);
4674 }
4675 msleep(1);
4676
4677 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4678 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4679 I915_WRITE(MEMSWCTL, rgvswctl);
4680 POSTING_READ(MEMSWCTL);
4681
4682 rgvswctl |= MEMCTL_CMD_STS;
4683 I915_WRITE(MEMSWCTL, rgvswctl);
4684}
4685
4686void ironlake_disable_drps(struct drm_device *dev)
4687{
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689 u32 rgvswctl;
4690 u8 fstart;
4691
4692 /* Ack interrupts, disable EFC interrupt */
4693 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4694 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4695 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4696 I915_WRITE(DEIIR, DE_PCU_EVENT);
4697 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4698
4699 /* Go back to the starting frequency */
4700 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
4701 MEMMODE_FSTART_SHIFT;
4702 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4703 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4704 I915_WRITE(MEMSWCTL, rgvswctl);
4705 msleep(1);
4706 rgvswctl |= MEMCTL_CMD_STS;
4707 I915_WRITE(MEMSWCTL, rgvswctl);
4708 msleep(1);
4709
4710}
4711
4252void intel_init_clock_gating(struct drm_device *dev) 4712void intel_init_clock_gating(struct drm_device *dev)
4253{ 4713{
4254 struct drm_i915_private *dev_priv = dev->dev_private; 4714 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4257,7 +4717,21 @@ void intel_init_clock_gating(struct drm_device *dev)
4257 * Disable clock gating reported to work incorrectly according to the 4717 * Disable clock gating reported to work incorrectly according to the
4258 * specs, but enable as much else as we can. 4718 * specs, but enable as much else as we can.
4259 */ 4719 */
4260 if (IS_IGDNG(dev)) { 4720 if (HAS_PCH_SPLIT(dev)) {
4721 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
4722
4723 if (IS_IRONLAKE(dev)) {
4724 /* Required for FBC */
4725 dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
4726 /* Required for CxSR */
4727 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
4728
4729 I915_WRITE(PCH_3DCGDIS0,
4730 MARIUNIT_CLOCK_GATE_DISABLE |
4731 SVSMUNIT_CLOCK_GATE_DISABLE);
4732 }
4733
4734 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
4261 return; 4735 return;
4262 } else if (IS_G4X(dev)) { 4736 } else if (IS_G4X(dev)) {
4263 uint32_t dspclk_gate; 4737 uint32_t dspclk_gate;
@@ -4291,11 +4765,37 @@ void intel_init_clock_gating(struct drm_device *dev)
4291 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 4765 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4292 DSTATE_DOT_CLOCK_GATING; 4766 DSTATE_DOT_CLOCK_GATING;
4293 I915_WRITE(D_STATE, dstate); 4767 I915_WRITE(D_STATE, dstate);
4294 } else if (IS_I855(dev) || IS_I865G(dev)) { 4768 } else if (IS_I85X(dev) || IS_I865G(dev)) {
4295 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 4769 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4296 } else if (IS_I830(dev)) { 4770 } else if (IS_I830(dev)) {
4297 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 4771 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4298 } 4772 }
4773
4774 /*
4775 * GPU can automatically power down the render unit if given a page
4776 * to save state.
4777 */
4778 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
4779 struct drm_i915_gem_object *obj_priv = NULL;
4780
4781 if (dev_priv->pwrctx) {
4782 obj_priv = to_intel_bo(dev_priv->pwrctx);
4783 } else {
4784 struct drm_gem_object *pwrctx;
4785
4786 pwrctx = intel_alloc_power_context(dev);
4787 if (pwrctx) {
4788 dev_priv->pwrctx = pwrctx;
4789 obj_priv = to_intel_bo(pwrctx);
4790 }
4791 }
4792
4793 if (obj_priv) {
4794 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4795 I915_WRITE(MCHBAR_RENDER_STANDBY,
4796 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4797 }
4798 }
4299} 4799}
4300 4800
4301/* Set up chip specific display functions */ 4801/* Set up chip specific display functions */
@@ -4304,8 +4804,8 @@ static void intel_init_display(struct drm_device *dev)
4304 struct drm_i915_private *dev_priv = dev->dev_private; 4804 struct drm_i915_private *dev_priv = dev->dev_private;
4305 4805
4306 /* We always want a DPMS function */ 4806 /* We always want a DPMS function */
4307 if (IS_IGDNG(dev)) 4807 if (HAS_PCH_SPLIT(dev))
4308 dev_priv->display.dpms = igdng_crtc_dpms; 4808 dev_priv->display.dpms = ironlake_crtc_dpms;
4309 else 4809 else
4310 dev_priv->display.dpms = i9xx_crtc_dpms; 4810 dev_priv->display.dpms = i9xx_crtc_dpms;
4311 4811
@@ -4315,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
4315 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4316 dev_priv->display.enable_fbc = g4x_enable_fbc; 4816 dev_priv->display.enable_fbc = g4x_enable_fbc;
4317 dev_priv->display.disable_fbc = g4x_disable_fbc; 4817 dev_priv->display.disable_fbc = g4x_disable_fbc;
4318 } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { 4818 } else if (IS_I965GM(dev)) {
4319 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 4819 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4320 dev_priv->display.enable_fbc = i8xx_enable_fbc; 4820 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4321 dev_priv->display.disable_fbc = i8xx_disable_fbc; 4821 dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4324,13 +4824,13 @@ static void intel_init_display(struct drm_device *dev)
4324 } 4824 }
4325 4825
4326 /* Returns the core display clock speed */ 4826 /* Returns the core display clock speed */
4327 if (IS_I945G(dev)) 4827 if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
4328 dev_priv->display.get_display_clock_speed = 4828 dev_priv->display.get_display_clock_speed =
4329 i945_get_display_clock_speed; 4829 i945_get_display_clock_speed;
4330 else if (IS_I915G(dev)) 4830 else if (IS_I915G(dev))
4331 dev_priv->display.get_display_clock_speed = 4831 dev_priv->display.get_display_clock_speed =
4332 i915_get_display_clock_speed; 4832 i915_get_display_clock_speed;
4333 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) 4833 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
4334 dev_priv->display.get_display_clock_speed = 4834 dev_priv->display.get_display_clock_speed =
4335 i9xx_misc_get_display_clock_speed; 4835 i9xx_misc_get_display_clock_speed;
4336 else if (IS_I915GM(dev)) 4836 else if (IS_I915GM(dev))
@@ -4339,7 +4839,7 @@ static void intel_init_display(struct drm_device *dev)
4339 else if (IS_I865G(dev)) 4839 else if (IS_I865G(dev))
4340 dev_priv->display.get_display_clock_speed = 4840 dev_priv->display.get_display_clock_speed =
4341 i865_get_display_clock_speed; 4841 i865_get_display_clock_speed;
4342 else if (IS_I855(dev)) 4842 else if (IS_I85X(dev))
4343 dev_priv->display.get_display_clock_speed = 4843 dev_priv->display.get_display_clock_speed =
4344 i855_get_display_clock_speed; 4844 i855_get_display_clock_speed;
4345 else /* 852, 830 */ 4845 else /* 852, 830 */
@@ -4347,23 +4847,24 @@ static void intel_init_display(struct drm_device *dev)
4347 i830_get_display_clock_speed; 4847 i830_get_display_clock_speed;
4348 4848
4349 /* For FIFO watermark updates */ 4849 /* For FIFO watermark updates */
4350 if (IS_IGDNG(dev)) 4850 if (HAS_PCH_SPLIT(dev))
4351 dev_priv->display.update_wm = NULL; 4851 dev_priv->display.update_wm = NULL;
4352 else if (IS_G4X(dev)) 4852 else if (IS_G4X(dev))
4353 dev_priv->display.update_wm = g4x_update_wm; 4853 dev_priv->display.update_wm = g4x_update_wm;
4354 else if (IS_I965G(dev)) 4854 else if (IS_I965G(dev))
4355 dev_priv->display.update_wm = i965_update_wm; 4855 dev_priv->display.update_wm = i965_update_wm;
4356 else if (IS_I9XX(dev) || IS_MOBILE(dev)) { 4856 else if (IS_I9XX(dev)) {
4357 dev_priv->display.update_wm = i9xx_update_wm; 4857 dev_priv->display.update_wm = i9xx_update_wm;
4358 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 4858 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4859 } else if (IS_I85X(dev)) {
4860 dev_priv->display.update_wm = i9xx_update_wm;
4861 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4359 } else { 4862 } else {
4360 if (IS_I85X(dev)) 4863 dev_priv->display.update_wm = i830_update_wm;
4361 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 4864 if (IS_845G(dev))
4362 else if (IS_845G(dev))
4363 dev_priv->display.get_fifo_size = i845_get_fifo_size; 4865 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4364 else 4866 else
4365 dev_priv->display.get_fifo_size = i830_get_fifo_size; 4867 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4366 dev_priv->display.update_wm = i830_update_wm;
4367 } 4868 }
4368} 4869}
4369 4870
@@ -4403,14 +4904,9 @@ void intel_modeset_init(struct drm_device *dev)
4403 num_pipe = 2; 4904 num_pipe = 2;
4404 else 4905 else
4405 num_pipe = 1; 4906 num_pipe = 1;
4406 DRM_DEBUG("%d display pipe%s available.\n", 4907 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4407 num_pipe, num_pipe > 1 ? "s" : ""); 4908 num_pipe, num_pipe > 1 ? "s" : "");
4408 4909
4409 if (IS_I85X(dev))
4410 pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
4411 else if (IS_I9XX(dev) || IS_G4X(dev))
4412 pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
4413
4414 for (i = 0; i < num_pipe; i++) { 4910 for (i = 0; i < num_pipe; i++) {
4415 intel_crtc_init(dev, i); 4911 intel_crtc_init(dev, i);
4416 } 4912 }
@@ -4419,9 +4915,21 @@ void intel_modeset_init(struct drm_device *dev)
4419 4915
4420 intel_init_clock_gating(dev); 4916 intel_init_clock_gating(dev);
4421 4917
4918 if (IS_IRONLAKE_M(dev))
4919 ironlake_enable_drps(dev);
4920
4422 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4921 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4423 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4922 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4424 (unsigned long)dev); 4923 (unsigned long)dev);
4924
4925 intel_setup_overlay(dev);
4926
4927 if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4928 dev_priv->fsb_freq,
4929 dev_priv->mem_freq))
4930 DRM_INFO("failed to find known CxSR latency "
4931 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
4932 dev_priv->fsb_freq, dev_priv->mem_freq);
4425} 4933}
4426 4934
4427void intel_modeset_cleanup(struct drm_device *dev) 4935void intel_modeset_cleanup(struct drm_device *dev)
@@ -4442,14 +4950,26 @@ void intel_modeset_cleanup(struct drm_device *dev)
4442 del_timer_sync(&intel_crtc->idle_timer); 4950 del_timer_sync(&intel_crtc->idle_timer);
4443 } 4951 }
4444 4952
4445 intel_increase_renderclock(dev, false);
4446 del_timer_sync(&dev_priv->idle_timer); 4953 del_timer_sync(&dev_priv->idle_timer);
4447 4954
4448 mutex_unlock(&dev->struct_mutex);
4449
4450 if (dev_priv->display.disable_fbc) 4955 if (dev_priv->display.disable_fbc)
4451 dev_priv->display.disable_fbc(dev); 4956 dev_priv->display.disable_fbc(dev);
4452 4957
4958 if (dev_priv->pwrctx) {
4959 struct drm_i915_gem_object *obj_priv;
4960
4961 obj_priv = to_intel_bo(dev_priv->pwrctx);
4962 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
4963 I915_READ(PWRCTXA);
4964 i915_gem_object_unpin(dev_priv->pwrctx);
4965 drm_gem_object_unreference(dev_priv->pwrctx);
4966 }
4967
4968 if (IS_IRONLAKE_M(dev))
4969 ironlake_disable_drps(dev);
4970
4971 mutex_unlock(&dev->struct_mutex);
4972
4453 drm_mode_config_cleanup(dev); 4973 drm_mode_config_cleanup(dev);
4454} 4974}
4455 4975
@@ -4459,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4459*/ 4979*/
4460struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 4980struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
4461{ 4981{
4462 struct intel_output *intel_output = to_intel_output(connector); 4982 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4463 4983
4464 return &intel_output->enc; 4984 return &intel_encoder->enc;
4465} 4985}
4466 4986
4467/* 4987/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9b..77e40cfcf216 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "drm.h" 31#include "drm.h"
31#include "drm_crtc.h" 32#include "drm_crtc.h"
@@ -33,7 +34,8 @@
33#include "intel_drv.h" 34#include "intel_drv.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
36#include "intel_dp.h" 37#include "drm_dp_helper.h"
38
37 39
38#define DP_LINK_STATUS_SIZE 6 40#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 41#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -53,23 +55,23 @@ struct intel_dp_priv {
53 uint8_t link_bw; 55 uint8_t link_bw;
54 uint8_t lane_count; 56 uint8_t lane_count;
55 uint8_t dpcd[4]; 57 uint8_t dpcd[4];
56 struct intel_output *intel_output; 58 struct intel_encoder *intel_encoder;
57 struct i2c_adapter adapter; 59 struct i2c_adapter adapter;
58 struct i2c_algo_dp_aux_data algo; 60 struct i2c_algo_dp_aux_data algo;
59}; 61};
60 62
61static void 63static void
62intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 64intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
63 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); 65 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
64 66
65static void 67static void
66intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); 68intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
67 69
68void 70void
69intel_edp_link_config (struct intel_output *intel_output, 71intel_edp_link_config (struct intel_encoder *intel_encoder,
70 int *lane_num, int *link_bw) 72 int *lane_num, int *link_bw)
71{ 73{
72 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 74 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
73 75
74 *lane_num = dp_priv->lane_count; 76 *lane_num = dp_priv->lane_count;
75 if (dp_priv->link_bw == DP_LINK_BW_1_62) 77 if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -79,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
79} 81}
80 82
81static int 83static int
82intel_dp_max_lane_count(struct intel_output *intel_output) 84intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
83{ 85{
84 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 86 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
85 int max_lane_count = 4; 87 int max_lane_count = 4;
86 88
87 if (dp_priv->dpcd[0] >= 0x11) { 89 if (dp_priv->dpcd[0] >= 0x11) {
@@ -97,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
97} 99}
98 100
99static int 101static int
100intel_dp_max_link_bw(struct intel_output *intel_output) 102intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
101{ 103{
102 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 104 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
103 int max_link_bw = dp_priv->dpcd[1]; 105 int max_link_bw = dp_priv->dpcd[1];
104 106
105 switch (max_link_bw) { 107 switch (max_link_bw) {
@@ -124,20 +126,27 @@ intel_dp_link_clock(uint8_t link_bw)
124 126
125/* I think this is a fiction */ 127/* I think this is a fiction */
126static int 128static int
127intel_dp_link_required(int pixel_clock) 129intel_dp_link_required(struct drm_device *dev,
130 struct intel_encoder *intel_encoder, int pixel_clock)
128{ 131{
129 return pixel_clock * 3; 132 struct drm_i915_private *dev_priv = dev->dev_private;
133
134 if (IS_eDP(intel_encoder))
135 return (pixel_clock * dev_priv->edp_bpp) / 8;
136 else
137 return pixel_clock * 3;
130} 138}
131 139
132static int 140static int
133intel_dp_mode_valid(struct drm_connector *connector, 141intel_dp_mode_valid(struct drm_connector *connector,
134 struct drm_display_mode *mode) 142 struct drm_display_mode *mode)
135{ 143{
136 struct intel_output *intel_output = to_intel_output(connector); 144 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
137 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
138 int max_lanes = intel_dp_max_lane_count(intel_output); 146 int max_lanes = intel_dp_max_lane_count(intel_encoder);
139 147
140 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) 148 if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
149 > max_link_clock * max_lanes)
141 return MODE_CLOCK_HIGH; 150 return MODE_CLOCK_HIGH;
142 151
143 if (mode->clock < 10000) 152 if (mode->clock < 10000)
@@ -200,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
200} 209}
201 210
202static int 211static int
203intel_dp_aux_ch(struct intel_output *intel_output, 212intel_dp_aux_ch(struct intel_encoder *intel_encoder,
204 uint8_t *send, int send_bytes, 213 uint8_t *send, int send_bytes,
205 uint8_t *recv, int recv_size) 214 uint8_t *recv, int recv_size)
206{ 215{
207 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 216 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
208 uint32_t output_reg = dp_priv->output_reg; 217 uint32_t output_reg = dp_priv->output_reg;
209 struct drm_device *dev = intel_output->base.dev; 218 struct drm_device *dev = intel_encoder->base.dev;
210 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = dev->dev_private;
211 uint32_t ch_ctl = output_reg + 0x10; 220 uint32_t ch_ctl = output_reg + 0x10;
212 uint32_t ch_data = ch_ctl + 4; 221 uint32_t ch_data = ch_ctl + 4;
@@ -221,10 +230,10 @@ intel_dp_aux_ch(struct intel_output *intel_output,
221 * and would like to run at 2MHz. So, take the 230 * and would like to run at 2MHz. So, take the
222 * hrawclk value and divide by 2 and use that 231 * hrawclk value and divide by 2 and use that
223 */ 232 */
224 if (IS_eDP(intel_output)) 233 if (IS_eDP(intel_encoder))
225 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 234 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
226 else if (IS_IGDNG(dev)) 235 else if (HAS_PCH_SPLIT(dev))
227 aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ 236 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
228 else 237 else
229 aux_clock_divider = intel_hrawclk(dev) / 2; 238 aux_clock_divider = intel_hrawclk(dev) / 2;
230 239
@@ -282,7 +291,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
282 /* Timeouts occur when the device isn't connected, so they're 291 /* Timeouts occur when the device isn't connected, so they're
283 * "normal" -- don't fill the kernel log with these */ 292 * "normal" -- don't fill the kernel log with these */
284 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 293 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
285 DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); 294 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
286 return -ETIMEDOUT; 295 return -ETIMEDOUT;
287 } 296 }
288 297
@@ -304,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
304 313
305/* Write data to the aux channel in native mode */ 314/* Write data to the aux channel in native mode */
306static int 315static int
307intel_dp_aux_native_write(struct intel_output *intel_output, 316intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
308 uint16_t address, uint8_t *send, int send_bytes) 317 uint16_t address, uint8_t *send, int send_bytes)
309{ 318{
310 int ret; 319 int ret;
@@ -321,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
321 memcpy(&msg[4], send, send_bytes); 330 memcpy(&msg[4], send, send_bytes);
322 msg_bytes = send_bytes + 4; 331 msg_bytes = send_bytes + 4;
323 for (;;) { 332 for (;;) {
324 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); 333 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
325 if (ret < 0) 334 if (ret < 0)
326 return ret; 335 return ret;
327 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 336 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -336,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
336 345
337/* Write a single byte to the aux channel in native mode */ 346/* Write a single byte to the aux channel in native mode */
338static int 347static int
339intel_dp_aux_native_write_1(struct intel_output *intel_output, 348intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
340 uint16_t address, uint8_t byte) 349 uint16_t address, uint8_t byte)
341{ 350{
342 return intel_dp_aux_native_write(intel_output, address, &byte, 1); 351 return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
343} 352}
344 353
345/* read bytes from a native aux channel */ 354/* read bytes from a native aux channel */
346static int 355static int
347intel_dp_aux_native_read(struct intel_output *intel_output, 356intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
348 uint16_t address, uint8_t *recv, int recv_bytes) 357 uint16_t address, uint8_t *recv, int recv_bytes)
349{ 358{
350 uint8_t msg[4]; 359 uint8_t msg[4];
@@ -363,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
363 reply_bytes = recv_bytes + 1; 372 reply_bytes = recv_bytes + 1;
364 373
365 for (;;) { 374 for (;;) {
366 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, 375 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
367 reply, reply_bytes); 376 reply, reply_bytes);
368 if (ret == 0) 377 if (ret == 0)
369 return -EPROTO; 378 return -EPROTO;
@@ -382,23 +391,83 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
382} 391}
383 392
384static int 393static int
385intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, 394intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
386 uint8_t *send, int send_bytes, 395 uint8_t write_byte, uint8_t *read_byte)
387 uint8_t *recv, int recv_bytes)
388{ 396{
397 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
389 struct intel_dp_priv *dp_priv = container_of(adapter, 398 struct intel_dp_priv *dp_priv = container_of(adapter,
390 struct intel_dp_priv, 399 struct intel_dp_priv,
391 adapter); 400 adapter);
392 struct intel_output *intel_output = dp_priv->intel_output; 401 struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
402 uint16_t address = algo_data->address;
403 uint8_t msg[5];
404 uint8_t reply[2];
405 int msg_bytes;
406 int reply_bytes;
407 int ret;
393 408
394 return intel_dp_aux_ch(intel_output, 409 /* Set up the command byte */
395 send, send_bytes, recv, recv_bytes); 410 if (mode & MODE_I2C_READ)
411 msg[0] = AUX_I2C_READ << 4;
412 else
413 msg[0] = AUX_I2C_WRITE << 4;
414
415 if (!(mode & MODE_I2C_STOP))
416 msg[0] |= AUX_I2C_MOT << 4;
417
418 msg[1] = address >> 8;
419 msg[2] = address;
420
421 switch (mode) {
422 case MODE_I2C_WRITE:
423 msg[3] = 0;
424 msg[4] = write_byte;
425 msg_bytes = 5;
426 reply_bytes = 1;
427 break;
428 case MODE_I2C_READ:
429 msg[3] = 0;
430 msg_bytes = 4;
431 reply_bytes = 2;
432 break;
433 default:
434 msg_bytes = 3;
435 reply_bytes = 1;
436 break;
437 }
438
439 for (;;) {
440 ret = intel_dp_aux_ch(intel_encoder,
441 msg, msg_bytes,
442 reply, reply_bytes);
443 if (ret < 0) {
444 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
445 return ret;
446 }
447 switch (reply[0] & AUX_I2C_REPLY_MASK) {
448 case AUX_I2C_REPLY_ACK:
449 if (mode == MODE_I2C_READ) {
450 *read_byte = reply[1];
451 }
452 return reply_bytes - 1;
453 case AUX_I2C_REPLY_NACK:
454 DRM_DEBUG_KMS("aux_ch nack\n");
455 return -EREMOTEIO;
456 case AUX_I2C_REPLY_DEFER:
457 DRM_DEBUG_KMS("aux_ch defer\n");
458 udelay(100);
459 break;
460 default:
461 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
462 return -EREMOTEIO;
463 }
464 }
396} 465}
397 466
398static int 467static int
399intel_dp_i2c_init(struct intel_output *intel_output, const char *name) 468intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
400{ 469{
401 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 470 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
402 471
403 DRM_DEBUG_KMS("i2c_init %s\n", name); 472 DRM_DEBUG_KMS("i2c_init %s\n", name);
404 dp_priv->algo.running = false; 473 dp_priv->algo.running = false;
@@ -411,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
411 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
412 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
413 dp_priv->adapter.algo_data = &dp_priv->algo; 482 dp_priv->adapter.algo_data = &dp_priv->algo;
414 dp_priv->adapter.dev.parent = &intel_output->base.kdev; 483 dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
415 484
416 return i2c_dp_aux_add_bus(&dp_priv->adapter); 485 return i2c_dp_aux_add_bus(&dp_priv->adapter);
417} 486}
@@ -420,22 +489,24 @@ static bool
420intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, 489intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
421 struct drm_display_mode *adjusted_mode) 490 struct drm_display_mode *adjusted_mode)
422{ 491{
423 struct intel_output *intel_output = enc_to_intel_output(encoder); 492 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
424 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 493 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
425 int lane_count, clock; 494 int lane_count, clock;
426 int max_lane_count = intel_dp_max_lane_count(intel_output); 495 int max_lane_count = intel_dp_max_lane_count(intel_encoder);
427 int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; 496 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
428 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 497 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
429 498
430 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 499 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
431 for (clock = 0; clock <= max_clock; clock++) { 500 for (clock = 0; clock <= max_clock; clock++) {
432 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 501 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
433 502
434 if (intel_dp_link_required(mode->clock) <= link_avail) { 503 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
504 <= link_avail) {
435 dp_priv->link_bw = bws[clock]; 505 dp_priv->link_bw = bws[clock];
436 dp_priv->lane_count = lane_count; 506 dp_priv->lane_count = lane_count;
437 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 507 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
438 DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", 508 DRM_DEBUG_KMS("Display port link bw %02x lane "
509 "count %d clock %d\n",
439 dp_priv->link_bw, dp_priv->lane_count, 510 dp_priv->link_bw, dp_priv->lane_count,
440 adjusted_mode->clock); 511 adjusted_mode->clock);
441 return true; 512 return true;
@@ -491,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
491 struct intel_dp_m_n m_n; 562 struct intel_dp_m_n m_n;
492 563
493 /* 564 /*
494 * Find the lane count in the intel_output private 565 * Find the lane count in the intel_encoder private
495 */ 566 */
496 list_for_each_entry(connector, &mode_config->connector_list, head) { 567 list_for_each_entry(connector, &mode_config->connector_list, head) {
497 struct intel_output *intel_output = to_intel_output(connector); 568 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
498 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 569 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
499 570
500 if (!connector->encoder || connector->encoder->crtc != crtc) 571 if (!connector->encoder || connector->encoder->crtc != crtc)
501 continue; 572 continue;
502 573
503 if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { 574 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
504 lane_count = dp_priv->lane_count; 575 lane_count = dp_priv->lane_count;
505 break; 576 break;
506 } 577 }
@@ -514,7 +585,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
514 intel_dp_compute_m_n(3, lane_count, 585 intel_dp_compute_m_n(3, lane_count,
515 mode->clock, adjusted_mode->clock, &m_n); 586 mode->clock, adjusted_mode->clock, &m_n);
516 587
517 if (IS_IGDNG(dev)) { 588 if (HAS_PCH_SPLIT(dev)) {
518 if (intel_crtc->pipe == 0) { 589 if (intel_crtc->pipe == 0) {
519 I915_WRITE(TRANSA_DATA_M1, 590 I915_WRITE(TRANSA_DATA_M1,
520 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 591 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -555,9 +626,9 @@ static void
555intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
556 struct drm_display_mode *adjusted_mode) 627 struct drm_display_mode *adjusted_mode)
557{ 628{
558 struct intel_output *intel_output = enc_to_intel_output(encoder); 629 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
559 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 630 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
560 struct drm_crtc *crtc = intel_output->enc.crtc; 631 struct drm_crtc *crtc = intel_encoder->enc.crtc;
561 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
562 633
563 dp_priv->DP = (DP_LINK_TRAIN_OFF | 634 dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -596,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
596 if (intel_crtc->pipe == 1) 667 if (intel_crtc->pipe == 1)
597 dp_priv->DP |= DP_PIPEB_SELECT; 668 dp_priv->DP |= DP_PIPEB_SELECT;
598 669
599 if (IS_eDP(intel_output)) { 670 if (IS_eDP(intel_encoder)) {
600 /* don't miss out required setting for eDP */ 671 /* don't miss out required setting for eDP */
601 dp_priv->DP |= DP_PLL_ENABLE; 672 dp_priv->DP |= DP_PLL_ENABLE;
602 if (adjusted_mode->clock < 200000) 673 if (adjusted_mode->clock < 200000)
@@ -606,23 +677,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
606 } 677 }
607} 678}
608 679
609static void igdng_edp_backlight_on (struct drm_device *dev) 680static void ironlake_edp_backlight_on (struct drm_device *dev)
610{ 681{
611 struct drm_i915_private *dev_priv = dev->dev_private; 682 struct drm_i915_private *dev_priv = dev->dev_private;
612 u32 pp; 683 u32 pp;
613 684
614 DRM_DEBUG("\n"); 685 DRM_DEBUG_KMS("\n");
615 pp = I915_READ(PCH_PP_CONTROL); 686 pp = I915_READ(PCH_PP_CONTROL);
616 pp |= EDP_BLC_ENABLE; 687 pp |= EDP_BLC_ENABLE;
617 I915_WRITE(PCH_PP_CONTROL, pp); 688 I915_WRITE(PCH_PP_CONTROL, pp);
618} 689}
619 690
620static void igdng_edp_backlight_off (struct drm_device *dev) 691static void ironlake_edp_backlight_off (struct drm_device *dev)
621{ 692{
622 struct drm_i915_private *dev_priv = dev->dev_private; 693 struct drm_i915_private *dev_priv = dev->dev_private;
623 u32 pp; 694 u32 pp;
624 695
625 DRM_DEBUG("\n"); 696 DRM_DEBUG_KMS("\n");
626 pp = I915_READ(PCH_PP_CONTROL); 697 pp = I915_READ(PCH_PP_CONTROL);
627 pp &= ~EDP_BLC_ENABLE; 698 pp &= ~EDP_BLC_ENABLE;
628 I915_WRITE(PCH_PP_CONTROL, pp); 699 I915_WRITE(PCH_PP_CONTROL, pp);
@@ -631,23 +702,23 @@ static void igdng_edp_backlight_off (struct drm_device *dev)
631static void 702static void
632intel_dp_dpms(struct drm_encoder *encoder, int mode) 703intel_dp_dpms(struct drm_encoder *encoder, int mode)
633{ 704{
634 struct intel_output *intel_output = enc_to_intel_output(encoder); 705 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
635 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 706 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
636 struct drm_device *dev = intel_output->base.dev; 707 struct drm_device *dev = intel_encoder->base.dev;
637 struct drm_i915_private *dev_priv = dev->dev_private; 708 struct drm_i915_private *dev_priv = dev->dev_private;
638 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 709 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
639 710
640 if (mode != DRM_MODE_DPMS_ON) { 711 if (mode != DRM_MODE_DPMS_ON) {
641 if (dp_reg & DP_PORT_EN) { 712 if (dp_reg & DP_PORT_EN) {
642 intel_dp_link_down(intel_output, dp_priv->DP); 713 intel_dp_link_down(intel_encoder, dp_priv->DP);
643 if (IS_eDP(intel_output)) 714 if (IS_eDP(intel_encoder))
644 igdng_edp_backlight_off(dev); 715 ironlake_edp_backlight_off(dev);
645 } 716 }
646 } else { 717 } else {
647 if (!(dp_reg & DP_PORT_EN)) { 718 if (!(dp_reg & DP_PORT_EN)) {
648 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 719 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
649 if (IS_eDP(intel_output)) 720 if (IS_eDP(intel_encoder))
650 igdng_edp_backlight_on(dev); 721 ironlake_edp_backlight_on(dev);
651 } 722 }
652 } 723 }
653 dp_priv->dpms_mode = mode; 724 dp_priv->dpms_mode = mode;
@@ -658,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
658 * link status information 729 * link status information
659 */ 730 */
660static bool 731static bool
661intel_dp_get_link_status(struct intel_output *intel_output, 732intel_dp_get_link_status(struct intel_encoder *intel_encoder,
662 uint8_t link_status[DP_LINK_STATUS_SIZE]) 733 uint8_t link_status[DP_LINK_STATUS_SIZE])
663{ 734{
664 int ret; 735 int ret;
665 736
666 ret = intel_dp_aux_native_read(intel_output, 737 ret = intel_dp_aux_native_read(intel_encoder,
667 DP_LANE0_1_STATUS, 738 DP_LANE0_1_STATUS,
668 link_status, DP_LINK_STATUS_SIZE); 739 link_status, DP_LINK_STATUS_SIZE);
669 if (ret != DP_LINK_STATUS_SIZE) 740 if (ret != DP_LINK_STATUS_SIZE)
@@ -681,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
681static void 752static void
682intel_dp_save(struct drm_connector *connector) 753intel_dp_save(struct drm_connector *connector)
683{ 754{
684 struct intel_output *intel_output = to_intel_output(connector); 755 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
685 struct drm_device *dev = intel_output->base.dev; 756 struct drm_device *dev = intel_encoder->base.dev;
686 struct drm_i915_private *dev_priv = dev->dev_private; 757 struct drm_i915_private *dev_priv = dev->dev_private;
687 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 758 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
688 759
689 dp_priv->save_DP = I915_READ(dp_priv->output_reg); 760 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
690 intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, 761 intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
691 dp_priv->save_link_configuration, 762 dp_priv->save_link_configuration,
692 sizeof (dp_priv->save_link_configuration)); 763 sizeof (dp_priv->save_link_configuration));
693} 764}
@@ -754,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
754} 825}
755 826
756static void 827static void
757intel_get_adjust_train(struct intel_output *intel_output, 828intel_get_adjust_train(struct intel_encoder *intel_encoder,
758 uint8_t link_status[DP_LINK_STATUS_SIZE], 829 uint8_t link_status[DP_LINK_STATUS_SIZE],
759 int lane_count, 830 int lane_count,
760 uint8_t train_set[4]) 831 uint8_t train_set[4])
@@ -871,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
871} 942}
872 943
873static bool 944static bool
874intel_dp_set_link_train(struct intel_output *intel_output, 945intel_dp_set_link_train(struct intel_encoder *intel_encoder,
875 uint32_t dp_reg_value, 946 uint32_t dp_reg_value,
876 uint8_t dp_train_pat, 947 uint8_t dp_train_pat,
877 uint8_t train_set[4], 948 uint8_t train_set[4],
878 bool first) 949 bool first)
879{ 950{
880 struct drm_device *dev = intel_output->base.dev; 951 struct drm_device *dev = intel_encoder->base.dev;
881 struct drm_i915_private *dev_priv = dev->dev_private; 952 struct drm_i915_private *dev_priv = dev->dev_private;
882 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 953 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
883 int ret; 954 int ret;
884 955
885 I915_WRITE(dp_priv->output_reg, dp_reg_value); 956 I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -887,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
887 if (first) 958 if (first)
888 intel_wait_for_vblank(dev); 959 intel_wait_for_vblank(dev);
889 960
890 intel_dp_aux_native_write_1(intel_output, 961 intel_dp_aux_native_write_1(intel_encoder,
891 DP_TRAINING_PATTERN_SET, 962 DP_TRAINING_PATTERN_SET,
892 dp_train_pat); 963 dp_train_pat);
893 964
894 ret = intel_dp_aux_native_write(intel_output, 965 ret = intel_dp_aux_native_write(intel_encoder,
895 DP_TRAINING_LANE0_SET, train_set, 4); 966 DP_TRAINING_LANE0_SET, train_set, 4);
896 if (ret != 4) 967 if (ret != 4)
897 return false; 968 return false;
@@ -900,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
900} 971}
901 972
902static void 973static void
903intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 974intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
904 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) 975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
905{ 976{
906 struct drm_device *dev = intel_output->base.dev; 977 struct drm_device *dev = intel_encoder->base.dev;
907 struct drm_i915_private *dev_priv = dev->dev_private; 978 struct drm_i915_private *dev_priv = dev->dev_private;
908 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 979 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
909 uint8_t train_set[4]; 980 uint8_t train_set[4];
910 uint8_t link_status[DP_LINK_STATUS_SIZE]; 981 uint8_t link_status[DP_LINK_STATUS_SIZE];
911 int i; 982 int i;
@@ -916,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
916 int tries; 987 int tries;
917 988
918 /* Write the link configuration data */ 989 /* Write the link configuration data */
919 intel_dp_aux_native_write(intel_output, 0x100, 990 intel_dp_aux_native_write(intel_encoder, 0x100,
920 link_configuration, DP_LINK_CONFIGURATION_SIZE); 991 link_configuration, DP_LINK_CONFIGURATION_SIZE);
921 992
922 DP |= DP_PORT_EN; 993 DP |= DP_PORT_EN;
@@ -930,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
930 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
931 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
932 1003
933 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, 1004 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
934 DP_TRAINING_PATTERN_1, train_set, first)) 1005 DP_TRAINING_PATTERN_1, train_set, first))
935 break; 1006 break;
936 first = false; 1007 first = false;
937 /* Set training pattern 1 */ 1008 /* Set training pattern 1 */
938 1009
939 udelay(100); 1010 udelay(100);
940 if (!intel_dp_get_link_status(intel_output, link_status)) 1011 if (!intel_dp_get_link_status(intel_encoder, link_status))
941 break; 1012 break;
942 1013
943 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { 1014 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -962,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
962 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1033 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
963 1034
964 /* Compute new train_set as requested by target */ 1035 /* Compute new train_set as requested by target */
965 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1036 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
966 } 1037 }
967 1038
968 /* channel equalization */ 1039 /* channel equalization */
@@ -974,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
974 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
975 1046
976 /* channel eq pattern */ 1047 /* channel eq pattern */
977 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, 1048 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
978 DP_TRAINING_PATTERN_2, train_set, 1049 DP_TRAINING_PATTERN_2, train_set,
979 false)) 1050 false))
980 break; 1051 break;
981 1052
982 udelay(400); 1053 udelay(400);
983 if (!intel_dp_get_link_status(intel_output, link_status)) 1054 if (!intel_dp_get_link_status(intel_encoder, link_status))
984 break; 1055 break;
985 1056
986 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { 1057 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -993,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
993 break; 1064 break;
994 1065
995 /* Compute new train_set as requested by target */ 1066 /* Compute new train_set as requested by target */
996 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1067 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
997 ++tries; 1068 ++tries;
998 } 1069 }
999 1070
1000 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); 1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
1001 POSTING_READ(dp_priv->output_reg); 1072 POSTING_READ(dp_priv->output_reg);
1002 intel_dp_aux_native_write_1(intel_output, 1073 intel_dp_aux_native_write_1(intel_encoder,
1003 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1004} 1075}
1005 1076
1006static void 1077static void
1007intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) 1078intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1008{ 1079{
1009 struct drm_device *dev = intel_output->base.dev; 1080 struct drm_device *dev = intel_encoder->base.dev;
1010 struct drm_i915_private *dev_priv = dev->dev_private; 1081 struct drm_i915_private *dev_priv = dev->dev_private;
1011 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1082 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1012 1083
1013 DRM_DEBUG("\n"); 1084 DRM_DEBUG_KMS("\n");
1014 1085
1015 if (IS_eDP(intel_output)) { 1086 if (IS_eDP(intel_encoder)) {
1016 DP &= ~DP_PLL_ENABLE; 1087 DP &= ~DP_PLL_ENABLE;
1017 I915_WRITE(dp_priv->output_reg, DP); 1088 I915_WRITE(dp_priv->output_reg, DP);
1018 POSTING_READ(dp_priv->output_reg); 1089 POSTING_READ(dp_priv->output_reg);
@@ -1025,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1025 1096
1026 udelay(17000); 1097 udelay(17000);
1027 1098
1028 if (IS_eDP(intel_output)) 1099 if (IS_eDP(intel_encoder))
1029 DP |= DP_LINK_TRAIN_OFF; 1100 DP |= DP_LINK_TRAIN_OFF;
1030 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); 1101 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
1031 POSTING_READ(dp_priv->output_reg); 1102 POSTING_READ(dp_priv->output_reg);
@@ -1034,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1034static void 1105static void
1035intel_dp_restore(struct drm_connector *connector) 1106intel_dp_restore(struct drm_connector *connector)
1036{ 1107{
1037 struct intel_output *intel_output = to_intel_output(connector); 1108 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1038 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1109 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1039 1110
1040 if (dp_priv->save_DP & DP_PORT_EN) 1111 if (dp_priv->save_DP & DP_PORT_EN)
1041 intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); 1112 intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
1042 else 1113 else
1043 intel_dp_link_down(intel_output, dp_priv->save_DP); 1114 intel_dp_link_down(intel_encoder, dp_priv->save_DP);
1044} 1115}
1045 1116
1046/* 1117/*
@@ -1053,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector)
1053 */ 1124 */
1054 1125
1055static void 1126static void
1056intel_dp_check_link_status(struct intel_output *intel_output) 1127intel_dp_check_link_status(struct intel_encoder *intel_encoder)
1057{ 1128{
1058 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1129 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1059 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1130 uint8_t link_status[DP_LINK_STATUS_SIZE];
1060 1131
1061 if (!intel_output->enc.crtc) 1132 if (!intel_encoder->enc.crtc)
1062 return; 1133 return;
1063 1134
1064 if (!intel_dp_get_link_status(intel_output, link_status)) { 1135 if (!intel_dp_get_link_status(intel_encoder, link_status)) {
1065 intel_dp_link_down(intel_output, dp_priv->DP); 1136 intel_dp_link_down(intel_encoder, dp_priv->DP);
1066 return; 1137 return;
1067 } 1138 }
1068 1139
1069 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) 1140 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
1070 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 1141 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
1071} 1142}
1072 1143
1073static enum drm_connector_status 1144static enum drm_connector_status
1074igdng_dp_detect(struct drm_connector *connector) 1145ironlake_dp_detect(struct drm_connector *connector)
1075{ 1146{
1076 struct intel_output *intel_output = to_intel_output(connector); 1147 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1077 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1148 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1078 enum drm_connector_status status; 1149 enum drm_connector_status status;
1079 1150
1080 status = connector_status_disconnected; 1151 status = connector_status_disconnected;
1081 if (intel_dp_aux_native_read(intel_output, 1152 if (intel_dp_aux_native_read(intel_encoder,
1082 0x000, dp_priv->dpcd, 1153 0x000, dp_priv->dpcd,
1083 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1154 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1084 { 1155 {
@@ -1097,17 +1168,17 @@ igdng_dp_detect(struct drm_connector *connector)
1097static enum drm_connector_status 1168static enum drm_connector_status
1098intel_dp_detect(struct drm_connector *connector) 1169intel_dp_detect(struct drm_connector *connector)
1099{ 1170{
1100 struct intel_output *intel_output = to_intel_output(connector); 1171 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1101 struct drm_device *dev = intel_output->base.dev; 1172 struct drm_device *dev = intel_encoder->base.dev;
1102 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
1103 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1174 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1104 uint32_t temp, bit; 1175 uint32_t temp, bit;
1105 enum drm_connector_status status; 1176 enum drm_connector_status status;
1106 1177
1107 dp_priv->has_audio = false; 1178 dp_priv->has_audio = false;
1108 1179
1109 if (IS_IGDNG(dev)) 1180 if (HAS_PCH_SPLIT(dev))
1110 return igdng_dp_detect(connector); 1181 return ironlake_dp_detect(connector);
1111 1182
1112 temp = I915_READ(PORT_HOTPLUG_EN); 1183 temp = I915_READ(PORT_HOTPLUG_EN);
1113 1184
@@ -1139,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector)
1139 return connector_status_disconnected; 1210 return connector_status_disconnected;
1140 1211
1141 status = connector_status_disconnected; 1212 status = connector_status_disconnected;
1142 if (intel_dp_aux_native_read(intel_output, 1213 if (intel_dp_aux_native_read(intel_encoder,
1143 0x000, dp_priv->dpcd, 1214 0x000, dp_priv->dpcd,
1144 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1215 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1145 { 1216 {
@@ -1151,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector)
1151 1222
1152static int intel_dp_get_modes(struct drm_connector *connector) 1223static int intel_dp_get_modes(struct drm_connector *connector)
1153{ 1224{
1154 struct intel_output *intel_output = to_intel_output(connector); 1225 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1155 struct drm_device *dev = intel_output->base.dev; 1226 struct drm_device *dev = intel_encoder->base.dev;
1156 struct drm_i915_private *dev_priv = dev->dev_private; 1227 struct drm_i915_private *dev_priv = dev->dev_private;
1157 int ret; 1228 int ret;
1158 1229
1159 /* We should parse the EDID data and find out if it has an audio sink 1230 /* We should parse the EDID data and find out if it has an audio sink
1160 */ 1231 */
1161 1232
1162 ret = intel_ddc_get_modes(intel_output); 1233 ret = intel_ddc_get_modes(intel_encoder);
1163 if (ret) 1234 if (ret)
1164 return ret; 1235 return ret;
1165 1236
1166 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1237 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1167 if (IS_eDP(intel_output)) { 1238 if (IS_eDP(intel_encoder)) {
1168 if (dev_priv->panel_fixed_mode != NULL) { 1239 if (dev_priv->panel_fixed_mode != NULL) {
1169 struct drm_display_mode *mode; 1240 struct drm_display_mode *mode;
1170 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1241 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1178,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1178static void 1249static void
1179intel_dp_destroy (struct drm_connector *connector) 1250intel_dp_destroy (struct drm_connector *connector)
1180{ 1251{
1181 struct intel_output *intel_output = to_intel_output(connector); 1252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1182 1253
1183 if (intel_output->i2c_bus) 1254 if (intel_encoder->i2c_bus)
1184 intel_i2c_destroy(intel_output->i2c_bus); 1255 intel_i2c_destroy(intel_encoder->i2c_bus);
1185 drm_sysfs_connector_remove(connector); 1256 drm_sysfs_connector_remove(connector);
1186 drm_connector_cleanup(connector); 1257 drm_connector_cleanup(connector);
1187 kfree(intel_output); 1258 kfree(intel_encoder);
1188} 1259}
1189 1260
1190static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1220,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1220}; 1291};
1221 1292
1222void 1293void
1223intel_dp_hot_plug(struct intel_output *intel_output) 1294intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1224{ 1295{
1225 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1296 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1226 1297
1227 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1298 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1228 intel_dp_check_link_status(intel_output); 1299 intel_dp_check_link_status(intel_encoder);
1229} 1300}
1230 1301
1231void 1302void
@@ -1233,54 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1233{ 1304{
1234 struct drm_i915_private *dev_priv = dev->dev_private; 1305 struct drm_i915_private *dev_priv = dev->dev_private;
1235 struct drm_connector *connector; 1306 struct drm_connector *connector;
1236 struct intel_output *intel_output; 1307 struct intel_encoder *intel_encoder;
1237 struct intel_dp_priv *dp_priv; 1308 struct intel_dp_priv *dp_priv;
1238 const char *name = NULL; 1309 const char *name = NULL;
1239 1310
1240 intel_output = kcalloc(sizeof(struct intel_output) + 1311 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
1241 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1312 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1242 if (!intel_output) 1313 if (!intel_encoder)
1243 return; 1314 return;
1244 1315
1245 dp_priv = (struct intel_dp_priv *)(intel_output + 1); 1316 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1246 1317
1247 connector = &intel_output->base; 1318 connector = &intel_encoder->base;
1248 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1249 DRM_MODE_CONNECTOR_DisplayPort); 1320 DRM_MODE_CONNECTOR_DisplayPort);
1250 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1251 1322
1252 if (output_reg == DP_A) 1323 if (output_reg == DP_A)
1253 intel_output->type = INTEL_OUTPUT_EDP; 1324 intel_encoder->type = INTEL_OUTPUT_EDP;
1254 else 1325 else
1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1326 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1256 1327
1257 if (output_reg == DP_B) 1328 if (output_reg == DP_B || output_reg == PCH_DP_B)
1258 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1329 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1259 else if (output_reg == DP_C) 1330 else if (output_reg == DP_C || output_reg == PCH_DP_C)
1260 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 1331 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1261 else if (output_reg == DP_D) 1332 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1262 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1333 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1263 1334
1264 if (IS_eDP(intel_output)) { 1335 if (IS_eDP(intel_encoder))
1265 intel_output->crtc_mask = (1 << 1); 1336 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1266 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1337
1267 } else 1338 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1268 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1269 connector->interlace_allowed = true; 1339 connector->interlace_allowed = true;
1270 connector->doublescan_allowed = 0; 1340 connector->doublescan_allowed = 0;
1271 1341
1272 dp_priv->intel_output = intel_output; 1342 dp_priv->intel_encoder = intel_encoder;
1273 dp_priv->output_reg = output_reg; 1343 dp_priv->output_reg = output_reg;
1274 dp_priv->has_audio = false; 1344 dp_priv->has_audio = false;
1275 dp_priv->dpms_mode = DRM_MODE_DPMS_ON; 1345 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1276 intel_output->dev_priv = dp_priv; 1346 intel_encoder->dev_priv = dp_priv;
1277 1347
1278 drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, 1348 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
1279 DRM_MODE_ENCODER_TMDS); 1349 DRM_MODE_ENCODER_TMDS);
1280 drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); 1350 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1281 1351
1282 drm_mode_connector_attach_encoder(&intel_output->base, 1352 drm_mode_connector_attach_encoder(&intel_encoder->base,
1283 &intel_output->enc); 1353 &intel_encoder->enc);
1284 drm_sysfs_connector_add(connector); 1354 drm_sysfs_connector_add(connector);
1285 1355
1286 /* Set up the DDC bus. */ 1356 /* Set up the DDC bus. */
@@ -1290,22 +1360,28 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1290 break; 1360 break;
1291 case DP_B: 1361 case DP_B:
1292 case PCH_DP_B: 1362 case PCH_DP_B:
1363 dev_priv->hotplug_supported_mask |=
1364 HDMIB_HOTPLUG_INT_STATUS;
1293 name = "DPDDC-B"; 1365 name = "DPDDC-B";
1294 break; 1366 break;
1295 case DP_C: 1367 case DP_C:
1296 case PCH_DP_C: 1368 case PCH_DP_C:
1369 dev_priv->hotplug_supported_mask |=
1370 HDMIC_HOTPLUG_INT_STATUS;
1297 name = "DPDDC-C"; 1371 name = "DPDDC-C";
1298 break; 1372 break;
1299 case DP_D: 1373 case DP_D:
1300 case PCH_DP_D: 1374 case PCH_DP_D:
1375 dev_priv->hotplug_supported_mask |=
1376 HDMID_HOTPLUG_INT_STATUS;
1301 name = "DPDDC-D"; 1377 name = "DPDDC-D";
1302 break; 1378 break;
1303 } 1379 }
1304 1380
1305 intel_dp_i2c_init(intel_output, name); 1381 intel_dp_i2c_init(intel_encoder, name);
1306 1382
1307 intel_output->ddc_bus = &dp_priv->adapter; 1383 intel_encoder->ddc_bus = &dp_priv->adapter;
1308 intel_output->hot_plug = intel_dp_hot_plug; 1384 intel_encoder->hot_plug = intel_dp_hot_plug;
1309 1385
1310 if (output_reg == DP_A) { 1386 if (output_reg == DP_A) {
1311 /* initialize panel mode from VBT if available for eDP */ 1387 /* initialize panel mode from VBT if available for eDP */
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
deleted file mode 100644
index 2b38054d3b6d..000000000000
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Copyright © 2008 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifndef _INTEL_DP_H_
24#define _INTEL_DP_H_
25
26/* From the VESA DisplayPort spec */
27
28#define AUX_NATIVE_WRITE 0x8
29#define AUX_NATIVE_READ 0x9
30#define AUX_I2C_WRITE 0x0
31#define AUX_I2C_READ 0x1
32#define AUX_I2C_STATUS 0x2
33#define AUX_I2C_MOT 0x4
34
35#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
36#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
37#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
38#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
39
40#define AUX_I2C_REPLY_ACK (0x0 << 6)
41#define AUX_I2C_REPLY_NACK (0x1 << 6)
42#define AUX_I2C_REPLY_DEFER (0x2 << 6)
43#define AUX_I2C_REPLY_MASK (0x3 << 6)
44
45/* AUX CH addresses */
46#define DP_LINK_BW_SET 0x100
47# define DP_LINK_BW_1_62 0x06
48# define DP_LINK_BW_2_7 0x0a
49
50#define DP_LANE_COUNT_SET 0x101
51# define DP_LANE_COUNT_MASK 0x0f
52# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
53
54#define DP_TRAINING_PATTERN_SET 0x102
55
56# define DP_TRAINING_PATTERN_DISABLE 0
57# define DP_TRAINING_PATTERN_1 1
58# define DP_TRAINING_PATTERN_2 2
59# define DP_TRAINING_PATTERN_MASK 0x3
60
61# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
62# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
63# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
64# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
65# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
66
67# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
68# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
69
70# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
71# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
72# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
73# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
74
75#define DP_TRAINING_LANE0_SET 0x103
76#define DP_TRAINING_LANE1_SET 0x104
77#define DP_TRAINING_LANE2_SET 0x105
78#define DP_TRAINING_LANE3_SET 0x106
79
80# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
81# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
82# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
83# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
84# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
85# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
86# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
87
88# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
89# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
90# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
91# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
92# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
93
94# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
95# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
96
97#define DP_DOWNSPREAD_CTRL 0x107
98# define DP_SPREAD_AMP_0_5 (1 << 4)
99
100#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
101# define DP_SET_ANSI_8B10B (1 << 0)
102
103#define DP_LANE0_1_STATUS 0x202
104#define DP_LANE2_3_STATUS 0x203
105
106# define DP_LANE_CR_DONE (1 << 0)
107# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
108# define DP_LANE_SYMBOL_LOCKED (1 << 2)
109
110#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
111
112#define DP_INTERLANE_ALIGN_DONE (1 << 0)
113#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
114#define DP_LINK_STATUS_UPDATED (1 << 7)
115
116#define DP_SINK_STATUS 0x205
117
118#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
119#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
120
121#define DP_ADJUST_REQUEST_LANE0_1 0x206
122#define DP_ADJUST_REQUEST_LANE2_3 0x207
123
124#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
125#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
126#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
127#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
128#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
129#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
130#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
131#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
132
133struct i2c_algo_dp_aux_data {
134 bool running;
135 u16 address;
136 int (*aux_ch) (struct i2c_adapter *adapter,
137 uint8_t *send, int send_bytes,
138 uint8_t *recv, int recv_bytes);
139};
140
141int
142i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
143
144#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
deleted file mode 100644
index a63b6f57d2d4..000000000000
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * Copyright © 2009 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/i2c.h>
31#include "intel_dp.h"
32#include "drmP.h"
33
34/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
35
36#define MODE_I2C_START 1
37#define MODE_I2C_WRITE 2
38#define MODE_I2C_READ 4
39#define MODE_I2C_STOP 8
40
41static int
42i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
43 uint8_t write_byte, uint8_t *read_byte)
44{
45 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
46 uint16_t address = algo_data->address;
47 uint8_t msg[5];
48 uint8_t reply[2];
49 int msg_bytes;
50 int reply_bytes;
51 int ret;
52
53 /* Set up the command byte */
54 if (mode & MODE_I2C_READ)
55 msg[0] = AUX_I2C_READ << 4;
56 else
57 msg[0] = AUX_I2C_WRITE << 4;
58
59 if (!(mode & MODE_I2C_STOP))
60 msg[0] |= AUX_I2C_MOT << 4;
61
62 msg[1] = address >> 8;
63 msg[2] = address;
64
65 switch (mode) {
66 case MODE_I2C_WRITE:
67 msg[3] = 0;
68 msg[4] = write_byte;
69 msg_bytes = 5;
70 reply_bytes = 1;
71 break;
72 case MODE_I2C_READ:
73 msg[3] = 0;
74 msg_bytes = 4;
75 reply_bytes = 2;
76 break;
77 default:
78 msg_bytes = 3;
79 reply_bytes = 1;
80 break;
81 }
82
83 for (;;) {
84 ret = (*algo_data->aux_ch)(adapter,
85 msg, msg_bytes,
86 reply, reply_bytes);
87 if (ret < 0) {
88 DRM_DEBUG("aux_ch failed %d\n", ret);
89 return ret;
90 }
91 switch (reply[0] & AUX_I2C_REPLY_MASK) {
92 case AUX_I2C_REPLY_ACK:
93 if (mode == MODE_I2C_READ) {
94 *read_byte = reply[1];
95 }
96 return reply_bytes - 1;
97 case AUX_I2C_REPLY_NACK:
98 DRM_DEBUG("aux_ch nack\n");
99 return -EREMOTEIO;
100 case AUX_I2C_REPLY_DEFER:
101 DRM_DEBUG("aux_ch defer\n");
102 udelay(100);
103 break;
104 default:
105 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
106 return -EREMOTEIO;
107 }
108 }
109}
110
111/*
112 * I2C over AUX CH
113 */
114
115/*
116 * Send the address. If the I2C link is running, this 'restarts'
117 * the connection with the new address, this is used for doing
118 * a write followed by a read (as needed for DDC)
119 */
120static int
121i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
122{
123 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
124 int mode = MODE_I2C_START;
125 int ret;
126
127 if (reading)
128 mode |= MODE_I2C_READ;
129 else
130 mode |= MODE_I2C_WRITE;
131 algo_data->address = address;
132 algo_data->running = true;
133 ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
134 return ret;
135}
136
137/*
138 * Stop the I2C transaction. This closes out the link, sending
139 * a bare address packet with the MOT bit turned off
140 */
141static void
142i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
143{
144 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
145 int mode = MODE_I2C_STOP;
146
147 if (reading)
148 mode |= MODE_I2C_READ;
149 else
150 mode |= MODE_I2C_WRITE;
151 if (algo_data->running) {
152 (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
153 algo_data->running = false;
154 }
155}
156
157/*
158 * Write a single byte to the current I2C address, the
159 * the I2C link must be running or this returns -EIO
160 */
161static int
162i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
163{
164 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
165 int ret;
166
167 if (!algo_data->running)
168 return -EIO;
169
170 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
171 return ret;
172}
173
174/*
175 * Read a single byte from the current I2C address, the
176 * I2C link must be running or this returns -EIO
177 */
178static int
179i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
180{
181 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
182 int ret;
183
184 if (!algo_data->running)
185 return -EIO;
186
187 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
188 return ret;
189}
190
191static int
192i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
193 struct i2c_msg *msgs,
194 int num)
195{
196 int ret = 0;
197 bool reading = false;
198 int m;
199 int b;
200
201 for (m = 0; m < num; m++) {
202 u16 len = msgs[m].len;
203 u8 *buf = msgs[m].buf;
204 reading = (msgs[m].flags & I2C_M_RD) != 0;
205 ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
206 if (ret < 0)
207 break;
208 if (reading) {
209 for (b = 0; b < len; b++) {
210 ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
211 if (ret < 0)
212 break;
213 }
214 } else {
215 for (b = 0; b < len; b++) {
216 ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
217 if (ret < 0)
218 break;
219 }
220 }
221 if (ret < 0)
222 break;
223 }
224 if (ret >= 0)
225 ret = num;
226 i2c_algo_dp_aux_stop(adapter, reading);
227 DRM_DEBUG("dp_aux_xfer return %d\n", ret);
228 return ret;
229}
230
231static u32
232i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
233{
234 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
235 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
236 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
237 I2C_FUNC_10BIT_ADDR;
238}
239
240static const struct i2c_algorithm i2c_dp_aux_algo = {
241 .master_xfer = i2c_algo_dp_aux_xfer,
242 .functionality = i2c_algo_dp_aux_functionality,
243};
244
245static void
246i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
247{
248 (void) i2c_algo_dp_aux_address(adapter, 0, false);
249 (void) i2c_algo_dp_aux_stop(adapter, false);
250
251}
252
253static int
254i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
255{
256 adapter->algo = &i2c_dp_aux_algo;
257 adapter->retries = 3;
258 i2c_dp_aux_reset_bus(adapter);
259 return 0;
260}
261
262int
263i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
264{
265 int error;
266
267 error = i2c_dp_aux_prepare_bus(adapter);
268 if (error)
269 return error;
270 error = i2c_add_adapter(adapter);
271 return error;
272}
273EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e2..e30253755f12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,7 +95,7 @@ struct intel_framebuffer {
95}; 95};
96 96
97 97
98struct intel_output { 98struct intel_encoder {
99 struct drm_connector base; 99 struct drm_connector base;
100 100
101 struct drm_encoder enc; 101 struct drm_encoder enc;
@@ -105,11 +105,37 @@ struct intel_output {
105 bool load_detect_temp; 105 bool load_detect_temp;
106 bool needs_tv_clock; 106 bool needs_tv_clock;
107 void *dev_priv; 107 void *dev_priv;
108 void (*hot_plug)(struct intel_output *); 108 void (*hot_plug)(struct intel_encoder *);
109 int crtc_mask; 109 int crtc_mask;
110 int clone_mask; 110 int clone_mask;
111}; 111};
112 112
113struct intel_crtc;
114struct intel_overlay {
115 struct drm_device *dev;
116 struct intel_crtc *crtc;
117 struct drm_i915_gem_object *vid_bo;
118 struct drm_i915_gem_object *old_vid_bo;
119 int active;
120 int pfit_active;
121 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
122 u32 color_key;
123 u32 brightness, contrast, saturation;
124 u32 old_xscale, old_yscale;
125 /* register access */
126 u32 flip_addr;
127 struct drm_i915_gem_object *reg_bo;
128 void *virt_addr;
129 /* flip handling */
130 uint32_t last_flip_req;
131 int hw_wedged;
132#define HW_WEDGED 1
133#define NEEDS_WAIT_FOR_FLIP 2
134#define RELEASE_OLD_VID 3
135#define SWITCH_OFF_STAGE_1 4
136#define SWITCH_OFF_STAGE_2 5
137};
138
113struct intel_crtc { 139struct intel_crtc {
114 struct drm_crtc base; 140 struct drm_crtc base;
115 enum pipe pipe; 141 enum pipe pipe;
@@ -121,19 +147,23 @@ struct intel_crtc {
121 bool busy; /* is scanout buffer being updated frequently? */ 147 bool busy; /* is scanout buffer being updated frequently? */
122 struct timer_list idle_timer; 148 struct timer_list idle_timer;
123 bool lowfreq_avail; 149 bool lowfreq_avail;
150 struct intel_overlay *overlay;
151 struct intel_unpin_work *unpin_work;
124}; 152};
125 153
126#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
127#define to_intel_output(x) container_of(x, struct intel_output, base) 155#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
128#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) 156#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
129#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
130 158
131struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
132 const char *name); 160 const char *name);
133void intel_i2c_destroy(struct i2c_adapter *adapter); 161void intel_i2c_destroy(struct i2c_adapter *adapter);
134int intel_ddc_get_modes(struct intel_output *intel_output); 162int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
135extern bool intel_ddc_probe(struct intel_output *intel_output); 163extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
136void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 164void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev);
166
137extern void intel_crt_init(struct drm_device *dev); 167extern void intel_crt_init(struct drm_device *dev);
138extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 168extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
139extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 169extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -145,9 +175,10 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
145void 175void
146intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 176intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
147 struct drm_display_mode *adjusted_mode); 177 struct drm_display_mode *adjusted_mode);
148extern void intel_edp_link_config (struct intel_output *, int *, int *); 178extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
149 179
150 180
181extern int intel_panel_fitter_pipe (struct drm_device *dev);
151extern void intel_crtc_load_lut(struct drm_crtc *crtc); 182extern void intel_crtc_load_lut(struct drm_crtc *crtc);
152extern void intel_encoder_prepare (struct drm_encoder *encoder); 183extern void intel_encoder_prepare (struct drm_encoder *encoder);
153extern void intel_encoder_commit (struct drm_encoder *encoder); 184extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -160,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
160 struct drm_file *file_priv); 191 struct drm_file *file_priv);
161extern void intel_wait_for_vblank(struct drm_device *dev); 192extern void intel_wait_for_vblank(struct drm_device *dev);
162extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
163extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
164 struct drm_display_mode *mode, 195 struct drm_display_mode *mode,
165 int *dpms_mode); 196 int *dpms_mode);
166extern void intel_release_load_detect_pipe(struct intel_output *intel_output, 197extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
167 int dpms_mode); 198 int dpms_mode);
168 199
169extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
@@ -177,10 +208,25 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
177 u16 blue, int regno); 208 u16 blue, int regno);
178extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
179 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev);
180 214
181extern int intel_framebuffer_create(struct drm_device *dev, 215extern int intel_framebuffer_create(struct drm_device *dev,
182 struct drm_mode_fb_cmd *mode_cmd, 216 struct drm_mode_fb_cmd *mode_cmd,
183 struct drm_framebuffer **fb, 217 struct drm_framebuffer **fb,
184 struct drm_gem_object *obj); 218 struct drm_gem_object *obj);
185 219
220extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
221extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
222
223extern void intel_setup_overlay(struct drm_device *dev);
224extern void intel_cleanup_overlay(struct drm_device *dev);
225extern int intel_overlay_switch_off(struct intel_overlay *overlay);
226extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
227 int interruptible);
228extern int intel_overlay_put_image(struct drm_device *dev, void *data,
229 struct drm_file *file_priv);
230extern int intel_overlay_attrs(struct drm_device *dev, void *data,
231 struct drm_file *file_priv);
186#endif /* __INTEL_DRV_H__ */ 232#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a4d2606de778..ebf213c96b9c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -25,6 +25,7 @@
25 * Eric Anholt <eric@anholt.net> 25 * Eric Anholt <eric@anholt.net>
26 */ 26 */
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "drm.h" 30#include "drm.h"
30#include "drm_crtc.h" 31#include "drm_crtc.h"
@@ -79,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
79static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 80static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
80{ 81{
81 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 82 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
82 struct intel_output *intel_output = enc_to_intel_output(encoder); 83 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
83 struct intel_dvo_device *dvo = intel_output->dev_priv; 84 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
84 u32 dvo_reg = dvo->dvo_reg; 85 u32 dvo_reg = dvo->dvo_reg;
85 u32 temp = I915_READ(dvo_reg); 86 u32 temp = I915_READ(dvo_reg);
86 87
@@ -98,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
98static void intel_dvo_save(struct drm_connector *connector) 99static void intel_dvo_save(struct drm_connector *connector)
99{ 100{
100 struct drm_i915_private *dev_priv = connector->dev->dev_private; 101 struct drm_i915_private *dev_priv = connector->dev->dev_private;
101 struct intel_output *intel_output = to_intel_output(connector); 102 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
102 struct intel_dvo_device *dvo = intel_output->dev_priv; 103 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
103 104
104 /* Each output should probably just save the registers it touches, 105 /* Each output should probably just save the registers it touches,
105 * but for now, use more overkill. 106 * but for now, use more overkill.
@@ -114,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector)
114static void intel_dvo_restore(struct drm_connector *connector) 115static void intel_dvo_restore(struct drm_connector *connector)
115{ 116{
116 struct drm_i915_private *dev_priv = connector->dev->dev_private; 117 struct drm_i915_private *dev_priv = connector->dev->dev_private;
117 struct intel_output *intel_output = to_intel_output(connector); 118 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
118 struct intel_dvo_device *dvo = intel_output->dev_priv; 119 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
119 120
120 dvo->dev_ops->restore(dvo); 121 dvo->dev_ops->restore(dvo);
121 122
@@ -127,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector)
127static int intel_dvo_mode_valid(struct drm_connector *connector, 128static int intel_dvo_mode_valid(struct drm_connector *connector,
128 struct drm_display_mode *mode) 129 struct drm_display_mode *mode)
129{ 130{
130 struct intel_output *intel_output = to_intel_output(connector); 131 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
131 struct intel_dvo_device *dvo = intel_output->dev_priv; 132 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
132 133
133 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
134 return MODE_NO_DBLESCAN; 135 return MODE_NO_DBLESCAN;
@@ -149,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
149 struct drm_display_mode *mode, 150 struct drm_display_mode *mode,
150 struct drm_display_mode *adjusted_mode) 151 struct drm_display_mode *adjusted_mode)
151{ 152{
152 struct intel_output *intel_output = enc_to_intel_output(encoder); 153 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
153 struct intel_dvo_device *dvo = intel_output->dev_priv; 154 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
154 155
155 /* If we have timings from the BIOS for the panel, put them in 156 /* If we have timings from the BIOS for the panel, put them in
156 * to the adjusted mode. The CRTC will be set up for this mode, 157 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -185,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
185 struct drm_device *dev = encoder->dev; 186 struct drm_device *dev = encoder->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private; 187 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 188 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
188 struct intel_output *intel_output = enc_to_intel_output(encoder); 189 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
189 struct intel_dvo_device *dvo = intel_output->dev_priv; 190 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
190 int pipe = intel_crtc->pipe; 191 int pipe = intel_crtc->pipe;
191 u32 dvo_val; 192 u32 dvo_val;
192 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; 193 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -240,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
240 */ 241 */
241static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
242{ 243{
243 struct intel_output *intel_output = to_intel_output(connector); 244 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
244 struct intel_dvo_device *dvo = intel_output->dev_priv; 245 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
245 246
246 return dvo->dev_ops->detect(dvo); 247 return dvo->dev_ops->detect(dvo);
247} 248}
248 249
249static int intel_dvo_get_modes(struct drm_connector *connector) 250static int intel_dvo_get_modes(struct drm_connector *connector)
250{ 251{
251 struct intel_output *intel_output = to_intel_output(connector); 252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
252 struct intel_dvo_device *dvo = intel_output->dev_priv; 253 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
253 254
254 /* We should probably have an i2c driver get_modes function for those 255 /* We should probably have an i2c driver get_modes function for those
255 * devices which will have a fixed set of modes determined by the chip 256 * devices which will have a fixed set of modes determined by the chip
256 * (TV-out, for example), but for now with just TMDS and LVDS, 257 * (TV-out, for example), but for now with just TMDS and LVDS,
257 * that's not the case. 258 * that's not the case.
258 */ 259 */
259 intel_ddc_get_modes(intel_output); 260 intel_ddc_get_modes(intel_encoder);
260 if (!list_empty(&connector->probed_modes)) 261 if (!list_empty(&connector->probed_modes))
261 return 1; 262 return 1;
262 263
@@ -274,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
274 275
275static void intel_dvo_destroy (struct drm_connector *connector) 276static void intel_dvo_destroy (struct drm_connector *connector)
276{ 277{
277 struct intel_output *intel_output = to_intel_output(connector); 278 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
278 struct intel_dvo_device *dvo = intel_output->dev_priv; 279 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
279 280
280 if (dvo) { 281 if (dvo) {
281 if (dvo->dev_ops->destroy) 282 if (dvo->dev_ops->destroy)
@@ -285,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector)
285 /* no need, in i830_dvoices[] now */ 286 /* no need, in i830_dvoices[] now */
286 //kfree(dvo); 287 //kfree(dvo);
287 } 288 }
288 if (intel_output->i2c_bus) 289 if (intel_encoder->i2c_bus)
289 intel_i2c_destroy(intel_output->i2c_bus); 290 intel_i2c_destroy(intel_encoder->i2c_bus);
290 if (intel_output->ddc_bus) 291 if (intel_encoder->ddc_bus)
291 intel_i2c_destroy(intel_output->ddc_bus); 292 intel_i2c_destroy(intel_encoder->ddc_bus);
292 drm_sysfs_connector_remove(connector); 293 drm_sysfs_connector_remove(connector);
293 drm_connector_cleanup(connector); 294 drm_connector_cleanup(connector);
294 kfree(intel_output); 295 kfree(intel_encoder);
295} 296}
296 297
297#ifdef RANDR_GET_CRTC_INTERFACE 298#ifdef RANDR_GET_CRTC_INTERFACE
@@ -299,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
299{ 300{
300 struct drm_device *dev = connector->dev; 301 struct drm_device *dev = connector->dev;
301 struct drm_i915_private *dev_priv = dev->dev_private; 302 struct drm_i915_private *dev_priv = dev->dev_private;
302 struct intel_output *intel_output = to_intel_output(connector); 303 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
303 struct intel_dvo_device *dvo = intel_output->dev_priv; 304 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
304 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); 305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
305 306
306 return intel_pipe_to_crtc(pScrn, pipe); 307 return intel_pipe_to_crtc(pScrn, pipe);
@@ -351,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
351{ 352{
352 struct drm_device *dev = connector->dev; 353 struct drm_device *dev = connector->dev;
353 struct drm_i915_private *dev_priv = dev->dev_private; 354 struct drm_i915_private *dev_priv = dev->dev_private;
354 struct intel_output *intel_output = to_intel_output(connector); 355 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
355 struct intel_dvo_device *dvo = intel_output->dev_priv; 356 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
356 uint32_t dvo_reg = dvo->dvo_reg; 357 uint32_t dvo_reg = dvo->dvo_reg;
357 uint32_t dvo_val = I915_READ(dvo_reg); 358 uint32_t dvo_val = I915_READ(dvo_reg);
358 struct drm_display_mode *mode = NULL; 359 struct drm_display_mode *mode = NULL;
@@ -382,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
382 383
383void intel_dvo_init(struct drm_device *dev) 384void intel_dvo_init(struct drm_device *dev)
384{ 385{
385 struct intel_output *intel_output; 386 struct intel_encoder *intel_encoder;
386 struct intel_dvo_device *dvo; 387 struct intel_dvo_device *dvo;
387 struct i2c_adapter *i2cbus = NULL; 388 struct i2c_adapter *i2cbus = NULL;
388 int ret = 0; 389 int ret = 0;
389 int i; 390 int i;
390 int encoder_type = DRM_MODE_ENCODER_NONE; 391 int encoder_type = DRM_MODE_ENCODER_NONE;
391 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); 392 intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
392 if (!intel_output) 393 if (!intel_encoder)
393 return; 394 return;
394 395
395 /* Set up the DDC bus */ 396 /* Set up the DDC bus */
396 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 397 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
397 if (!intel_output->ddc_bus) 398 if (!intel_encoder->ddc_bus)
398 goto free_intel; 399 goto free_intel;
399 400
400 /* Now, try to find a controller */ 401 /* Now, try to find a controller */
401 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
402 struct drm_connector *connector = &intel_output->base; 403 struct drm_connector *connector = &intel_encoder->base;
403 int gpio; 404 int gpio;
404 405
405 dvo = &intel_dvo_devices[i]; 406 dvo = &intel_dvo_devices[i];
@@ -434,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev)
434 if (!ret) 435 if (!ret)
435 continue; 436 continue;
436 437
437 intel_output->type = INTEL_OUTPUT_DVO; 438 intel_encoder->type = INTEL_OUTPUT_DVO;
438 intel_output->crtc_mask = (1 << 0) | (1 << 1); 439 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
439 switch (dvo->type) { 440 switch (dvo->type) {
440 case INTEL_DVO_CHIP_TMDS: 441 case INTEL_DVO_CHIP_TMDS:
441 intel_output->clone_mask = 442 intel_encoder->clone_mask =
442 (1 << INTEL_DVO_TMDS_CLONE_BIT) | 443 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
443 (1 << INTEL_ANALOG_CLONE_BIT); 444 (1 << INTEL_ANALOG_CLONE_BIT);
444 drm_connector_init(dev, connector, 445 drm_connector_init(dev, connector,
@@ -447,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
447 encoder_type = DRM_MODE_ENCODER_TMDS; 448 encoder_type = DRM_MODE_ENCODER_TMDS;
448 break; 449 break;
449 case INTEL_DVO_CHIP_LVDS: 450 case INTEL_DVO_CHIP_LVDS:
450 intel_output->clone_mask = 451 intel_encoder->clone_mask =
451 (1 << INTEL_DVO_LVDS_CLONE_BIT); 452 (1 << INTEL_DVO_LVDS_CLONE_BIT);
452 drm_connector_init(dev, connector, 453 drm_connector_init(dev, connector,
453 &intel_dvo_connector_funcs, 454 &intel_dvo_connector_funcs,
@@ -462,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev)
462 connector->interlace_allowed = false; 463 connector->interlace_allowed = false;
463 connector->doublescan_allowed = false; 464 connector->doublescan_allowed = false;
464 465
465 intel_output->dev_priv = dvo; 466 intel_encoder->dev_priv = dvo;
466 intel_output->i2c_bus = i2cbus; 467 intel_encoder->i2c_bus = i2cbus;
467 468
468 drm_encoder_init(dev, &intel_output->enc, 469 drm_encoder_init(dev, &intel_encoder->enc,
469 &intel_dvo_enc_funcs, encoder_type); 470 &intel_dvo_enc_funcs, encoder_type);
470 drm_encoder_helper_add(&intel_output->enc, 471 drm_encoder_helper_add(&intel_encoder->enc,
471 &intel_dvo_helper_funcs); 472 &intel_dvo_helper_funcs);
472 473
473 drm_mode_connector_attach_encoder(&intel_output->base, 474 drm_mode_connector_attach_encoder(&intel_encoder->base,
474 &intel_output->enc); 475 &intel_encoder->enc);
475 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 476 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
476 /* For our LVDS chipsets, we should hopefully be able 477 /* For our LVDS chipsets, we should hopefully be able
477 * to dig the fixed panel mode out of the BIOS data. 478 * to dig the fixed panel mode out of the BIOS data.
@@ -489,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev)
489 return; 490 return;
490 } 491 }
491 492
492 intel_i2c_destroy(intel_output->ddc_bus); 493 intel_i2c_destroy(intel_encoder->ddc_bus);
493 /* Didn't find a chip, so tear down. */ 494 /* Didn't find a chip, so tear down. */
494 if (i2cbus != NULL) 495 if (i2cbus != NULL)
495 intel_i2c_destroy(i2cbus); 496 intel_i2c_destroy(i2cbus);
496free_intel: 497free_intel:
497 kfree(intel_output); 498 kfree(intel_encoder);
498} 499}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2b0fe54cd92c..8a0b3bcdc7b1 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -30,11 +30,11 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/tty.h> 32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h> 33#include <linux/sysrq.h>
35#include <linux/delay.h> 34#include <linux/delay.h>
36#include <linux/fb.h> 35#include <linux/fb.h>
37#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/vga_switcheroo.h>
38 38
39#include "drmP.h" 39#include "drmP.h"
40#include "drm.h" 40#include "drm.h"
@@ -70,7 +70,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
70 70
71 71
72/** 72/**
73 * Curretly it is assumed that the old framebuffer is reused. 73 * Currently it is assumed that the old framebuffer is reused.
74 * 74 *
75 * LOCKING 75 * LOCKING
76 * caller should hold the mode config lock. 76 * caller should hold the mode config lock.
@@ -144,11 +144,11 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
144 ret = -ENOMEM; 144 ret = -ENOMEM;
145 goto out; 145 goto out;
146 } 146 }
147 obj_priv = fbo->driver_private; 147 obj_priv = to_intel_bo(fbo);
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
@@ -230,10 +230,12 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
230 par->intel_fb = intel_fb; 230 par->intel_fb = intel_fb;
231 231
232 /* To allow resizeing without swapping buffers */ 232 /* To allow resizeing without swapping buffers */
233 DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, 233 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
234 intel_fb->base.height, obj_priv->gtt_offset, fbo); 234 intel_fb->base.width, intel_fb->base.height,
235 obj_priv->gtt_offset, fbo);
235 236
236 mutex_unlock(&dev->struct_mutex); 237 mutex_unlock(&dev->struct_mutex);
238 vga_switcheroo_client_fb_set(dev->pdev, info);
237 return 0; 239 return 0;
238 240
239out_unpin: 241out_unpin:
@@ -249,7 +251,7 @@ int intelfb_probe(struct drm_device *dev)
249{ 251{
250 int ret; 252 int ret;
251 253
252 DRM_DEBUG("\n"); 254 DRM_DEBUG_KMS("\n");
253 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); 255 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
254 return ret; 256 return ret;
255} 257}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1bd..48cade0cf7b1 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/slab.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "drm.h" 33#include "drm.h"
@@ -50,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
50 struct drm_i915_private *dev_priv = dev->dev_private; 51 struct drm_i915_private *dev_priv = dev->dev_private;
51 struct drm_crtc *crtc = encoder->crtc; 52 struct drm_crtc *crtc = encoder->crtc;
52 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 53 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
53 struct intel_output *intel_output = enc_to_intel_output(encoder); 54 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
54 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 55 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
55 u32 sdvox; 56 u32 sdvox;
56 57
57 sdvox = SDVO_ENCODING_HDMI | 58 sdvox = SDVO_ENCODING_HDMI |
@@ -73,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
73{ 74{
74 struct drm_device *dev = encoder->dev; 75 struct drm_device *dev = encoder->dev;
75 struct drm_i915_private *dev_priv = dev->dev_private; 76 struct drm_i915_private *dev_priv = dev->dev_private;
76 struct intel_output *intel_output = enc_to_intel_output(encoder); 77 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 78 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
78 u32 temp; 79 u32 temp;
79 80
80 temp = I915_READ(hdmi_priv->sdvox_reg); 81 temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -82,7 +83,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 83 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 84 * we do this anyway which shows more stable in testing.
84 */ 85 */
85 if (IS_IGDNG(dev)) { 86 if (HAS_PCH_SPLIT(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 87 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 88 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 89 }
@@ -99,7 +100,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 100 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 101 * in first write getting masked.
101 */ 102 */
102 if (IS_IGDNG(dev)) { 103 if (HAS_PCH_SPLIT(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 104 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 105 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 106 }
@@ -109,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector)
109{ 110{
110 struct drm_device *dev = connector->dev; 111 struct drm_device *dev = connector->dev;
111 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = dev->dev_private;
112 struct intel_output *intel_output = to_intel_output(connector); 113 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
113 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 114 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
114 115
115 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); 116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
116} 117}
@@ -119,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector)
119{ 120{
120 struct drm_device *dev = connector->dev; 121 struct drm_device *dev = connector->dev;
121 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct intel_output *intel_output = to_intel_output(connector); 123 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
123 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 124 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
124 125
125 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); 126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
126 POSTING_READ(hdmi_priv->sdvox_reg); 127 POSTING_READ(hdmi_priv->sdvox_reg);
@@ -150,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
150static enum drm_connector_status 151static enum drm_connector_status
151intel_hdmi_detect(struct drm_connector *connector) 152intel_hdmi_detect(struct drm_connector *connector)
152{ 153{
153 struct intel_output *intel_output = to_intel_output(connector); 154 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
154 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 155 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
155 struct edid *edid = NULL; 156 struct edid *edid = NULL;
156 enum drm_connector_status status = connector_status_disconnected; 157 enum drm_connector_status status = connector_status_disconnected;
157 158
158 hdmi_priv->has_hdmi_sink = false; 159 hdmi_priv->has_hdmi_sink = false;
159 edid = drm_get_edid(&intel_output->base, 160 edid = drm_get_edid(&intel_encoder->base,
160 intel_output->ddc_bus); 161 intel_encoder->ddc_bus);
161 162
162 if (edid) { 163 if (edid) {
163 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 164 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
164 status = connector_status_connected; 165 status = connector_status_connected;
165 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
166 } 167 }
167 intel_output->base.display_info.raw_edid = NULL; 168 intel_encoder->base.display_info.raw_edid = NULL;
168 kfree(edid); 169 kfree(edid);
169 } 170 }
170 171
@@ -173,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector)
173 174
174static int intel_hdmi_get_modes(struct drm_connector *connector) 175static int intel_hdmi_get_modes(struct drm_connector *connector)
175{ 176{
176 struct intel_output *intel_output = to_intel_output(connector); 177 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
177 178
178 /* We should parse the EDID data and find out if it's an HDMI sink so 179 /* We should parse the EDID data and find out if it's an HDMI sink so
179 * we can send audio to it. 180 * we can send audio to it.
180 */ 181 */
181 182
182 return intel_ddc_get_modes(intel_output); 183 return intel_ddc_get_modes(intel_encoder);
183} 184}
184 185
185static void intel_hdmi_destroy(struct drm_connector *connector) 186static void intel_hdmi_destroy(struct drm_connector *connector)
186{ 187{
187 struct intel_output *intel_output = to_intel_output(connector); 188 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
188 189
189 if (intel_output->i2c_bus) 190 if (intel_encoder->i2c_bus)
190 intel_i2c_destroy(intel_output->i2c_bus); 191 intel_i2c_destroy(intel_encoder->i2c_bus);
191 drm_sysfs_connector_remove(connector); 192 drm_sysfs_connector_remove(connector);
192 drm_connector_cleanup(connector); 193 drm_connector_cleanup(connector);
193 kfree(intel_output); 194 kfree(intel_encoder);
194} 195}
195 196
196static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -225,63 +226,67 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 226 .destroy = intel_hdmi_enc_destroy,
226}; 227};
227 228
228
229void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 229void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
230{ 230{
231 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct drm_i915_private *dev_priv = dev->dev_private;
232 struct drm_connector *connector; 232 struct drm_connector *connector;
233 struct intel_output *intel_output; 233 struct intel_encoder *intel_encoder;
234 struct intel_hdmi_priv *hdmi_priv; 234 struct intel_hdmi_priv *hdmi_priv;
235 235
236 intel_output = kcalloc(sizeof(struct intel_output) + 236 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_output) 238 if (!intel_encoder)
239 return; 239 return;
240 hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); 240 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
241 241
242 connector = &intel_output->base; 242 connector = &intel_encoder->base;
243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
244 DRM_MODE_CONNECTOR_HDMIA); 244 DRM_MODE_CONNECTOR_HDMIA);
245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
246 246
247 intel_output->type = INTEL_OUTPUT_HDMI; 247 intel_encoder->type = INTEL_OUTPUT_HDMI;
248 248
249 connector->interlace_allowed = 0; 249 connector->interlace_allowed = 0;
250 connector->doublescan_allowed = 0; 250 connector->doublescan_allowed = 0;
251 intel_output->crtc_mask = (1 << 0) | (1 << 1); 251 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
252 252
253 /* Set up the DDC bus. */ 253 /* Set up the DDC bus. */
254 if (sdvox_reg == SDVOB) { 254 if (sdvox_reg == SDVOB) {
255 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 255 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
256 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 256 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
257 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
257 } else if (sdvox_reg == SDVOC) { 258 } else if (sdvox_reg == SDVOC) {
258 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 259 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
259 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 260 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
261 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
260 } else if (sdvox_reg == HDMIB) { 262 } else if (sdvox_reg == HDMIB) {
261 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 263 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
262 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 264 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
263 "HDMIB"); 265 "HDMIB");
266 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
264 } else if (sdvox_reg == HDMIC) { 267 } else if (sdvox_reg == HDMIC) {
265 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 268 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
266 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 269 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
267 "HDMIC"); 270 "HDMIC");
271 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
268 } else if (sdvox_reg == HDMID) { 272 } else if (sdvox_reg == HDMID) {
269 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 273 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
270 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 274 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
271 "HDMID"); 275 "HDMID");
276 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
272 } 277 }
273 if (!intel_output->ddc_bus) 278 if (!intel_encoder->ddc_bus)
274 goto err_connector; 279 goto err_connector;
275 280
276 hdmi_priv->sdvox_reg = sdvox_reg; 281 hdmi_priv->sdvox_reg = sdvox_reg;
277 intel_output->dev_priv = hdmi_priv; 282 intel_encoder->dev_priv = hdmi_priv;
278 283
279 drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, 284 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
280 DRM_MODE_ENCODER_TMDS); 285 DRM_MODE_ENCODER_TMDS);
281 drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); 286 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
282 287
283 drm_mode_connector_attach_encoder(&intel_output->base, 288 drm_mode_connector_attach_encoder(&intel_encoder->base,
284 &intel_output->enc); 289 &intel_encoder->enc);
285 drm_sysfs_connector_add(connector); 290 drm_sysfs_connector_add(connector);
286 291
287 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 292 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -297,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
297 302
298err_connector: 303err_connector:
299 drm_connector_cleanup(connector); 304 drm_connector_cleanup(connector);
300 kfree(intel_output); 305 kfree(intel_encoder);
301 306
302 return; 307 return;
303} 308}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c418..c2649c7df14c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -26,6 +26,7 @@
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 */ 27 */
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include <linux/i2c-id.h> 30#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h> 31#include <linux/i2c-algo-bit.h>
31#include "drmP.h" 32#include "drmP.h"
@@ -39,7 +40,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
39 struct drm_i915_private *dev_priv = dev->dev_private; 40 struct drm_i915_private *dev_priv = dev->dev_private;
40 41
41 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 42 /* When using bit bashing for I2C, this bit needs to be set to 1 */
42 if (!IS_IGD(dev)) 43 if (!IS_PINEVIEW(dev))
43 return; 44 return;
44 if (enable) 45 if (enable)
45 I915_WRITE(DSPCLK_GATE_D, 46 I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +119,23 @@ static void set_data(void *data, int state_high)
118 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 119 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
119} 120}
120 121
122/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
123 * engine, but if the BIOS leaves it enabled, then that can break our use
124 * of the bit-banging I2C interfaces. This is notably the case with the
125 * Mac Mini in EFI mode.
126 */
127void
128intel_i2c_reset_gmbus(struct drm_device *dev)
129{
130 struct drm_i915_private *dev_priv = dev->dev_private;
131
132 if (HAS_PCH_SPLIT(dev)) {
133 I915_WRITE(PCH_GMBUS0, 0);
134 } else {
135 I915_WRITE(GMBUS0, 0);
136 }
137}
138
121/** 139/**
122 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 140 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
123 * @dev: DRM device 141 * @dev: DRM device
@@ -168,6 +186,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
168 if(i2c_bit_add_bus(&chan->adapter)) 186 if(i2c_bit_add_bus(&chan->adapter))
169 goto out_free; 187 goto out_free;
170 188
189 intel_i2c_reset_gmbus(dev);
190
171 /* JJJ: raise SCL and SDA? */ 191 /* JJJ: raise SCL and SDA? */
172 intel_i2c_quirk_set(dev, true); 192 intel_i2c_quirk_set(dev, true);
173 set_data(chan, 1); 193 set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 05598ae10c4b..b66806a37d37 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -30,6 +30,7 @@
30#include <acpi/button.h> 30#include <acpi/button.h>
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/slab.h>
33#include "drmP.h" 34#include "drmP.h"
34#include "drm.h" 35#include "drm.h"
35#include "drm_crtc.h" 36#include "drm_crtc.h"
@@ -56,7 +57,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 58 u32 blc_pwm_ctl, reg;
58 59
59 if (IS_IGDNG(dev)) 60 if (HAS_PCH_SPLIT(dev))
60 reg = BLC_PWM_CPU_CTL; 61 reg = BLC_PWM_CPU_CTL;
61 else 62 else
62 reg = BLC_PWM_CTL; 63 reg = BLC_PWM_CTL;
@@ -74,7 +75,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 75 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 76 u32 reg;
76 77
77 if (IS_IGDNG(dev)) 78 if (HAS_PCH_SPLIT(dev))
78 reg = BLC_PWM_PCH_CTL2; 79 reg = BLC_PWM_PCH_CTL2;
79 else 80 else
80 reg = BLC_PWM_CTL; 81 reg = BLC_PWM_CTL;
@@ -89,17 +90,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
89static void intel_lvds_set_power(struct drm_device *dev, bool on) 90static void intel_lvds_set_power(struct drm_device *dev, bool on)
90{ 91{
91 struct drm_i915_private *dev_priv = dev->dev_private; 92 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 93 u32 pp_status, ctl_reg, status_reg, lvds_reg;
93 94
94 if (IS_IGDNG(dev)) { 95 if (HAS_PCH_SPLIT(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 96 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 97 status_reg = PCH_PP_STATUS;
98 lvds_reg = PCH_LVDS;
97 } else { 99 } else {
98 ctl_reg = PP_CONTROL; 100 ctl_reg = PP_CONTROL;
99 status_reg = PP_STATUS; 101 status_reg = PP_STATUS;
102 lvds_reg = LVDS;
100 } 103 }
101 104
102 if (on) { 105 if (on) {
106 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
107 POSTING_READ(lvds_reg);
108
103 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 109 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
104 POWER_TARGET_ON); 110 POWER_TARGET_ON);
105 do { 111 do {
@@ -115,6 +121,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
115 do { 121 do {
116 pp_status = I915_READ(status_reg); 122 pp_status = I915_READ(status_reg);
117 } while (pp_status & PP_ON); 123 } while (pp_status & PP_ON);
124
125 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
126 POSTING_READ(lvds_reg);
118 } 127 }
119} 128}
120 129
@@ -137,7 +146,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 146 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 147 u32 pwm_ctl_reg;
139 148
140 if (IS_IGDNG(dev)) { 149 if (HAS_PCH_SPLIT(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 150 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 151 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 152 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +183,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 183 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 184 u32 pwm_ctl_reg;
176 185
177 if (IS_IGDNG(dev)) { 186 if (HAS_PCH_SPLIT(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 187 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 188 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 189 pp_ctl_reg = PCH_PP_CONTROL;
@@ -230,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
230 struct drm_i915_private *dev_priv = dev->dev_private; 239 struct drm_i915_private *dev_priv = dev->dev_private;
231 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 240 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
232 struct drm_encoder *tmp_encoder; 241 struct drm_encoder *tmp_encoder;
233 struct intel_output *intel_output = enc_to_intel_output(encoder); 242 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
234 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 243 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
235 u32 pfit_control = 0, pfit_pgm_ratios = 0; 244 u32 pfit_control = 0, pfit_pgm_ratios = 0;
236 int left_border = 0, right_border = 0, top_border = 0; 245 int left_border = 0, right_border = 0, top_border = 0;
237 int bottom_border = 0; 246 int bottom_border = 0;
@@ -297,7 +306,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 306 }
298 307
299 /* full screen scale for now */ 308 /* full screen scale for now */
300 if (IS_IGDNG(dev)) 309 if (HAS_PCH_SPLIT(dev))
301 goto out; 310 goto out;
302 311
303 /* 965+ wants fuzzy fitting */ 312 /* 965+ wants fuzzy fitting */
@@ -327,7 +336,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 336 * to register description and PRM.
328 * Change the value here to see the borders for debugging 337 * Change the value here to see the borders for debugging
329 */ 338 */
330 if (!IS_IGDNG(dev)) { 339 if (!HAS_PCH_SPLIT(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 340 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 341 I915_WRITE(BCLRPAT_B, 0);
333 } 342 }
@@ -548,7 +557,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 557 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 558 u32 reg;
550 559
551 if (IS_IGDNG(dev)) 560 if (HAS_PCH_SPLIT(dev))
552 reg = BLC_PWM_CPU_CTL; 561 reg = BLC_PWM_CPU_CTL;
553 else 562 else
554 reg = BLC_PWM_CTL; 563 reg = BLC_PWM_CTL;
@@ -578,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
578{ 587{
579 struct drm_device *dev = encoder->dev; 588 struct drm_device *dev = encoder->dev;
580 struct drm_i915_private *dev_priv = dev->dev_private; 589 struct drm_i915_private *dev_priv = dev->dev_private;
581 struct intel_output *intel_output = enc_to_intel_output(encoder); 590 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
582 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 591 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
583 592
584 /* 593 /*
585 * The LVDS pin pair will already have been turned on in the 594 * The LVDS pin pair will already have been turned on in the
@@ -587,7 +596,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 596 * settings.
588 */ 597 */
589 598
590 if (IS_IGDNG(dev)) 599 if (HAS_PCH_SPLIT(dev))
591 return; 600 return;
592 601
593 /* 602 /*
@@ -599,18 +608,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
599 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); 608 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
600} 609}
601 610
602/* Some lid devices report incorrect lid status, assume they're connected */
603static const struct dmi_system_id bad_lid_status[] = {
604 {
605 .ident = "Aspire One",
606 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
609 },
610 },
611 { }
612};
613
614/** 611/**
615 * Detect the LVDS connection. 612 * Detect the LVDS connection.
616 * 613 *
@@ -620,10 +617,14 @@ static const struct dmi_system_id bad_lid_status[] = {
620 */ 617 */
621static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 618static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
622{ 619{
620 struct drm_device *dev = connector->dev;
623 enum drm_connector_status status = connector_status_connected; 621 enum drm_connector_status status = connector_status_connected;
624 622
625 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 623 /* ACPI lid methods were generally unreliable in this generation, so
626 status = connector_status_disconnected; 624 * don't even bother.
625 */
626 if (IS_GEN2(dev) || IS_GEN3(dev))
627 return connector_status_connected;
627 628
628 return status; 629 return status;
629} 630}
@@ -634,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
634static int intel_lvds_get_modes(struct drm_connector *connector) 635static int intel_lvds_get_modes(struct drm_connector *connector)
635{ 636{
636 struct drm_device *dev = connector->dev; 637 struct drm_device *dev = connector->dev;
637 struct intel_output *intel_output = to_intel_output(connector); 638 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
638 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
639 int ret = 0; 640 int ret = 0;
640 641
641 ret = intel_ddc_get_modes(intel_output); 642 if (dev_priv->lvds_edid_good) {
643 ret = intel_ddc_get_modes(intel_encoder);
642 644
643 if (ret) 645 if (ret)
644 return ret; 646 return ret;
647 }
645 648
646 /* Didn't get an EDID, so 649 /* Didn't get an EDID, so
647 * Set wide sync ranges so we get all modes 650 * Set wide sync ranges so we get all modes
@@ -679,7 +682,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
679 struct drm_i915_private *dev_priv = 682 struct drm_i915_private *dev_priv =
680 container_of(nb, struct drm_i915_private, lid_notifier); 683 container_of(nb, struct drm_i915_private, lid_notifier);
681 struct drm_device *dev = dev_priv->dev; 684 struct drm_device *dev = dev_priv->dev;
685 struct drm_connector *connector = dev_priv->int_lvds_connector;
682 686
687 /*
688 * check and update the status of LVDS connector after receiving
 689 * the LID notification event.
690 */
691 if (connector)
692 connector->status = connector->funcs->detect(connector);
683 if (!acpi_lid_open()) { 693 if (!acpi_lid_open()) {
684 dev_priv->modeset_on_lid = 1; 694 dev_priv->modeset_on_lid = 1;
685 return NOTIFY_OK; 695 return NOTIFY_OK;
@@ -707,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
707static void intel_lvds_destroy(struct drm_connector *connector) 717static void intel_lvds_destroy(struct drm_connector *connector)
708{ 718{
709 struct drm_device *dev = connector->dev; 719 struct drm_device *dev = connector->dev;
710 struct intel_output *intel_output = to_intel_output(connector); 720 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
711 struct drm_i915_private *dev_priv = dev->dev_private; 721 struct drm_i915_private *dev_priv = dev->dev_private;
712 722
713 if (intel_output->ddc_bus) 723 if (intel_encoder->ddc_bus)
714 intel_i2c_destroy(intel_output->ddc_bus); 724 intel_i2c_destroy(intel_encoder->ddc_bus);
715 if (dev_priv->lid_notifier.notifier_call) 725 if (dev_priv->lid_notifier.notifier_call)
716 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 726 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
717 drm_sysfs_connector_remove(connector); 727 drm_sysfs_connector_remove(connector);
@@ -724,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector,
724 uint64_t value) 734 uint64_t value)
725{ 735{
726 struct drm_device *dev = connector->dev; 736 struct drm_device *dev = connector->dev;
727 struct intel_output *intel_output = 737 struct intel_encoder *intel_encoder =
728 to_intel_output(connector); 738 to_intel_encoder(connector);
729 739
730 if (property == dev->mode_config.scaling_mode_property && 740 if (property == dev->mode_config.scaling_mode_property &&
731 connector->encoder) { 741 connector->encoder) {
732 struct drm_crtc *crtc = connector->encoder->crtc; 742 struct drm_crtc *crtc = connector->encoder->crtc;
733 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 743 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
734 if (value == DRM_MODE_SCALE_NONE) { 744 if (value == DRM_MODE_SCALE_NONE) {
735 DRM_DEBUG_KMS("no scaling not supported\n"); 745 DRM_DEBUG_KMS("no scaling not supported\n");
736 return 0; 746 return 0;
@@ -850,68 +860,113 @@ static const struct dmi_system_id intel_no_lvds[] = {
850 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 860 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
851 }, 861 },
852 }, 862 },
863 {
864 .callback = intel_no_lvds_dmi_callback,
865 .ident = "Clientron U800",
866 .matches = {
867 DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
868 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
869 },
870 },
853 871
854 { } /* terminating entry */ 872 { } /* terminating entry */
855}; 873};
856 874
857#ifdef CONFIG_ACPI 875/**
858/* 876 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
859 * check_lid_device -- check whether @handle is an ACPI LID device. 877 * @dev: drm device
860 * @handle: ACPI device handle 878 * @connector: LVDS connector
861 * @level : depth in the ACPI namespace tree 879 *
862 * @context: the number of LID device when we find the device 880 * Find the reduced downclock for LVDS in EDID.
863 * @rv: a return value to fill if desired (Not use)
864 */ 881 */
865static acpi_status 882static void intel_find_lvds_downclock(struct drm_device *dev,
866check_lid_device(acpi_handle handle, u32 level, void *context, 883 struct drm_connector *connector)
867 void **return_value)
868{ 884{
869 struct acpi_device *acpi_dev; 885 struct drm_i915_private *dev_priv = dev->dev_private;
870 int *lid_present = context; 886 struct drm_display_mode *scan, *panel_fixed_mode;
871 887 int temp_downclock;
872 acpi_dev = NULL;
873 /* Get the acpi device for device handle */
874 if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
875 /* If there is no ACPI device for handle, return */
876 return AE_OK;
877 }
878 888
879 if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) 889 panel_fixed_mode = dev_priv->panel_fixed_mode;
880 *lid_present = 1; 890 temp_downclock = panel_fixed_mode->clock;
881 891
882 return AE_OK; 892 mutex_lock(&dev->mode_config.mutex);
893 list_for_each_entry(scan, &connector->probed_modes, head) {
894 /*
895 * If one mode has the same resolution with the fixed_panel
896 * mode while they have the different refresh rate, it means
897 * that the reduced downclock is found for the LVDS. In such
898 * case we can set the different FPx0/1 to dynamically select
899 * between low and high frequency.
900 */
901 if (scan->hdisplay == panel_fixed_mode->hdisplay &&
902 scan->hsync_start == panel_fixed_mode->hsync_start &&
903 scan->hsync_end == panel_fixed_mode->hsync_end &&
904 scan->htotal == panel_fixed_mode->htotal &&
905 scan->vdisplay == panel_fixed_mode->vdisplay &&
906 scan->vsync_start == panel_fixed_mode->vsync_start &&
907 scan->vsync_end == panel_fixed_mode->vsync_end &&
908 scan->vtotal == panel_fixed_mode->vtotal) {
909 if (scan->clock < temp_downclock) {
910 /*
911 * The downclock is already found. But we
912 * expect to find the lower downclock.
913 */
914 temp_downclock = scan->clock;
915 }
916 }
917 }
918 mutex_unlock(&dev->mode_config.mutex);
919 if (temp_downclock < panel_fixed_mode->clock &&
920 i915_lvds_downclock) {
921 /* We found the downclock for LVDS. */
922 dev_priv->lvds_downclock_avail = 1;
923 dev_priv->lvds_downclock = temp_downclock;
924 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
925 "Normal clock %dKhz, downclock %dKhz\n",
926 panel_fixed_mode->clock, temp_downclock);
927 }
928 return;
883} 929}
884 930
885/** 931/*
886 * check whether there exists the ACPI LID device by enumerating the ACPI 932 * Enumerate the child dev array parsed from VBT to check whether
887 * device tree. 933 * the LVDS is present.
934 * If it is present, return 1.
935 * If it is not present, return false.
936 * If no child dev is parsed from VBT, it assumes that the LVDS is present.
937 * Note: The addin_offset should also be checked for LVDS panel.
938 * Only when it is non-zero, it is assumed that it is present.
888 */ 939 */
889static int intel_lid_present(void) 940static int lvds_is_present_in_vbt(struct drm_device *dev)
890{ 941{
891 int lid_present = 0; 942 struct drm_i915_private *dev_priv = dev->dev_private;
943 struct child_device_config *p_child;
944 int i, ret;
892 945
893 if (acpi_disabled) { 946 if (!dev_priv->child_dev_num)
894 /* If ACPI is disabled, there is no ACPI device tree to
895 * check, so assume the LID device would have been present.
896 */
897 return 1; 947 return 1;
898 }
899 948
900 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 949 ret = 0;
901 ACPI_UINT32_MAX, 950 for (i = 0; i < dev_priv->child_dev_num; i++) {
902 check_lid_device, &lid_present, NULL); 951 p_child = dev_priv->child_dev + i;
952 /*
953 * If the device type is not LFP, continue.
954 * If the device type is 0x22, it is also regarded as LFP.
955 */
956 if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
957 p_child->device_type != DEVICE_TYPE_LFP)
958 continue;
903 959
904 return lid_present; 960 /* The addin_offset should be checked. Only when it is
905} 961 * non-zero, it is regarded as present.
906#else 962 */
907static int intel_lid_present(void) 963 if (p_child->addin_offset) {
908{ 964 ret = 1;
909 /* In the absence of ACPI built in, assume that the LID device would 965 break;
910 * have been present. 966 }
911 */ 967 }
912 return 1; 968 return ret;
913} 969}
914#endif
915 970
916/** 971/**
917 * intel_lvds_init - setup LVDS connectors on this device 972 * intel_lvds_init - setup LVDS connectors on this device
@@ -923,7 +978,7 @@ static int intel_lid_present(void)
923void intel_lvds_init(struct drm_device *dev) 978void intel_lvds_init(struct drm_device *dev)
924{ 979{
925 struct drm_i915_private *dev_priv = dev->dev_private; 980 struct drm_i915_private *dev_priv = dev->dev_private;
926 struct intel_output *intel_output; 981 struct intel_encoder *intel_encoder;
927 struct drm_connector *connector; 982 struct drm_connector *connector;
928 struct drm_encoder *encoder; 983 struct drm_encoder *encoder;
929 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 984 struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -936,60 +991,55 @@ void intel_lvds_init(struct drm_device *dev)
936 if (dmi_check_system(intel_no_lvds)) 991 if (dmi_check_system(intel_no_lvds))
937 return; 992 return;
938 993
939 /* Assume that any device without an ACPI LID device also doesn't 994 if (!lvds_is_present_in_vbt(dev)) {
940 * have an integrated LVDS. We would be better off parsing the BIOS 995 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
941 * to get a reliable indicator, but that code isn't written yet.
942 *
943 * In the case of all-in-one desktops using LVDS that we've seen,
944 * they're using SDVO LVDS.
945 */
946 if (!intel_lid_present())
947 return; 996 return;
997 }
948 998
949 if (IS_IGDNG(dev)) { 999 if (HAS_PCH_SPLIT(dev)) {
950 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1000 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
951 return; 1001 return;
952 if (dev_priv->edp_support) { 1002 if (dev_priv->edp_support) {
953 DRM_DEBUG("disable LVDS for eDP support\n"); 1003 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
954 return; 1004 return;
955 } 1005 }
956 gpio = PCH_GPIOC; 1006 gpio = PCH_GPIOC;
957 } 1007 }
958 1008
959 intel_output = kzalloc(sizeof(struct intel_output) + 1009 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
960 sizeof(struct intel_lvds_priv), GFP_KERNEL); 1010 sizeof(struct intel_lvds_priv), GFP_KERNEL);
961 if (!intel_output) { 1011 if (!intel_encoder) {
962 return; 1012 return;
963 } 1013 }
964 1014
965 connector = &intel_output->base; 1015 connector = &intel_encoder->base;
966 encoder = &intel_output->enc; 1016 encoder = &intel_encoder->enc;
967 drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, 1017 drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
968 DRM_MODE_CONNECTOR_LVDS); 1018 DRM_MODE_CONNECTOR_LVDS);
969 1019
970 drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, 1020 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
971 DRM_MODE_ENCODER_LVDS); 1021 DRM_MODE_ENCODER_LVDS);
972 1022
973 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1023 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
974 intel_output->type = INTEL_OUTPUT_LVDS; 1024 intel_encoder->type = INTEL_OUTPUT_LVDS;
975 1025
976 intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 1026 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
977 intel_output->crtc_mask = (1 << 1); 1027 intel_encoder->crtc_mask = (1 << 1);
978 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 1028 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
979 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 1029 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
980 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1030 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
981 connector->interlace_allowed = false; 1031 connector->interlace_allowed = false;
982 connector->doublescan_allowed = false; 1032 connector->doublescan_allowed = false;
983 1033
984 lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); 1034 lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
985 intel_output->dev_priv = lvds_priv; 1035 intel_encoder->dev_priv = lvds_priv;
986 /* create the scaling mode property */ 1036 /* create the scaling mode property */
987 drm_mode_create_scaling_mode_property(dev); 1037 drm_mode_create_scaling_mode_property(dev);
988 /* 1038 /*
989 * the initial panel fitting mode will be FULL_SCREEN. 1039 * the initial panel fitting mode will be FULL_SCREEN.
990 */ 1040 */
991 1041
992 drm_connector_attach_property(&intel_output->base, 1042 drm_connector_attach_property(&intel_encoder->base,
993 dev->mode_config.scaling_mode_property, 1043 dev->mode_config.scaling_mode_property,
994 DRM_MODE_SCALE_FULLSCREEN); 1044 DRM_MODE_SCALE_FULLSCREEN);
995 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 1045 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1004,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev)
1004 */ 1054 */
1005 1055
1006 /* Set up the DDC bus. */ 1056 /* Set up the DDC bus. */
1007 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); 1057 intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
1008 if (!intel_output->ddc_bus) { 1058 if (!intel_encoder->ddc_bus) {
1009 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 1059 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
1010 "failed.\n"); 1060 "failed.\n");
1011 goto failed; 1061 goto failed;
@@ -1015,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev)
1015 * Attempt to get the fixed panel mode from DDC. Assume that the 1065 * Attempt to get the fixed panel mode from DDC. Assume that the
1016 * preferred mode is the right one. 1066 * preferred mode is the right one.
1017 */ 1067 */
1018 intel_ddc_get_modes(intel_output); 1068 dev_priv->lvds_edid_good = true;
1069
1070 if (!intel_ddc_get_modes(intel_encoder))
1071 dev_priv->lvds_edid_good = false;
1019 1072
1020 list_for_each_entry(scan, &connector->probed_modes, head) { 1073 list_for_each_entry(scan, &connector->probed_modes, head) {
1021 mutex_lock(&dev->mode_config.mutex); 1074 mutex_lock(&dev->mode_config.mutex);
@@ -1023,6 +1076,7 @@ void intel_lvds_init(struct drm_device *dev)
1023 dev_priv->panel_fixed_mode = 1076 dev_priv->panel_fixed_mode =
1024 drm_mode_duplicate(dev, scan); 1077 drm_mode_duplicate(dev, scan);
1025 mutex_unlock(&dev->mode_config.mutex); 1078 mutex_unlock(&dev->mode_config.mutex);
1079 intel_find_lvds_downclock(dev, connector);
1026 goto out; 1080 goto out;
1027 } 1081 }
1028 mutex_unlock(&dev->mode_config.mutex); 1082 mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1101,8 @@ void intel_lvds_init(struct drm_device *dev)
1047 * correct mode. 1101 * correct mode.
1048 */ 1102 */
1049 1103
1050 /* IGDNG: FIXME if still fail, not try pipe mode now */ 1104 /* Ironlake: FIXME if still fail, not try pipe mode now */
1051 if (IS_IGDNG(dev)) 1105 if (HAS_PCH_SPLIT(dev))
1052 goto failed; 1106 goto failed;
1053 1107
1054 lvds = I915_READ(LVDS); 1108 lvds = I915_READ(LVDS);
@@ -1069,7 +1123,7 @@ void intel_lvds_init(struct drm_device *dev)
1069 goto failed; 1123 goto failed;
1070 1124
1071out: 1125out:
1072 if (IS_IGDNG(dev)) { 1126 if (HAS_PCH_SPLIT(dev)) {
1073 u32 pwm; 1127 u32 pwm;
1074 /* make sure PWM is enabled */ 1128 /* make sure PWM is enabled */
1075 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1129 pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,16 +1136,19 @@ out:
1082 } 1136 }
1083 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1137 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1084 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1138 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1085 DRM_DEBUG("lid notifier registration failed\n"); 1139 DRM_DEBUG_KMS("lid notifier registration failed\n");
1086 dev_priv->lid_notifier.notifier_call = NULL; 1140 dev_priv->lid_notifier.notifier_call = NULL;
1087 } 1141 }
1142 /* keep the LVDS connector */
1143 dev_priv->int_lvds_connector = connector;
1088 drm_sysfs_connector_add(connector); 1144 drm_sysfs_connector_add(connector);
1089 return; 1145 return;
1090 1146
1091failed: 1147failed:
1092 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1148 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1093 if (intel_output->ddc_bus) 1149 if (intel_encoder->ddc_bus)
1094 intel_i2c_destroy(intel_output->ddc_bus); 1150 intel_i2c_destroy(intel_encoder->ddc_bus);
1095 drm_connector_cleanup(connector); 1151 drm_connector_cleanup(connector);
1096 kfree(intel_output); 1152 drm_encoder_cleanup(encoder);
1153 kfree(intel_encoder);
1097} 1154}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 67e2f4632a24..8e5c83b2d120 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -23,6 +23,7 @@
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/i2c.h> 27#include <linux/i2c.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include "drmP.h" 29#include "drmP.h"
@@ -33,7 +34,7 @@
33 * intel_ddc_probe 34 * intel_ddc_probe
34 * 35 *
35 */ 36 */
36bool intel_ddc_probe(struct intel_output *intel_output) 37bool intel_ddc_probe(struct intel_encoder *intel_encoder)
37{ 38{
38 u8 out_buf[] = { 0x0, 0x0}; 39 u8 out_buf[] = { 0x0, 0x0};
39 u8 buf[2]; 40 u8 buf[2];
@@ -53,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
53 } 54 }
54 }; 55 };
55 56
56 intel_i2c_quirk_set(intel_output->base.dev, true); 57 intel_i2c_quirk_set(intel_encoder->base.dev, true);
57 ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); 58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
58 intel_i2c_quirk_set(intel_output->base.dev, false); 59 intel_i2c_quirk_set(intel_encoder->base.dev, false);
59 if (ret == 2) 60 if (ret == 2)
60 return true; 61 return true;
61 62
@@ -68,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output)
68 * 69 *
69 * Fetch the EDID information from @connector using the DDC bus. 70 * Fetch the EDID information from @connector using the DDC bus.
70 */ 71 */
71int intel_ddc_get_modes(struct intel_output *intel_output) 72int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
72{ 73{
73 struct edid *edid; 74 struct edid *edid;
74 int ret = 0; 75 int ret = 0;
75 76
76 intel_i2c_quirk_set(intel_output->base.dev, true); 77 intel_i2c_quirk_set(intel_encoder->base.dev, true);
77 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); 78 edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
78 intel_i2c_quirk_set(intel_output->base.dev, false); 79 intel_i2c_quirk_set(intel_encoder->base.dev, false);
79 if (edid) { 80 if (edid) {
80 drm_mode_connector_update_edid_property(&intel_output->base, 81 drm_mode_connector_update_edid_property(&intel_encoder->base,
81 edid); 82 edid);
82 ret = drm_add_edid_modes(&intel_output->base, edid); 83 ret = drm_add_edid_modes(&intel_encoder->base, edid);
83 intel_output->base.display_info.raw_edid = NULL; 84 intel_encoder->base.display_info.raw_edid = NULL;
84 kfree(edid); 85 kfree(edid);
85 } 86 }
86 87
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 000000000000..6d524a1fc271
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1406 @@
1/*
2 * Copyright © 2009
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel@ffwll.ch>
25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include "i915_reg.h"
33#include "intel_drv.h"
34
35/* Limits for overlay size. According to intel doc, the real limits are:
 36 * Y width: 4095, UV width (planar): 2047, Y height: 2047,
 37 * UV height (planar): 1023. But the xorg thinks 2048 for height and width. Use
 38 * the minimum of both. */
39#define IMAGE_MAX_WIDTH 2048
40#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
41/* on 830 and 845 these large limits result in the card hanging */
42#define IMAGE_MAX_WIDTH_LEGACY 1024
43#define IMAGE_MAX_HEIGHT_LEGACY 1088
44
45/* overlay register definitions */
46/* OCMD register */
47#define OCMD_TILED_SURFACE (0x1<<19)
48#define OCMD_MIRROR_MASK (0x3<<17)
49#define OCMD_MIRROR_MODE (0x3<<17)
50#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
51#define OCMD_MIRROR_VERTICAL (0x2<<17)
52#define OCMD_MIRROR_BOTH (0x3<<17)
53#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
54#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
55#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
56#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
57#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
58#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
59#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
60#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
61#define OCMD_YUV_422_PACKED (0x8<<10)
62#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
63#define OCMD_YUV_420_PLANAR (0xc<<10)
64#define OCMD_YUV_422_PLANAR (0xd<<10)
65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
 68#define OCMD_BUF_TYPE_MASK (0x1<<5)
69#define OCMD_BUF_TYPE_FRAME (0x0<<5)
70#define OCMD_BUF_TYPE_FIELD (0x1<<5)
71#define OCMD_TEST_MODE (0x1<<4)
72#define OCMD_BUFFER_SELECT (0x3<<2)
73#define OCMD_BUFFER0 (0x0<<2)
74#define OCMD_BUFFER1 (0x1<<2)
75#define OCMD_FIELD_SELECT (0x1<<2)
76#define OCMD_FIELD0 (0x0<<1)
77#define OCMD_FIELD1 (0x1<<1)
78#define OCMD_ENABLE (0x1<<0)
79
80/* OCONFIG register */
81#define OCONF_PIPE_MASK (0x1<<18)
82#define OCONF_PIPE_A (0x0<<18)
83#define OCONF_PIPE_B (0x1<<18)
84#define OCONF_GAMMA2_ENABLE (0x1<<16)
85#define OCONF_CSC_MODE_BT601 (0x0<<5)
86#define OCONF_CSC_MODE_BT709 (0x1<<5)
87#define OCONF_CSC_BYPASS (0x1<<4)
88#define OCONF_CC_OUT_8BIT (0x1<<3)
89#define OCONF_TEST_MODE (0x1<<2)
90#define OCONF_THREE_LINE_BUFFER (0x1<<0)
91#define OCONF_TWO_LINE_BUFFER (0x0<<0)
92
93/* DCLRKM (dst-key) register */
94#define DST_KEY_ENABLE (0x1<<31)
95#define CLK_RGB24_MASK 0x0
96#define CLK_RGB16_MASK 0x070307
97#define CLK_RGB15_MASK 0x070707
98#define CLK_RGB8I_MASK 0xffffff
99
100#define RGB16_TO_COLORKEY(c) \
101 (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
102#define RGB15_TO_COLORKEY(c) \
103 (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
104
105/* overlay flip addr flag */
106#define OFC_UPDATE 0x1
107
108/* polyphase filter coefficients */
109#define N_HORIZ_Y_TAPS 5
110#define N_VERT_Y_TAPS 3
111#define N_HORIZ_UV_TAPS 3
112#define N_VERT_UV_TAPS 3
113#define N_PHASES 17
114#define MAX_TAPS 5
115
 116/* memory buffered overlay registers */
117struct overlay_registers {
118 u32 OBUF_0Y;
119 u32 OBUF_1Y;
120 u32 OBUF_0U;
121 u32 OBUF_0V;
122 u32 OBUF_1U;
123 u32 OBUF_1V;
124 u32 OSTRIDE;
125 u32 YRGB_VPH;
126 u32 UV_VPH;
127 u32 HORZ_PH;
128 u32 INIT_PHS;
129 u32 DWINPOS;
130 u32 DWINSZ;
131 u32 SWIDTH;
132 u32 SWIDTHSW;
133 u32 SHEIGHT;
134 u32 YRGBSCALE;
135 u32 UVSCALE;
136 u32 OCLRC0;
137 u32 OCLRC1;
138 u32 DCLRKV;
139 u32 DCLRKM;
140 u32 SCLRKVH;
141 u32 SCLRKVL;
142 u32 SCLRKEN;
143 u32 OCONFIG;
144 u32 OCMD;
145 u32 RESERVED1; /* 0x6C */
146 u32 OSTART_0Y;
147 u32 OSTART_1Y;
148 u32 OSTART_0U;
149 u32 OSTART_0V;
150 u32 OSTART_1U;
151 u32 OSTART_1V;
152 u32 OTILEOFF_0Y;
153 u32 OTILEOFF_1Y;
154 u32 OTILEOFF_0U;
155 u32 OTILEOFF_0V;
156 u32 OTILEOFF_1U;
157 u32 OTILEOFF_1V;
158 u32 FASTHSCALE; /* 0xA0 */
159 u32 UVSCALEV; /* 0xA4 */
160 u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
161 u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
162 u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
163 u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
164 u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
165 u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
166 u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
167 u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
168 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
169};
170
 171/* overlay flip addr flag */
 172#define OFC_UPDATE 0x1 /* NOTE(review): duplicate of the identical define at line 106 — drop one */
173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176
177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
179{
180 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
181 struct overlay_registers *regs;
182
183 /* no recursive mappings */
184 BUG_ON(overlay->virt_addr);
185
186 if (OVERLAY_NONPHYSICAL(overlay->dev)) {
187 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
188 overlay->reg_bo->gtt_offset);
189
190 if (!regs) {
191 DRM_ERROR("failed to map overlay regs in GTT\n");
192 return NULL;
193 }
194 } else
195 regs = overlay->reg_bo->phys_obj->handle->vaddr;
196
197 return overlay->virt_addr = regs;
198}
199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{
202 if (OVERLAY_NONPHYSICAL(overlay->dev))
203 io_mapping_unmap_atomic(overlay->virt_addr);
204
205 overlay->virt_addr = NULL;
206
207 return;
208}
209
 210/* overlay needs to be disabled in OCMD reg */
211static int intel_overlay_on(struct intel_overlay *overlay)
212{
213 struct drm_device *dev = overlay->dev;
214 drm_i915_private_t *dev_priv = dev->dev_private;
215 int ret;
216 RING_LOCALS;
217
218 BUG_ON(overlay->active);
219
220 overlay->active = 1;
221 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
222
223 BEGIN_LP_RING(4);
224 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
225 OUT_RING(overlay->flip_addr | OFC_UPDATE);
226 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
227 OUT_RING(MI_NOOP);
228 ADVANCE_LP_RING();
229
230 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
231 if (overlay->last_flip_req == 0)
232 return -ENOMEM;
233
234 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
235 if (ret != 0)
236 return ret;
237
238 overlay->hw_wedged = 0;
239 overlay->last_flip_req = 0;
240 return 0;
241}
242
243/* overlay needs to be enabled in OCMD reg */
244static void intel_overlay_continue(struct intel_overlay *overlay,
245 bool load_polyphase_filter)
246{
247 struct drm_device *dev = overlay->dev;
248 drm_i915_private_t *dev_priv = dev->dev_private;
249 u32 flip_addr = overlay->flip_addr;
250 u32 tmp;
251 RING_LOCALS;
252
253 BUG_ON(!overlay->active);
254
255 if (load_polyphase_filter)
256 flip_addr |= OFC_UPDATE;
257
258 /* check for underruns */
259 tmp = I915_READ(DOVSTA);
260 if (tmp & (1 << 17))
261 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
262
263 BEGIN_LP_RING(2);
264 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
265 OUT_RING(flip_addr);
266 ADVANCE_LP_RING();
267
268 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
269}
270
271static int intel_overlay_wait_flip(struct intel_overlay *overlay)
272{
273 struct drm_device *dev = overlay->dev;
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 int ret;
276 u32 tmp;
277 RING_LOCALS;
278
279 if (overlay->last_flip_req != 0) {
280 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
281 if (ret == 0) {
282 overlay->last_flip_req = 0;
283
284 tmp = I915_READ(ISR);
285
286 if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
287 return 0;
288 }
289 }
290
291 /* synchronous slowpath */
292 overlay->hw_wedged = RELEASE_OLD_VID;
293
294 BEGIN_LP_RING(2);
295 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
296 OUT_RING(MI_NOOP);
297 ADVANCE_LP_RING();
298
299 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
300 if (overlay->last_flip_req == 0)
301 return -ENOMEM;
302
303 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
304 if (ret != 0)
305 return ret;
306
307 overlay->hw_wedged = 0;
308 overlay->last_flip_req = 0;
309 return 0;
310}
311
312/* overlay needs to be disabled in OCMD reg */
313static int intel_overlay_off(struct intel_overlay *overlay)
314{
315 u32 flip_addr = overlay->flip_addr;
316 struct drm_device *dev = overlay->dev;
317 drm_i915_private_t *dev_priv = dev->dev_private;
318 int ret;
319 RING_LOCALS;
320
321 BUG_ON(!overlay->active);
322
323 /* According to intel docs the overlay hw may hang (when switching
324 * off) without loading the filter coeffs. It is however unclear whether
325 * this applies to the disabling of the overlay or to the switching off
326 * of the hw. Do it in both cases */
327 flip_addr |= OFC_UPDATE;
328
329 /* wait for overlay to go idle */
330 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
331
332 BEGIN_LP_RING(4);
333 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
334 OUT_RING(flip_addr);
335 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
336 OUT_RING(MI_NOOP);
337 ADVANCE_LP_RING();
338
339 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
340 if (overlay->last_flip_req == 0)
341 return -ENOMEM;
342
343 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
344 if (ret != 0)
345 return ret;
346
347 /* turn overlay off */
348 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
349
350 BEGIN_LP_RING(4);
351 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
352 OUT_RING(flip_addr);
353 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
354 OUT_RING(MI_NOOP);
355 ADVANCE_LP_RING();
356
357 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
358 if (overlay->last_flip_req == 0)
359 return -ENOMEM;
360
361 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
362 if (ret != 0)
363 return ret;
364
365 overlay->hw_wedged = 0;
366 overlay->last_flip_req = 0;
367 return ret;
368}
369
370static void intel_overlay_off_tail(struct intel_overlay *overlay)
371{
372 struct drm_gem_object *obj;
373
374 /* never have the overlay hw on without showing a frame */
375 BUG_ON(!overlay->vid_bo);
376 obj = overlay->vid_bo->obj;
377
378 i915_gem_object_unpin(obj);
379 drm_gem_object_unreference(obj);
380 overlay->vid_bo = NULL;
381
382 overlay->crtc->overlay = NULL;
383 overlay->crtc = NULL;
384 overlay->active = 0;
385}
386
387/* recover from an interruption due to a signal
 388 * We have to be careful not to repeat work forever and make forward progress. */
int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
		int interruptible)
{
	struct drm_device *dev = overlay->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;	/* used by the LP_RING macros */
	struct drm_gem_object *obj;
	u32 flip_addr;
	int ret;
	RING_LOCALS;

	/* a previous reset left the overlay unusable; nothing to recover */
	if (overlay->hw_wedged == HW_WEDGED)
		return -EIO;

	/* ensure there is a request to wait on, even if the interrupted
	 * step never emitted one, so the wait below always makes progress */
	if (overlay->last_flip_req == 0) {
		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
		if (overlay->last_flip_req == 0)
			return -ENOMEM;
	}

	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
	if (ret != 0)
		return ret;

	/* finish whatever step the signal interrupted */
	switch (overlay->hw_wedged) {
	case RELEASE_OLD_VID:
		/* the flip completed; release the previous frame's buffer */
		obj = overlay->old_vid_bo->obj;
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		overlay->old_vid_bo = NULL;
		break;
	case SWITCH_OFF_STAGE_1:
		/* re-emit the second half of the off sequence */
		flip_addr = overlay->flip_addr;
		flip_addr |= OFC_UPDATE;

		overlay->hw_wedged = SWITCH_OFF_STAGE_2;

		BEGIN_LP_RING(4);
		OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
		OUT_RING(flip_addr);
		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();

		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
		if (overlay->last_flip_req == 0)
			return -ENOMEM;

		ret = i915_do_wait_request(dev, overlay->last_flip_req,
				interruptible);
		if (ret != 0)
			return ret;

		/* fallthrough: stage 1 is done, finish stage 2 as well */
	case SWITCH_OFF_STAGE_2:
		intel_overlay_off_tail(overlay);
		break;
	default:
		/* the only remaining legal state is a plain pending flip */
		BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
	}

	overlay->hw_wedged = 0;
	overlay->last_flip_req = 0;
	return 0;
}
452
453/* Wait for pending overlay flip and release old frame.
454 * Needs to be called before the overlay register are changed
455 * via intel_overlay_(un)map_regs_atomic */
456static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
457{
458 int ret;
459 struct drm_gem_object *obj;
460
461 /* only wait if there is actually an old frame to release to
462 * guarantee forward progress */
463 if (!overlay->old_vid_bo)
464 return 0;
465
466 ret = intel_overlay_wait_flip(overlay);
467 if (ret != 0)
468 return ret;
469
470 obj = overlay->old_vid_bo->obj;
471 i915_gem_object_unpin(obj);
472 drm_gem_object_unreference(obj);
473 overlay->old_vid_bo = NULL;
474
475 return 0;
476}
477
/* Sanitized, driver-internal copy of a userspace put_image request. */
struct put_image_params {
	int format;		/* I915_OVERLAY_* flags minus I915_OVERLAY_FLAGS_MASK */
	short dst_x;		/* destination rectangle on the pipe, in pixels */
	short dst_y;
	short dst_w;
	short dst_h;
	short src_w;		/* source buffer width, in pixels */
	short src_scan_h;	/* part of the source that is actually scanned out */
	short src_scan_w;
	short src_h;		/* source buffer height, in pixels */
	short stride_Y;		/* byte strides of the Y and UV planes */
	short stride_UV;
	int offset_Y;		/* byte offsets of each plane within the bo */
	int offset_U;
	int offset_V;
};
494
495static int packed_depth_bytes(u32 format)
496{
497 switch (format & I915_OVERLAY_DEPTH_MASK) {
498 case I915_OVERLAY_YUV422:
499 return 4;
500 case I915_OVERLAY_YUV411:
501 /* return 6; not implemented */
502 default:
503 return -EINVAL;
504 }
505}
506
507static int packed_width_bytes(u32 format, short width)
508{
509 switch (format & I915_OVERLAY_DEPTH_MASK) {
510 case I915_OVERLAY_YUV422:
511 return width << 1;
512 default:
513 return -EINVAL;
514 }
515}
516
517static int uv_hsubsampling(u32 format)
518{
519 switch (format & I915_OVERLAY_DEPTH_MASK) {
520 case I915_OVERLAY_YUV422:
521 case I915_OVERLAY_YUV420:
522 return 2;
523 case I915_OVERLAY_YUV411:
524 case I915_OVERLAY_YUV410:
525 return 4;
526 default:
527 return -EINVAL;
528 }
529}
530
531static int uv_vsubsampling(u32 format)
532{
533 switch (format & I915_OVERLAY_DEPTH_MASK) {
534 case I915_OVERLAY_YUV420:
535 case I915_OVERLAY_YUV410:
536 return 2;
537 case I915_OVERLAY_YUV422:
538 case I915_OVERLAY_YUV411:
539 return 1;
540 default:
541 return -EINVAL;
542 }
543}
544
545static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
546{
547 u32 mask, shift, ret;
548 if (IS_I9XX(dev)) {
549 mask = 0x3f;
550 shift = 6;
551 } else {
552 mask = 0x1f;
553 shift = 5;
554 }
555 ret = ((offset + width + mask) >> shift) - (offset >> shift);
556 if (IS_I9XX(dev))
557 ret <<= 1;
558 ret -=1;
559 return ret << 2;
560}
561
/* Static horizontal scaler coefficients for the luma (Y) plane:
 * N_PHASES sets of N_HORIZ_Y_TAPS taps in the hardware's fixed-point
 * encoding, loaded whenever the scaling factors change. */
static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
/* Static horizontal scaler coefficients for the chroma (UV) planes:
 * N_PHASES sets of N_HORIZ_UV_TAPS taps, same encoding as above. */
static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
	0x3000, 0x0800, 0x3000};
590
/* Load the static polyphase filter coefficient tables into the
 * overlay register block. */
static void update_polyphase_filter(struct overlay_registers *regs)
{
	memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
	memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
}
596
/* Program the scaler registers for the requested source->destination
 * scaling.  Returns true when the scale factors changed, in which case
 * the polyphase filter coefficients are reloaded as well. */
static bool update_scaling_factors(struct intel_overlay *overlay,
				   struct overlay_registers *regs,
				   struct put_image_params *params)
{
	/* fixed point with a 12 bit shift */
	u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
#define FRACT_MASK 0xfff
	bool scale_changed = false;
	int uv_hscale = uv_hsubsampling(params->format);
	int uv_vscale = uv_vsubsampling(params->format);

	if (params->dst_w > 1)
		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
			/(params->dst_w);
	else
		xscale = 1 << FP_SHIFT;

	if (params->dst_h > 1)
		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
			/(params->dst_h);
	else
		yscale = 1 << FP_SHIFT;

	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
	xscale_UV = xscale/uv_hscale;
	yscale_UV = yscale/uv_vscale;
	/* make the Y scale to UV scale ratio an exact multiply */
	xscale = xscale_UV * uv_hscale;
	yscale = yscale_UV * uv_vscale;
	/*} else {
	  xscale_UV = 0;
	  yscale_UV = 0;
	  }*/

	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
		scale_changed = true;
	overlay->old_xscale = xscale;
	overlay->old_yscale = yscale;

	/* register layout: integer scale in the high bits, fraction below */
	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
		| ((xscale >> FP_SHIFT) << 16)
		| ((xscale & FRACT_MASK) << 3);
	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
		| ((xscale_UV >> FP_SHIFT) << 16)
		| ((xscale_UV & FRACT_MASK) << 3);
	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
		| ((yscale_UV >> FP_SHIFT) << 0);

	/* new scale ratios need matching filter coefficients */
	if (scale_changed)
		update_polyphase_filter(regs);

	return scale_changed;
}
651
652static void update_colorkey(struct intel_overlay *overlay,
653 struct overlay_registers *regs)
654{
655 u32 key = overlay->color_key;
656 switch (overlay->crtc->base.fb->bits_per_pixel) {
657 case 8:
658 regs->DCLRKV = 0;
659 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
660 case 16:
661 if (overlay->crtc->base.fb->depth == 15) {
662 regs->DCLRKV = RGB15_TO_COLORKEY(key);
663 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
664 } else {
665 regs->DCLRKV = RGB16_TO_COLORKEY(key);
666 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
667 }
668 case 24:
669 case 32:
670 regs->DCLRKV = key;
671 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
672 }
673}
674
675static u32 overlay_cmd_reg(struct put_image_params *params)
676{
677 u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
678
679 if (params->format & I915_OVERLAY_YUV_PLANAR) {
680 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
681 case I915_OVERLAY_YUV422:
682 cmd |= OCMD_YUV_422_PLANAR;
683 break;
684 case I915_OVERLAY_YUV420:
685 cmd |= OCMD_YUV_420_PLANAR;
686 break;
687 case I915_OVERLAY_YUV411:
688 case I915_OVERLAY_YUV410:
689 cmd |= OCMD_YUV_410_PLANAR;
690 break;
691 }
692 } else { /* YUV packed */
693 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
694 case I915_OVERLAY_YUV422:
695 cmd |= OCMD_YUV_422_PACKED;
696 break;
697 case I915_OVERLAY_YUV411:
698 cmd |= OCMD_YUV_411_PACKED;
699 break;
700 }
701
702 switch (params->format & I915_OVERLAY_SWAP_MASK) {
703 case I915_OVERLAY_NO_SWAP:
704 break;
705 case I915_OVERLAY_UV_SWAP:
706 cmd |= OCMD_UV_SWAP;
707 break;
708 case I915_OVERLAY_Y_SWAP:
709 cmd |= OCMD_Y_SWAP;
710 break;
711 case I915_OVERLAY_Y_AND_UV_SWAP:
712 cmd |= OCMD_Y_AND_UV_SWAP;
713 break;
714 }
715 }
716
717 return cmd;
718}
719
720int intel_overlay_do_put_image(struct intel_overlay *overlay,
721 struct drm_gem_object *new_bo,
722 struct put_image_params *params)
723{
724 int ret, tmp_width;
725 struct overlay_registers *regs;
726 bool scale_changed = false;
727 struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
728 struct drm_device *dev = overlay->dev;
729
730 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
731 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
732 BUG_ON(!overlay);
733
734 ret = intel_overlay_release_old_vid(overlay);
735 if (ret != 0)
736 return ret;
737
738 ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
739 if (ret != 0)
740 return ret;
741
742 ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
743 if (ret != 0)
744 goto out_unpin;
745
746 if (!overlay->active) {
747 regs = intel_overlay_map_regs_atomic(overlay);
748 if (!regs) {
749 ret = -ENOMEM;
750 goto out_unpin;
751 }
752 regs->OCONFIG = OCONF_CC_OUT_8BIT;
753 if (IS_I965GM(overlay->dev))
754 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
755 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
756 OCONF_PIPE_A : OCONF_PIPE_B;
757 intel_overlay_unmap_regs_atomic(overlay);
758
759 ret = intel_overlay_on(overlay);
760 if (ret != 0)
761 goto out_unpin;
762 }
763
764 regs = intel_overlay_map_regs_atomic(overlay);
765 if (!regs) {
766 ret = -ENOMEM;
767 goto out_unpin;
768 }
769
770 regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
771 regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
772
773 if (params->format & I915_OVERLAY_YUV_PACKED)
774 tmp_width = packed_width_bytes(params->format, params->src_w);
775 else
776 tmp_width = params->src_w;
777
778 regs->SWIDTH = params->src_w;
779 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
780 params->offset_Y, tmp_width);
781 regs->SHEIGHT = params->src_h;
782 regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
783 regs->OSTRIDE = params->stride_Y;
784
785 if (params->format & I915_OVERLAY_YUV_PLANAR) {
786 int uv_hscale = uv_hsubsampling(params->format);
787 int uv_vscale = uv_vsubsampling(params->format);
788 u32 tmp_U, tmp_V;
789 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
790 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
791 params->src_w/uv_hscale);
792 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
793 params->src_w/uv_hscale);
794 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
795 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
796 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
797 regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
798 regs->OSTRIDE |= params->stride_UV << 16;
799 }
800
801 scale_changed = update_scaling_factors(overlay, regs, params);
802
803 update_colorkey(overlay, regs);
804
805 regs->OCMD = overlay_cmd_reg(params);
806
807 intel_overlay_unmap_regs_atomic(overlay);
808
809 intel_overlay_continue(overlay, scale_changed);
810
811 overlay->old_vid_bo = overlay->vid_bo;
812 overlay->vid_bo = to_intel_bo(new_bo);
813
814 return 0;
815
816out_unpin:
817 i915_gem_object_unpin(new_bo);
818 return ret;
819}
820
821int intel_overlay_switch_off(struct intel_overlay *overlay)
822{
823 int ret;
824 struct overlay_registers *regs;
825 struct drm_device *dev = overlay->dev;
826
827 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
828 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
829
830 if (overlay->hw_wedged) {
831 ret = intel_overlay_recover_from_interrupt(overlay, 1);
832 if (ret != 0)
833 return ret;
834 }
835
836 if (!overlay->active)
837 return 0;
838
839 ret = intel_overlay_release_old_vid(overlay);
840 if (ret != 0)
841 return ret;
842
843 regs = intel_overlay_map_regs_atomic(overlay);
844 regs->OCMD = 0;
845 intel_overlay_unmap_regs_atomic(overlay);
846
847 ret = intel_overlay_off(overlay);
848 if (ret != 0)
849 return ret;
850
851 intel_overlay_off_tail(overlay);
852
853 return 0;
854}
855
856static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
857 struct intel_crtc *crtc)
858{
859 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
860 u32 pipeconf;
861 int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
862
863 if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
864 return -EINVAL;
865
866 pipeconf = I915_READ(pipeconf_reg);
867
868 /* can't use the overlay with double wide pipe */
869 if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
870 return -EINVAL;
871
872 return 0;
873}
874
875static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
876{
877 struct drm_device *dev = overlay->dev;
878 drm_i915_private_t *dev_priv = dev->dev_private;
879 u32 ratio;
880 u32 pfit_control = I915_READ(PFIT_CONTROL);
881
882 /* XXX: This is not the same logic as in the xorg driver, but more in
883 * line with the intel documentation for the i965 */
884 if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
885 ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
886 } else { /* on i965 use the PGM reg to read out the autoscaler values */
887 ratio = I915_READ(PFIT_PGM_RATIOS);
888 if (IS_I965G(dev))
889 ratio >>= PFIT_VERT_SCALE_SHIFT_965;
890 else
891 ratio >>= PFIT_VERT_SCALE_SHIFT;
892 }
893
894 overlay->pfit_vscale_ratio = ratio;
895}
896
897static int check_overlay_dst(struct intel_overlay *overlay,
898 struct drm_intel_overlay_put_image *rec)
899{
900 struct drm_display_mode *mode = &overlay->crtc->base.mode;
901
902 if ((rec->dst_x < mode->crtc_hdisplay)
903 && (rec->dst_x + rec->dst_width
904 <= mode->crtc_hdisplay)
905 && (rec->dst_y < mode->crtc_vdisplay)
906 && (rec->dst_y + rec->dst_height
907 <= mode->crtc_vdisplay))
908 return 0;
909 else
910 return -EINVAL;
911}
912
913static int check_overlay_scaling(struct put_image_params *rec)
914{
915 u32 tmp;
916
917 /* downscaling limit is 8.0 */
918 tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
919 if (tmp > 7)
920 return -EINVAL;
921 tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
922 if (tmp > 7)
923 return -EINVAL;
924
925 return 0;
926}
927
/* Validate the source image layout in @rec against hardware limits and
 * the size of @new_bo.  Also clears the UV plane fields for packed
 * formats.  Returns 0 when the source is usable. */
static int check_overlay_src(struct drm_device *dev,
			     struct drm_intel_overlay_put_image *rec,
			     struct drm_gem_object *new_bo)
{
	u32 stride_mask;
	int depth;
	int uv_hscale = uv_hsubsampling(rec->flags);
	int uv_vscale = uv_vsubsampling(rec->flags);
	size_t tmp;

	/* check src dimensions */
	if (IS_845G(dev) || IS_I830(dev)) {
		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
			return -EINVAL;
	} else {
		if (rec->src_height > IMAGE_MAX_HEIGHT
		    || rec->src_width > IMAGE_MAX_WIDTH)
			return -EINVAL;
	}
	/* better safe than sorry, use 4 as the maximal subsampling ratio */
	if (rec->src_height < N_VERT_Y_TAPS*4
	    || rec->src_width < N_HORIZ_Y_TAPS*4)
		return -EINVAL;

	/* check alignment constraints */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
		/* not implemented */
		return -EINVAL;
	case I915_OVERLAY_YUV_PACKED:
		depth = packed_depth_bytes(rec->flags);
		if (uv_vscale != 1)
			return -EINVAL;
		if (depth < 0)
			return depth;
		/* ignore UV planes */
		rec->stride_UV = 0;
		rec->offset_U = 0;
		rec->offset_V = 0;
		/* check pixel alignment */
		if (rec->offset_Y % depth)
			return -EINVAL;
		break;
	case I915_OVERLAY_YUV_PLANAR:
		/* negative values mean the format's subsampling is invalid */
		if (uv_vscale < 0 || uv_hscale < 0)
			return -EINVAL;
		/* no offset restrictions for planar formats */
		break;
	default:
		return -EINVAL;
	}

	if (rec->src_width % uv_hscale)
		return -EINVAL;

	/* stride checking: strides must be 64-byte aligned */
	stride_mask = 63;

	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
		return -EINVAL;
	if (IS_I965G(dev) && rec->stride_Y < 512)
		return -EINVAL;

	/* maximum Y stride: 4 KiB planar, 8 KiB packed */
	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
		4 : 8;
	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
		return -EINVAL;

	/* check buffer dimensions: every plane must fit inside the bo */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
	case I915_OVERLAY_YUV_PACKED:
		/* always 4 Y values per depth pixels */
		if (packed_width_bytes(rec->flags, rec->src_width)
		    > rec->stride_Y)
			return -EINVAL;

		tmp = rec->stride_Y*rec->src_height;
		if (rec->offset_Y + tmp > new_bo->size)
			return -EINVAL;
		break;
	case I915_OVERLAY_YUV_PLANAR:
		if (rec->src_width > rec->stride_Y)
			return -EINVAL;
		if (rec->src_width/uv_hscale > rec->stride_UV)
			return -EINVAL;

		tmp = rec->stride_Y*rec->src_height;
		if (rec->offset_Y + tmp > new_bo->size)
			return -EINVAL;
		/* the UV planes are vertically subsampled */
		tmp = rec->stride_UV*rec->src_height;
		tmp /= uv_vscale;
		if (rec->offset_U + tmp > new_bo->size
		    || rec->offset_V + tmp > new_bo->size)
			return -EINVAL;
		break;
	}

	return 0;
}
1029
/**
 * intel_overlay_put_image - overlay put_image ioctl handler
 *
 * Validates the userspace request, switches the overlay to the target
 * crtc if needed, and hands the new frame to
 * intel_overlay_do_put_image().  On success the overlay keeps the
 * reference on the new buffer object; on error it is dropped here.
 * Returns 0 on success or a negative error code.
 */
int intel_overlay_put_image(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
	struct drm_intel_overlay_put_image *put_image_rec = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_overlay *overlay;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;
	struct drm_gem_object *new_bo;
	struct put_image_params *params;
	int ret;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	overlay = dev_priv->overlay;
	if (!overlay) {
		DRM_DEBUG("userspace bug: no overlay\n");
		return -ENODEV;
	}

	/* a cleared ENABLE flag means "turn the overlay off" */
	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
		mutex_lock(&dev->mode_config.mutex);
		mutex_lock(&dev->struct_mutex);

		ret = intel_overlay_switch_off(overlay);

		mutex_unlock(&dev->struct_mutex);
		mutex_unlock(&dev->mode_config.mutex);

		return ret;
	}

	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
                        DRM_MODE_OBJECT_CRTC);
	if (!drmmode_obj) {
		ret = -ENOENT;
		goto out_free;
	}
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));

	new_bo = drm_gem_object_lookup(dev, file_priv,
			put_image_rec->bo_handle);
	if (!new_bo) {
		ret = -ENOENT;
		goto out_free;
	}

	mutex_lock(&dev->mode_config.mutex);
	mutex_lock(&dev->struct_mutex);

	/* finish any work a signal interrupted before starting anew */
	if (overlay->hw_wedged) {
		ret = intel_overlay_recover_from_interrupt(overlay, 1);
		if (ret != 0)
			goto out_unlock;
	}

	/* moving to another crtc: switch off first, then re-check limits */
	if (overlay->crtc != crtc) {
		struct drm_display_mode *mode = &crtc->base.mode;
		ret = intel_overlay_switch_off(overlay);
		if (ret != 0)
			goto out_unlock;

		ret = check_overlay_possible_on_crtc(overlay, crtc);
		if (ret != 0)
			goto out_unlock;

		overlay->crtc = crtc;
		crtc->overlay = overlay;

		if (intel_panel_fitter_pipe(dev) == crtc->pipe
		    /* and the line is too wide, i.e. one-line-mode */
		    && mode->hdisplay > 1024) {
			overlay->pfit_active = 1;
			update_pfit_vscale_ratio(overlay);
		} else
			overlay->pfit_active = 0;
	}

	ret = check_overlay_dst(overlay, put_image_rec);
	if (ret != 0)
		goto out_unlock;

	/* compensate destination coords for panel fitter vertical scaling */
	if (overlay->pfit_active) {
		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
				 overlay->pfit_vscale_ratio);
		/* shifting right rounds downwards, so add 1 */
		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
				 overlay->pfit_vscale_ratio) + 1;
	} else {
		params->dst_y = put_image_rec->dst_y;
		params->dst_h = put_image_rec->dst_height;
	}
	params->dst_x = put_image_rec->dst_x;
	params->dst_w = put_image_rec->dst_width;

	params->src_w = put_image_rec->src_width;
	params->src_h = put_image_rec->src_height;
	params->src_scan_w = put_image_rec->src_scan_width;
	params->src_scan_h = put_image_rec->src_scan_height;
	/* the scanned-out portion cannot exceed the source buffer */
	if (params->src_scan_h > params->src_h
	    || params->src_scan_w > params->src_w) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = check_overlay_src(dev, put_image_rec, new_bo);
	if (ret != 0)
		goto out_unlock;
	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
	params->stride_Y = put_image_rec->stride_Y;
	params->stride_UV = put_image_rec->stride_UV;
	params->offset_Y = put_image_rec->offset_Y;
	params->offset_U = put_image_rec->offset_U;
	params->offset_V = put_image_rec->offset_V;

	/* Check scaling after src size to prevent a divide-by-zero. */
	ret = check_overlay_scaling(params);
	if (ret != 0)
		goto out_unlock;

	/* on success the overlay owns the reference on new_bo */
	ret = intel_overlay_do_put_image(overlay, new_bo, params);
	if (ret != 0)
		goto out_unlock;

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->mode_config.mutex);

	kfree(params);

	return 0;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->mode_config.mutex);
	drm_gem_object_unreference_unlocked(new_bo);
out_free:
	kfree(params);

	return ret;
}
1177
1178static void update_reg_attrs(struct intel_overlay *overlay,
1179 struct overlay_registers *regs)
1180{
1181 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
1182 regs->OCLRC1 = overlay->saturation;
1183}
1184
1185static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
1186{
1187 int i;
1188
1189 if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
1190 return false;
1191
1192 for (i = 0; i < 3; i++) {
1193 if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
1194 return false;
1195 }
1196
1197 return true;
1198}
1199
1200static bool check_gamma5_errata(u32 gamma5)
1201{
1202 int i;
1203
1204 for (i = 0; i < 3; i++) {
1205 if (((gamma5 >> i*8) & 0xff) == 0x80)
1206 return false;
1207 }
1208
1209 return true;
1210}
1211
1212static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1213{
1214 if (!check_gamma_bounds(0, attrs->gamma0)
1215 || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
1216 || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
1217 || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
1218 || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
1219 || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
1220 || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
1221 return -EINVAL;
1222 if (!check_gamma5_errata(attrs->gamma5))
1223 return -EINVAL;
1224 return 0;
1225}
1226
/**
 * intel_overlay_attrs - overlay attributes ioctl handler
 *
 * Without I915_OVERLAY_UPDATE_ATTRS: copies the current colour key,
 * brightness, contrast, saturation (and on i9xx the gamma ramp) back
 * to userspace.  With it: validates and applies the new values, and
 * optionally reprograms the gamma ramp.  Returns 0 on success or a
 * negative error code.
 */
int intel_overlay_attrs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
	struct drm_intel_overlay_attrs *attrs = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_overlay *overlay;
	struct overlay_registers *regs;
	int ret;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	overlay = dev_priv->overlay;
	if (!overlay) {
		DRM_DEBUG("userspace bug: no overlay\n");
		return -ENODEV;
	}

	mutex_lock(&dev->mode_config.mutex);
	mutex_lock(&dev->struct_mutex);

	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
		/* query: return the currently cached values */
		attrs->color_key = overlay->color_key;
		attrs->brightness = overlay->brightness;
		attrs->contrast = overlay->contrast;
		attrs->saturation = overlay->saturation;

		if (IS_I9XX(dev)) {
			attrs->gamma0 = I915_READ(OGAMC0);
			attrs->gamma1 = I915_READ(OGAMC1);
			attrs->gamma2 = I915_READ(OGAMC2);
			attrs->gamma3 = I915_READ(OGAMC3);
			attrs->gamma4 = I915_READ(OGAMC4);
			attrs->gamma5 = I915_READ(OGAMC5);
		}
		ret = 0;
	} else {
		/* update: validate each value before accepting it */
		overlay->color_key = attrs->color_key;
		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
			overlay->brightness = attrs->brightness;
		} else {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (attrs->contrast <= 255) {
			overlay->contrast = attrs->contrast;
		} else {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (attrs->saturation <= 1023) {
			overlay->saturation = attrs->saturation;
		} else {
			ret = -EINVAL;
			goto out_unlock;
		}

		regs = intel_overlay_map_regs_atomic(overlay);
		if (!regs) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		update_reg_attrs(overlay, regs);

		intel_overlay_unmap_regs_atomic(overlay);

		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
			/* gamma needs i9xx hardware and an inactive overlay */
			if (!IS_I9XX(dev)) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (overlay->active) {
				ret = -EBUSY;
				goto out_unlock;
			}

			ret = check_gamma(attrs);
			if (ret != 0)
				goto out_unlock;

			I915_WRITE(OGAMC0, attrs->gamma0);
			I915_WRITE(OGAMC1, attrs->gamma1);
			I915_WRITE(OGAMC2, attrs->gamma2);
			I915_WRITE(OGAMC3, attrs->gamma3);
			I915_WRITE(OGAMC4, attrs->gamma4);
			I915_WRITE(OGAMC5, attrs->gamma5);
		}
		ret = 0;
	}

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
1327
/**
 * intel_setup_overlay - allocate and initialize the video overlay
 *
 * Called at driver load.  Allocates struct intel_overlay and the
 * register bo (pinned in the GTT or attached as a physical object,
 * depending on the chipset), then programs the default attribute and
 * filter values.  Failures are non-fatal: the driver simply runs
 * without overlay support.
 */
void intel_setup_overlay(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_overlay *overlay;
	struct drm_gem_object *reg_bo;
	struct overlay_registers *regs;
	int ret;

	if (!OVERLAY_EXISTS(dev))
		return;

	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
	if (!overlay)
		return;
	overlay->dev = dev;

	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
	if (!reg_bo)
		goto out_free;
	overlay->reg_bo = to_intel_bo(reg_bo);

	if (OVERLAY_NONPHYSICAL(dev)) {
		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
		if (ret) {
			DRM_ERROR("failed to pin overlay register bo\n");
			goto out_free_bo;
		}
		overlay->flip_addr = overlay->reg_bo->gtt_offset;
	} else {
		ret = i915_gem_attach_phys_object(dev, reg_bo,
						  I915_GEM_PHYS_OVERLAY_REGS);
		if (ret) {
			DRM_ERROR("failed to attach phys overlay regs\n");
			goto out_free_bo;
		}
		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
	}

	/* init all values */
	overlay->color_key = 0x0101fe;
	overlay->brightness = -19;
	overlay->contrast = 75;
	overlay->saturation = 146;

	regs = intel_overlay_map_regs_atomic(overlay);
	if (!regs)
		goto out_free_bo;

	memset(regs, 0, sizeof(struct overlay_registers));
	update_polyphase_filter(regs);

	update_reg_attrs(overlay, regs);

	intel_overlay_unmap_regs_atomic(overlay);

	dev_priv->overlay = overlay;
	DRM_INFO("initialized overlay support\n");
	return;

out_free_bo:
	/* NOTE(review): when this path is reached after the bo was pinned
	 * or attached as a phys object above, it is dropped without an
	 * explicit unpin/detach — verify that unreference alone is
	 * sufficient on those paths. */
	drm_gem_object_unreference(reg_bo);
out_free:
	kfree(overlay);
	return;
}
1393
1394void intel_cleanup_overlay(struct drm_device *dev)
1395{
1396 drm_i915_private_t *dev_priv = dev->dev_private;
1397
1398 if (dev_priv->overlay) {
1399 /* The bo's should be free'd by the generic code already.
1400 * Furthermore modesetting teardown happens beforehand so the
1401 * hardware should be off already */
1402 BUG_ON(dev_priv->overlay->active);
1403
1404 kfree(dev_priv->overlay);
1405 }
1406}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 083bec2e50f9..87d953664cb0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -26,6 +26,7 @@
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 */ 27 */
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
@@ -35,8 +36,7 @@
35#include "i915_drm.h" 36#include "i915_drm.h"
36#include "i915_drv.h" 37#include "i915_drv.h"
37#include "intel_sdvo_regs.h" 38#include "intel_sdvo_regs.h"
38 39#include <linux/dmi.h>
39#undef SDVO_DEBUG
40 40
41static char *tv_format_names[] = { 41static char *tv_format_names[] = {
42 "NTSC_M" , "NTSC_J" , "NTSC_443", 42 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -54,7 +54,7 @@ struct intel_sdvo_priv {
54 u8 slave_addr; 54 u8 slave_addr;
55 55
56 /* Register for the SDVO device: SDVOB or SDVOC */ 56 /* Register for the SDVO device: SDVOB or SDVOC */
57 int output_device; 57 int sdvo_reg;
58 58
59 /* Active outputs controlled by this SDVO output */ 59 /* Active outputs controlled by this SDVO output */
60 uint16_t controlled_output; 60 uint16_t controlled_output;
@@ -124,7 +124,7 @@ struct intel_sdvo_priv {
124 */ 124 */
125 struct intel_sdvo_encode encode; 125 struct intel_sdvo_encode encode;
126 126
127 /* DDC bus used by this SDVO output */ 127 /* DDC bus used by this SDVO encoder */
128 uint8_t ddc_bus; 128 uint8_t ddc_bus;
129 129
130 /* Mac mini hack -- use the same DDC as the analog connector */ 130 /* Mac mini hack -- use the same DDC as the analog connector */
@@ -162,22 +162,22 @@ struct intel_sdvo_priv {
162}; 162};
163 163
164static bool 164static bool
165intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); 165intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
166 166
167/** 167/**
168 * Writes the SDVOB or SDVOC with the given value, but always writes both 168 * Writes the SDVOB or SDVOC with the given value, but always writes both
169 * SDVOB and SDVOC to work around apparent hardware issues (according to 169 * SDVOB and SDVOC to work around apparent hardware issues (according to
170 * comments in the BIOS). 170 * comments in the BIOS).
171 */ 171 */
172static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) 172static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
173{ 173{
174 struct drm_device *dev = intel_output->base.dev; 174 struct drm_device *dev = intel_encoder->base.dev;
175 struct drm_i915_private *dev_priv = dev->dev_private; 175 struct drm_i915_private *dev_priv = dev->dev_private;
176 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 176 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
177 u32 bval = val, cval = val; 177 u32 bval = val, cval = val;
178 int i; 178 int i;
179 179
180 if (sdvo_priv->output_device == SDVOB) { 180 if (sdvo_priv->sdvo_reg == SDVOB) {
181 cval = I915_READ(SDVOC); 181 cval = I915_READ(SDVOC);
182 } else { 182 } else {
183 bval = I915_READ(SDVOB); 183 bval = I915_READ(SDVOB);
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
196 } 196 }
197} 197}
198 198
199static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, 199static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
200 u8 *ch) 200 u8 *ch)
201{ 201{
202 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 202 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
203 u8 out_buf[2]; 203 u8 out_buf[2];
204 u8 buf[2]; 204 u8 buf[2];
205 int ret; 205 int ret;
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
222 out_buf[0] = addr; 222 out_buf[0] = addr;
223 out_buf[1] = 0; 223 out_buf[1] = 0;
224 224
225 if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) 225 if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
226 { 226 {
227 *ch = buf[0]; 227 *ch = buf[0];
228 return true; 228 return true;
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
232 return false; 232 return false;
233} 233}
234 234
235static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, 235static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
236 u8 ch) 236 u8 ch)
237{ 237{
238 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 238 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
239 u8 out_buf[2]; 239 u8 out_buf[2];
240 struct i2c_msg msgs[] = { 240 struct i2c_msg msgs[] = {
241 { 241 {
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
249 out_buf[0] = addr; 249 out_buf[0] = addr;
250 out_buf[1] = ch; 250 out_buf[1] = ch;
251 251
252 if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) 252 if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
253 { 253 {
254 return true; 254 return true;
255 } 255 }
@@ -353,14 +353,13 @@ static const struct _sdvo_cmd_name {
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
354}; 354};
355 355
356#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 356#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
357#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) 357#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
358 358
359#ifdef SDVO_DEBUG 359static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
360static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
361 void *args, int args_len) 360 void *args, int args_len)
362{ 361{
363 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 362 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
364 int i; 363 int i;
365 364
366 DRM_DEBUG_KMS("%s: W: %02X ", 365 DRM_DEBUG_KMS("%s: W: %02X ",
@@ -379,26 +378,22 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
379 DRM_LOG_KMS("(%02X)", cmd); 378 DRM_LOG_KMS("(%02X)", cmd);
380 DRM_LOG_KMS("\n"); 379 DRM_LOG_KMS("\n");
381} 380}
382#else
383#define intel_sdvo_debug_write(o, c, a, l)
384#endif
385 381
386static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, 382static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
387 void *args, int args_len) 383 void *args, int args_len)
388{ 384{
389 int i; 385 int i;
390 386
391 intel_sdvo_debug_write(intel_output, cmd, args, args_len); 387 intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
392 388
393 for (i = 0; i < args_len; i++) { 389 for (i = 0; i < args_len; i++) {
394 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, 390 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
395 ((u8*)args)[i]); 391 ((u8*)args)[i]);
396 } 392 }
397 393
398 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); 394 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
399} 395}
400 396
401#ifdef SDVO_DEBUG
402static const char *cmd_status_names[] = { 397static const char *cmd_status_names[] = {
403 "Power on", 398 "Power on",
404 "Success", 399 "Success",
@@ -409,11 +404,11 @@ static const char *cmd_status_names[] = {
409 "Scaling not supported" 404 "Scaling not supported"
410}; 405};
411 406
412static void intel_sdvo_debug_response(struct intel_output *intel_output, 407static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
413 void *response, int response_len, 408 void *response, int response_len,
414 u8 status) 409 u8 status)
415{ 410{
416 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 411 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
417 int i; 412 int i;
418 413
419 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); 414 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -427,11 +422,8 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
427 DRM_LOG_KMS("(??? %d)", status); 422 DRM_LOG_KMS("(??? %d)", status);
428 DRM_LOG_KMS("\n"); 423 DRM_LOG_KMS("\n");
429} 424}
430#else
431#define intel_sdvo_debug_response(o, r, l, s)
432#endif
433 425
434static u8 intel_sdvo_read_response(struct intel_output *intel_output, 426static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
435 void *response, int response_len) 427 void *response, int response_len)
436{ 428{
437 int i; 429 int i;
@@ -441,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
441 while (retry--) { 433 while (retry--) {
442 /* Read the command response */ 434 /* Read the command response */
443 for (i = 0; i < response_len; i++) { 435 for (i = 0; i < response_len; i++) {
444 intel_sdvo_read_byte(intel_output, 436 intel_sdvo_read_byte(intel_encoder,
445 SDVO_I2C_RETURN_0 + i, 437 SDVO_I2C_RETURN_0 + i,
446 &((u8 *)response)[i]); 438 &((u8 *)response)[i]);
447 } 439 }
448 440
449 /* read the return status */ 441 /* read the return status */
450 intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, 442 intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
451 &status); 443 &status);
452 444
453 intel_sdvo_debug_response(intel_output, response, response_len, 445 intel_sdvo_debug_response(intel_encoder, response, response_len,
454 status); 446 status);
455 if (status != SDVO_CMD_STATUS_PENDING) 447 if (status != SDVO_CMD_STATUS_PENDING)
456 return status; 448 return status;
@@ -472,17 +464,66 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
472} 464}
473 465
474/** 466/**
475 * Don't check status code from this as it switches the bus back to the 467 * Try to read the response after issuie the DDC switch command. But it
476 * SDVO chips which defeats the purpose of doing a bus switch in the first 468 * is noted that we must do the action of reading response and issuing DDC
477 * place. 469 * switch command in one I2C transaction. Otherwise when we try to start
470 * another I2C transaction after issuing the DDC bus switch, it will be
471 * switched to the internal SDVO register.
478 */ 472 */
479static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 473static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
480 u8 target) 474 u8 target)
481{ 475{
482 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); 476 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
477 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
478 struct i2c_msg msgs[] = {
479 {
480 .addr = sdvo_priv->slave_addr >> 1,
481 .flags = 0,
482 .len = 2,
483 .buf = out_buf,
484 },
485 /* the following two are to read the response */
486 {
487 .addr = sdvo_priv->slave_addr >> 1,
488 .flags = 0,
489 .len = 1,
490 .buf = cmd_buf,
491 },
492 {
493 .addr = sdvo_priv->slave_addr >> 1,
494 .flags = I2C_M_RD,
495 .len = 1,
496 .buf = ret_value,
497 },
498 };
499
500 intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
501 &target, 1);
502 /* write the DDC switch command argument */
503 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
504
505 out_buf[0] = SDVO_I2C_OPCODE;
506 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
507 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
508 cmd_buf[1] = 0;
509 ret_value[0] = 0;
510 ret_value[1] = 0;
511
512 ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
513 if (ret != 3) {
514 /* failure in I2C transfer */
515 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
516 return;
517 }
518 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
519 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
520 ret_value[0]);
521 return;
522 }
523 return;
483} 524}
484 525
485static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 526static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
486{ 527{
487 struct intel_sdvo_set_target_input_args targets = {0}; 528 struct intel_sdvo_set_target_input_args targets = {0};
488 u8 status; 529 u8 status;
@@ -493,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
493 if (target_1) 534 if (target_1)
494 targets.target_1 = 1; 535 targets.target_1 = 1;
495 536
496 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, 537 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
497 sizeof(targets)); 538 sizeof(targets));
498 539
499 status = intel_sdvo_read_response(intel_output, NULL, 0); 540 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
500 541
501 return (status == SDVO_CMD_STATUS_SUCCESS); 542 return (status == SDVO_CMD_STATUS_SUCCESS);
502} 543}
@@ -507,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
507 * This function is making an assumption about the layout of the response, 548 * This function is making an assumption about the layout of the response,
508 * which should be checked against the docs. 549 * which should be checked against the docs.
509 */ 550 */
510static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) 551static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
511{ 552{
512 struct intel_sdvo_get_trained_inputs_response response; 553 struct intel_sdvo_get_trained_inputs_response response;
513 u8 status; 554 u8 status;
514 555
515 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); 556 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
516 status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); 557 status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
517 if (status != SDVO_CMD_STATUS_SUCCESS) 558 if (status != SDVO_CMD_STATUS_SUCCESS)
518 return false; 559 return false;
519 560
@@ -522,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
522 return true; 563 return true;
523} 564}
524 565
525static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, 566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
526 u16 *outputs) 567 u16 *outputs)
527{ 568{
528 u8 status; 569 u8 status;
529 570
530 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); 571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
531 status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); 572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
532 573
533 return (status == SDVO_CMD_STATUS_SUCCESS); 574 return (status == SDVO_CMD_STATUS_SUCCESS);
534} 575}
535 576
536static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, 577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
537 u16 outputs) 578 u16 outputs)
538{ 579{
539 u8 status; 580 u8 status;
540 581
541 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, 582 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
542 sizeof(outputs)); 583 sizeof(outputs));
543 status = intel_sdvo_read_response(intel_output, NULL, 0); 584 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
544 return (status == SDVO_CMD_STATUS_SUCCESS); 585 return (status == SDVO_CMD_STATUS_SUCCESS);
545} 586}
546 587
547static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, 588static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
548 int mode) 589 int mode)
549{ 590{
550 u8 status, state = SDVO_ENCODER_STATE_ON; 591 u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -564,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
564 break; 605 break;
565 } 606 }
566 607
567 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 608 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
568 sizeof(state)); 609 sizeof(state));
569 status = intel_sdvo_read_response(intel_output, NULL, 0); 610 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
570 611
571 return (status == SDVO_CMD_STATUS_SUCCESS); 612 return (status == SDVO_CMD_STATUS_SUCCESS);
572} 613}
573 614
574static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, 615static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
575 int *clock_min, 616 int *clock_min,
576 int *clock_max) 617 int *clock_max)
577{ 618{
578 struct intel_sdvo_pixel_clock_range clocks; 619 struct intel_sdvo_pixel_clock_range clocks;
579 u8 status; 620 u8 status;
580 621
581 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, 622 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
582 NULL, 0); 623 NULL, 0);
583 624
584 status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); 625 status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
585 626
586 if (status != SDVO_CMD_STATUS_SUCCESS) 627 if (status != SDVO_CMD_STATUS_SUCCESS)
587 return false; 628 return false;
@@ -593,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
593 return true; 634 return true;
594} 635}
595 636
596static bool intel_sdvo_set_target_output(struct intel_output *intel_output, 637static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
597 u16 outputs) 638 u16 outputs)
598{ 639{
599 u8 status; 640 u8 status;
600 641
601 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, 642 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
602 sizeof(outputs)); 643 sizeof(outputs));
603 644
604 status = intel_sdvo_read_response(intel_output, NULL, 0); 645 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
605 return (status == SDVO_CMD_STATUS_SUCCESS); 646 return (status == SDVO_CMD_STATUS_SUCCESS);
606} 647}
607 648
608static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, 649static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
609 struct intel_sdvo_dtd *dtd) 650 struct intel_sdvo_dtd *dtd)
610{ 651{
611 u8 status; 652 u8 status;
612 653
613 intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); 654 intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
614 status = intel_sdvo_read_response(intel_output, &dtd->part1, 655 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
615 sizeof(dtd->part1)); 656 sizeof(dtd->part1));
616 if (status != SDVO_CMD_STATUS_SUCCESS) 657 if (status != SDVO_CMD_STATUS_SUCCESS)
617 return false; 658 return false;
618 659
619 intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); 660 intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
620 status = intel_sdvo_read_response(intel_output, &dtd->part2, 661 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
621 sizeof(dtd->part2)); 662 sizeof(dtd->part2));
622 if (status != SDVO_CMD_STATUS_SUCCESS) 663 if (status != SDVO_CMD_STATUS_SUCCESS)
623 return false; 664 return false;
@@ -625,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
625 return true; 666 return true;
626} 667}
627 668
628static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, 669static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
629 struct intel_sdvo_dtd *dtd) 670 struct intel_sdvo_dtd *dtd)
630{ 671{
631 return intel_sdvo_get_timing(intel_output, 672 return intel_sdvo_get_timing(intel_encoder,
632 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); 673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
633} 674}
634 675
635static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, 676static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
636 struct intel_sdvo_dtd *dtd) 677 struct intel_sdvo_dtd *dtd)
637{ 678{
638 return intel_sdvo_get_timing(intel_output, 679 return intel_sdvo_get_timing(intel_encoder,
639 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); 680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
640} 681}
641 682
642static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, 683static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
643 struct intel_sdvo_dtd *dtd) 684 struct intel_sdvo_dtd *dtd)
644{ 685{
645 u8 status; 686 u8 status;
646 687
647 intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); 688 intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
648 status = intel_sdvo_read_response(intel_output, NULL, 0); 689 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
649 if (status != SDVO_CMD_STATUS_SUCCESS) 690 if (status != SDVO_CMD_STATUS_SUCCESS)
650 return false; 691 return false;
651 692
652 intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); 693 intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
653 status = intel_sdvo_read_response(intel_output, NULL, 0); 694 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
654 if (status != SDVO_CMD_STATUS_SUCCESS) 695 if (status != SDVO_CMD_STATUS_SUCCESS)
655 return false; 696 return false;
656 697
657 return true; 698 return true;
658} 699}
659 700
660static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, 701static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
661 struct intel_sdvo_dtd *dtd) 702 struct intel_sdvo_dtd *dtd)
662{ 703{
663 return intel_sdvo_set_timing(intel_output, 704 return intel_sdvo_set_timing(intel_encoder,
664 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); 705 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
665} 706}
666 707
667static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, 708static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
668 struct intel_sdvo_dtd *dtd) 709 struct intel_sdvo_dtd *dtd)
669{ 710{
670 return intel_sdvo_set_timing(intel_output, 711 return intel_sdvo_set_timing(intel_encoder,
671 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 712 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
672} 713}
673 714
674static bool 715static bool
675intel_sdvo_create_preferred_input_timing(struct intel_output *output, 716intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
676 uint16_t clock, 717 uint16_t clock,
677 uint16_t width, 718 uint16_t width,
678 uint16_t height) 719 uint16_t height)
679{ 720{
680 struct intel_sdvo_preferred_input_timing_args args; 721 struct intel_sdvo_preferred_input_timing_args args;
681 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 722 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
682 uint8_t status; 723 uint8_t status;
683 724
684 memset(&args, 0, sizeof(args)); 725 memset(&args, 0, sizeof(args));
@@ -692,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
692 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) 733 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
693 args.scaled = 1; 734 args.scaled = 1;
694 735
695 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 736 intel_sdvo_write_cmd(intel_encoder,
737 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
696 &args, sizeof(args)); 738 &args, sizeof(args));
697 status = intel_sdvo_read_response(output, NULL, 0); 739 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
698 if (status != SDVO_CMD_STATUS_SUCCESS) 740 if (status != SDVO_CMD_STATUS_SUCCESS)
699 return false; 741 return false;
700 742
701 return true; 743 return true;
702} 744}
703 745
704static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, 746static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
705 struct intel_sdvo_dtd *dtd) 747 struct intel_sdvo_dtd *dtd)
706{ 748{
707 bool status; 749 bool status;
708 750
709 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, 751 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
710 NULL, 0); 752 NULL, 0);
711 753
712 status = intel_sdvo_read_response(output, &dtd->part1, 754 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
713 sizeof(dtd->part1)); 755 sizeof(dtd->part1));
714 if (status != SDVO_CMD_STATUS_SUCCESS) 756 if (status != SDVO_CMD_STATUS_SUCCESS)
715 return false; 757 return false;
716 758
717 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, 759 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
718 NULL, 0); 760 NULL, 0);
719 761
720 status = intel_sdvo_read_response(output, &dtd->part2, 762 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
721 sizeof(dtd->part2)); 763 sizeof(dtd->part2));
722 if (status != SDVO_CMD_STATUS_SUCCESS) 764 if (status != SDVO_CMD_STATUS_SUCCESS)
723 return false; 765 return false;
@@ -725,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
725 return false; 767 return false;
726} 768}
727 769
728static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) 770static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
729{ 771{
730 u8 response, status; 772 u8 response, status;
731 773
732 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); 774 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
733 status = intel_sdvo_read_response(intel_output, &response, 1); 775 status = intel_sdvo_read_response(intel_encoder, &response, 1);
734 776
735 if (status != SDVO_CMD_STATUS_SUCCESS) { 777 if (status != SDVO_CMD_STATUS_SUCCESS) {
736 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); 778 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
@@ -742,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
742 return response; 784 return response;
743} 785}
744 786
745static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) 787static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
746{ 788{
747 u8 status; 789 u8 status;
748 790
749 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); 791 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
750 status = intel_sdvo_read_response(intel_output, NULL, 0); 792 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
751 if (status != SDVO_CMD_STATUS_SUCCESS) 793 if (status != SDVO_CMD_STATUS_SUCCESS)
752 return false; 794 return false;
753 795
@@ -836,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
836 mode->flags |= DRM_MODE_FLAG_PVSYNC; 878 mode->flags |= DRM_MODE_FLAG_PVSYNC;
837} 879}
838 880
839static bool intel_sdvo_get_supp_encode(struct intel_output *output, 881static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
840 struct intel_sdvo_encode *encode) 882 struct intel_sdvo_encode *encode)
841{ 883{
842 uint8_t status; 884 uint8_t status;
843 885
844 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); 886 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
845 status = intel_sdvo_read_response(output, encode, sizeof(*encode)); 887 status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
846 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ 888 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
847 memset(encode, 0, sizeof(*encode)); 889 memset(encode, 0, sizeof(*encode));
848 return false; 890 return false;
@@ -851,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
851 return true; 893 return true;
852} 894}
853 895
854static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) 896static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
897 uint8_t mode)
855{ 898{
856 uint8_t status; 899 uint8_t status;
857 900
858 intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); 901 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
859 status = intel_sdvo_read_response(output, NULL, 0); 902 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
860 903
861 return (status == SDVO_CMD_STATUS_SUCCESS); 904 return (status == SDVO_CMD_STATUS_SUCCESS);
862} 905}
863 906
864static bool intel_sdvo_set_colorimetry(struct intel_output *output, 907static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
865 uint8_t mode) 908 uint8_t mode)
866{ 909{
867 uint8_t status; 910 uint8_t status;
868 911
869 intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); 912 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
870 status = intel_sdvo_read_response(output, NULL, 0); 913 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
871 914
872 return (status == SDVO_CMD_STATUS_SUCCESS); 915 return (status == SDVO_CMD_STATUS_SUCCESS);
873} 916}
874 917
875#if 0 918#if 0
876static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) 919static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
877{ 920{
878 int i, j; 921 int i, j;
879 uint8_t set_buf_index[2]; 922 uint8_t set_buf_index[2];
@@ -882,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
882 uint8_t buf[48]; 925 uint8_t buf[48];
883 uint8_t *pos; 926 uint8_t *pos;
884 927
885 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); 928 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
886 intel_sdvo_read_response(output, &av_split, 1); 929 intel_sdvo_read_response(encoder, &av_split, 1);
887 930
888 for (i = 0; i <= av_split; i++) { 931 for (i = 0; i <= av_split; i++) {
889 set_buf_index[0] = i; set_buf_index[1] = 0; 932 set_buf_index[0] = i; set_buf_index[1] = 0;
890 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, 933 intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
891 set_buf_index, 2); 934 set_buf_index, 2);
892 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); 935 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
893 intel_sdvo_read_response(output, &buf_size, 1); 936 intel_sdvo_read_response(encoder, &buf_size, 1);
894 937
895 pos = buf; 938 pos = buf;
896 for (j = 0; j <= buf_size; j += 8) { 939 for (j = 0; j <= buf_size; j += 8) {
897 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, 940 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
898 NULL, 0); 941 NULL, 0);
899 intel_sdvo_read_response(output, pos, 8); 942 intel_sdvo_read_response(encoder, pos, 8);
900 pos += 8; 943 pos += 8;
901 } 944 }
902 } 945 }
903} 946}
904#endif 947#endif
905 948
906static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, 949static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
907 uint8_t *data, int8_t size, uint8_t tx_rate) 950 int index,
951 uint8_t *data, int8_t size, uint8_t tx_rate)
908{ 952{
909 uint8_t set_buf_index[2]; 953 uint8_t set_buf_index[2];
910 954
911 set_buf_index[0] = index; 955 set_buf_index[0] = index;
912 set_buf_index[1] = 0; 956 set_buf_index[1] = 0;
913 957
914 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); 958 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
959 set_buf_index, 2);
915 960
916 for (; size > 0; size -= 8) { 961 for (; size > 0; size -= 8) {
917 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); 962 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
918 data += 8; 963 data += 8;
919 } 964 }
920 965
921 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); 966 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
922} 967}
923 968
924static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) 969static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -993,7 +1038,7 @@ struct dip_infoframe {
993 } __attribute__ ((packed)) u; 1038 } __attribute__ ((packed)) u;
994} __attribute__((packed)); 1039} __attribute__((packed));
995 1040
996static void intel_sdvo_set_avi_infoframe(struct intel_output *output, 1041static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
997 struct drm_display_mode * mode) 1042 struct drm_display_mode * mode)
998{ 1043{
999 struct dip_infoframe avi_if = { 1044 struct dip_infoframe avi_if = {
@@ -1004,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
1004 1049
1005 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 1050 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
1006 4 + avi_if.len); 1051 4 + avi_if.len);
1007 intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, 1052 intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
1053 4 + avi_if.len,
1008 SDVO_HBUF_TX_VSYNC); 1054 SDVO_HBUF_TX_VSYNC);
1009} 1055}
1010 1056
1011static void intel_sdvo_set_tv_format(struct intel_output *output) 1057static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
1012{ 1058{
1013 1059
1014 struct intel_sdvo_tv_format format; 1060 struct intel_sdvo_tv_format format;
1015 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1061 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1016 uint32_t format_map, i; 1062 uint32_t format_map, i;
1017 uint8_t status; 1063 uint8_t status;
1018 1064
@@ -1025,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
1025 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 1071 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
1026 sizeof(format) : sizeof(format_map)); 1072 sizeof(format) : sizeof(format_map));
1027 1073
1028 intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, 1074 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
1029 sizeof(format)); 1075 sizeof(format));
1030 1076
1031 status = intel_sdvo_read_response(output, NULL, 0); 1077 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1032 if (status != SDVO_CMD_STATUS_SUCCESS) 1078 if (status != SDVO_CMD_STATUS_SUCCESS)
1033 DRM_DEBUG_KMS("%s: Failed to set TV format\n", 1079 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
1034 SDVO_NAME(sdvo_priv)); 1080 SDVO_NAME(sdvo_priv));
@@ -1038,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1038 struct drm_display_mode *mode, 1084 struct drm_display_mode *mode,
1039 struct drm_display_mode *adjusted_mode) 1085 struct drm_display_mode *adjusted_mode)
1040{ 1086{
1041 struct intel_output *output = enc_to_intel_output(encoder); 1087 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1042 struct intel_sdvo_priv *dev_priv = output->dev_priv; 1088 struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
1043 1089
1044 if (dev_priv->is_tv) { 1090 if (dev_priv->is_tv) {
1045 struct intel_sdvo_dtd output_dtd; 1091 struct intel_sdvo_dtd output_dtd;
@@ -1054,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1054 1100
1055 /* Set output timings */ 1101 /* Set output timings */
1056 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1102 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1057 intel_sdvo_set_target_output(output, 1103 intel_sdvo_set_target_output(intel_encoder,
1058 dev_priv->controlled_output); 1104 dev_priv->controlled_output);
1059 intel_sdvo_set_output_timing(output, &output_dtd); 1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1060 1106
1061 /* Set the input timing to the screen. Assume always input 0. */ 1107 /* Set the input timing to the screen. Assume always input 0. */
1062 intel_sdvo_set_target_input(output, true, false); 1108 intel_sdvo_set_target_input(intel_encoder, true, false);
1063 1109
1064 1110
1065 success = intel_sdvo_create_preferred_input_timing(output, 1111 success = intel_sdvo_create_preferred_input_timing(intel_encoder,
1066 mode->clock / 10, 1112 mode->clock / 10,
1067 mode->hdisplay, 1113 mode->hdisplay,
1068 mode->vdisplay); 1114 mode->vdisplay);
1069 if (success) { 1115 if (success) {
1070 struct intel_sdvo_dtd input_dtd; 1116 struct intel_sdvo_dtd input_dtd;
1071 1117
1072 intel_sdvo_get_preferred_input_timing(output, 1118 intel_sdvo_get_preferred_input_timing(intel_encoder,
1073 &input_dtd); 1119 &input_dtd);
1074 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1120 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1075 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1121 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1092,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1092 intel_sdvo_get_dtd_from_mode(&output_dtd, 1138 intel_sdvo_get_dtd_from_mode(&output_dtd,
1093 dev_priv->sdvo_lvds_fixed_mode); 1139 dev_priv->sdvo_lvds_fixed_mode);
1094 1140
1095 intel_sdvo_set_target_output(output, 1141 intel_sdvo_set_target_output(intel_encoder,
1096 dev_priv->controlled_output); 1142 dev_priv->controlled_output);
1097 intel_sdvo_set_output_timing(output, &output_dtd); 1143 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1098 1144
1099 /* Set the input timing to the screen. Assume always input 0. */ 1145 /* Set the input timing to the screen. Assume always input 0. */
1100 intel_sdvo_set_target_input(output, true, false); 1146 intel_sdvo_set_target_input(intel_encoder, true, false);
1101 1147
1102 1148
1103 success = intel_sdvo_create_preferred_input_timing( 1149 success = intel_sdvo_create_preferred_input_timing(
1104 output, 1150 intel_encoder,
1105 mode->clock / 10, 1151 mode->clock / 10,
1106 mode->hdisplay, 1152 mode->hdisplay,
1107 mode->vdisplay); 1153 mode->vdisplay);
@@ -1109,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1109 if (success) { 1155 if (success) {
1110 struct intel_sdvo_dtd input_dtd; 1156 struct intel_sdvo_dtd input_dtd;
1111 1157
1112 intel_sdvo_get_preferred_input_timing(output, 1158 intel_sdvo_get_preferred_input_timing(intel_encoder,
1113 &input_dtd); 1159 &input_dtd);
1114 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1160 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1115 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1161 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1141,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1141 struct drm_i915_private *dev_priv = dev->dev_private; 1187 struct drm_i915_private *dev_priv = dev->dev_private;
1142 struct drm_crtc *crtc = encoder->crtc; 1188 struct drm_crtc *crtc = encoder->crtc;
1143 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1189 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1144 struct intel_output *output = enc_to_intel_output(encoder); 1190 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1145 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1191 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1146 u32 sdvox = 0; 1192 u32 sdvox = 0;
1147 int sdvo_pixel_multiply; 1193 int sdvo_pixel_multiply;
1148 struct intel_sdvo_in_out_map in_out; 1194 struct intel_sdvo_in_out_map in_out;
@@ -1161,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1161 in_out.in0 = sdvo_priv->controlled_output; 1207 in_out.in0 = sdvo_priv->controlled_output;
1162 in_out.in1 = 0; 1208 in_out.in1 = 0;
1163 1209
1164 intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, 1210 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
1165 &in_out, sizeof(in_out)); 1211 &in_out, sizeof(in_out));
1166 status = intel_sdvo_read_response(output, NULL, 0); 1212 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1167 1213
1168 if (sdvo_priv->is_hdmi) { 1214 if (sdvo_priv->is_hdmi) {
1169 intel_sdvo_set_avi_infoframe(output, mode); 1215 intel_sdvo_set_avi_infoframe(intel_encoder, mode);
1170 sdvox |= SDVO_AUDIO_ENABLE; 1216 sdvox |= SDVO_AUDIO_ENABLE;
1171 } 1217 }
1172 1218
@@ -1183,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1183 */ 1229 */
1184 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1230 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1185 /* Set the output timing to the screen */ 1231 /* Set the output timing to the screen */
1186 intel_sdvo_set_target_output(output, 1232 intel_sdvo_set_target_output(intel_encoder,
1187 sdvo_priv->controlled_output); 1233 sdvo_priv->controlled_output);
1188 intel_sdvo_set_output_timing(output, &input_dtd); 1234 intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
1189 } 1235 }
1190 1236
1191 /* Set the input timing to the screen. Assume always input 0. */ 1237 /* Set the input timing to the screen. Assume always input 0. */
1192 intel_sdvo_set_target_input(output, true, false); 1238 intel_sdvo_set_target_input(intel_encoder, true, false);
1193 1239
1194 if (sdvo_priv->is_tv) 1240 if (sdvo_priv->is_tv)
1195 intel_sdvo_set_tv_format(output); 1241 intel_sdvo_set_tv_format(intel_encoder);
1196 1242
1197 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1243 /* We would like to use intel_sdvo_create_preferred_input_timing() to
1198 * provide the device with a timing it can support, if it supports that 1244 * provide the device with a timing it can support, if it supports that
@@ -1200,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1200 * output the preferred timing, and we don't support that currently. 1246 * output the preferred timing, and we don't support that currently.
1201 */ 1247 */
1202#if 0 1248#if 0
1203 success = intel_sdvo_create_preferred_input_timing(output, clock, 1249 success = intel_sdvo_create_preferred_input_timing(encoder, clock,
1204 width, height); 1250 width, height);
1205 if (success) { 1251 if (success) {
1206 struct intel_sdvo_dtd *input_dtd; 1252 struct intel_sdvo_dtd *input_dtd;
1207 1253
1208 intel_sdvo_get_preferred_input_timing(output, &input_dtd); 1254 intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
1209 intel_sdvo_set_input_timing(output, &input_dtd); 1255 intel_sdvo_set_input_timing(encoder, &input_dtd);
1210 } 1256 }
1211#else 1257#else
1212 intel_sdvo_set_input_timing(output, &input_dtd); 1258 intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
1213#endif 1259#endif
1214 1260
1215 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1261 switch (intel_sdvo_get_pixel_multiplier(mode)) {
1216 case 1: 1262 case 1:
1217 intel_sdvo_set_clock_rate_mult(output, 1263 intel_sdvo_set_clock_rate_mult(intel_encoder,
1218 SDVO_CLOCK_RATE_MULT_1X); 1264 SDVO_CLOCK_RATE_MULT_1X);
1219 break; 1265 break;
1220 case 2: 1266 case 2:
1221 intel_sdvo_set_clock_rate_mult(output, 1267 intel_sdvo_set_clock_rate_mult(intel_encoder,
1222 SDVO_CLOCK_RATE_MULT_2X); 1268 SDVO_CLOCK_RATE_MULT_2X);
1223 break; 1269 break;
1224 case 4: 1270 case 4:
1225 intel_sdvo_set_clock_rate_mult(output, 1271 intel_sdvo_set_clock_rate_mult(intel_encoder,
1226 SDVO_CLOCK_RATE_MULT_4X); 1272 SDVO_CLOCK_RATE_MULT_4X);
1227 break; 1273 break;
1228 } 1274 }
@@ -1233,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1233 SDVO_VSYNC_ACTIVE_HIGH | 1279 SDVO_VSYNC_ACTIVE_HIGH |
1234 SDVO_HSYNC_ACTIVE_HIGH; 1280 SDVO_HSYNC_ACTIVE_HIGH;
1235 } else { 1281 } else {
1236 sdvox |= I915_READ(sdvo_priv->output_device); 1282 sdvox |= I915_READ(sdvo_priv->sdvo_reg);
1237 switch (sdvo_priv->output_device) { 1283 switch (sdvo_priv->sdvo_reg) {
1238 case SDVOB: 1284 case SDVOB:
1239 sdvox &= SDVOB_PRESERVE_MASK; 1285 sdvox &= SDVOB_PRESERVE_MASK;
1240 break; 1286 break;
@@ -1258,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1258 1304
1259 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) 1305 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
1260 sdvox |= SDVO_STALL_SELECT; 1306 sdvox |= SDVO_STALL_SELECT;
1261 intel_sdvo_write_sdvox(output, sdvox); 1307 intel_sdvo_write_sdvox(intel_encoder, sdvox);
1262} 1308}
1263 1309
1264static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1310static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1265{ 1311{
1266 struct drm_device *dev = encoder->dev; 1312 struct drm_device *dev = encoder->dev;
1267 struct drm_i915_private *dev_priv = dev->dev_private; 1313 struct drm_i915_private *dev_priv = dev->dev_private;
1268 struct intel_output *intel_output = enc_to_intel_output(encoder); 1314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1269 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1315 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1270 u32 temp; 1316 u32 temp;
1271 1317
1272 if (mode != DRM_MODE_DPMS_ON) { 1318 if (mode != DRM_MODE_DPMS_ON) {
1273 intel_sdvo_set_active_outputs(intel_output, 0); 1319 intel_sdvo_set_active_outputs(intel_encoder, 0);
1274 if (0) 1320 if (0)
1275 intel_sdvo_set_encoder_power_state(intel_output, mode); 1321 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1276 1322
1277 if (mode == DRM_MODE_DPMS_OFF) { 1323 if (mode == DRM_MODE_DPMS_OFF) {
1278 temp = I915_READ(sdvo_priv->output_device); 1324 temp = I915_READ(sdvo_priv->sdvo_reg);
1279 if ((temp & SDVO_ENABLE) != 0) { 1325 if ((temp & SDVO_ENABLE) != 0) {
1280 intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); 1326 intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
1281 } 1327 }
1282 } 1328 }
1283 } else { 1329 } else {
@@ -1285,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1285 int i; 1331 int i;
1286 u8 status; 1332 u8 status;
1287 1333
1288 temp = I915_READ(sdvo_priv->output_device); 1334 temp = I915_READ(sdvo_priv->sdvo_reg);
1289 if ((temp & SDVO_ENABLE) == 0) 1335 if ((temp & SDVO_ENABLE) == 0)
1290 intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); 1336 intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
1291 for (i = 0; i < 2; i++) 1337 for (i = 0; i < 2; i++)
1292 intel_wait_for_vblank(dev); 1338 intel_wait_for_vblank(dev);
1293 1339
1294 status = intel_sdvo_get_trained_inputs(intel_output, &input1, 1340 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
1295 &input2); 1341 &input2);
1296 1342
1297 1343
@@ -1305,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1305 } 1351 }
1306 1352
1307 if (0) 1353 if (0)
1308 intel_sdvo_set_encoder_power_state(intel_output, mode); 1354 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1309 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); 1355 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
1310 } 1356 }
1311 return; 1357 return;
1312} 1358}
@@ -1315,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector)
1315{ 1361{
1316 struct drm_device *dev = connector->dev; 1362 struct drm_device *dev = connector->dev;
1317 struct drm_i915_private *dev_priv = dev->dev_private; 1363 struct drm_i915_private *dev_priv = dev->dev_private;
1318 struct intel_output *intel_output = to_intel_output(connector); 1364 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1319 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1365 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1320 int o; 1366 int o;
1321 1367
1322 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); 1368 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
1323 intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); 1369 intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
1324 1370
1325 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1371 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1326 intel_sdvo_set_target_input(intel_output, true, false); 1372 intel_sdvo_set_target_input(intel_encoder, true, false);
1327 intel_sdvo_get_input_timing(intel_output, 1373 intel_sdvo_get_input_timing(intel_encoder,
1328 &sdvo_priv->save_input_dtd_1); 1374 &sdvo_priv->save_input_dtd_1);
1329 } 1375 }
1330 1376
1331 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1377 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1332 intel_sdvo_set_target_input(intel_output, false, true); 1378 intel_sdvo_set_target_input(intel_encoder, false, true);
1333 intel_sdvo_get_input_timing(intel_output, 1379 intel_sdvo_get_input_timing(intel_encoder,
1334 &sdvo_priv->save_input_dtd_2); 1380 &sdvo_priv->save_input_dtd_2);
1335 } 1381 }
1336 1382
@@ -1339,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector)
1339 u16 this_output = (1 << o); 1385 u16 this_output = (1 << o);
1340 if (sdvo_priv->caps.output_flags & this_output) 1386 if (sdvo_priv->caps.output_flags & this_output)
1341 { 1387 {
1342 intel_sdvo_set_target_output(intel_output, this_output); 1388 intel_sdvo_set_target_output(intel_encoder, this_output);
1343 intel_sdvo_get_output_timing(intel_output, 1389 intel_sdvo_get_output_timing(intel_encoder,
1344 &sdvo_priv->save_output_dtd[o]); 1390 &sdvo_priv->save_output_dtd[o]);
1345 } 1391 }
1346 } 1392 }
@@ -1348,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector)
1348 /* XXX: Save TV format/enhancements. */ 1394 /* XXX: Save TV format/enhancements. */
1349 } 1395 }
1350 1396
1351 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); 1397 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
1352} 1398}
1353 1399
1354static void intel_sdvo_restore(struct drm_connector *connector) 1400static void intel_sdvo_restore(struct drm_connector *connector)
1355{ 1401{
1356 struct drm_device *dev = connector->dev; 1402 struct drm_device *dev = connector->dev;
1357 struct intel_output *intel_output = to_intel_output(connector); 1403 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1358 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1404 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1359 int o; 1405 int o;
1360 int i; 1406 int i;
1361 bool input1, input2; 1407 bool input1, input2;
1362 u8 status; 1408 u8 status;
1363 1409
1364 intel_sdvo_set_active_outputs(intel_output, 0); 1410 intel_sdvo_set_active_outputs(intel_encoder, 0);
1365 1411
1366 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) 1412 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1367 { 1413 {
1368 u16 this_output = (1 << o); 1414 u16 this_output = (1 << o);
1369 if (sdvo_priv->caps.output_flags & this_output) { 1415 if (sdvo_priv->caps.output_flags & this_output) {
1370 intel_sdvo_set_target_output(intel_output, this_output); 1416 intel_sdvo_set_target_output(intel_encoder, this_output);
1371 intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); 1417 intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
1372 } 1418 }
1373 } 1419 }
1374 1420
1375 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1376 intel_sdvo_set_target_input(intel_output, true, false); 1422 intel_sdvo_set_target_input(intel_encoder, true, false);
1377 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); 1423 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
1378 } 1424 }
1379 1425
1380 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1426 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1381 intel_sdvo_set_target_input(intel_output, false, true); 1427 intel_sdvo_set_target_input(intel_encoder, false, true);
1382 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); 1428 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
1383 } 1429 }
1384 1430
1385 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); 1431 intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
1386 1432
1387 if (sdvo_priv->is_tv) { 1433 if (sdvo_priv->is_tv) {
1388 /* XXX: Restore TV format/enhancements. */ 1434 /* XXX: Restore TV format/enhancements. */
1389 } 1435 }
1390 1436
1391 intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); 1437 intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
1392 1438
1393 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) 1439 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
1394 { 1440 {
1395 for (i = 0; i < 2; i++) 1441 for (i = 0; i < 2; i++)
1396 intel_wait_for_vblank(dev); 1442 intel_wait_for_vblank(dev);
1397 status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); 1443 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
1398 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) 1444 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
1399 DRM_DEBUG_KMS("First %s output reported failure to " 1445 DRM_DEBUG_KMS("First %s output reported failure to "
1400 "sync\n", SDVO_NAME(sdvo_priv)); 1446 "sync\n", SDVO_NAME(sdvo_priv));
1401 } 1447 }
1402 1448
1403 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); 1449 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
1404} 1450}
1405 1451
1406static int intel_sdvo_mode_valid(struct drm_connector *connector, 1452static int intel_sdvo_mode_valid(struct drm_connector *connector,
1407 struct drm_display_mode *mode) 1453 struct drm_display_mode *mode)
1408{ 1454{
1409 struct intel_output *intel_output = to_intel_output(connector); 1455 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1410 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1456 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1411 1457
1412 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1458 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1413 return MODE_NO_DBLESCAN; 1459 return MODE_NO_DBLESCAN;
@@ -1432,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1432 return MODE_OK; 1478 return MODE_OK;
1433} 1479}
1434 1480
1435static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) 1481static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
1436{ 1482{
1437 u8 status; 1483 u8 status;
1438 1484
1439 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); 1485 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
1440 status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); 1486 status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
1441 if (status != SDVO_CMD_STATUS_SUCCESS) 1487 if (status != SDVO_CMD_STATUS_SUCCESS)
1442 return false; 1488 return false;
1443 1489
@@ -1447,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc
1447struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1493struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1448{ 1494{
1449 struct drm_connector *connector = NULL; 1495 struct drm_connector *connector = NULL;
1450 struct intel_output *iout = NULL; 1496 struct intel_encoder *iout = NULL;
1451 struct intel_sdvo_priv *sdvo; 1497 struct intel_sdvo_priv *sdvo;
1452 1498
1453 /* find the sdvo connector */ 1499 /* find the sdvo connector */
1454 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1500 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1455 iout = to_intel_output(connector); 1501 iout = to_intel_encoder(connector);
1456 1502
1457 if (iout->type != INTEL_OUTPUT_SDVO) 1503 if (iout->type != INTEL_OUTPUT_SDVO)
1458 continue; 1504 continue;
1459 1505
1460 sdvo = iout->dev_priv; 1506 sdvo = iout->dev_priv;
1461 1507
1462 if (sdvo->output_device == SDVOB && sdvoB) 1508 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1463 return connector; 1509 return connector;
1464 1510
1465 if (sdvo->output_device == SDVOC && !sdvoB) 1511 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1466 return connector; 1512 return connector;
1467 1513
1468 } 1514 }
@@ -1474,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1474{ 1520{
1475 u8 response[2]; 1521 u8 response[2];
1476 u8 status; 1522 u8 status;
1477 struct intel_output *intel_output; 1523 struct intel_encoder *intel_encoder;
1478 DRM_DEBUG_KMS("\n"); 1524 DRM_DEBUG_KMS("\n");
1479 1525
1480 if (!connector) 1526 if (!connector)
1481 return 0; 1527 return 0;
1482 1528
1483 intel_output = to_intel_output(connector); 1529 intel_encoder = to_intel_encoder(connector);
1484 1530
1485 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1531 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1486 status = intel_sdvo_read_response(intel_output, &response, 2); 1532 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1487 1533
1488 if (response[0] !=0) 1534 if (response[0] !=0)
1489 return 1; 1535 return 1;
@@ -1495,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1495{ 1541{
1496 u8 response[2]; 1542 u8 response[2];
1497 u8 status; 1543 u8 status;
1498 struct intel_output *intel_output = to_intel_output(connector); 1544 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1499 1545
1500 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1546 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1501 intel_sdvo_read_response(intel_output, &response, 2); 1547 intel_sdvo_read_response(intel_encoder, &response, 2);
1502 1548
1503 if (on) { 1549 if (on) {
1504 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1550 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1505 status = intel_sdvo_read_response(intel_output, &response, 2); 1551 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1506 1552
1507 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1553 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1508 } else { 1554 } else {
1509 response[0] = 0; 1555 response[0] = 0;
1510 response[1] = 0; 1556 response[1] = 0;
1511 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1557 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1512 } 1558 }
1513 1559
1514 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1560 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1515 intel_sdvo_read_response(intel_output, &response, 2); 1561 intel_sdvo_read_response(intel_encoder, &response, 2);
1516} 1562}
1517 1563
1518static bool 1564static bool
1519intel_sdvo_multifunc_encoder(struct intel_output *intel_output) 1565intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
1520{ 1566{
1521 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1567 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1522 int caps = 0; 1568 int caps = 0;
1523 1569
1524 if (sdvo_priv->caps.output_flags & 1570 if (sdvo_priv->caps.output_flags &
@@ -1552,11 +1598,11 @@ static struct drm_connector *
1552intel_find_analog_connector(struct drm_device *dev) 1598intel_find_analog_connector(struct drm_device *dev)
1553{ 1599{
1554 struct drm_connector *connector; 1600 struct drm_connector *connector;
1555 struct intel_output *intel_output; 1601 struct intel_encoder *intel_encoder;
1556 1602
1557 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1603 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1558 intel_output = to_intel_output(connector); 1604 intel_encoder = to_intel_encoder(connector);
1559 if (intel_output->type == INTEL_OUTPUT_ANALOG) 1605 if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
1560 return connector; 1606 return connector;
1561 } 1607 }
1562 return NULL; 1608 return NULL;
@@ -1581,21 +1627,47 @@ intel_analog_is_connected(struct drm_device *dev)
1581enum drm_connector_status 1627enum drm_connector_status
1582intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) 1628intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1583{ 1629{
1584 struct intel_output *intel_output = to_intel_output(connector); 1630 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1585 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1631 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1586 enum drm_connector_status status = connector_status_connected; 1632 enum drm_connector_status status = connector_status_connected;
1587 struct edid *edid = NULL; 1633 struct edid *edid = NULL;
1588 1634
1589 edid = drm_get_edid(&intel_output->base, 1635 edid = drm_get_edid(&intel_encoder->base,
1590 intel_output->ddc_bus); 1636 intel_encoder->ddc_bus);
1591 1637
1638 /* This is only applied to SDVO cards with multiple outputs */
1639 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
1640 uint8_t saved_ddc, temp_ddc;
1641 saved_ddc = sdvo_priv->ddc_bus;
1642 temp_ddc = sdvo_priv->ddc_bus >> 1;
1643 /*
1644 * Don't use the 1 as the argument of DDC bus switch to get
1645 * the EDID. It is used for SDVO SPD ROM.
1646 */
1647 while(temp_ddc > 1) {
1648 sdvo_priv->ddc_bus = temp_ddc;
1649 edid = drm_get_edid(&intel_encoder->base,
1650 intel_encoder->ddc_bus);
1651 if (edid) {
1652 /*
1653 * When we can get the EDID, maybe it is the
1654 * correct DDC bus. Update it.
1655 */
1656 sdvo_priv->ddc_bus = temp_ddc;
1657 break;
1658 }
1659 temp_ddc >>= 1;
1660 }
1661 if (edid == NULL)
1662 sdvo_priv->ddc_bus = saved_ddc;
1663 }
1592 /* when there is no edid and no monitor is connected with VGA 1664 /* when there is no edid and no monitor is connected with VGA
1593 * port, try to use the CRT ddc to read the EDID for DVI-connector 1665 * port, try to use the CRT ddc to read the EDID for DVI-connector
1594 */ 1666 */
1595 if (edid == NULL && 1667 if (edid == NULL &&
1596 sdvo_priv->analog_ddc_bus && 1668 sdvo_priv->analog_ddc_bus &&
1597 !intel_analog_is_connected(intel_output->base.dev)) 1669 !intel_analog_is_connected(intel_encoder->base.dev))
1598 edid = drm_get_edid(&intel_output->base, 1670 edid = drm_get_edid(&intel_encoder->base,
1599 sdvo_priv->analog_ddc_bus); 1671 sdvo_priv->analog_ddc_bus);
1600 if (edid != NULL) { 1672 if (edid != NULL) {
1601 /* Don't report the output as connected if it's a DVI-I 1673 /* Don't report the output as connected if it's a DVI-I
@@ -1610,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1610 } 1682 }
1611 1683
1612 kfree(edid); 1684 kfree(edid);
1613 intel_output->base.display_info.raw_edid = NULL; 1685 intel_encoder->base.display_info.raw_edid = NULL;
1614 1686
1615 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) 1687 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1616 status = connector_status_disconnected; 1688 status = connector_status_disconnected;
@@ -1622,12 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1622{ 1694{
1623 uint16_t response; 1695 uint16_t response;
1624 u8 status; 1696 u8 status;
1625 struct intel_output *intel_output = to_intel_output(connector); 1697 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1626 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1698 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1627 1699
1628 intel_sdvo_write_cmd(intel_output, 1700 intel_sdvo_write_cmd(intel_encoder,
1629 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1701 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
1630 status = intel_sdvo_read_response(intel_output, &response, 2); 1702 if (sdvo_priv->is_tv) {
1703 /* add 30ms delay when the output type is SDVO-TV */
1704 mdelay(30);
1705 }
1706 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1631 1707
1632 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1708 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
1633 1709
@@ -1637,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1637 if (response == 0) 1713 if (response == 0)
1638 return connector_status_disconnected; 1714 return connector_status_disconnected;
1639 1715
1640 if (intel_sdvo_multifunc_encoder(intel_output) && 1716 if (intel_sdvo_multifunc_encoder(intel_encoder) &&
1641 sdvo_priv->attached_output != response) { 1717 sdvo_priv->attached_output != response) {
1642 if (sdvo_priv->controlled_output != response && 1718 if (sdvo_priv->controlled_output != response &&
1643 intel_sdvo_output_setup(intel_output, response) != true) 1719 intel_sdvo_output_setup(intel_encoder, response) != true)
1644 return connector_status_unknown; 1720 return connector_status_unknown;
1645 sdvo_priv->attached_output = response; 1721 sdvo_priv->attached_output = response;
1646 } 1722 }
@@ -1649,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1649 1725
1650static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1726static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1651{ 1727{
1652 struct intel_output *intel_output = to_intel_output(connector); 1728 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1653 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1729 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1654 int num_modes; 1730 int num_modes;
1655 1731
1656 /* set the bus switch and get the modes */ 1732 /* set the bus switch and get the modes */
1657 num_modes = intel_ddc_get_modes(intel_output); 1733 num_modes = intel_ddc_get_modes(intel_encoder);
1658 1734
1659 /* 1735 /*
1660 * Mac mini hack. On this device, the DVI-I connector shares one DDC 1736 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1664,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1664 */ 1740 */
1665 if (num_modes == 0 && 1741 if (num_modes == 0 &&
1666 sdvo_priv->analog_ddc_bus && 1742 sdvo_priv->analog_ddc_bus &&
1667 !intel_analog_is_connected(intel_output->base.dev)) { 1743 !intel_analog_is_connected(intel_encoder->base.dev)) {
1668 struct i2c_adapter *digital_ddc_bus; 1744 struct i2c_adapter *digital_ddc_bus;
1669 1745
1670 /* Switch to the analog ddc bus and try that 1746 /* Switch to the analog ddc bus and try that
1671 */ 1747 */
1672 digital_ddc_bus = intel_output->ddc_bus; 1748 digital_ddc_bus = intel_encoder->ddc_bus;
1673 intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; 1749 intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
1674 1750
1675 (void) intel_ddc_get_modes(intel_output); 1751 (void) intel_ddc_get_modes(intel_encoder);
1676 1752
1677 intel_output->ddc_bus = digital_ddc_bus; 1753 intel_encoder->ddc_bus = digital_ddc_bus;
1678 } 1754 }
1679} 1755}
1680 1756
@@ -1745,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
1745 1821
1746static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1822static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1747{ 1823{
1748 struct intel_output *output = to_intel_output(connector); 1824 struct intel_encoder *output = to_intel_encoder(connector);
1749 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1825 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1750 struct intel_sdvo_sdtv_resolution_request tv_res; 1826 struct intel_sdvo_sdtv_resolution_request tv_res;
1751 uint32_t reply = 0, format_map = 0; 1827 uint32_t reply = 0, format_map = 0;
@@ -1787,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1787 1863
1788static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1864static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1789{ 1865{
1790 struct intel_output *intel_output = to_intel_output(connector); 1866 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1791 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1867 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1792 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1868 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1793 struct drm_display_mode *newmode; 1869 struct drm_display_mode *newmode;
1794 1870
1795 /* 1871 /*
@@ -1797,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1797 * Assume that the preferred modes are 1873 * Assume that the preferred modes are
1798 * arranged in priority order. 1874 * arranged in priority order.
1799 */ 1875 */
1800 intel_ddc_get_modes(intel_output); 1876 intel_ddc_get_modes(intel_encoder);
1801 if (list_empty(&connector->probed_modes) == false) 1877 if (list_empty(&connector->probed_modes) == false)
1802 goto end; 1878 goto end;
1803 1879
@@ -1826,7 +1902,7 @@ end:
1826 1902
1827static int intel_sdvo_get_modes(struct drm_connector *connector) 1903static int intel_sdvo_get_modes(struct drm_connector *connector)
1828{ 1904{
1829 struct intel_output *output = to_intel_output(connector); 1905 struct intel_encoder *output = to_intel_encoder(connector);
1830 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1906 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1831 1907
1832 if (sdvo_priv->is_tv) 1908 if (sdvo_priv->is_tv)
@@ -1844,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1844static 1920static
1845void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) 1921void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1846{ 1922{
1847 struct intel_output *intel_output = to_intel_output(connector); 1923 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1848 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1924 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1849 struct drm_device *dev = connector->dev; 1925 struct drm_device *dev = connector->dev;
1850 1926
1851 if (sdvo_priv->is_tv) { 1927 if (sdvo_priv->is_tv) {
@@ -1882,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1882 1958
1883static void intel_sdvo_destroy(struct drm_connector *connector) 1959static void intel_sdvo_destroy(struct drm_connector *connector)
1884{ 1960{
1885 struct intel_output *intel_output = to_intel_output(connector); 1961 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1886 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1962 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1887 1963
1888 if (intel_output->i2c_bus) 1964 if (intel_encoder->i2c_bus)
1889 intel_i2c_destroy(intel_output->i2c_bus); 1965 intel_i2c_destroy(intel_encoder->i2c_bus);
1890 if (intel_output->ddc_bus) 1966 if (intel_encoder->ddc_bus)
1891 intel_i2c_destroy(intel_output->ddc_bus); 1967 intel_i2c_destroy(intel_encoder->ddc_bus);
1892 if (sdvo_priv->analog_ddc_bus) 1968 if (sdvo_priv->analog_ddc_bus)
1893 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 1969 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
1894 1970
@@ -1906,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1906 drm_sysfs_connector_remove(connector); 1982 drm_sysfs_connector_remove(connector);
1907 drm_connector_cleanup(connector); 1983 drm_connector_cleanup(connector);
1908 1984
1909 kfree(intel_output); 1985 kfree(intel_encoder);
1910} 1986}
1911 1987
1912static int 1988static int
@@ -1914,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
1914 struct drm_property *property, 1990 struct drm_property *property,
1915 uint64_t val) 1991 uint64_t val)
1916{ 1992{
1917 struct intel_output *intel_output = to_intel_output(connector); 1993 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1918 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1994 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1919 struct drm_encoder *encoder = &intel_output->enc; 1995 struct drm_encoder *encoder = &intel_encoder->enc;
1920 struct drm_crtc *crtc = encoder->crtc; 1996 struct drm_crtc *crtc = encoder->crtc;
1921 int ret = 0; 1997 int ret = 0;
1922 bool changed = false; 1998 bool changed = false;
@@ -2024,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
2024 sdvo_priv->cur_brightness = temp_value; 2100 sdvo_priv->cur_brightness = temp_value;
2025 } 2101 }
2026 if (cmd) { 2102 if (cmd) {
2027 intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); 2103 intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
2028 status = intel_sdvo_read_response(intel_output, 2104 status = intel_sdvo_read_response(intel_encoder,
2029 NULL, 0); 2105 NULL, 0);
2030 if (status != SDVO_CMD_STATUS_SUCCESS) { 2106 if (status != SDVO_CMD_STATUS_SUCCESS) {
2031 DRM_DEBUG_KMS("Incorrect SDVO command \n"); 2107 DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2120,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
2120} 2196}
2121 2197
2122static bool 2198static bool
2123intel_sdvo_get_digital_encoding_mode(struct intel_output *output) 2199intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
2124{ 2200{
2125 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 2201 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
2126 uint8_t status; 2202 uint8_t status;
@@ -2134,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
2134 return true; 2210 return true;
2135} 2211}
2136 2212
2137static struct intel_output * 2213static struct intel_encoder *
2138intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) 2214intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
2139{ 2215{
2140 struct drm_device *dev = chan->drm_dev; 2216 struct drm_device *dev = chan->drm_dev;
2141 struct drm_connector *connector; 2217 struct drm_connector *connector;
2142 struct intel_output *intel_output = NULL; 2218 struct intel_encoder *intel_encoder = NULL;
2143 2219
2144 list_for_each_entry(connector, 2220 list_for_each_entry(connector,
2145 &dev->mode_config.connector_list, head) { 2221 &dev->mode_config.connector_list, head) {
2146 if (to_intel_output(connector)->ddc_bus == &chan->adapter) { 2222 if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
2147 intel_output = to_intel_output(connector); 2223 intel_encoder = to_intel_encoder(connector);
2148 break; 2224 break;
2149 } 2225 }
2150 } 2226 }
2151 return intel_output; 2227 return intel_encoder;
2152} 2228}
2153 2229
2154static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, 2230static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
2155 struct i2c_msg msgs[], int num) 2231 struct i2c_msg msgs[], int num)
2156{ 2232{
2157 struct intel_output *intel_output; 2233 struct intel_encoder *intel_encoder;
2158 struct intel_sdvo_priv *sdvo_priv; 2234 struct intel_sdvo_priv *sdvo_priv;
2159 struct i2c_algo_bit_data *algo_data; 2235 struct i2c_algo_bit_data *algo_data;
2160 const struct i2c_algorithm *algo; 2236 const struct i2c_algorithm *algo;
2161 2237
2162 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 2238 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
2163 intel_output = 2239 intel_encoder =
2164 intel_sdvo_chan_to_intel_output( 2240 intel_sdvo_chan_to_intel_encoder(
2165 (struct intel_i2c_chan *)(algo_data->data)); 2241 (struct intel_i2c_chan *)(algo_data->data));
2166 if (intel_output == NULL) 2242 if (intel_encoder == NULL)
2167 return -EINVAL; 2243 return -EINVAL;
2168 2244
2169 sdvo_priv = intel_output->dev_priv; 2245 sdvo_priv = intel_encoder->dev_priv;
2170 algo = intel_output->i2c_bus->algo; 2246 algo = intel_encoder->i2c_bus->algo;
2171 2247
2172 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); 2248 intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
2173 return algo->master_xfer(i2c_adap, msgs, num); 2249 return algo->master_xfer(i2c_adap, msgs, num);
2174} 2250}
2175 2251
@@ -2178,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
2178}; 2254};
2179 2255
2180static u8 2256static u8
2181intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) 2257intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
2182{ 2258{
2183 struct drm_i915_private *dev_priv = dev->dev_private; 2259 struct drm_i915_private *dev_priv = dev->dev_private;
2184 struct sdvo_device_mapping *my_mapping, *other_mapping; 2260 struct sdvo_device_mapping *my_mapping, *other_mapping;
2185 2261
2186 if (output_device == SDVOB) { 2262 if (sdvo_reg == SDVOB) {
2187 my_mapping = &dev_priv->sdvo_mappings[0]; 2263 my_mapping = &dev_priv->sdvo_mappings[0];
2188 other_mapping = &dev_priv->sdvo_mappings[1]; 2264 other_mapping = &dev_priv->sdvo_mappings[1];
2189 } else { 2265 } else {
@@ -2208,22 +2284,41 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2208 /* No SDVO device info is found for another DVO port, 2284 /* No SDVO device info is found for another DVO port,
2209 * so use mapping assumption we had before BIOS parsing. 2285 * so use mapping assumption we had before BIOS parsing.
2210 */ 2286 */
2211 if (output_device == SDVOB) 2287 if (sdvo_reg == SDVOB)
2212 return 0x70; 2288 return 0x70;
2213 else 2289 else
2214 return 0x72; 2290 return 0x72;
2215} 2291}
2216 2292
2293static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
2294{
2295 DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
2296 return 1;
2297}
2298
2299static struct dmi_system_id intel_sdvo_bad_tv[] = {
2300 {
2301 .callback = intel_sdvo_bad_tv_callback,
2302 .ident = "IntelG45/ICH10R/DME1737",
2303 .matches = {
2304 DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
2305 DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
2306 },
2307 },
2308
2309 { } /* terminating entry */
2310};
2311
2217static bool 2312static bool
2218intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2313intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
2219{ 2314{
2220 struct drm_connector *connector = &intel_output->base; 2315 struct drm_connector *connector = &intel_encoder->base;
2221 struct drm_encoder *encoder = &intel_output->enc; 2316 struct drm_encoder *encoder = &intel_encoder->enc;
2222 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2317 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2223 bool ret = true, registered = false; 2318 bool ret = true, registered = false;
2224 2319
2225 sdvo_priv->is_tv = false; 2320 sdvo_priv->is_tv = false;
2226 intel_output->needs_tv_clock = false; 2321 intel_encoder->needs_tv_clock = false;
2227 sdvo_priv->is_lvds = false; 2322 sdvo_priv->is_lvds = false;
2228 2323
2229 if (device_is_registered(&connector->kdev)) { 2324 if (device_is_registered(&connector->kdev)) {
@@ -2241,48 +2336,57 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2241 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2336 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2242 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2337 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2243 2338
2244 if (intel_sdvo_get_supp_encode(intel_output, 2339 if (intel_sdvo_get_supp_encode(intel_encoder,
2245 &sdvo_priv->encode) && 2340 &sdvo_priv->encode) &&
2246 intel_sdvo_get_digital_encoding_mode(intel_output) && 2341 intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
2247 sdvo_priv->is_hdmi) { 2342 sdvo_priv->is_hdmi) {
2248 /* enable hdmi encoding mode if supported */ 2343 /* enable hdmi encoding mode if supported */
2249 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); 2344 intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
2250 intel_sdvo_set_colorimetry(intel_output, 2345 intel_sdvo_set_colorimetry(intel_encoder,
2251 SDVO_COLORIMETRY_RGB256); 2346 SDVO_COLORIMETRY_RGB256);
2252 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2347 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2253 intel_output->clone_mask = 2348 intel_encoder->clone_mask =
2254 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2349 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2255 (1 << INTEL_ANALOG_CLONE_BIT); 2350 (1 << INTEL_ANALOG_CLONE_BIT);
2256 } 2351 }
2257 } else if (flags & SDVO_OUTPUT_SVID0) { 2352 } else if ((flags & SDVO_OUTPUT_SVID0) &&
2353 !dmi_check_system(intel_sdvo_bad_tv)) {
2258 2354
2259 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 2355 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
2260 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2356 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2261 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2357 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2262 sdvo_priv->is_tv = true; 2358 sdvo_priv->is_tv = true;
2263 intel_output->needs_tv_clock = true; 2359 intel_encoder->needs_tv_clock = true;
2264 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2360 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2265 } else if (flags & SDVO_OUTPUT_RGB0) { 2361 } else if (flags & SDVO_OUTPUT_RGB0) {
2266 2362
2267 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 2363 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
2268 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2364 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2269 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2365 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2270 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2366 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2271 (1 << INTEL_ANALOG_CLONE_BIT); 2367 (1 << INTEL_ANALOG_CLONE_BIT);
2272 } else if (flags & SDVO_OUTPUT_RGB1) { 2368 } else if (flags & SDVO_OUTPUT_RGB1) {
2273 2369
2274 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2370 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2275 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2371 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2276 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2372 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2277 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2373 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2278 (1 << INTEL_ANALOG_CLONE_BIT); 2374 (1 << INTEL_ANALOG_CLONE_BIT);
2375 } else if (flags & SDVO_OUTPUT_CVBS0) {
2376
2377 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2378 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2379 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2380 sdvo_priv->is_tv = true;
2381 intel_encoder->needs_tv_clock = true;
2382 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2279 } else if (flags & SDVO_OUTPUT_LVDS0) { 2383 } else if (flags & SDVO_OUTPUT_LVDS0) {
2280 2384
2281 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2385 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
2282 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2386 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2283 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2387 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2284 sdvo_priv->is_lvds = true; 2388 sdvo_priv->is_lvds = true;
2285 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2389 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2286 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2390 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2287 } else if (flags & SDVO_OUTPUT_LVDS1) { 2391 } else if (flags & SDVO_OUTPUT_LVDS1) {
2288 2392
@@ -2290,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2290 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2394 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2291 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2395 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2292 sdvo_priv->is_lvds = true; 2396 sdvo_priv->is_lvds = true;
2293 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2397 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2294 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2398 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2295 } else { 2399 } else {
2296 2400
@@ -2303,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2303 bytes[0], bytes[1]); 2407 bytes[0], bytes[1]);
2304 ret = false; 2408 ret = false;
2305 } 2409 }
2306 intel_output->crtc_mask = (1 << 0) | (1 << 1); 2410 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
2307 2411
2308 if (ret && registered) 2412 if (ret && registered)
2309 ret = drm_sysfs_connector_add(connector) == 0 ? true : false; 2413 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
@@ -2315,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2315 2419
2316static void intel_sdvo_tv_create_property(struct drm_connector *connector) 2420static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2317{ 2421{
2318 struct intel_output *intel_output = to_intel_output(connector); 2422 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2319 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2423 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2320 struct intel_sdvo_tv_format format; 2424 struct intel_sdvo_tv_format format;
2321 uint32_t format_map, i; 2425 uint32_t format_map, i;
2322 uint8_t status; 2426 uint8_t status;
2323 2427
2324 intel_sdvo_set_target_output(intel_output, 2428 intel_sdvo_set_target_output(intel_encoder,
2325 sdvo_priv->controlled_output); 2429 sdvo_priv->controlled_output);
2326 2430
2327 intel_sdvo_write_cmd(intel_output, 2431 intel_sdvo_write_cmd(intel_encoder,
2328 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); 2432 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
2329 status = intel_sdvo_read_response(intel_output, 2433 status = intel_sdvo_read_response(intel_encoder,
2330 &format, sizeof(format)); 2434 &format, sizeof(format));
2331 if (status != SDVO_CMD_STATUS_SUCCESS) 2435 if (status != SDVO_CMD_STATUS_SUCCESS)
2332 return; 2436 return;
@@ -2364,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2364 2468
2365static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2469static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2366{ 2470{
2367 struct intel_output *intel_output = to_intel_output(connector); 2471 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2368 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2472 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2369 struct intel_sdvo_enhancements_reply sdvo_data; 2473 struct intel_sdvo_enhancements_reply sdvo_data;
2370 struct drm_device *dev = connector->dev; 2474 struct drm_device *dev = connector->dev;
2371 uint8_t status; 2475 uint8_t status;
2372 uint16_t response, data_value[2]; 2476 uint16_t response, data_value[2];
2373 2477
2374 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2478 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2375 NULL, 0); 2479 NULL, 0);
2376 status = intel_sdvo_read_response(intel_output, &sdvo_data, 2480 status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
2377 sizeof(sdvo_data)); 2481 sizeof(sdvo_data));
2378 if (status != SDVO_CMD_STATUS_SUCCESS) { 2482 if (status != SDVO_CMD_STATUS_SUCCESS) {
2379 DRM_DEBUG_KMS(" incorrect response is returned\n"); 2483 DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2389,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2389 * property 2493 * property
2390 */ 2494 */
2391 if (sdvo_data.overscan_h) { 2495 if (sdvo_data.overscan_h) {
2392 intel_sdvo_write_cmd(intel_output, 2496 intel_sdvo_write_cmd(intel_encoder,
2393 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2497 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2394 status = intel_sdvo_read_response(intel_output, 2498 status = intel_sdvo_read_response(intel_encoder,
2395 &data_value, 4); 2499 &data_value, 4);
2396 if (status != SDVO_CMD_STATUS_SUCCESS) { 2500 if (status != SDVO_CMD_STATUS_SUCCESS) {
2397 DRM_DEBUG_KMS("Incorrect SDVO max " 2501 DRM_DEBUG_KMS("Incorrect SDVO max "
2398 "h_overscan\n"); 2502 "h_overscan\n");
2399 return; 2503 return;
2400 } 2504 }
2401 intel_sdvo_write_cmd(intel_output, 2505 intel_sdvo_write_cmd(intel_encoder,
2402 SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2506 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2403 status = intel_sdvo_read_response(intel_output, 2507 status = intel_sdvo_read_response(intel_encoder,
2404 &response, 2); 2508 &response, 2);
2405 if (status != SDVO_CMD_STATUS_SUCCESS) { 2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2406 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2510 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2430,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2430 data_value[0], data_value[1], response); 2534 data_value[0], data_value[1], response);
2431 } 2535 }
2432 if (sdvo_data.overscan_v) { 2536 if (sdvo_data.overscan_v) {
2433 intel_sdvo_write_cmd(intel_output, 2537 intel_sdvo_write_cmd(intel_encoder,
2434 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); 2538 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2435 status = intel_sdvo_read_response(intel_output, 2539 status = intel_sdvo_read_response(intel_encoder,
2436 &data_value, 4); 2540 &data_value, 4);
2437 if (status != SDVO_CMD_STATUS_SUCCESS) { 2541 if (status != SDVO_CMD_STATUS_SUCCESS) {
2438 DRM_DEBUG_KMS("Incorrect SDVO max " 2542 DRM_DEBUG_KMS("Incorrect SDVO max "
2439 "v_overscan\n"); 2543 "v_overscan\n");
2440 return; 2544 return;
2441 } 2545 }
2442 intel_sdvo_write_cmd(intel_output, 2546 intel_sdvo_write_cmd(intel_encoder,
2443 SDVO_CMD_GET_OVERSCAN_V, NULL, 0); 2547 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2444 status = intel_sdvo_read_response(intel_output, 2548 status = intel_sdvo_read_response(intel_encoder,
2445 &response, 2); 2549 &response, 2);
2446 if (status != SDVO_CMD_STATUS_SUCCESS) { 2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2447 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); 2551 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2471,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2471 data_value[0], data_value[1], response); 2575 data_value[0], data_value[1], response);
2472 } 2576 }
2473 if (sdvo_data.position_h) { 2577 if (sdvo_data.position_h) {
2474 intel_sdvo_write_cmd(intel_output, 2578 intel_sdvo_write_cmd(intel_encoder,
2475 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); 2579 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2476 status = intel_sdvo_read_response(intel_output, 2580 status = intel_sdvo_read_response(intel_encoder,
2477 &data_value, 4); 2581 &data_value, 4);
2478 if (status != SDVO_CMD_STATUS_SUCCESS) { 2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2479 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); 2583 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2480 return; 2584 return;
2481 } 2585 }
2482 intel_sdvo_write_cmd(intel_output, 2586 intel_sdvo_write_cmd(intel_encoder,
2483 SDVO_CMD_GET_POSITION_H, NULL, 0); 2587 SDVO_CMD_GET_POSITION_H, NULL, 0);
2484 status = intel_sdvo_read_response(intel_output, 2588 status = intel_sdvo_read_response(intel_encoder,
2485 &response, 2); 2589 &response, 2);
2486 if (status != SDVO_CMD_STATUS_SUCCESS) { 2590 if (status != SDVO_CMD_STATUS_SUCCESS) {
2487 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); 2591 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2502,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2502 data_value[0], data_value[1], response); 2606 data_value[0], data_value[1], response);
2503 } 2607 }
2504 if (sdvo_data.position_v) { 2608 if (sdvo_data.position_v) {
2505 intel_sdvo_write_cmd(intel_output, 2609 intel_sdvo_write_cmd(intel_encoder,
2506 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); 2610 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2507 status = intel_sdvo_read_response(intel_output, 2611 status = intel_sdvo_read_response(intel_encoder,
2508 &data_value, 4); 2612 &data_value, 4);
2509 if (status != SDVO_CMD_STATUS_SUCCESS) { 2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2510 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); 2614 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2511 return; 2615 return;
2512 } 2616 }
2513 intel_sdvo_write_cmd(intel_output, 2617 intel_sdvo_write_cmd(intel_encoder,
2514 SDVO_CMD_GET_POSITION_V, NULL, 0); 2618 SDVO_CMD_GET_POSITION_V, NULL, 0);
2515 status = intel_sdvo_read_response(intel_output, 2619 status = intel_sdvo_read_response(intel_encoder,
2516 &response, 2); 2620 &response, 2);
2517 if (status != SDVO_CMD_STATUS_SUCCESS) { 2621 if (status != SDVO_CMD_STATUS_SUCCESS) {
2518 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); 2622 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2535,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2535 } 2639 }
2536 if (sdvo_priv->is_tv) { 2640 if (sdvo_priv->is_tv) {
2537 if (sdvo_data.saturation) { 2641 if (sdvo_data.saturation) {
2538 intel_sdvo_write_cmd(intel_output, 2642 intel_sdvo_write_cmd(intel_encoder,
2539 SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2643 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2540 status = intel_sdvo_read_response(intel_output, 2644 status = intel_sdvo_read_response(intel_encoder,
2541 &data_value, 4); 2645 &data_value, 4);
2542 if (status != SDVO_CMD_STATUS_SUCCESS) { 2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
2543 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); 2647 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2544 return; 2648 return;
2545 } 2649 }
2546 intel_sdvo_write_cmd(intel_output, 2650 intel_sdvo_write_cmd(intel_encoder,
2547 SDVO_CMD_GET_SATURATION, NULL, 0); 2651 SDVO_CMD_GET_SATURATION, NULL, 0);
2548 status = intel_sdvo_read_response(intel_output, 2652 status = intel_sdvo_read_response(intel_encoder,
2549 &response, 2); 2653 &response, 2);
2550 if (status != SDVO_CMD_STATUS_SUCCESS) { 2654 if (status != SDVO_CMD_STATUS_SUCCESS) {
2551 DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); 2655 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2567,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2567 data_value[0], data_value[1], response); 2671 data_value[0], data_value[1], response);
2568 } 2672 }
2569 if (sdvo_data.contrast) { 2673 if (sdvo_data.contrast) {
2570 intel_sdvo_write_cmd(intel_output, 2674 intel_sdvo_write_cmd(intel_encoder,
2571 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); 2675 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2572 status = intel_sdvo_read_response(intel_output, 2676 status = intel_sdvo_read_response(intel_encoder,
2573 &data_value, 4); 2677 &data_value, 4);
2574 if (status != SDVO_CMD_STATUS_SUCCESS) { 2678 if (status != SDVO_CMD_STATUS_SUCCESS) {
2575 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); 2679 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2576 return; 2680 return;
2577 } 2681 }
2578 intel_sdvo_write_cmd(intel_output, 2682 intel_sdvo_write_cmd(intel_encoder,
2579 SDVO_CMD_GET_CONTRAST, NULL, 0); 2683 SDVO_CMD_GET_CONTRAST, NULL, 0);
2580 status = intel_sdvo_read_response(intel_output, 2684 status = intel_sdvo_read_response(intel_encoder,
2581 &response, 2); 2685 &response, 2);
2582 if (status != SDVO_CMD_STATUS_SUCCESS) { 2686 if (status != SDVO_CMD_STATUS_SUCCESS) {
2583 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); 2687 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2598,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2598 data_value[0], data_value[1], response); 2702 data_value[0], data_value[1], response);
2599 } 2703 }
2600 if (sdvo_data.hue) { 2704 if (sdvo_data.hue) {
2601 intel_sdvo_write_cmd(intel_output, 2705 intel_sdvo_write_cmd(intel_encoder,
2602 SDVO_CMD_GET_MAX_HUE, NULL, 0); 2706 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2603 status = intel_sdvo_read_response(intel_output, 2707 status = intel_sdvo_read_response(intel_encoder,
2604 &data_value, 4); 2708 &data_value, 4);
2605 if (status != SDVO_CMD_STATUS_SUCCESS) { 2709 if (status != SDVO_CMD_STATUS_SUCCESS) {
2606 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); 2710 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2607 return; 2711 return;
2608 } 2712 }
2609 intel_sdvo_write_cmd(intel_output, 2713 intel_sdvo_write_cmd(intel_encoder,
2610 SDVO_CMD_GET_HUE, NULL, 0); 2714 SDVO_CMD_GET_HUE, NULL, 0);
2611 status = intel_sdvo_read_response(intel_output, 2715 status = intel_sdvo_read_response(intel_encoder,
2612 &response, 2); 2716 &response, 2);
2613 if (status != SDVO_CMD_STATUS_SUCCESS) { 2717 if (status != SDVO_CMD_STATUS_SUCCESS) {
2614 DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); 2718 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2631,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2631 } 2735 }
2632 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 2736 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2633 if (sdvo_data.brightness) { 2737 if (sdvo_data.brightness) {
2634 intel_sdvo_write_cmd(intel_output, 2738 intel_sdvo_write_cmd(intel_encoder,
2635 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2739 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2636 status = intel_sdvo_read_response(intel_output, 2740 status = intel_sdvo_read_response(intel_encoder,
2637 &data_value, 4); 2741 &data_value, 4);
2638 if (status != SDVO_CMD_STATUS_SUCCESS) { 2742 if (status != SDVO_CMD_STATUS_SUCCESS) {
2639 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2743 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2640 return; 2744 return;
2641 } 2745 }
2642 intel_sdvo_write_cmd(intel_output, 2746 intel_sdvo_write_cmd(intel_encoder,
2643 SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2747 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2644 status = intel_sdvo_read_response(intel_output, 2748 status = intel_sdvo_read_response(intel_encoder,
2645 &response, 2); 2749 &response, 2);
2646 if (status != SDVO_CMD_STATUS_SUCCESS) { 2750 if (status != SDVO_CMD_STATUS_SUCCESS) {
2647 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2751 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
@@ -2666,78 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2666 return; 2770 return;
2667} 2771}
2668 2772
2669bool intel_sdvo_init(struct drm_device *dev, int output_device) 2773bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2670{ 2774{
2775 struct drm_i915_private *dev_priv = dev->dev_private;
2671 struct drm_connector *connector; 2776 struct drm_connector *connector;
2672 struct intel_output *intel_output; 2777 struct intel_encoder *intel_encoder;
2673 struct intel_sdvo_priv *sdvo_priv; 2778 struct intel_sdvo_priv *sdvo_priv;
2674 2779
2675 u8 ch[0x40]; 2780 u8 ch[0x40];
2676 int i; 2781 int i;
2677 2782
2678 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2783 intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
2679 if (!intel_output) { 2784 if (!intel_encoder) {
2680 return false; 2785 return false;
2681 } 2786 }
2682 2787
2683 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 2788 sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
2684 sdvo_priv->output_device = output_device; 2789 sdvo_priv->sdvo_reg = sdvo_reg;
2685 2790
2686 intel_output->dev_priv = sdvo_priv; 2791 intel_encoder->dev_priv = sdvo_priv;
2687 intel_output->type = INTEL_OUTPUT_SDVO; 2792 intel_encoder->type = INTEL_OUTPUT_SDVO;
2688 2793
2689 /* setup the DDC bus. */ 2794 /* setup the DDC bus. */
2690 if (output_device == SDVOB) 2795 if (sdvo_reg == SDVOB)
2691 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 2796 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
2692 else 2797 else
2693 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 2798 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
2694 2799
2695 if (!intel_output->i2c_bus) 2800 if (!intel_encoder->i2c_bus)
2696 goto err_inteloutput; 2801 goto err_inteloutput;
2697 2802
2698 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 2803 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
2699 2804
2700 /* Save the bit-banging i2c functionality for use by the DDC wrapper */ 2805 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
2701 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; 2806 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
2702 2807
2703 /* Read the regs to test if we can talk to the device */ 2808 /* Read the regs to test if we can talk to the device */
2704 for (i = 0; i < 0x40; i++) { 2809 for (i = 0; i < 0x40; i++) {
2705 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 2810 if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
2706 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2811 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2707 output_device == SDVOB ? 'B' : 'C'); 2812 sdvo_reg == SDVOB ? 'B' : 'C');
2708 goto err_i2c; 2813 goto err_i2c;
2709 } 2814 }
2710 } 2815 }
2711 2816
2712 /* setup the DDC bus. */ 2817 /* setup the DDC bus. */
2713 if (output_device == SDVOB) { 2818 if (sdvo_reg == SDVOB) {
2714 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2819 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2715 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2716 "SDVOB/VGA DDC BUS"); 2821 "SDVOB/VGA DDC BUS");
2822 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2717 } else { 2823 } else {
2718 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2824 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2719 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2825 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2720 "SDVOC/VGA DDC BUS"); 2826 "SDVOC/VGA DDC BUS");
2827 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2721 } 2828 }
2722 2829
2723 if (intel_output->ddc_bus == NULL) 2830 if (intel_encoder->ddc_bus == NULL)
2724 goto err_i2c; 2831 goto err_i2c;
2725 2832
2726 /* Wrap with our custom algo which switches to DDC mode */ 2833 /* Wrap with our custom algo which switches to DDC mode */
2727 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; 2834 intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
2728 2835
2729 /* In defaut case sdvo lvds is false */ 2836 /* In default case sdvo lvds is false */
2730 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 2837 intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
2731 2838
2732 if (intel_sdvo_output_setup(intel_output, 2839 if (intel_sdvo_output_setup(intel_encoder,
2733 sdvo_priv->caps.output_flags) != true) { 2840 sdvo_priv->caps.output_flags) != true) {
2734 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2841 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2735 output_device == SDVOB ? 'B' : 'C'); 2842 sdvo_reg == SDVOB ? 'B' : 'C');
2736 goto err_i2c; 2843 goto err_i2c;
2737 } 2844 }
2738 2845
2739 2846
2740 connector = &intel_output->base; 2847 connector = &intel_encoder->base;
2741 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, 2848 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
2742 connector->connector_type); 2849 connector->connector_type);
2743 2850
@@ -2746,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2746 connector->doublescan_allowed = 0; 2853 connector->doublescan_allowed = 0;
2747 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 2854 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
2748 2855
2749 drm_encoder_init(dev, &intel_output->enc, 2856 drm_encoder_init(dev, &intel_encoder->enc,
2750 &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); 2857 &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
2751 2858
2752 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 2859 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
2753 2860
2754 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2861 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
2755 if (sdvo_priv->is_tv) 2862 if (sdvo_priv->is_tv)
2756 intel_sdvo_tv_create_property(connector); 2863 intel_sdvo_tv_create_property(connector);
2757 2864
@@ -2763,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2763 intel_sdvo_select_ddc_bus(sdvo_priv); 2870 intel_sdvo_select_ddc_bus(sdvo_priv);
2764 2871
2765 /* Set the input timing to the screen. Assume always input 0. */ 2872 /* Set the input timing to the screen. Assume always input 0. */
2766 intel_sdvo_set_target_input(intel_output, true, false); 2873 intel_sdvo_set_target_input(intel_encoder, true, false);
2767 2874
2768 intel_sdvo_get_input_pixel_clock_range(intel_output, 2875 intel_sdvo_get_input_pixel_clock_range(intel_encoder,
2769 &sdvo_priv->pixel_clock_min, 2876 &sdvo_priv->pixel_clock_min,
2770 &sdvo_priv->pixel_clock_max); 2877 &sdvo_priv->pixel_clock_max);
2771 2878
@@ -2792,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2792err_i2c: 2899err_i2c:
2793 if (sdvo_priv->analog_ddc_bus != NULL) 2900 if (sdvo_priv->analog_ddc_bus != NULL)
2794 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 2901 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
2795 if (intel_output->ddc_bus != NULL) 2902 if (intel_encoder->ddc_bus != NULL)
2796 intel_i2c_destroy(intel_output->ddc_bus); 2903 intel_i2c_destroy(intel_encoder->ddc_bus);
2797 if (intel_output->i2c_bus != NULL) 2904 if (intel_encoder->i2c_bus != NULL)
2798 intel_i2c_destroy(intel_output->i2c_bus); 2905 intel_i2c_destroy(intel_encoder->i2c_bus);
2799err_inteloutput: 2906err_inteloutput:
2800 kfree(intel_output); 2907 kfree(intel_encoder);
2801 2908
2802 return false; 2909 return false;
2803} 2910}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931afb..d7d39b2327df 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector)
921{ 921{
922 struct drm_device *dev = connector->dev; 922 struct drm_device *dev = connector->dev;
923 struct drm_i915_private *dev_priv = dev->dev_private; 923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_output *intel_output = to_intel_output(connector); 924 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
925 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 925 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
926 int i; 926 int i;
927 927
928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); 928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector)
971{ 971{
972 struct drm_device *dev = connector->dev; 972 struct drm_device *dev = connector->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private; 973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_output *intel_output = to_intel_output(connector); 974 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
975 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 975 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
976 struct drm_crtc *crtc = connector->encoder->crtc; 976 struct drm_crtc *crtc = connector->encoder->crtc;
977 struct intel_crtc *intel_crtc; 977 struct intel_crtc *intel_crtc;
978 int i; 978 int i;
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format)
1068} 1068}
1069 1069
1070static const struct tv_mode * 1070static const struct tv_mode *
1071intel_tv_mode_find (struct intel_output *intel_output) 1071intel_tv_mode_find (struct intel_encoder *intel_encoder)
1072{ 1072{
1073 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1073 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1074 1074
1075 return intel_tv_mode_lookup(tv_priv->tv_format); 1075 return intel_tv_mode_lookup(tv_priv->tv_format);
1076} 1076}
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output)
1078static enum drm_mode_status 1078static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) 1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{ 1080{
1081 struct intel_output *intel_output = to_intel_output(connector); 1081 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
1095{ 1095{
1096 struct drm_device *dev = encoder->dev; 1096 struct drm_device *dev = encoder->dev;
1097 struct drm_mode_config *drm_config = &dev->mode_config; 1097 struct drm_mode_config *drm_config = &dev->mode_config;
1098 struct intel_output *intel_output = enc_to_intel_output(encoder); 1098 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); 1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
1100 struct drm_encoder *other_encoder; 1100 struct drm_encoder *other_encoder;
1101 1101
1102 if (!tv_mode) 1102 if (!tv_mode)
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1121 struct drm_i915_private *dev_priv = dev->dev_private; 1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122 struct drm_crtc *crtc = encoder->crtc; 1122 struct drm_crtc *crtc = encoder->crtc;
1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1124 struct intel_output *intel_output = enc_to_intel_output(encoder); 1124 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1125 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1125 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1127 u32 tv_ctl; 1127 u32 tv_ctl;
1128 u32 hctl1, hctl2, hctl3; 1128 u32 hctl1, hctl2, hctl3;
1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; 1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1213 tv_ctl |= TV_TRILEVEL_SYNC; 1213 tv_ctl |= TV_TRILEVEL_SYNC;
1214 if (tv_mode->pal_burst) 1214 if (tv_mode->pal_burst)
1215 tv_ctl |= TV_PAL_BURST; 1215 tv_ctl |= TV_PAL_BURST;
1216
1216 scctl1 = 0; 1217 scctl1 = 0;
1217 /* dda1 implies valid video levels */ 1218 if (tv_mode->dda1_inc)
1218 if (tv_mode->dda1_inc) {
1219 scctl1 |= TV_SC_DDA1_EN; 1219 scctl1 |= TV_SC_DDA1_EN;
1220 }
1221
1222 if (tv_mode->dda2_inc) 1220 if (tv_mode->dda2_inc)
1223 scctl1 |= TV_SC_DDA2_EN; 1221 scctl1 |= TV_SC_DDA2_EN;
1224
1225 if (tv_mode->dda3_inc) 1222 if (tv_mode->dda3_inc)
1226 scctl1 |= TV_SC_DDA3_EN; 1223 scctl1 |= TV_SC_DDA3_EN;
1227
1228 scctl1 |= tv_mode->sc_reset; 1224 scctl1 |= tv_mode->sc_reset;
1229 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; 1225 if (video_levels)
1226 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1230 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1227 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1231 1228
1232 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1229 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1363,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = {
1363 * \return false if TV is disconnected. 1360 * \return false if TV is disconnected.
1364 */ 1361 */
1365static int 1362static int
1366intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) 1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
1367{ 1364{
1368 struct drm_encoder *encoder = &intel_output->enc; 1365 struct drm_encoder *encoder = &intel_encoder->enc;
1369 struct drm_device *dev = encoder->dev; 1366 struct drm_device *dev = encoder->dev;
1370 struct drm_i915_private *dev_priv = dev->dev_private; 1367 struct drm_i915_private *dev_priv = dev->dev_private;
1371 unsigned long irqflags; 1368 unsigned long irqflags;
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1416 * 0 0 0 Component 1413 * 0 0 0 Component
1417 */ 1414 */
1418 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { 1415 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
1419 DRM_DEBUG("Detected Composite TV connection\n"); 1416 DRM_DEBUG_KMS("Detected Composite TV connection\n");
1420 type = DRM_MODE_CONNECTOR_Composite; 1417 type = DRM_MODE_CONNECTOR_Composite;
1421 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { 1418 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
1422 DRM_DEBUG("Detected S-Video TV connection\n"); 1419 DRM_DEBUG_KMS("Detected S-Video TV connection\n");
1423 type = DRM_MODE_CONNECTOR_SVIDEO; 1420 type = DRM_MODE_CONNECTOR_SVIDEO;
1424 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { 1421 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
1425 DRM_DEBUG("Detected Component TV connection\n"); 1422 DRM_DEBUG_KMS("Detected Component TV connection\n");
1426 type = DRM_MODE_CONNECTOR_Component; 1423 type = DRM_MODE_CONNECTOR_Component;
1427 } else { 1424 } else {
1428 DRM_DEBUG("No TV connection detected\n"); 1425 DRM_DEBUG_KMS("No TV connection detected\n");
1429 type = -1; 1426 type = -1;
1430 } 1427 }
1431 1428
@@ -1444,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1444 */ 1441 */
1445static void intel_tv_find_better_format(struct drm_connector *connector) 1442static void intel_tv_find_better_format(struct drm_connector *connector)
1446{ 1443{
1447 struct intel_output *intel_output = to_intel_output(connector); 1444 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1448 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1445 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1449 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1450 int i; 1447 int i;
1451 1448
1452 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == 1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1478,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector)
1478{ 1475{
1479 struct drm_crtc *crtc; 1476 struct drm_crtc *crtc;
1480 struct drm_display_mode mode; 1477 struct drm_display_mode mode;
1481 struct intel_output *intel_output = to_intel_output(connector); 1478 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1482 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1479 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1483 struct drm_encoder *encoder = &intel_output->enc; 1480 struct drm_encoder *encoder = &intel_encoder->enc;
1484 int dpms_mode; 1481 int dpms_mode;
1485 int type = tv_priv->type; 1482 int type = tv_priv->type;
1486 1483
@@ -1488,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector)
1488 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1489 1486
1490 if (encoder->crtc && encoder->crtc->enabled) { 1487 if (encoder->crtc && encoder->crtc->enabled) {
1491 type = intel_tv_detect_type(encoder->crtc, intel_output); 1488 type = intel_tv_detect_type(encoder->crtc, intel_encoder);
1492 } else { 1489 } else {
1493 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1490 crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
1494 if (crtc) { 1491 if (crtc) {
1495 type = intel_tv_detect_type(crtc, intel_output); 1492 type = intel_tv_detect_type(crtc, intel_encoder);
1496 intel_release_load_detect_pipe(intel_output, dpms_mode); 1493 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
1497 } else 1494 } else
1498 type = -1; 1495 type = -1;
1499 } 1496 }
@@ -1528,8 +1525,8 @@ static void
1528intel_tv_chose_preferred_modes(struct drm_connector *connector, 1525intel_tv_chose_preferred_modes(struct drm_connector *connector,
1529 struct drm_display_mode *mode_ptr) 1526 struct drm_display_mode *mode_ptr)
1530{ 1527{
1531 struct intel_output *intel_output = to_intel_output(connector); 1528 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1532 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1533 1530
1534 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) 1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
1535 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; 1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1553,8 +1550,8 @@ static int
1553intel_tv_get_modes(struct drm_connector *connector) 1550intel_tv_get_modes(struct drm_connector *connector)
1554{ 1551{
1555 struct drm_display_mode *mode_ptr; 1552 struct drm_display_mode *mode_ptr;
1556 struct intel_output *intel_output = to_intel_output(connector); 1553 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1557 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1558 int j, count = 0; 1555 int j, count = 0;
1559 u64 tmp; 1556 u64 tmp;
1560 1557
@@ -1607,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector)
1607static void 1604static void
1608intel_tv_destroy (struct drm_connector *connector) 1605intel_tv_destroy (struct drm_connector *connector)
1609{ 1606{
1610 struct intel_output *intel_output = to_intel_output(connector); 1607 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1611 1608
1612 drm_sysfs_connector_remove(connector); 1609 drm_sysfs_connector_remove(connector);
1613 drm_connector_cleanup(connector); 1610 drm_connector_cleanup(connector);
1614 kfree(intel_output); 1611 kfree(intel_encoder);
1615} 1612}
1616 1613
1617 1614
@@ -1620,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1620 uint64_t val) 1617 uint64_t val)
1621{ 1618{
1622 struct drm_device *dev = connector->dev; 1619 struct drm_device *dev = connector->dev;
1623 struct intel_output *intel_output = to_intel_output(connector); 1620 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1624 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1621 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1625 struct drm_encoder *encoder = &intel_output->enc; 1622 struct drm_encoder *encoder = &intel_encoder->enc;
1626 struct drm_crtc *crtc = encoder->crtc; 1623 struct drm_crtc *crtc = encoder->crtc;
1627 int ret = 0; 1624 int ret = 0;
1628 bool changed = false; 1625 bool changed = false;
@@ -1702,13 +1699,48 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1702 .destroy = intel_tv_enc_destroy, 1699 .destroy = intel_tv_enc_destroy,
1703}; 1700};
1704 1701
1702/*
1703 * Enumerate the child dev array parsed from VBT to check whether
1704 * the integrated TV is present.
1705 * If it is present, return 1.
1706 * If it is not present, return false.
1707 * If no child dev is parsed from VBT, it assumes that the TV is present.
1708 */
1709static int tv_is_present_in_vbt(struct drm_device *dev)
1710{
1711 struct drm_i915_private *dev_priv = dev->dev_private;
1712 struct child_device_config *p_child;
1713 int i, ret;
1714
1715 if (!dev_priv->child_dev_num)
1716 return 1;
1717
1718 ret = 0;
1719 for (i = 0; i < dev_priv->child_dev_num; i++) {
1720 p_child = dev_priv->child_dev + i;
1721 /*
1722 * If the device type is not TV, continue.
1723 */
1724 if (p_child->device_type != DEVICE_TYPE_INT_TV &&
1725 p_child->device_type != DEVICE_TYPE_TV)
1726 continue;
1727 /* Only when the addin_offset is non-zero, it is regarded
1728 * as present.
1729 */
1730 if (p_child->addin_offset) {
1731 ret = 1;
1732 break;
1733 }
1734 }
1735 return ret;
1736}
1705 1737
1706void 1738void
1707intel_tv_init(struct drm_device *dev) 1739intel_tv_init(struct drm_device *dev)
1708{ 1740{
1709 struct drm_i915_private *dev_priv = dev->dev_private; 1741 struct drm_i915_private *dev_priv = dev->dev_private;
1710 struct drm_connector *connector; 1742 struct drm_connector *connector;
1711 struct intel_output *intel_output; 1743 struct intel_encoder *intel_encoder;
1712 struct intel_tv_priv *tv_priv; 1744 struct intel_tv_priv *tv_priv;
1713 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1745 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1714 char **tv_format_names; 1746 char **tv_format_names;
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
1717 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) 1749 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
1718 return; 1750 return;
1719 1751
1752 if (!tv_is_present_in_vbt(dev)) {
1753 DRM_DEBUG_KMS("Integrated TV is not present.\n");
1754 return;
1755 }
1720 /* Even if we have an encoder we may not have a connector */ 1756 /* Even if we have an encoder we may not have a connector */
1721 if (!dev_priv->int_tv_support) 1757 if (!dev_priv->int_tv_support)
1722 return; 1758 return;
@@ -1744,28 +1780,28 @@ intel_tv_init(struct drm_device *dev)
1744 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1745 return; 1781 return;
1746 1782
1747 intel_output = kzalloc(sizeof(struct intel_output) + 1783 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
1748 sizeof(struct intel_tv_priv), GFP_KERNEL); 1784 sizeof(struct intel_tv_priv), GFP_KERNEL);
1749 if (!intel_output) { 1785 if (!intel_encoder) {
1750 return; 1786 return;
1751 } 1787 }
1752 1788
1753 connector = &intel_output->base; 1789 connector = &intel_encoder->base;
1754 1790
1755 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1756 DRM_MODE_CONNECTOR_SVIDEO); 1792 DRM_MODE_CONNECTOR_SVIDEO);
1757 1793
1758 drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, 1794 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
1759 DRM_MODE_ENCODER_TVDAC); 1795 DRM_MODE_ENCODER_TVDAC);
1760 1796
1761 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1797 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1762 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1798 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
1763 intel_output->type = INTEL_OUTPUT_TVOUT; 1799 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1764 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1800 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1765 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1801 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1766 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1802 intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1767 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1803 intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1768 intel_output->dev_priv = tv_priv; 1804 intel_encoder->dev_priv = tv_priv;
1769 tv_priv->type = DRM_MODE_CONNECTOR_Unknown; 1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
1770 1806
1771 /* BIOS margin values */ 1807 /* BIOS margin values */
@@ -1776,7 +1812,7 @@ intel_tv_init(struct drm_device *dev)
1776 1812
1777 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); 1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
1778 1814
1779 drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); 1815 drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
1780 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1781 connector->interlace_allowed = false; 1817 connector->interlace_allowed = false;
1782 connector->doublescan_allowed = false; 1818 connector->doublescan_allowed = false;