Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  732
1 file changed, 390 insertions(+), 342 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da..b27202d23eb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
70 intel_p2_t p2; 70 intel_p2_t p2;
71 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 71 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
72 int, int, intel_clock_t *); 72 int, int, intel_clock_t *);
73 bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
74 int, int, intel_clock_t *);
75}; 73};
76 74
77#define I8XX_DOT_MIN 25000 75#define I8XX_DOT_MIN 25000
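A minimal standalone sketch (simplified stand-ins, not the kernel's types) of what dropping find_reduced_pll means in practice: each limit table keeps a single search callback, and the LVDS downclock path later in this patch calls that same callback with the downclock target instead of a dedicated hook.

#include <stdbool.h>

/* Simplified stand-ins for intel_limit_t / intel_clock_t. */
struct clock_cfg { int n, m1, m2, p1, p2, dot; };
struct pll_limit {
	/* Single search callback; the separate find_reduced_pll is gone. */
	bool (*find_pll)(const struct pll_limit *, int target, int refclk,
			 struct clock_cfg *best);
};

/* Hypothetical caller mirroring intel_crtc_mode_set() after this patch. */
static bool pick_clocks(const struct pll_limit *limit, int target,
			int downclock, int refclk, bool is_lvds,
			struct clock_cfg *clk, struct clock_cfg *reduced)
{
	bool ok = limit->find_pll(limit, target, refclk, clk);

	/* The downclocked LVDS mode reuses the very same callback. */
	if (ok && is_lvds && downclock)
		limit->find_pll(limit, downclock, refclk, reduced);
	return ok;
}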
@@ -242,38 +240,93 @@ struct intel_limit {
242#define IRONLAKE_DOT_MAX 350000 240#define IRONLAKE_DOT_MAX 350000
243#define IRONLAKE_VCO_MIN 1760000 241#define IRONLAKE_VCO_MIN 1760000
244#define IRONLAKE_VCO_MAX 3510000 242#define IRONLAKE_VCO_MAX 3510000
245#define IRONLAKE_N_MIN 1
246#define IRONLAKE_N_MAX 5
247#define IRONLAKE_M_MIN 79
248#define IRONLAKE_M_MAX 118
249#define IRONLAKE_M1_MIN 12 243#define IRONLAKE_M1_MIN 12
250#define IRONLAKE_M1_MAX 23 244#define IRONLAKE_M1_MAX 22
251#define IRONLAKE_M2_MIN 5 245#define IRONLAKE_M2_MIN 5
252#define IRONLAKE_M2_MAX 9 246#define IRONLAKE_M2_MAX 9
253#define IRONLAKE_P_SDVO_DAC_MIN 5
254#define IRONLAKE_P_SDVO_DAC_MAX 80
255#define IRONLAKE_P_LVDS_MIN 28
256#define IRONLAKE_P_LVDS_MAX 112
257#define IRONLAKE_P1_MIN 1
258#define IRONLAKE_P1_MAX 8
259#define IRONLAKE_P2_SDVO_DAC_SLOW 10
260#define IRONLAKE_P2_SDVO_DAC_FAST 5
261#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ 247#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
264 248
249/* We have parameter ranges for different type of outputs. */
250
251/* DAC & HDMI Refclk 120Mhz */
252#define IRONLAKE_DAC_N_MIN 1
253#define IRONLAKE_DAC_N_MAX 5
254#define IRONLAKE_DAC_M_MIN 79
255#define IRONLAKE_DAC_M_MAX 127
256#define IRONLAKE_DAC_P_MIN 5
257#define IRONLAKE_DAC_P_MAX 80
258#define IRONLAKE_DAC_P1_MIN 1
259#define IRONLAKE_DAC_P1_MAX 8
260#define IRONLAKE_DAC_P2_SLOW 10
261#define IRONLAKE_DAC_P2_FAST 5
262
263/* LVDS single-channel 120Mhz refclk */
264#define IRONLAKE_LVDS_S_N_MIN 1
265#define IRONLAKE_LVDS_S_N_MAX 3
266#define IRONLAKE_LVDS_S_M_MIN 79
267#define IRONLAKE_LVDS_S_M_MAX 118
268#define IRONLAKE_LVDS_S_P_MIN 28
269#define IRONLAKE_LVDS_S_P_MAX 112
270#define IRONLAKE_LVDS_S_P1_MIN 2
271#define IRONLAKE_LVDS_S_P1_MAX 8
272#define IRONLAKE_LVDS_S_P2_SLOW 14
273#define IRONLAKE_LVDS_S_P2_FAST 14
274
275/* LVDS dual-channel 120Mhz refclk */
276#define IRONLAKE_LVDS_D_N_MIN 1
277#define IRONLAKE_LVDS_D_N_MAX 3
278#define IRONLAKE_LVDS_D_M_MIN 79
279#define IRONLAKE_LVDS_D_M_MAX 127
280#define IRONLAKE_LVDS_D_P_MIN 14
281#define IRONLAKE_LVDS_D_P_MAX 56
282#define IRONLAKE_LVDS_D_P1_MIN 2
283#define IRONLAKE_LVDS_D_P1_MAX 8
284#define IRONLAKE_LVDS_D_P2_SLOW 7
285#define IRONLAKE_LVDS_D_P2_FAST 7
286
287/* LVDS single-channel 100Mhz refclk */
288#define IRONLAKE_LVDS_S_SSC_N_MIN 1
289#define IRONLAKE_LVDS_S_SSC_N_MAX 2
290#define IRONLAKE_LVDS_S_SSC_M_MIN 79
291#define IRONLAKE_LVDS_S_SSC_M_MAX 126
292#define IRONLAKE_LVDS_S_SSC_P_MIN 28
293#define IRONLAKE_LVDS_S_SSC_P_MAX 112
294#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
295#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
296#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
297#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
298
299/* LVDS dual-channel 100Mhz refclk */
300#define IRONLAKE_LVDS_D_SSC_N_MIN 1
301#define IRONLAKE_LVDS_D_SSC_N_MAX 3
302#define IRONLAKE_LVDS_D_SSC_M_MIN 79
303#define IRONLAKE_LVDS_D_SSC_M_MAX 126
304#define IRONLAKE_LVDS_D_SSC_P_MIN 14
305#define IRONLAKE_LVDS_D_SSC_P_MAX 42
306#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
307#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
308#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
309#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
310
311/* DisplayPort */
312#define IRONLAKE_DP_N_MIN 1
313#define IRONLAKE_DP_N_MAX 2
314#define IRONLAKE_DP_M_MIN 81
315#define IRONLAKE_DP_M_MAX 90
316#define IRONLAKE_DP_P_MIN 10
317#define IRONLAKE_DP_P_MAX 20
318#define IRONLAKE_DP_P2_FAST 10
319#define IRONLAKE_DP_P2_SLOW 10
320#define IRONLAKE_DP_P2_LIMIT 0
321#define IRONLAKE_DP_P1_MIN 1
322#define IRONLAKE_DP_P1_MAX 2
323
265static bool 324static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 325intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
267 int target, int refclk, intel_clock_t *best_clock); 326 int target, int refclk, intel_clock_t *best_clock);
268static bool 327static bool
269intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
270 int target, int refclk, intel_clock_t *best_clock);
271static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 328intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 329 int target, int refclk, intel_clock_t *best_clock);
274static bool
275intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock);
277 330
278static bool 331static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 332intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
294 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 347 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
295 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 348 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
296 .find_pll = intel_find_best_PLL, 349 .find_pll = intel_find_best_PLL,
297 .find_reduced_pll = intel_find_best_reduced_PLL,
298}; 350};
299 351
300static const intel_limit_t intel_limits_i8xx_lvds = { 352static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
309 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 361 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
310 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 362 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
311 .find_pll = intel_find_best_PLL, 363 .find_pll = intel_find_best_PLL,
312 .find_reduced_pll = intel_find_best_reduced_PLL,
313}; 364};
314 365
315static const intel_limit_t intel_limits_i9xx_sdvo = { 366static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
324 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 375 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
325 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 376 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
326 .find_pll = intel_find_best_PLL, 377 .find_pll = intel_find_best_PLL,
327 .find_reduced_pll = intel_find_best_reduced_PLL,
328}; 378};
329 379
330static const intel_limit_t intel_limits_i9xx_lvds = { 380static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
342 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 392 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
343 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 393 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
344 .find_pll = intel_find_best_PLL, 394 .find_pll = intel_find_best_PLL,
345 .find_reduced_pll = intel_find_best_reduced_PLL,
346}; 395};
347 396
348 /* below parameter and function is for G4X Chipset Family*/ 397 /* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
360 .p2_fast = G4X_P2_SDVO_FAST 409 .p2_fast = G4X_P2_SDVO_FAST
361 }, 410 },
362 .find_pll = intel_g4x_find_best_PLL, 411 .find_pll = intel_g4x_find_best_PLL,
363 .find_reduced_pll = intel_g4x_find_best_PLL,
364}; 412};
365 413
366static const intel_limit_t intel_limits_g4x_hdmi = { 414static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
377 .p2_fast = G4X_P2_HDMI_DAC_FAST 425 .p2_fast = G4X_P2_HDMI_DAC_FAST
378 }, 426 },
379 .find_pll = intel_g4x_find_best_PLL, 427 .find_pll = intel_g4x_find_best_PLL,
380 .find_reduced_pll = intel_g4x_find_best_PLL,
381}; 428};
382 429
383static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 430static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
402 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 449 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
403 }, 450 },
404 .find_pll = intel_g4x_find_best_PLL, 451 .find_pll = intel_g4x_find_best_PLL,
405 .find_reduced_pll = intel_g4x_find_best_PLL,
406}; 452};
407 453
408static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 454static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
427 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 473 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
428 }, 474 },
429 .find_pll = intel_g4x_find_best_PLL, 475 .find_pll = intel_g4x_find_best_PLL,
430 .find_reduced_pll = intel_g4x_find_best_PLL,
431}; 476};
432 477
433static const intel_limit_t intel_limits_g4x_display_port = { 478static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 510 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
466 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 511 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
467 .find_pll = intel_find_best_PLL, 512 .find_pll = intel_find_best_PLL,
468 .find_reduced_pll = intel_find_best_reduced_PLL,
469}; 513};
470 514
471static const intel_limit_t intel_limits_pineview_lvds = { 515static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = {
481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 525 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 526 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
483 .find_pll = intel_find_best_PLL, 527 .find_pll = intel_find_best_PLL,
484 .find_reduced_pll = intel_find_best_reduced_PLL,
485}; 528};
486 529
487static const intel_limit_t intel_limits_ironlake_sdvo = { 530static const intel_limit_t intel_limits_ironlake_dac = {
531 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
532 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
533 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
534 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
535 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
536 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
537 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
538 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
539 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
540 .p2_slow = IRONLAKE_DAC_P2_SLOW,
541 .p2_fast = IRONLAKE_DAC_P2_FAST },
542 .find_pll = intel_g4x_find_best_PLL,
543};
544
545static const intel_limit_t intel_limits_ironlake_single_lvds = {
546 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
547 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
548 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
549 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
550 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
551 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
552 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
553 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
554 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
555 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
556 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
557 .find_pll = intel_g4x_find_best_PLL,
558};
559
560static const intel_limit_t intel_limits_ironlake_dual_lvds = {
488 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 561 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
489 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 562 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
490 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 563 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
491 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 564 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
492 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 565 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
493 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 566 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
494 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, 567 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
495 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 568 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 569 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 570 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 571 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
499 .find_pll = intel_ironlake_find_best_PLL, 572 .find_pll = intel_g4x_find_best_PLL,
500}; 573};
501 574
502static const intel_limit_t intel_limits_ironlake_lvds = { 575static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
503 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 576 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
504 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 577 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
505 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 578 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
506 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 579 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
507 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 580 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
508 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 581 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
509 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, 582 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
510 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 583 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 584 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 585 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
513 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 586 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
514 .find_pll = intel_ironlake_find_best_PLL, 587 .find_pll = intel_g4x_find_best_PLL,
588};
589
590static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
591 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
592 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
593 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
594 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
595 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
596 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
597 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
598 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
599 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
600 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
601 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
602 .find_pll = intel_g4x_find_best_PLL,
603};
604
605static const intel_limit_t intel_limits_ironlake_display_port = {
606 .dot = { .min = IRONLAKE_DOT_MIN,
607 .max = IRONLAKE_DOT_MAX },
608 .vco = { .min = IRONLAKE_VCO_MIN,
609 .max = IRONLAKE_VCO_MAX},
610 .n = { .min = IRONLAKE_DP_N_MIN,
611 .max = IRONLAKE_DP_N_MAX },
612 .m = { .min = IRONLAKE_DP_M_MIN,
613 .max = IRONLAKE_DP_M_MAX },
614 .m1 = { .min = IRONLAKE_M1_MIN,
615 .max = IRONLAKE_M1_MAX },
616 .m2 = { .min = IRONLAKE_M2_MIN,
617 .max = IRONLAKE_M2_MAX },
618 .p = { .min = IRONLAKE_DP_P_MIN,
619 .max = IRONLAKE_DP_P_MAX },
620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
621 .max = IRONLAKE_DP_P1_MAX},
622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
623 .p2_slow = IRONLAKE_DP_P2_SLOW,
624 .p2_fast = IRONLAKE_DP_P2_FAST },
625 .find_pll = intel_find_pll_ironlake_dp,
515}; 626};
516 627
517static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
518{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
519 const intel_limit_t *limit; 632 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
521 limit = &intel_limits_ironlake_lvds; 634
635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
653 HAS_eDP)
654 limit = &intel_limits_ironlake_display_port;
522 else 655 else
523 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
524 657
525 return limit; 658 return limit;
526} 659}
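As background for the ranges in the new tables: every table bounds the inputs and outputs of the same divider equation that the PLL search iterates over. The sketch below shows that derivation in i9xx style; the exact +2 offsets are chipset-specific and applied by intel_clock(), which this patch does not touch, so treat the arithmetic as illustrative only.

/* Illustrative only: the real offsets live in intel_clock() per chipset. */
struct dividers { int n, m1, m2, p1, p2; };

static void compute_clock(int refclk_khz, const struct dividers *d,
			  int *vco_khz, int *dot_khz)
{
	/* Effective multiplier/divisor built from the table-limited fields
	 * (assumed i9xx-style offsets). */
	int m = 5 * (d->m1 + 2) + (d->m2 + 2);
	int p = d->p1 * d->p2;

	*vco_khz = refclk_khz * m / (d->n + 2);	/* must land in VCO_MIN..MAX */
	*dot_khz = *vco_khz / p;		/* must land in DOT_MIN..MAX */
}

A candidate (n, m1, m2, p1, p2) is rejected whenever any derived value falls outside the table, which is why the five Ironlake tables differ mainly in the n/m/p windows they allow for each output type and reference clock.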
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
737 return (err != target); 870 return (err != target);
738} 871}
739 872
740
741static bool
742intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
743 int target, int refclk, intel_clock_t *best_clock)
744
745{
746 struct drm_device *dev = crtc->dev;
747 intel_clock_t clock;
748 int err = target;
749 bool found = false;
750
751 memcpy(&clock, best_clock, sizeof(intel_clock_t));
752
753 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
754 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
755 /* m1 is always 0 in Pineview */
756 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
757 break;
758 for (clock.n = limit->n.min; clock.n <= limit->n.max;
759 clock.n++) {
760 int this_err;
761
762 intel_clock(dev, refclk, &clock);
763
764 if (!intel_PLL_is_valid(crtc, &clock))
765 continue;
766
767 this_err = abs(clock.dot - target);
768 if (this_err < err) {
769 *best_clock = clock;
770 err = this_err;
771 found = true;
772 }
773 }
774 }
775 }
776
777 return found;
778}
779
780static bool 873static bool
781intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 874intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
782 int target, int refclk, intel_clock_t *best_clock) 875 int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
791 found = false; 884 found = false;
792 885
793 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
794 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 887 int lvds_reg;
888
889 if (IS_IRONLAKE(dev))
890 lvds_reg = PCH_LVDS;
891 else
892 lvds_reg = LVDS;
893 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
795 LVDS_CLKB_POWER_UP) 894 LVDS_CLKB_POWER_UP)
796 clock.p2 = limit->p2.p2_fast; 895 clock.p2 = limit->p2.p2_fast;
797 else 896 else
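The pre-search step this hunk generalizes, as a standalone sketch (the register names are the kernel's, the helper itself is hypothetical): pick the LVDS control register for the platform, then derive p2 from the dual-channel power state, or from the dot_limit threshold for non-LVDS outputs.

#include <stdbool.h>
#include <stdint.h>

#define LVDS_CLKB_POWER_MASK	(3 << 4)	/* assumed field layout */
#define LVDS_CLKB_POWER_UP	(3 << 4)

/* Hypothetical helper mirroring the p2 selection before the divider loop. */
static int choose_p2(uint32_t lvds_ctl, bool is_lvds, int target,
		     int dot_limit, int p2_slow, int p2_fast)
{
	if (is_lvds)	/* dual-channel LVDS wants the fast divider */
		return (lvds_ctl & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP
			? p2_fast : p2_slow;
	return target < dot_limit ? p2_slow : p2_fast;
}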
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
839{ 938{
840 struct drm_device *dev = crtc->dev; 939 struct drm_device *dev = crtc->dev;
841 intel_clock_t clock; 940 intel_clock_t clock;
941
942 /* return directly when it is eDP */
943 if (HAS_eDP)
944 return true;
945
842 if (target < 200000) { 946 if (target < 200000) {
843 clock.n = 1; 947 clock.n = 1;
844 clock.p1 = 2; 948 clock.p1 = 2;
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
857 return true; 961 return true;
858} 962}
859 963
860static bool
861intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
862 int target, int refclk, intel_clock_t *best_clock)
863{
864 struct drm_device *dev = crtc->dev;
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 intel_clock_t clock;
867 int err_most = 47;
868 int err_min = 10000;
869
870 /* eDP has only 2 clock choice, no n/m/p setting */
871 if (HAS_eDP)
872 return true;
873
874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
875 return intel_find_pll_ironlake_dp(limit, crtc, target,
876 refclk, best_clock);
877
878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
879 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
880 LVDS_CLKB_POWER_UP)
881 clock.p2 = limit->p2.p2_fast;
882 else
883 clock.p2 = limit->p2.p2_slow;
884 } else {
885 if (target < limit->p2.dot_limit)
886 clock.p2 = limit->p2.p2_slow;
887 else
888 clock.p2 = limit->p2.p2_fast;
889 }
890
891 memset(best_clock, 0, sizeof(*best_clock));
892 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
893 /* based on hardware requriment prefer smaller n to precision */
894 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
895 /* based on hardware requirment prefere larger m1,m2 */
896 for (clock.m1 = limit->m1.max;
897 clock.m1 >= limit->m1.min; clock.m1--) {
898 for (clock.m2 = limit->m2.max;
899 clock.m2 >= limit->m2.min; clock.m2--) {
900 int this_err;
901
902 intel_clock(dev, refclk, &clock);
903 if (!intel_PLL_is_valid(crtc, &clock))
904 continue;
905 this_err = abs((10000 - (target*10000/clock.dot)));
906 if (this_err < err_most) {
907 *best_clock = clock;
908 /* found on first matching */
909 goto out;
910 } else if (this_err < err_min) {
911 *best_clock = clock;
912 err_min = this_err;
913 }
914 }
915 }
916 }
917 }
918out:
919 return true;
920}
921
922/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 964/* DisplayPort has only two frequencies, 162MHz and 270MHz */
923static bool 965static bool
924intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 966intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
989 1031
990 /* enable it... */ 1032 /* enable it... */
991 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
992 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
993 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
994 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1282 return ret; 1326 return ret;
1283 } 1327 }
1284 1328
1285 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 1329 ret = i915_gem_object_set_to_display_plane(obj);
1286 if (ret != 0) { 1330 if (ret != 0) {
1287 i915_gem_object_unpin(obj); 1331 i915_gem_object_unpin(obj);
1288 mutex_unlock(&dev->struct_mutex); 1332 mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1493 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1537 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1494 u32 temp; 1538 u32 temp;
1495 int tries = 5, j, n; 1539 int tries = 5, j, n;
1540 u32 pipe_bpc;
1541
1542 temp = I915_READ(pipeconf_reg);
1543 pipe_bpc = temp & PIPE_BPC_MASK;
1496 1544
1497 /* XXX: When our outputs are all unaware of DPMS modes other than off 1545 /* XXX: When our outputs are all unaware of DPMS modes other than off
1498 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1546 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1524 1572
1525 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1573 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1526 temp = I915_READ(fdi_rx_reg); 1574 temp = I915_READ(fdi_rx_reg);
1575 /*
1576 * make the BPC in FDI Rx be consistent with that in
1577 * pipeconf reg.
1578 */
1579 temp &= ~(0x7 << 16);
1580 temp |= (pipe_bpc << 11);
1527 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1581 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
1528 FDI_SEL_PCDCLK | 1582 FDI_SEL_PCDCLK |
1529 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1583 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1666 1720
1667 /* enable PCH transcoder */ 1721 /* enable PCH transcoder */
1668 temp = I915_READ(transconf_reg); 1722 temp = I915_READ(transconf_reg);
1723 /*
1724 * make the BPC in transcoder be consistent with
1725 * that in pipeconf reg.
1726 */
1727 temp &= ~PIPE_BPC_MASK;
1728 temp |= pipe_bpc;
1669 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1729 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1670 I915_READ(transconf_reg); 1730 I915_READ(transconf_reg);
1671 1731
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1697 case DRM_MODE_DPMS_OFF: 1757 case DRM_MODE_DPMS_OFF:
1698 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1758 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1699 1759
1760 drm_vblank_off(dev, pipe);
1700 /* Disable display plane */ 1761 /* Disable display plane */
1701 temp = I915_READ(dspcntr_reg); 1762 temp = I915_READ(dspcntr_reg);
1702 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1763 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1745 I915_READ(fdi_tx_reg); 1806 I915_READ(fdi_tx_reg);
1746 1807
1747 temp = I915_READ(fdi_rx_reg); 1808 temp = I915_READ(fdi_rx_reg);
1809 /* BPC in FDI rx is consistent with that in pipeconf */
1810 temp &= ~(0x07 << 16);
1811 temp |= (pipe_bpc << 11);
1748 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1812 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1749 I915_READ(fdi_rx_reg); 1813 I915_READ(fdi_rx_reg);
1750 1814
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1789 } 1853 }
1790 } 1854 }
1791 } 1855 }
1792 1856 temp = I915_READ(transconf_reg);
1857 /* BPC in transcoder is consistent with that in pipeconf */
1858 temp &= ~PIPE_BPC_MASK;
1859 temp |= pipe_bpc;
1860 I915_WRITE(transconf_reg, temp);
1861 I915_READ(transconf_reg);
1793 udelay(100); 1862 udelay(100);
1794 1863
1795 /* disable PCH DPLL */ 1864 /* disable PCH DPLL */
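A note on the shifts used in the read-modify-write sequences above: pipe_bpc is kept in its PIPECONF position, the FDI receiver stores the same field higher up, so a single left shift relocates it, and the transcoder register reuses the PIPECONF layout and needs no shift at all. A minimal sketch follows; the bit positions are those implied by the masks in this hunk and should be treated as assumptions.

#include <stdint.h>

#define PIPE_BPC_MASK		(7 << 5)	/* assumed: PIPECONF bits 7:5 */
#define FDI_RX_BPC_MASK		(7 << 16)	/* assumed: FDI_RX_CTL bits 18:16 */

/* Propagate the pipe's BPC setting into an FDI RX control value. */
static uint32_t fdi_rx_with_pipe_bpc(uint32_t fdi_rx_ctl, uint32_t pipeconf)
{
	uint32_t pipe_bpc = pipeconf & PIPE_BPC_MASK;

	fdi_rx_ctl &= ~FDI_RX_BPC_MASK;
	/* bits 7:5 shifted left by 11 land in bits 18:16 */
	return fdi_rx_ctl | (pipe_bpc << 11);
}

/* The transcoder uses the PIPECONF layout directly, so no shift is needed. */
static uint32_t trans_with_pipe_bpc(uint32_t transconf, uint32_t pipeconf)
{
	return (transconf & ~PIPE_BPC_MASK) | (pipeconf & PIPE_BPC_MASK);
}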
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2448 * A value of 5us seems to be a good balance; safe for very low end 2517 * A value of 5us seems to be a good balance; safe for very low end
2449 * platforms but not overly aggressive on lower latency configs. 2518 * platforms but not overly aggressive on lower latency configs.
2450 */ 2519 */
2451const static int latency_ns = 5000; 2520static const int latency_ns = 5000;
2452 2521
2453static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2522static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2454{ 2523{
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2559 /* Calc sr entries for one plane configs */ 2628 /* Calc sr entries for one plane configs */
2560 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2629 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2561 /* self-refresh has much higher latency */ 2630 /* self-refresh has much higher latency */
2562 const static int sr_latency_ns = 12000; 2631 static const int sr_latency_ns = 12000;
2563 2632
2564 sr_clock = planea_clock ? planea_clock : planeb_clock; 2633 sr_clock = planea_clock ? planea_clock : planeb_clock;
2565 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2634 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2570 sr_entries = roundup(sr_entries / cacheline_size, 1); 2639 sr_entries = roundup(sr_entries / cacheline_size, 1);
2571 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2640 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2572 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2641 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2642 } else {
2643 /* Turn off self refresh if both pipes are enabled */
2644 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2645 & ~FW_BLC_SELF_EN);
2573 } 2646 }
2574 2647
2575 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2648 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2598 /* Calc sr entries for one plane configs */ 2671 /* Calc sr entries for one plane configs */
2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2672 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2600 /* self-refresh has much higher latency */ 2673 /* self-refresh has much higher latency */
2601 const static int sr_latency_ns = 12000; 2674 static const int sr_latency_ns = 12000;
2602 2675
2603 sr_clock = planea_clock ? planea_clock : planeb_clock; 2676 sr_clock = planea_clock ? planea_clock : planeb_clock;
2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2677 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2613 srwm = 1; 2686 srwm = 1;
2614 srwm &= 0x3f; 2687 srwm &= 0x3f;
2615 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2616 } 2693 }
2617 2694
2618 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2667 if (HAS_FW_BLC(dev) && sr_hdisplay && 2744 if (HAS_FW_BLC(dev) && sr_hdisplay &&
2668 (!planea_clock || !planeb_clock)) { 2745 (!planea_clock || !planeb_clock)) {
2669 /* self-refresh has much higher latency */ 2746 /* self-refresh has much higher latency */
2670 const static int sr_latency_ns = 6000; 2747 static const int sr_latency_ns = 6000;
2671 2748
2672 sr_clock = planea_clock ? planea_clock : planeb_clock; 2749 sr_clock = planea_clock ? planea_clock : planeb_clock;
2673 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2750 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2681 if (srwm < 0) 2758 if (srwm < 0)
2682 srwm = 1; 2759 srwm = 1;
2683 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2684 } 2765 }
2685 2766
2686 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
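The same pattern is applied to all three watermark functions above: program a self-refresh watermark only when a single pipe is active, otherwise clear the enable bit so a stale self-refresh setting cannot survive a second pipe turning on. A condensed sketch of that control flow, with the register value passed by pointer as a stand-in for the FW_BLC_SELF MMIO access:

#include <stdbool.h>
#include <stdint.h>

#define FW_BLC_SELF_EN	(1u << 15)	/* assumed bit position */

static void update_self_refresh(uint32_t *fw_blc_self,
				bool single_pipe_active, uint32_t srwm)
{
	if (single_pipe_active)
		/* One pipe active: program the SR watermark and enable it. */
		*fw_blc_self = FW_BLC_SELF_EN | (srwm & 0x3f);
	else
		/* Both pipes enabled: turn self-refresh off. */
		*fw_blc_self &= ~FW_BLC_SELF_EN;
}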
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2906 return -EINVAL; 2987 return -EINVAL;
2907 } 2988 }
2908 2989
2909 if (is_lvds && limit->find_reduced_pll && 2990 if (is_lvds && dev_priv->lvds_downclock_avail) {
2910 dev_priv->lvds_downclock_avail) { 2991 has_reduced_clock = limit->find_pll(limit, crtc,
2911 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
2912 has_reduced_clock = limit->find_reduced_pll(limit, crtc,
2913 dev_priv->lvds_downclock, 2992 dev_priv->lvds_downclock,
2914 refclk, 2993 refclk,
2915 &reduced_clock); 2994 &reduced_clock);
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2969 3048
2970 /* determine panel color depth */ 3049 /* determine panel color depth */
2971 temp = I915_READ(pipeconf_reg); 3050 temp = I915_READ(pipeconf_reg);
3051 temp &= ~PIPE_BPC_MASK;
3052 if (is_lvds) {
3053 int lvds_reg = I915_READ(PCH_LVDS);
3054 /* the BPC will be 6 if it is 18-bit LVDS panel */
3055 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3056 temp |= PIPE_8BPC;
3057 else
3058 temp |= PIPE_6BPC;
3059 } else if (is_edp) {
3060 switch (dev_priv->edp_bpp/3) {
3061 case 8:
3062 temp |= PIPE_8BPC;
3063 break;
3064 case 10:
3065 temp |= PIPE_10BPC;
3066 break;
3067 case 6:
3068 temp |= PIPE_6BPC;
3069 break;
3070 case 12:
3071 temp |= PIPE_12BPC;
3072 break;
3073 }
3074 } else
3075 temp |= PIPE_8BPC;
3076 I915_WRITE(pipeconf_reg, temp);
3077 I915_READ(pipeconf_reg);
2972 3078
2973 switch (temp & PIPE_BPC_MASK) { 3079 switch (temp & PIPE_BPC_MASK) {
2974 case PIPE_8BPC: 3080 case PIPE_8BPC:
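In the eDP branch above, dev_priv->edp_bpp appears to carry total bits per pixel, so dividing by three yields bits per color channel (for example 24 bpp / 3 = 8 bpc, hence PIPE_8BPC). A tiny sketch of that mapping, with the PIPE_*BPC encodings assumed from i915_reg.h:

#include <stdint.h>

/* Assumed PIPECONF BPC field encodings (bits 7:5). */
#define PIPE_8BPC	(0 << 5)
#define PIPE_10BPC	(1 << 5)
#define PIPE_6BPC	(2 << 5)
#define PIPE_12BPC	(3 << 5)

static uint32_t edp_pipe_bpc(int edp_bpp)
{
	switch (edp_bpp / 3) {		/* e.g. 24 bpp / 3 = 8 bpc */
	case 6:  return PIPE_6BPC;
	case 10: return PIPE_10BPC;
	case 12: return PIPE_12BPC;
	case 8:
	default: return PIPE_8BPC;	/* sketch default; the hunk falls through */
	}
}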
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3195 * appropriately here, but we need to look more thoroughly into how 3301 * appropriately here, but we need to look more thoroughly into how
3196 * panels behave in the two modes. 3302 * panels behave in the two modes.
3197 */ 3303 */
3198 3304 /* set the dithering flag */
3305 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER;
3309 else
3310 lvds |= LVDS_ENABLE_DITHER;
3311 } else {
3312 if (IS_IRONLAKE(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else
3315 lvds &= ~LVDS_ENABLE_DITHER;
3316 }
3317 }
3199 I915_WRITE(lvds_reg, lvds); 3318 I915_WRITE(lvds_reg, lvds);
3200 I915_READ(lvds_reg); 3319 I915_READ(lvds_reg);
3201 } 3320 }
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3385 3504
3386 /* we only need to pin inside GTT if cursor is non-phy */ 3505 /* we only need to pin inside GTT if cursor is non-phy */
3387 mutex_lock(&dev->struct_mutex); 3506 mutex_lock(&dev->struct_mutex);
3388 if (!dev_priv->cursor_needs_physical) { 3507 if (!dev_priv->info->cursor_needs_physical) {
3389 ret = i915_gem_object_pin(bo, PAGE_SIZE); 3508 ret = i915_gem_object_pin(bo, PAGE_SIZE);
3390 if (ret) { 3509 if (ret) {
3391 DRM_ERROR("failed to pin cursor bo\n"); 3510 DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3420 I915_WRITE(base, addr); 3539 I915_WRITE(base, addr);
3421 3540
3422 if (intel_crtc->cursor_bo) { 3541 if (intel_crtc->cursor_bo) {
3423 if (dev_priv->cursor_needs_physical) { 3542 if (dev_priv->info->cursor_needs_physical) {
3424 if (intel_crtc->cursor_bo != bo) 3543 if (intel_crtc->cursor_bo != bo)
3425 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 3544 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
3426 } else 3545 } else
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
3779 queue_work(dev_priv->wq, &dev_priv->idle_work); 3898 queue_work(dev_priv->wq, &dev_priv->idle_work);
3780} 3899}
3781 3900
3782void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3783{
3784 drm_i915_private_t *dev_priv = dev->dev_private;
3785
3786 if (IS_IRONLAKE(dev))
3787 return;
3788
3789 if (!dev_priv->render_reclock_avail) {
3790 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3791 return;
3792 }
3793
3794 /* Restore render clock frequency to original value */
3795 if (IS_G4X(dev) || IS_I9XX(dev))
3796 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3797 else if (IS_I85X(dev))
3798 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3799 DRM_DEBUG_DRIVER("increasing render clock frequency\n");
3800
3801 /* Schedule downclock */
3802 if (schedule)
3803 mod_timer(&dev_priv->idle_timer, jiffies +
3804 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
3805}
3806
3807void intel_decrease_renderclock(struct drm_device *dev)
3808{
3809 drm_i915_private_t *dev_priv = dev->dev_private;
3810
3811 if (IS_IRONLAKE(dev))
3812 return;
3813
3814 if (!dev_priv->render_reclock_avail) {
3815 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3816 return;
3817 }
3818
3819 if (IS_G4X(dev)) {
3820 u16 gcfgc;
3821
3822 /* Adjust render clock... */
3823 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3824
3825 /* Down to minimum... */
3826 gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
3827 gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
3828
3829 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3830 } else if (IS_I965G(dev)) {
3831 u16 gcfgc;
3832
3833 /* Adjust render clock... */
3834 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3835
3836 /* Down to minimum... */
3837 gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
3838 gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
3839
3840 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3841 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
3842 u16 gcfgc;
3843
3844 /* Adjust render clock... */
3845 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3846
3847 /* Down to minimum... */
3848 gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
3849 gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
3850
3851 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3852 } else if (IS_I915G(dev)) {
3853 u16 gcfgc;
3854
3855 /* Adjust render clock... */
3856 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3857
3858 /* Down to minimum... */
3859 gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
3860 gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
3861
3862 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3863 } else if (IS_I85X(dev)) {
3864 u16 hpllcc;
3865
3866 /* Adjust render clock... */
3867 pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
3868
3869 /* Up to maximum... */
3870 hpllcc &= ~GC_CLOCK_CONTROL_MASK;
3871 hpllcc |= GC_CLOCK_133_200;
3872
3873 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3874 }
3875 DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
3876}
3877
3878/* Note that no increase function is needed for this - increase_renderclock()
3879 * will also rewrite these bits
3880 */
3881void intel_decrease_displayclock(struct drm_device *dev)
3882{
3883 if (IS_IRONLAKE(dev))
3884 return;
3885
3886 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
3887 IS_I915GM(dev)) {
3888 u16 gcfgc;
3889
3890 /* Adjust render clock... */
3891 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3892
3893 /* Down to minimum... */
3894 gcfgc &= ~0xf0;
3895 gcfgc |= 0x80;
3896
3897 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3898 }
3899}
3900
3901#define CRTC_IDLE_TIMEOUT 1000 /* ms */ 3901#define CRTC_IDLE_TIMEOUT 1000 /* ms */
3902 3902
3903static void intel_crtc_idle_timer(unsigned long arg) 3903static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work)
4011 4011
4012 mutex_lock(&dev->struct_mutex); 4012 mutex_lock(&dev->struct_mutex);
4013 4013
4014 /* GPU isn't processing, downclock it. */
4015 if (!dev_priv->busy) {
4016 intel_decrease_renderclock(dev);
4017 intel_decrease_displayclock(dev);
4018 }
4019
4020 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4021 /* Skip inactive CRTCs */ 4015 /* Skip inactive CRTCs */
4022 if (!crtc->fb) 4016 if (!crtc->fb)
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4050 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4044 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4051 return; 4045 return;
4052 4046
4053 if (!dev_priv->busy) { 4047 if (!dev_priv->busy)
4054 dev_priv->busy = true; 4048 dev_priv->busy = true;
4055 intel_increase_renderclock(dev, true); 4049 else
4056 } else {
4057 mod_timer(&dev_priv->idle_timer, jiffies + 4050 mod_timer(&dev_priv->idle_timer, jiffies +
4058 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4051 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4059 }
4060 4052
4061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4053 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4062 if (!crtc->fb) 4054 if (!crtc->fb)
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4089struct intel_unpin_work { 4081struct intel_unpin_work {
4090 struct work_struct work; 4082 struct work_struct work;
4091 struct drm_device *dev; 4083 struct drm_device *dev;
4092 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
4093 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
4094 int pending; 4087 int pending;
4095}; 4088};
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
4100 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
4101 4094
4102 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
4103 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
4104 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
4105 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
4106 kfree(work); 4100 kfree(work);
4107} 4101}
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4124 spin_lock_irqsave(&dev->event_lock, flags); 4118 spin_lock_irqsave(&dev->event_lock, flags);
4125 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
4126 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4121 if (work && !work->pending) {
4122 obj_priv = work->pending_flip_obj->driver_private;
4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4124 obj_priv,
4125 atomic_read(&obj_priv->pending_flip));
4126 }
4127 spin_unlock_irqrestore(&dev->event_lock, flags); 4127 spin_unlock_irqrestore(&dev->event_lock, flags);
4128 return; 4128 return;
4129 } 4129 }
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4144 4144
4145 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4146 4146
4147 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4148 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4148
4149 /* Initial scanout buffer will have a 0 pending flip count */
4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4151 atomic_dec_and_test(&obj_priv->pending_flip))
4149 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4152 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4150 schedule_work(&work->work); 4153 schedule_work(&work->work);
4151} 4154}
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4158 unsigned long flags; 4161 unsigned long flags;
4159 4162
4160 spin_lock_irqsave(&dev->event_lock, flags); 4163 spin_lock_irqsave(&dev->event_lock, flags);
4161 if (intel_crtc->unpin_work) 4164 if (intel_crtc->unpin_work) {
4162 intel_crtc->unpin_work->pending = 1; 4165 intel_crtc->unpin_work->pending = 1;
4166 } else {
4167 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4168 }
4163 spin_unlock_irqrestore(&dev->event_lock, flags); 4169 spin_unlock_irqrestore(&dev->event_lock, flags);
4164} 4170}
4165 4171
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4176 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4177 unsigned long flags; 4183 unsigned long flags;
4178 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4179 RING_LOCALS; 4186 RING_LOCALS;
4180 4187
4181 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4187 work->event = event; 4194 work->event = event;
4188 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4189 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4190 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4191 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4192 4199
4193 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
4194 spin_lock_irqsave(&dev->event_lock, flags); 4201 spin_lock_irqsave(&dev->event_lock, flags);
4195 if (intel_crtc->unpin_work) { 4202 if (intel_crtc->unpin_work) {
4203 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4196 spin_unlock_irqrestore(&dev->event_lock, flags); 4204 spin_unlock_irqrestore(&dev->event_lock, flags);
4197 kfree(work); 4205 kfree(work);
4198 mutex_unlock(&dev->struct_mutex); 4206 mutex_unlock(&dev->struct_mutex);
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4206 4214
4207 ret = intel_pin_and_fence_fb_obj(dev, obj); 4215 ret = intel_pin_and_fence_fb_obj(dev, obj);
4208 if (ret != 0) { 4216 if (ret != 0) {
4217 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4218 obj->driver_private);
4209 kfree(work); 4219 kfree(work);
4220 intel_crtc->unpin_work = NULL;
4210 mutex_unlock(&dev->struct_mutex); 4221 mutex_unlock(&dev->struct_mutex);
4211 return ret; 4222 return ret;
4212 } 4223 }
4213 4224
4214 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4215 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4216 4228
4217 crtc->fb = fb; 4229 crtc->fb = fb;
4218 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4219 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4220 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4221 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4222 4235
4223 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4224 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4226 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4227 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4228 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4229 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4230 } else { 4244 } else {
4231 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4232 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
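To summarize the bookkeeping this hunk reworks: queuing a flip now references both the old framebuffer object and the newly pinned one, bumps pending_flip on the new object, and the completion path wakes waiters even when the count is already zero, since the initial scanout buffer never had a flip queued against it. A compressed sketch of that lifecycle with simplified types:

#include <stdatomic.h>
#include <stdbool.h>

struct gem_obj { atomic_int pending_flip; };

struct unpin_work {
	struct gem_obj *old_fb_obj;		/* unpinned + unreferenced later */
	struct gem_obj *pending_flip_obj;	/* the buffer being flipped in */
};

/* Queue side: mirrors intel_crtc_page_flip() after this patch. */
static void queue_flip(struct unpin_work *work, struct gem_obj *old_fb,
		       struct gem_obj *new_fb)
{
	work->old_fb_obj = old_fb;
	work->pending_flip_obj = new_fb;
	atomic_fetch_add(&new_fb->pending_flip, 1);
	/* ... emit MI_DISPLAY_FLIP ... */
}

/* Completion side: mirrors intel_finish_page_flip(). */
static bool flip_finished_wakes_waiters(struct unpin_work *work)
{
	struct gem_obj *obj = work->pending_flip_obj;

	/* Initial scanout buffer has a zero count: still wake waiters. */
	if (atomic_load(&obj->pending_flip) == 0)
		return true;
	return atomic_fetch_sub(&obj->pending_flip, 1) == 1;
}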
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev)
4400 bool found = false; 4414 bool found = false;
4401 4415
4402 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4416 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4417 DRM_DEBUG_KMS("probing SDVOB\n");
4403 found = intel_sdvo_init(dev, SDVOB); 4418 found = intel_sdvo_init(dev, SDVOB);
4404 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 4419 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
4420 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
4405 intel_hdmi_init(dev, SDVOB); 4421 intel_hdmi_init(dev, SDVOB);
4422 }
4406 4423
4407 if (!found && SUPPORTS_INTEGRATED_DP(dev)) 4424 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
4425 DRM_DEBUG_KMS("probing DP_B\n");
4408 intel_dp_init(dev, DP_B); 4426 intel_dp_init(dev, DP_B);
4427 }
4409 } 4428 }
4410 4429
4411 /* Before G4X SDVOC doesn't have its own detect register */ 4430 /* Before G4X SDVOC doesn't have its own detect register */
4412 4431
4413 if (I915_READ(SDVOB) & SDVO_DETECTED) 4432 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4433 DRM_DEBUG_KMS("probing SDVOC\n");
4414 found = intel_sdvo_init(dev, SDVOC); 4434 found = intel_sdvo_init(dev, SDVOC);
4435 }
4415 4436
4416 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 4437 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
4417 4438
4418 if (SUPPORTS_INTEGRATED_HDMI(dev)) 4439 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
4440 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
4419 intel_hdmi_init(dev, SDVOC); 4441 intel_hdmi_init(dev, SDVOC);
4420 if (SUPPORTS_INTEGRATED_DP(dev)) 4442 }
4443 if (SUPPORTS_INTEGRATED_DP(dev)) {
4444 DRM_DEBUG_KMS("probing DP_C\n");
4421 intel_dp_init(dev, DP_C); 4445 intel_dp_init(dev, DP_C);
4446 }
4422 } 4447 }
4423 4448
4424 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4449 if (SUPPORTS_INTEGRATED_DP(dev) &&
4450 (I915_READ(DP_D) & DP_DETECTED)) {
4451 DRM_DEBUG_KMS("probing DP_D\n");
4425 intel_dp_init(dev, DP_D); 4452 intel_dp_init(dev, DP_D);
4453 }
4426 } else if (IS_I8XX(dev)) 4454 } else if (IS_I8XX(dev))
4427 intel_dvo_init(dev); 4455 intel_dvo_init(dev);
4428 4456
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
4527 .fb_changed = intelfb_probe, 4555 .fb_changed = intelfb_probe,
4528}; 4556};
4529 4557
4558static struct drm_gem_object *
4559intel_alloc_power_context(struct drm_device *dev)
4560{
4561 struct drm_gem_object *pwrctx;
4562 int ret;
4563
4564 pwrctx = drm_gem_object_alloc(dev, 4096);
4565 if (!pwrctx) {
4566 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4567 return NULL;
4568 }
4569
4570 mutex_lock(&dev->struct_mutex);
4571 ret = i915_gem_object_pin(pwrctx, 4096);
4572 if (ret) {
4573 DRM_ERROR("failed to pin power context: %d\n", ret);
4574 goto err_unref;
4575 }
4576
4577 ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4578 if (ret) {
4579 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
4580 goto err_unpin;
4581 }
4582 mutex_unlock(&dev->struct_mutex);
4583
4584 return pwrctx;
4585
4586err_unpin:
4587 i915_gem_object_unpin(pwrctx);
4588err_unref:
4589 drm_gem_object_unreference(pwrctx);
4590 mutex_unlock(&dev->struct_mutex);
4591 return NULL;
4592}
4593
4530void intel_init_clock_gating(struct drm_device *dev) 4594void intel_init_clock_gating(struct drm_device *dev)
4531{ 4595{
4532 struct drm_i915_private *dev_priv = dev->dev_private; 4596 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev)
4579 * GPU can automatically power down the render unit if given a page 4643 * GPU can automatically power down the render unit if given a page
4580 * to save state. 4644 * to save state.
4581 */ 4645 */
4582 if (I915_HAS_RC6(dev)) { 4646 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
4583 struct drm_gem_object *pwrctx; 4647 struct drm_i915_gem_object *obj_priv = NULL;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586 4648
4587 if (dev_priv->pwrctx) { 4649 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private; 4650 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else { 4651 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096); 4652 struct drm_gem_object *pwrctx;
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596 4653
4597 ret = i915_gem_object_pin(pwrctx, 4096); 4654 pwrctx = intel_alloc_power_context(dev);
4598 if (ret) { 4655 if (pwrctx) {
4599 DRM_ERROR("failed to pin power context: %d\n", 4656 dev_priv->pwrctx = pwrctx;
4600 ret); 4657 obj_priv = pwrctx->driver_private;
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 } 4658 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 } 4659 }
4610 4660
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); 4661 if (obj_priv) {
4612 I915_WRITE(MCHBAR_RENDER_STANDBY, 4662 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 4663 I915_WRITE(MCHBAR_RENDER_STANDBY,
4664 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4665 }
4614 } 4666 }
4615
4616out:
4617 return;
4618} 4667}
4619 4668
4620/* Set up chip specific display functions */ 4669/* Set up chip specific display functions */
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
4770 del_timer_sync(&intel_crtc->idle_timer); 4819 del_timer_sync(&intel_crtc->idle_timer);
4771 } 4820 }
4772 4821
4773 intel_increase_renderclock(dev, false);
4774 del_timer_sync(&dev_priv->idle_timer); 4822 del_timer_sync(&dev_priv->idle_timer);
4775 4823
4776 if (dev_priv->display.disable_fbc) 4824 if (dev_priv->display.disable_fbc)