Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  498
1 file changed, 196 insertions, 302 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da..45da78ef4a9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
         intel_p2_t p2;
         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
                           int, int, intel_clock_t *);
-        bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
-                                  int, int, intel_clock_t *);
 };
 
 #define I8XX_DOT_MIN 25000
@@ -243,11 +241,11 @@ struct intel_limit {
 #define IRONLAKE_VCO_MIN 1760000
 #define IRONLAKE_VCO_MAX 3510000
 #define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 5
+#define IRONLAKE_N_MAX 6
 #define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 118
+#define IRONLAKE_M_MAX 127
 #define IRONLAKE_M1_MIN 12
-#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M1_MAX 22
 #define IRONLAKE_M2_MIN 5
 #define IRONLAKE_M2_MAX 9
 #define IRONLAKE_P_SDVO_DAC_MIN 5
@@ -262,18 +260,20 @@ struct intel_limit {
 #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
 
+#define IRONLAKE_P_DISPLAY_PORT_MIN 10
+#define IRONLAKE_P_DISPLAY_PORT_MAX 20
+#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
+#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
+#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
+#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
+#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                     int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                            int target, int refclk, intel_clock_t *best_clock);
-static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                             int target, int refclk, intel_clock_t *best_clock);
 
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +294,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
         .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +308,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
         .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +322,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
         .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +339,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
         .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
     /* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +356,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
                 .p2_fast = G4X_P2_SDVO_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +372,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
                 .p2_fast = G4X_P2_HDMI_DAC_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +396,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
                 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +420,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
                 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +457,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
         .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,7 +472,6 @@ static const intel_limit_t intel_limits_pineview_lvds = {
         .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_sdvo = {
@@ -496,7 +486,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
         .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
                 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
                 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
-        .find_pll = intel_ironlake_find_best_PLL,
+        .find_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_lvds = {
@@ -511,7 +501,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
         .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
                 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
                 .p2_fast = IRONLAKE_P2_LVDS_FAST },
-        .find_pll = intel_ironlake_find_best_PLL,
+        .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+        .dot = { .min = IRONLAKE_DOT_MIN,
+                 .max = IRONLAKE_DOT_MAX },
+        .vco = { .min = IRONLAKE_VCO_MIN,
+                 .max = IRONLAKE_VCO_MAX},
+        .n = { .min = IRONLAKE_N_MIN,
+               .max = IRONLAKE_N_MAX },
+        .m = { .min = IRONLAKE_M_MIN,
+               .max = IRONLAKE_M_MAX },
+        .m1 = { .min = IRONLAKE_M1_MIN,
+                .max = IRONLAKE_M1_MAX },
+        .m2 = { .min = IRONLAKE_M2_MIN,
+                .max = IRONLAKE_M2_MAX },
+        .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
+               .max = IRONLAKE_P_DISPLAY_PORT_MAX },
+        .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
+                .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
+        .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
+                .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
+                .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+        .find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
@@ -519,6 +532,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
         const intel_limit_t *limit;
         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                 limit = &intel_limits_ironlake_lvds;
+        else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+                 HAS_eDP)
+                limit = &intel_limits_ironlake_display_port;
         else
                 limit = &intel_limits_ironlake_sdvo;
 
@@ -737,46 +753,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
         return (err != target);
 }
 
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                            int target, int refclk, intel_clock_t *best_clock)
-
-{
-        struct drm_device *dev = crtc->dev;
-        intel_clock_t clock;
-        int err = target;
-        bool found = false;
-
-        memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
-        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-                for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-                        /* m1 is always 0 in Pineview */
-                        if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
-                                break;
-                        for (clock.n = limit->n.min; clock.n <= limit->n.max;
-                             clock.n++) {
-                                int this_err;
-
-                                intel_clock(dev, refclk, &clock);
-
-                                if (!intel_PLL_is_valid(crtc, &clock))
-                                        continue;
-
-                                this_err = abs(clock.dot - target);
-                                if (this_err < err) {
-                                        *best_clock = clock;
-                                        err = this_err;
-                                        found = true;
-                                }
-                        }
-                }
-        }
-
-        return found;
-}
-
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +767,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
         found = false;
 
         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                int lvds_reg;
+
+                if (IS_IRONLAKE(dev))
+                        lvds_reg = PCH_LVDS;
+                else
+                        lvds_reg = LVDS;
+                if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
                     LVDS_CLKB_POWER_UP)
                         clock.p2 = limit->p2.p2_fast;
                 else
@@ -839,6 +821,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
         struct drm_device *dev = crtc->dev;
         intel_clock_t clock;
+
+        /* return directly when it is eDP */
+        if (HAS_eDP)
+                return true;
+
         if (target < 200000) {
                 clock.n = 1;
                 clock.p1 = 2;
@@ -857,68 +844,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
         return true;
 }
 
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                             int target, int refclk, intel_clock_t *best_clock)
-{
-        struct drm_device *dev = crtc->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        intel_clock_t clock;
-        int err_most = 47;
-        int err_min = 10000;
-
-        /* eDP has only 2 clock choice, no n/m/p setting */
-        if (HAS_eDP)
-                return true;
-
-        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-                return intel_find_pll_ironlake_dp(limit, crtc, target,
-                                                  refclk, best_clock);
-
-        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-                if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
-                    LVDS_CLKB_POWER_UP)
-                        clock.p2 = limit->p2.p2_fast;
-                else
-                        clock.p2 = limit->p2.p2_slow;
-        } else {
-                if (target < limit->p2.dot_limit)
-                        clock.p2 = limit->p2.p2_slow;
-                else
-                        clock.p2 = limit->p2.p2_fast;
-        }
-
-        memset(best_clock, 0, sizeof(*best_clock));
-        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-                /* based on hardware requriment prefer smaller n to precision */
-                for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
-                        /* based on hardware requirment prefere larger m1,m2 */
-                        for (clock.m1 = limit->m1.max;
-                             clock.m1 >= limit->m1.min; clock.m1--) {
-                                for (clock.m2 = limit->m2.max;
-                                     clock.m2 >= limit->m2.min; clock.m2--) {
-                                        int this_err;
-
-                                        intel_clock(dev, refclk, &clock);
-                                        if (!intel_PLL_is_valid(crtc, &clock))
-                                                continue;
-                                        this_err = abs((10000 - (target*10000/clock.dot)));
-                                        if (this_err < err_most) {
-                                                *best_clock = clock;
-                                                /* found on first matching */
-                                                goto out;
-                                        } else if (this_err < err_min) {
-                                                *best_clock = clock;
-                                                err_min = this_err;
-                                        }
-                                }
-                        }
-                }
-        }
-out:
-        return true;
-}
-
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -1282,7 +1207,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                 return ret;
         }
 
-        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+        ret = i915_gem_object_set_to_display_plane(obj);
         if (ret != 0) {
                 i915_gem_object_unpin(obj);
                 mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1418,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
         int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
         u32 temp;
         int tries = 5, j, n;
+        u32 pipe_bpc;
+
+        temp = I915_READ(pipeconf_reg);
+        pipe_bpc = temp & PIPE_BPC_MASK;
 
         /* XXX: When our outputs are all unaware of DPMS modes other than off
          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1453,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
                 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
                 temp = I915_READ(fdi_rx_reg);
+                /*
+                 * make the BPC in FDI Rx be consistent with that in
+                 * pipeconf reg.
+                 */
+                temp &= ~(0x7 << 16);
+                temp |= (pipe_bpc << 11);
                 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
                                 FDI_SEL_PCDCLK |
                                 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1601,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
                 /* enable PCH transcoder */
                 temp = I915_READ(transconf_reg);
+                /*
+                 * make the BPC in transcoder be consistent with
+                 * that in pipeconf reg.
+                 */
+                temp &= ~PIPE_BPC_MASK;
+                temp |= pipe_bpc;
                 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
                 I915_READ(transconf_reg);
 
@@ -1745,6 +1686,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                 I915_READ(fdi_tx_reg);
 
                 temp = I915_READ(fdi_rx_reg);
+                /* BPC in FDI rx is consistent with that in pipeconf */
+                temp &= ~(0x07 << 16);
+                temp |= (pipe_bpc << 11);
                 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
                 I915_READ(fdi_rx_reg);
 
@@ -1789,7 +1733,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                                 }
                         }
                 }
-
+                temp = I915_READ(transconf_reg);
+                /* BPC in transcoder is consistent with that in pipeconf */
+                temp &= ~PIPE_BPC_MASK;
+                temp |= pipe_bpc;
+                I915_WRITE(transconf_reg, temp);
+                I915_READ(transconf_reg);
                 udelay(100);
 
                 /* disable PCH DPLL */
@@ -2448,7 +2397,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
  * A value of 5us seems to be a good balance; safe for very low end
  * platforms but not overly aggressive on lower latency configs.
  */
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
@@ -2559,7 +2508,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
         /* Calc sr entries for one plane configs */
         if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
                 /* self-refresh has much higher latency */
-                const static int sr_latency_ns = 12000;
+                static const int sr_latency_ns = 12000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
                 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2598,7 +2547,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
         /* Calc sr entries for one plane configs */
         if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
                 /* self-refresh has much higher latency */
-                const static int sr_latency_ns = 12000;
+                static const int sr_latency_ns = 12000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
                 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2667,7 +2616,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
         if (HAS_FW_BLC(dev) && sr_hdisplay &&
             (!planea_clock || !planeb_clock)) {
                 /* self-refresh has much higher latency */
-                const static int sr_latency_ns = 6000;
+                static const int sr_latency_ns = 6000;
 
                 sr_clock = planea_clock ? planea_clock : planeb_clock;
                 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2906,10 +2855,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                 return -EINVAL;
         }
 
-        if (is_lvds && limit->find_reduced_pll &&
-            dev_priv->lvds_downclock_avail) {
-                memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
-                has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+        if (is_lvds && dev_priv->lvds_downclock_avail) {
+                has_reduced_clock = limit->find_pll(limit, crtc,
                                                             dev_priv->lvds_downclock,
                                                             refclk,
                                                             &reduced_clock);
@@ -2969,6 +2916,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
         /* determine panel color depth */
         temp = I915_READ(pipeconf_reg);
+        temp &= ~PIPE_BPC_MASK;
+        if (is_lvds) {
+                int lvds_reg = I915_READ(PCH_LVDS);
+                /* the BPC will be 6 if it is 18-bit LVDS panel */
+                if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+                        temp |= PIPE_8BPC;
+                else
+                        temp |= PIPE_6BPC;
+        } else if (is_edp) {
+                switch (dev_priv->edp_bpp/3) {
+                case 8:
+                        temp |= PIPE_8BPC;
+                        break;
+                case 10:
+                        temp |= PIPE_10BPC;
+                        break;
+                case 6:
+                        temp |= PIPE_6BPC;
+                        break;
+                case 12:
+                        temp |= PIPE_12BPC;
+                        break;
+                }
+        } else
+                temp |= PIPE_8BPC;
+        I915_WRITE(pipeconf_reg, temp);
+        I915_READ(pipeconf_reg);
 
         switch (temp & PIPE_BPC_MASK) {
         case PIPE_8BPC:
@@ -3195,7 +3169,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                  * appropriately here, but we need to look more thoroughly into how
                  * panels behave in the two modes.
                  */
-
+                /* set the dithering flag */
+                if (IS_I965G(dev)) {
+                        if (dev_priv->lvds_dither) {
+                                if (IS_IRONLAKE(dev))
+                                        pipeconf |= PIPE_ENABLE_DITHER;
+                                else
+                                        lvds |= LVDS_ENABLE_DITHER;
+                        } else {
+                                if (IS_IRONLAKE(dev))
+                                        pipeconf &= ~PIPE_ENABLE_DITHER;
+                                else
+                                        lvds &= ~LVDS_ENABLE_DITHER;
+                        }
+                }
                 I915_WRITE(lvds_reg, lvds);
                 I915_READ(lvds_reg);
         }
@@ -3385,7 +3372,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
         /* we only need to pin inside GTT if cursor is non-phy */
         mutex_lock(&dev->struct_mutex);
-        if (!dev_priv->cursor_needs_physical) {
+        if (!dev_priv->info->cursor_needs_physical) {
                 ret = i915_gem_object_pin(bo, PAGE_SIZE);
                 if (ret) {
                         DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3407,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
         I915_WRITE(base, addr);
 
         if (intel_crtc->cursor_bo) {
-                if (dev_priv->cursor_needs_physical) {
+                if (dev_priv->info->cursor_needs_physical) {
                         if (intel_crtc->cursor_bo != bo)
                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
                 } else
@@ -3779,125 +3766,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
         queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        if (IS_IRONLAKE(dev))
-                return;
-
-        if (!dev_priv->render_reclock_avail) {
-                DRM_DEBUG_DRIVER("not reclocking render clock\n");
-                return;
-        }
-
-        /* Restore render clock frequency to original value */
-        if (IS_G4X(dev) || IS_I9XX(dev))
-                pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
-        else if (IS_I85X(dev))
-                pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-        DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
-        /* Schedule downclock */
-        if (schedule)
-                mod_timer(&dev_priv->idle_timer, jiffies +
-                          msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        if (IS_IRONLAKE(dev))
-                return;
-
-        if (!dev_priv->render_reclock_avail) {
-                DRM_DEBUG_DRIVER("not reclocking render clock\n");
-                return;
-        }
-
-        if (IS_G4X(dev)) {
-                u16 gcfgc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-                /* Down to minimum... */
-                gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
-                gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
-                pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-        } else if (IS_I965G(dev)) {
-                u16 gcfgc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-                /* Down to minimum... */
-                gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
-                gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
-                pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-        } else if (IS_I945G(dev) || IS_I945GM(dev)) {
-                u16 gcfgc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-                /* Down to minimum... */
-                gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
-                gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
-                pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-        } else if (IS_I915G(dev)) {
-                u16 gcfgc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-                /* Down to minimum... */
-                gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
-                gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
-                pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-        } else if (IS_I85X(dev)) {
-                u16 hpllcc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
-                /* Up to maximum... */
-                hpllcc &= ~GC_CLOCK_CONTROL_MASK;
-                hpllcc |= GC_CLOCK_133_200;
-
-                pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
-        }
-        DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
-        if (IS_IRONLAKE(dev))
-                return;
-
-        if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
-            IS_I915GM(dev)) {
-                u16 gcfgc;
-
-                /* Adjust render clock... */
-                pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-                /* Down to minimum... */
-                gcfgc &= ~0xf0;
-                gcfgc |= 0x80;
-
-                pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-        }
-}
-
 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
 
 static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +3879,6 @@ static void intel_idle_update(struct work_struct *work)
 
         mutex_lock(&dev->struct_mutex);
 
-        /* GPU isn't processing, downclock it. */
-        if (!dev_priv->busy) {
-                intel_decrease_renderclock(dev);
-                intel_decrease_displayclock(dev);
-        }
-
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                 /* Skip inactive CRTCs */
                 if (!crtc->fb)
@@ -4050,13 +3912,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 return;
 
-        if (!dev_priv->busy) {
+        if (!dev_priv->busy)
                 dev_priv->busy = true;
-                intel_increase_renderclock(dev, true);
-        } else {
+        else
                 mod_timer(&dev_priv->idle_timer, jiffies +
                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-        }
 
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                 if (!crtc->fb)
@@ -4400,29 +4260,43 @@ static void intel_setup_outputs(struct drm_device *dev)
                 bool found = false;
 
                 if (I915_READ(SDVOB) & SDVO_DETECTED) {
+                        DRM_DEBUG_KMS("probing SDVOB\n");
                         found = intel_sdvo_init(dev, SDVOB);
-                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                 intel_hdmi_init(dev, SDVOB);
+                        }
 
-                        if (!found && SUPPORTS_INTEGRATED_DP(dev))
+                        if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+                                DRM_DEBUG_KMS("probing DP_B\n");
                                 intel_dp_init(dev, DP_B);
+                        }
                 }
 
                 /* Before G4X SDVOC doesn't have its own detect register */
 
-                if (I915_READ(SDVOB) & SDVO_DETECTED)
+                if (I915_READ(SDVOB) & SDVO_DETECTED) {
+                        DRM_DEBUG_KMS("probing SDVOC\n");
                         found = intel_sdvo_init(dev, SDVOC);
+                }
 
                 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
 
-                        if (SUPPORTS_INTEGRATED_HDMI(dev))
+                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                 intel_hdmi_init(dev, SDVOC);
-                        if (SUPPORTS_INTEGRATED_DP(dev))
+                        }
+                        if (SUPPORTS_INTEGRATED_DP(dev)) {
+                                DRM_DEBUG_KMS("probing DP_C\n");
                                 intel_dp_init(dev, DP_C);
+                        }
                 }
 
-                if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+                if (SUPPORTS_INTEGRATED_DP(dev) &&
+                    (I915_READ(DP_D) & DP_DETECTED)) {
+                        DRM_DEBUG_KMS("probing DP_D\n");
                         intel_dp_init(dev, DP_D);
+                }
         } else if (IS_I8XX(dev))
                 intel_dvo_init(dev);
 
@@ -4527,6 +4401,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
         .fb_changed = intelfb_probe,
 };
 
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+        struct drm_gem_object *pwrctx;
+        int ret;
+
+        pwrctx = drm_gem_object_alloc(dev, 4096);
+        if (!pwrctx) {
+                DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+                return NULL;
+        }
+
+        mutex_lock(&dev->struct_mutex);
+        ret = i915_gem_object_pin(pwrctx, 4096);
+        if (ret) {
+                DRM_ERROR("failed to pin power context: %d\n", ret);
+                goto err_unref;
+        }
+
+        ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+        if (ret) {
+                DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+                goto err_unpin;
+        }
+        mutex_unlock(&dev->struct_mutex);
+
+        return pwrctx;
+
+err_unpin:
+        i915_gem_object_unpin(pwrctx);
+err_unref:
+        drm_gem_object_unreference(pwrctx);
+        mutex_unlock(&dev->struct_mutex);
+        return NULL;
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4489,27 @@ void intel_init_clock_gating(struct drm_device *dev)
          * GPU can automatically power down the render unit if given a page
          * to save state.
          */
-        if (I915_HAS_RC6(dev)) {
-                struct drm_gem_object *pwrctx;
-                struct drm_i915_gem_object *obj_priv;
-                int ret;
+        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+                struct drm_i915_gem_object *obj_priv = NULL;
 
                 if (dev_priv->pwrctx) {
                         obj_priv = dev_priv->pwrctx->driver_private;
                 } else {
-                        pwrctx = drm_gem_object_alloc(dev, 4096);
-                        if (!pwrctx) {
-                                DRM_DEBUG("failed to alloc power context, "
-                                          "RC6 disabled\n");
-                                goto out;
-                        }
+                        struct drm_gem_object *pwrctx;
 
-                        ret = i915_gem_object_pin(pwrctx, 4096);
-                        if (ret) {
-                                DRM_ERROR("failed to pin power context: %d\n",
-                                          ret);
-                                drm_gem_object_unreference(pwrctx);
-                                goto out;
+                        pwrctx = intel_alloc_power_context(dev);
+                        if (pwrctx) {
+                                dev_priv->pwrctx = pwrctx;
+                                obj_priv = pwrctx->driver_private;
                         }
-
-                        i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
-                        dev_priv->pwrctx = pwrctx;
-                        obj_priv = pwrctx->driver_private;
                 }
 
-                I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-                I915_WRITE(MCHBAR_RENDER_STANDBY,
-                           I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+                if (obj_priv) {
+                        I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+                        I915_WRITE(MCHBAR_RENDER_STANDBY,
+                                   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+                }
         }
-
-out:
-        return;
 }
 
 /* Set up chip specific display functions */
@@ -4770,7 +4665,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
                 del_timer_sync(&intel_crtc->idle_timer);
         }
 
-        intel_increase_renderclock(dev, false);
         del_timer_sync(&dev_priv->idle_timer);
 
         if (dev_priv->display.disable_fbc)