author	Jesse Barnes <jbarnes@virtuousgeek.org>	2009-08-17 16:31:43 -0400
committer	Eric Anholt <eric@anholt.net>	2009-09-04 16:05:38 -0400
commit	652c393a3368af84359da37c45afc35a91144960 (patch)
tree	35f76866301f0ee61819d6265068ca548d119c0e /drivers/gpu/drm/i915/intel_display.c
parent	043029655816ed4cfc2ed247020ef97e5d637392 (diff)
drm/i915: add dynamic clock frequency control
There are several sources of unnecessary power consumption on Intel graphics
systems. The first is the LVDS clock. TFTs don't suffer from persistence
issues like CRTs, and so we can reduce the LVDS refresh rate when the screen
is idle. It will be automatically upclocked when userspace triggers graphical
activity.

Beyond that, we can enable memory self refresh. This allows the memory to go
into a lower power state when the graphics are idle.

Finally, we can drop some clocks on the gpu itself. All of these things can
be reenabled between frames when GPU activity is triggered, and so there
should be no user visible graphical changes.

Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
 drivers/gpu/drm/i915/intel_display.c | 561 ++++++++++++++++++++++++++++++----
 1 file changed, 530 insertions(+), 31 deletions(-)
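The patch wires all of this to a simple busy/idle pattern: any GPU or scanout activity upclocks the hardware immediately and (re)arms an idle timer, and when that timer expires with no further activity a work item drops the clocks again. As a rough orientation before reading the diff, here is a minimal userspace C sketch of that pattern; the names and structure are illustrative only (the driver itself uses kernel timers and a workqueue, see intel_mark_busy(), intel_gpu_idle_timer() and intel_idle_update() in the diff below), and nothing here is the driver's actual API.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define IDLE_TIMEOUT_MS 500		/* mirrors GPU_IDLE_TIMEOUT in the patch */

struct clock_state {
	bool high;			/* currently running at full clock? */
	struct timespec last_activity;	/* time of the last "busy" event */
};

static long ms_since(const struct timespec *then)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - then->tv_sec) * 1000 +
	       (now.tv_nsec - then->tv_nsec) / 1000000;
}

/* Called whenever the GPU/display is touched (cf. intel_mark_busy). */
static void mark_busy(struct clock_state *cs)
{
	if (!cs->high) {
		printf("upclocking\n");	/* cf. intel_increase_pllclock */
		cs->high = true;
	}
	clock_gettime(CLOCK_MONOTONIC, &cs->last_activity);
}

/* Polled periodically; stands in for the idle timer plus work item. */
static void idle_check(struct clock_state *cs)
{
	if (cs->high && ms_since(&cs->last_activity) >= IDLE_TIMEOUT_MS) {
		printf("idle for %d ms, downclocking\n", IDLE_TIMEOUT_MS);
		cs->high = false;	/* cf. intel_decrease_pllclock */
	}
}

int main(void)
{
	struct clock_state cs = { .high = false };

	mark_busy(&cs);			/* some rendering happened */
	idle_check(&cs);		/* too soon, stays upclocked */

	struct timespec delay = { .tv_sec = 0, .tv_nsec = 600 * 1000000L };
	nanosleep(&delay, NULL);	/* no activity for > IDLE_TIMEOUT_MS */
	idle_check(&cs);		/* now downclocks */
	return 0;
}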
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 748ed50c55ca..3c9445193e3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,6 +38,7 @@
 
 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
 
 typedef struct {
     /* given values */
@@ -67,6 +68,8 @@ struct intel_limit {
 	intel_p2_t p2;
 	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
 			  int, int, intel_clock_t *);
+	bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
+			  int, int, intel_clock_t *);
 };
 
 #define I8XX_DOT_MIN		25000
@@ -261,6 +264,9 @@ static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 		    int target, int refclk, intel_clock_t *best_clock);
 static bool
+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			    int target, int refclk, intel_clock_t *best_clock);
+static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock);
 static bool
@@ -286,6 +292,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 	.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -300,6 +307,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
 	.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -314,6 +322,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
 	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -331,6 +340,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
 	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 	/* below parameter and function is for G4X Chipset Family*/
@@ -348,6 +358,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
 		.p2_fast = G4X_P2_SDVO_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
+	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -364,6 +375,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
 		.p2_fast = G4X_P2_HDMI_DAC_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
+	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -388,6 +400,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
 		.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
+	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -412,6 +425,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
 		.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
+	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
@@ -449,6 +463,7 @@ static const intel_limit_t intel_limits_igd_sdvo = {
 	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_igd_lvds = {
@@ -464,6 +479,7 @@ static const intel_limit_t intel_limits_igd_lvds = {
 	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
 	.find_pll = intel_find_best_PLL,
+	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_igdng_sdvo = {
@@ -688,15 +704,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 
 	memset (best_clock, 0, sizeof (*best_clock));
 
-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-			/* m1 is always 0 in IGD */
-			if (clock.m2 >= clock.m1 && !IS_IGD(dev))
-				break;
-			for (clock.n = limit->n.min; clock.n <= limit->n.max;
-			     clock.n++) {
-				for (clock.p1 = limit->p1.min;
-				     clock.p1 <= limit->p1.max; clock.p1++) {
+	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+		for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+		     clock.m1++) {
+			for (clock.m2 = limit->m2.min;
+			     clock.m2 <= limit->m2.max; clock.m2++) {
+				/* m1 is always 0 in IGD */
+				if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+					break;
+				for (clock.n = limit->n.min;
+				     clock.n <= limit->n.max; clock.n++) {
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
@@ -717,6 +734,46 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return (err != target);
 }
 
+
+static bool
+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			    int target, int refclk, intel_clock_t *best_clock)
+
+{
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+	int err = target;
+	bool found = false;
+
+	memcpy(&clock, best_clock, sizeof(intel_clock_t));
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+			/* m1 is always 0 in IGD */
+			if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+				break;
+			for (clock.n = limit->n.min; clock.n <= limit->n.max;
+			     clock.n++) {
+				int this_err;
+
+				intel_clock(dev, refclk, &clock);
+
+				if (!intel_PLL_is_valid(crtc, &clock))
+					continue;
+
+				this_err = abs(clock.dot - target);
+				if (this_err < err) {
+					*best_clock = clock;
+					err = this_err;
+					found = true;
+				}
+			}
+		}
+	}
+
+	return found;
+}
+
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock)
@@ -747,7 +804,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	max_n = limit->n.max;
 	/* based on hardware requriment prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-		/* based on hardware requirment prefere larger m1,m2, p1 */
+		/* based on hardware requirment prefere larger m1,m2 */
 		for (clock.m1 = limit->m1.max;
 		     clock.m1 >= limit->m1.min; clock.m1--) {
 			for (clock.m2 = limit->m2.max;
@@ -832,15 +889,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 
 	memset(best_clock, 0, sizeof(*best_clock));
 	max_n = limit->n.max;
-	/* based on hardware requriment prefer smaller n to precision */
-	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-		/* based on hardware requirment prefere larger m1,m2, p1 */
-		for (clock.m1 = limit->m1.max;
-		     clock.m1 >= limit->m1.min; clock.m1--) {
-			for (clock.m2 = limit->m2.max;
-			     clock.m2 >= limit->m2.min; clock.m2--) {
-				for (clock.p1 = limit->p1.max;
-				     clock.p1 >= limit->p1.min; clock.p1--) {
+	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+		/* based on hardware requriment prefer smaller n to precision */
+		for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+			/* based on hardware requirment prefere larger m1,m2 */
+			for (clock.m1 = limit->m1.max;
+			     clock.m1 >= limit->m1.min; clock.m1--) {
+				for (clock.m2 = limit->m2.max;
+				     clock.m2 >= limit->m2.min; clock.m2--) {
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
@@ -1030,8 +1086,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
+		obj_priv = intel_fb->obj->driver_private;
 		i915_gem_object_unpin(intel_fb->obj);
 	}
+	intel_increase_pllclock(crtc, true);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!dev->primary->master)
@@ -2054,6 +2113,18 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
+static void g4x_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 fw_blc_self = I915_READ(FW_BLC_SELF);
+
+	if (i915_powersave)
+		fw_blc_self |= FW_BLC_SELF_EN;
+	else
+		fw_blc_self &= ~FW_BLC_SELF_EN;
+	I915_WRITE(FW_BLC_SELF, fw_blc_self);
+}
+
 static void i965_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2105,7 +2176,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	cwm = 2;
 
 	/* Calc sr entries for one plane configs */
-	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+	if (HAS_FW_BLC(dev) && sr_hdisplay &&
+	    (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
 		const static int sr_latency_ns = 6000;
 
@@ -2120,8 +2192,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 		srwm = total_size - sr_entries;
 		if (srwm < 0)
 			srwm = 1;
-		if (IS_I9XX(dev))
-			I915_WRITE(FW_BLC_SELF, (srwm & 0x3f));
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2195,9 +2266,6 @@ static void intel_update_watermarks(struct drm_device *dev)
 	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
 	int enabled = 0, pixel_size = 0;
 
-	if (DSPARB_HWCONTROL(dev))
-		return;
-
 	/* Get the clock config from both planes */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		intel_crtc = to_intel_crtc(crtc);
@@ -2230,7 +2298,9 @@ static void intel_update_watermarks(struct drm_device *dev)
 	else if (IS_IGD(dev))
 		igd_disable_cxsr(dev);
 
-	if (IS_I965G(dev))
+	if (IS_G4X(dev))
+		g4x_update_wm(dev);
+	else if (IS_I965G(dev))
 		i965_update_wm(dev);
 	else if (IS_I9XX(dev) || IS_MOBILE(dev))
 		i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
@@ -2264,9 +2334,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
 	int refclk, num_outputs = 0;
-	intel_clock_t clock;
-	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
-	bool ok, is_sdvo = false, is_dvo = false;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	bool is_edp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -2349,6 +2419,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+		memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
+		has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+							    (adjusted_mode->clock*3/4),
+							    refclk,
+							    &reduced_clock);
+	}
+
 	/* SDVO TV has fixed PLL values depend on its clock range,
 	   this mirrors vbios setting. */
 	if (is_sdvo && is_tv) {
@@ -2394,10 +2472,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 					     link_bw, &m_n);
 	}
 
-	if (IS_IGD(dev))
+	if (IS_IGD(dev)) {
 		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
-	else
+		if (has_reduced_clock)
+			fp2 = (1 << reduced_clock.n) << 16 |
+				reduced_clock.m1 << 8 | reduced_clock.m2;
+	} else {
 		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+		if (has_reduced_clock)
+			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+				reduced_clock.m2;
+	}
 
 	if (!IS_IGDNG(dev))
 		dpll = DPLL_VGA_MODE_DIS;
@@ -2426,6 +2511,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		/* also FPA1 */
 		if (IS_IGDNG(dev))
 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+		if (IS_G4X(dev) && has_reduced_clock)
+			dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 	}
 	switch (clock.p2) {
 	case 5:
@@ -2573,6 +2660,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		udelay(150);
 	}
 
+	if (is_lvds && has_reduced_clock && i915_powersave) {
+		I915_WRITE(fp_reg + 4, fp2);
+		intel_crtc->lowfreq_avail = true;
+		if (HAS_PIPE_CXSR(dev)) {
+			DRM_DEBUG("enabling CxSR downclocking\n");
+			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+		}
+	} else {
+		I915_WRITE(fp_reg + 4, fp);
+		intel_crtc->lowfreq_avail = false;
+		if (HAS_PIPE_CXSR(dev)) {
+			DRM_DEBUG("disabling CxSR downclocking\n");
+			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+		}
+	}
+
 	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
 		   ((adjusted_mode->crtc_htotal - 1) << 16));
 	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -2769,10 +2872,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb;
 	int pipe = intel_crtc->pipe;
 	uint32_t temp = 0;
 	uint32_t adder;
 
+	if (crtc->fb) {
+		intel_fb = to_intel_framebuffer(crtc->fb);
+		intel_mark_busy(dev, intel_fb->obj);
+	}
+
 	if (x < 0) {
 		temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
 		x = -x;
@@ -3070,6 +3179,312 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	return mode;
 }
 
+#define GPU_IDLE_TIMEOUT 500 /* ms */
+
+/* When this timer fires, we've been idle for awhile */
+static void intel_gpu_idle_timer(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("idle timer fired, downclocking\n");
+
+	dev_priv->busy = false;
+
+	schedule_work(&dev_priv->idle_work);
+}
+
+void intel_increase_renderclock(struct drm_device *dev, bool schedule)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (IS_IGDNG(dev))
+		return;
+
+	if (!dev_priv->render_reclock_avail) {
+		DRM_ERROR("not reclocking render clock\n");
+		return;
+	}
+
+	/* Restore render clock frequency to original value */
+	if (IS_G4X(dev) || IS_I9XX(dev))
+		pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
+	else if (IS_I85X(dev))
+		pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
+	DRM_DEBUG("increasing render clock frequency\n");
+
+	/* Schedule downclock */
+	if (schedule)
+		mod_timer(&dev_priv->idle_timer, jiffies +
+			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+}
+
+void intel_decrease_renderclock(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (IS_IGDNG(dev))
+		return;
+
+	if (!dev_priv->render_reclock_avail) {
+		DRM_ERROR("not reclocking render clock\n");
+		return;
+	}
+
+	if (IS_G4X(dev)) {
+		u16 gcfgc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+		/* Down to minimum... */
+		gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
+		gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
+
+		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+	} else if (IS_I965G(dev)) {
+		u16 gcfgc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+		/* Down to minimum... */
+		gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
+		gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
+
+		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		u16 gcfgc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+		/* Down to minimum... */
+		gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
+		gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
+
+		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+	} else if (IS_I915G(dev)) {
+		u16 gcfgc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+		/* Down to minimum... */
+		gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
+		gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
+
+		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+	} else if (IS_I85X(dev)) {
+		u16 hpllcc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
+
+		/* Up to maximum... */
+		hpllcc &= ~GC_CLOCK_CONTROL_MASK;
+		hpllcc |= GC_CLOCK_133_200;
+
+		pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
+	}
+	DRM_DEBUG("decreasing render clock frequency\n");
+}
+
+/* Note that no increase function is needed for this - increase_renderclock()
+ * will also rewrite these bits
+ */
+void intel_decrease_displayclock(struct drm_device *dev)
+{
+	if (IS_IGDNG(dev))
+		return;
+
+	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
+	    IS_I915GM(dev)) {
+		u16 gcfgc;
+
+		/* Adjust render clock... */
+		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+		/* Down to minimum... */
+		gcfgc &= ~0xf0;
+		gcfgc |= 0x80;
+
+		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+	}
+}
+
+#define CRTC_IDLE_TIMEOUT 1000 /* ms */
+
+static void intel_crtc_idle_timer(unsigned long arg)
+{
+	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+
+	DRM_DEBUG("idle timer fired, downclocking\n");
+
+	intel_crtc->busy = false;
+
+	schedule_work(&dev_priv->idle_work);
+}
+
+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int dpll = I915_READ(dpll_reg);
+
+	if (IS_IGDNG(dev))
+		return;
+
+	if (!dev_priv->lvds_downclock_avail)
+		return;
+
+	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+		DRM_DEBUG("upclocking LVDS\n");
+
+		/* Unlock panel regs */
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+
+		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+		I915_WRITE(dpll_reg, dpll);
+		dpll = I915_READ(dpll_reg);
+		intel_wait_for_vblank(dev);
+		dpll = I915_READ(dpll_reg);
+		if (dpll & DISPLAY_RATE_SELECT_FPA1)
+			DRM_DEBUG("failed to upclock LVDS!\n");
+
+		/* ...and lock them again */
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+	}
+
+	/* Schedule downclock */
+	if (schedule)
+		mod_timer(&intel_crtc->idle_timer, jiffies +
+			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+}
+
+static void intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int dpll = I915_READ(dpll_reg);
+
+	if (IS_IGDNG(dev))
+		return;
+
+	if (!dev_priv->lvds_downclock_avail)
+		return;
+
+	/*
+	 * Since this is called by a timer, we should never get here in
+	 * the manual case.
+	 */
+	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+		DRM_DEBUG("downclocking LVDS\n");
+
+		/* Unlock panel regs */
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+
+		dpll |= DISPLAY_RATE_SELECT_FPA1;
+		I915_WRITE(dpll_reg, dpll);
+		dpll = I915_READ(dpll_reg);
+		intel_wait_for_vblank(dev);
+		dpll = I915_READ(dpll_reg);
+		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+			DRM_DEBUG("failed to downclock LVDS!\n");
+
+		/* ...and lock them again */
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+	}
+
+}
+
+/**
+ * intel_idle_update - adjust clocks for idleness
+ * @work: work struct
+ *
+ * Either the GPU or display (or both) went idle. Check the busy status
+ * here and adjust the CRTC and GPU clocks as necessary.
+ */
+static void intel_idle_update(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    idle_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+
+	if (!i915_powersave)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* GPU isn't processing, downclock it. */
+	if (!dev_priv->busy) {
+		intel_decrease_renderclock(dev);
+		intel_decrease_displayclock(dev);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		/* Skip inactive CRTCs */
+		if (!crtc->fb)
+			continue;
+
+		intel_crtc = to_intel_crtc(crtc);
+		if (!intel_crtc->busy)
+			intel_decrease_pllclock(crtc);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * intel_mark_busy - mark the GPU and possibly the display busy
+ * @dev: drm device
+ * @obj: object we're operating on
+ *
+ * Callers can use this function to indicate that the GPU is busy processing
+ * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
+ * buffer), we'll also mark the display as busy, so we know to increase its
+ * clock frequency.
+ */
+void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = NULL;
+	struct intel_framebuffer *intel_fb;
+	struct intel_crtc *intel_crtc;
+
+	dev_priv->busy = true;
+	intel_increase_renderclock(dev, true);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->fb)
+			continue;
+
+		intel_crtc = to_intel_crtc(crtc);
+		intel_fb = to_intel_framebuffer(crtc->fb);
+		if (intel_fb->obj == obj) {
+			if (!intel_crtc->busy) {
+				/* Non-busy -> busy, upclock */
+				intel_increase_pllclock(crtc, true);
+				intel_crtc->busy = true;
+			} else {
+				/* Busy -> busy, put off timer */
+				mod_timer(&intel_crtc->idle_timer, jiffies +
+					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+			}
+		}
+	}
+}
+
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3125,6 +3540,10 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	intel_crtc->mode_set.crtc = &intel_crtc->base;
 	intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
 	intel_crtc->mode_set.num_connectors = 0;
+	intel_crtc->busy = false;
+
+	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
+		    (unsigned long)intel_crtc);
 
 	if (i915_fbpercrtc) {
 
@@ -3362,8 +3781,56 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_changed = intelfb_probe,
 };
 
+void intel_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * Disable clock gating reported to work incorrectly according to the
+	 * specs, but enable as much else as we can.
+	 */
+	if (IS_G4X(dev)) {
+		uint32_t dspclk_gate;
+		I915_WRITE(RENCLK_GATE_D1, 0);
+		I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+			   GS_UNIT_CLOCK_GATE_DISABLE |
+			   CL_UNIT_CLOCK_GATE_DISABLE);
+		I915_WRITE(RAMCLK_GATE_D, 0);
+		dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+			OVRUNIT_CLOCK_GATE_DISABLE |
+			OVCUNIT_CLOCK_GATE_DISABLE;
+		if (IS_GM45(dev))
+			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+	} else if (IS_I965GM(dev)) {
+		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+		I915_WRITE(RENCLK_GATE_D2, 0);
+		I915_WRITE(DSPCLK_GATE_D, 0);
+		I915_WRITE(RAMCLK_GATE_D, 0);
+		I915_WRITE16(DEUC, 0);
+	} else if (IS_I965G(dev)) {
+		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+			   I965_RCC_CLOCK_GATE_DISABLE |
+			   I965_RCPB_CLOCK_GATE_DISABLE |
+			   I965_ISC_CLOCK_GATE_DISABLE |
+			   I965_FBC_CLOCK_GATE_DISABLE);
+		I915_WRITE(RENCLK_GATE_D2, 0);
+	} else if (IS_I9XX(dev)) {
+		u32 dstate = I915_READ(D_STATE);
+
+		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+			DSTATE_DOT_CLOCK_GATING;
+		I915_WRITE(D_STATE, dstate);
+	} else if (IS_I855(dev) || IS_I865G(dev)) {
+		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+	} else if (IS_I830(dev)) {
+		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+	}
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int num_pipe;
 	int i;
 
@@ -3398,15 +3865,47 @@ void intel_modeset_init(struct drm_device *dev)
 	DRM_DEBUG("%d display pipe%s available.\n",
 		  num_pipe, num_pipe > 1 ? "s" : "");
 
+	if (IS_I85X(dev))
+		pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
+	else if (IS_I9XX(dev) || IS_G4X(dev))
+		pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
+
 	for (i = 0; i < num_pipe; i++) {
 		intel_crtc_init(dev, i);
 	}
 
 	intel_setup_outputs(dev);
+
+	intel_init_clock_gating(dev);
+
+	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
+	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
+		    (unsigned long)dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+
+	mutex_lock(&dev->struct_mutex);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		/* Skip inactive CRTCs */
+		if (!crtc->fb)
+			continue;
+
+		intel_crtc = to_intel_crtc(crtc);
+		intel_increase_pllclock(crtc, false);
+		del_timer_sync(&intel_crtc->idle_timer);
+	}
+
+	intel_increase_renderclock(dev, false);
+	del_timer_sync(&dev_priv->idle_timer);
+
+	mutex_unlock(&dev->struct_mutex);
+
 	drm_mode_config_cleanup(dev);
 }
 