Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  1334
1 file changed, 890 insertions(+), 444 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3e985c..0a07d7c9cafc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35/**
36 * RC6 is a special power stage which allows the GPU to enter a very
37 * low-voltage mode when idle, going as low as 0V while in this stage. This
38 * stage is entered automatically when the GPU is idle if RC6 support is
39 * enabled, and the GPU wakes up automatically as soon as a new workload arises.
40 *
41 * There are different RC6 modes available in Intel GPUs, which differ from
42 * each other in the latency required to enter and leave RC6, and in the
43 * voltage consumed by the GPU in the different states.
44 *
45 * The combination of the following flags defines which states the GPU is
46 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
47 * RC6pp is the deepest RC6. Hardware support for them varies according to
48 * the GPU, BIOS, chipset and platform. RC6 is usually the safest state and
49 * the one which brings the most power savings; deeper states save more
50 * power, but require higher latency to switch to and wake up.
51 */
52#define INTEL_RC6_ENABLE (1<<0)
53#define INTEL_RC6p_ENABLE (1<<1)
54#define INTEL_RC6pp_ENABLE (1<<2)
55
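To make the flag semantics concrete, here is a minimal, hypothetical sketch of how a mask built from these bits is typically tested; rc6_mode_allowed() is an illustrative name, not a helper added by this patch:

	/* Hypothetical sketch only; the bits are independent and each one
	 * merely permits its own state. */
	static bool rc6_mode_allowed(int rc6_mask, int mode_flag)
	{
		return (rc6_mask & mode_flag) != 0;
	}

	/* e.g. allow RC6 and deep RC6 but not the deepest state:
	 *   int mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
	 *   rc6_mode_allowed(mask, INTEL_RC6p_ENABLE)  -> true
	 *   rc6_mode_allowed(mask, INTEL_RC6pp_ENABLE) -> false
	 */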
35/* FBC, or Frame Buffer Compression, is a technique employed to compress the 56/* FBC, or Frame Buffer Compression, is a technique employed to compress the
36 * framebuffer contents in-memory, aiming at reducing the required bandwidth 57 * framebuffer contents in-memory, aiming at reducing the required bandwidth
37 * during in-memory transfers and, therefore, reducing power consumption. 58 * during in-memory transfers and, therefore, reducing power consumption.
@@ -43,14 +64,6 @@
43 * i915.i915_enable_fbc parameter 64 * i915.i915_enable_fbc parameter
44 */ 65 */
45 66
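In practice the knob referenced above is a module parameter, so FBC can be forced from the kernel command line; the values reflect the parameter's documented semantics (1 force-enable, 0 disable, -1 per-chip default):

	i915.i915_enable_fbc=1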
46static bool intel_crtc_active(struct drm_crtc *crtc)
47{
48 /* Be paranoid as we can arrive here with only partial
49 * state retrieved from the hardware during setup.
50 */
51 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
52}
53
54static void i8xx_disable_fbc(struct drm_device *dev) 67static void i8xx_disable_fbc(struct drm_device *dev)
55{ 68{
56 struct drm_i915_private *dev_priv = dev->dev_private; 69 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
241 dpfc_ctl &= ~DPFC_CTL_EN; 254 dpfc_ctl &= ~DPFC_CTL_EN;
242 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 255 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
243 256
244 if (IS_IVYBRIDGE(dev))
245 /* WaFbcDisableDpfcClockGating:ivb */
246 I915_WRITE(ILK_DSPCLK_GATE_D,
247 I915_READ(ILK_DSPCLK_GATE_D) &
248 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
249
250 if (IS_HASWELL(dev))
251 /* WaFbcDisableDpfcClockGating:hsw */
252 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
253 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
254 ~HSW_DPFC_GATING_DISABLE);
255
256 DRM_DEBUG_KMS("disabled FBC\n"); 257 DRM_DEBUG_KMS("disabled FBC\n");
257 } 258 }
258} 259}
@@ -282,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
282 if (IS_IVYBRIDGE(dev)) { 283 if (IS_IVYBRIDGE(dev)) {
283 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 284 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
284 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); 285 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
285 /* WaFbcDisableDpfcClockGating:ivb */
286 I915_WRITE(ILK_DSPCLK_GATE_D,
287 I915_READ(ILK_DSPCLK_GATE_D) |
288 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
289 } else { 286 } else {
290 /* WaFbcAsynchFlipDisableFbcQueue:hsw */ 287 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
291 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), 288 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
292 HSW_BYPASS_FBC_QUEUE); 289 HSW_BYPASS_FBC_QUEUE);
293 /* WaFbcDisableDpfcClockGating:hsw */
294 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
295 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
296 HSW_DPFC_GATING_DISABLE);
297 } 290 }
298 291
299 I915_WRITE(SNB_DPFC_CTL_SA, 292 I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
378 371
379 intel_cancel_fbc_work(dev_priv); 372 intel_cancel_fbc_work(dev_priv);
380 373
381 work = kzalloc(sizeof *work, GFP_KERNEL); 374 work = kzalloc(sizeof(*work), GFP_KERNEL);
382 if (work == NULL) { 375 if (work == NULL) {
383 DRM_ERROR("Failed to allocate FBC work structure\n"); 376 DRM_ERROR("Failed to allocate FBC work structure\n");
384 dev_priv->display.enable_fbc(crtc, interval); 377 dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@ void intel_update_fbc(struct drm_device *dev)
458 struct drm_framebuffer *fb; 451 struct drm_framebuffer *fb;
459 struct intel_framebuffer *intel_fb; 452 struct intel_framebuffer *intel_fb;
460 struct drm_i915_gem_object *obj; 453 struct drm_i915_gem_object *obj;
461 unsigned int max_hdisplay, max_vdisplay; 454 const struct drm_display_mode *adjusted_mode;
455 unsigned int max_width, max_height;
462 456
463 if (!I915_HAS_FBC(dev)) { 457 if (!I915_HAS_FBC(dev)) {
464 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); 458 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@ void intel_update_fbc(struct drm_device *dev)
482 */ 476 */
483 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 477 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
484 if (intel_crtc_active(tmp_crtc) && 478 if (intel_crtc_active(tmp_crtc) &&
485 !to_intel_crtc(tmp_crtc)->primary_disabled) { 479 to_intel_crtc(tmp_crtc)->primary_enabled) {
486 if (crtc) { 480 if (crtc) {
487 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) 481 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
488 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 482 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
502 fb = crtc->fb; 496 fb = crtc->fb;
503 intel_fb = to_intel_framebuffer(fb); 497 intel_fb = to_intel_framebuffer(fb);
504 obj = intel_fb->obj; 498 obj = intel_fb->obj;
499 adjusted_mode = &intel_crtc->config.adjusted_mode;
505 500
506 if (i915_enable_fbc < 0 && 501 if (i915_enable_fbc < 0 &&
507 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { 502 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@ void intel_update_fbc(struct drm_device *dev)
514 DRM_DEBUG_KMS("fbc disabled per module param\n"); 509 DRM_DEBUG_KMS("fbc disabled per module param\n");
515 goto out_disable; 510 goto out_disable;
516 } 511 }
517 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 512 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
518 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 513 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
519 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) 514 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
520 DRM_DEBUG_KMS("mode incompatible with compression, " 515 DRM_DEBUG_KMS("mode incompatible with compression, "
521 "disabling\n"); 516 "disabling\n");
@@ -523,14 +518,14 @@ void intel_update_fbc(struct drm_device *dev)
523 } 518 }
524 519
525 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 520 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
526 max_hdisplay = 4096; 521 max_width = 4096;
527 max_vdisplay = 2048; 522 max_height = 2048;
528 } else { 523 } else {
529 max_hdisplay = 2048; 524 max_width = 2048;
530 max_vdisplay = 1536; 525 max_height = 1536;
531 } 526 }
532 if ((crtc->mode.hdisplay > max_hdisplay) || 527 if (intel_crtc->config.pipe_src_w > max_width ||
533 (crtc->mode.vdisplay > max_vdisplay)) { 528 intel_crtc->config.pipe_src_h > max_height) {
534 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) 529 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
535 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 530 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
536 goto out_disable; 531 goto out_disable;
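The size gate above reduces to a simple predicate on the pipe source dimensions. A standalone sketch (fbc_pipe_src_fits() is an illustrative helper, not a function added by this patch):

	static bool fbc_pipe_src_fits(struct drm_device *dev, int src_w, int src_h)
	{
		int max_width, max_height;

		if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
			max_width = 4096;	/* G4X and gen5+ */
			max_height = 2048;
		} else {
			max_width = 2048;	/* gen2/gen3 */
			max_height = 1536;
		}

		return src_w <= max_width && src_h <= max_height;
	}

For example, a 4096x2304 pipe source fails even on gen5+ parts, since 2304 exceeds the 2048-line limit, and FBC is disabled with FBC_MODE_TOO_LARGE.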
@@ -1087,8 +1082,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1087 return enabled; 1082 return enabled;
1088} 1083}
1089 1084
1090static void pineview_update_wm(struct drm_device *dev) 1085static void pineview_update_wm(struct drm_crtc *unused_crtc)
1091{ 1086{
1087 struct drm_device *dev = unused_crtc->dev;
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1088 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_crtc *crtc; 1089 struct drm_crtc *crtc;
1094 const struct cxsr_latency *latency; 1090 const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@ static void pineview_update_wm(struct drm_device *dev)
1105 1101
1106 crtc = single_enabled_crtc(dev); 1102 crtc = single_enabled_crtc(dev);
1107 if (crtc) { 1103 if (crtc) {
1108 int clock = crtc->mode.clock; 1104 const struct drm_display_mode *adjusted_mode;
1109 int pixel_size = crtc->fb->bits_per_pixel / 8; 1105 int pixel_size = crtc->fb->bits_per_pixel / 8;
1106 int clock;
1107
1108 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1109 clock = adjusted_mode->crtc_clock;
1110 1110
1111 /* Display SR */ 1111 /* Display SR */
1112 wm = intel_calculate_wm(clock, &pineview_display_wm, 1112 wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1166 int *cursor_wm) 1166 int *cursor_wm)
1167{ 1167{
1168 struct drm_crtc *crtc; 1168 struct drm_crtc *crtc;
1169 const struct drm_display_mode *adjusted_mode;
1169 int htotal, hdisplay, clock, pixel_size; 1170 int htotal, hdisplay, clock, pixel_size;
1170 int line_time_us, line_count; 1171 int line_time_us, line_count;
1171 int entries, tlb_miss; 1172 int entries, tlb_miss;
@@ -1177,9 +1178,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1177 return false; 1178 return false;
1178 } 1179 }
1179 1180
1180 htotal = crtc->mode.htotal; 1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1181 hdisplay = crtc->mode.hdisplay; 1182 clock = adjusted_mode->crtc_clock;
1182 clock = crtc->mode.clock; 1183 htotal = adjusted_mode->htotal;
1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1183 pixel_size = crtc->fb->bits_per_pixel / 8; 1185 pixel_size = crtc->fb->bits_per_pixel / 8;
1184 1186
1185 /* Use the small buffer method to calculate plane watermark */ 1187 /* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1250 int *display_wm, int *cursor_wm) 1252 int *display_wm, int *cursor_wm)
1251{ 1253{
1252 struct drm_crtc *crtc; 1254 struct drm_crtc *crtc;
1255 const struct drm_display_mode *adjusted_mode;
1253 int hdisplay, htotal, pixel_size, clock; 1256 int hdisplay, htotal, pixel_size, clock;
1254 unsigned long line_time_us; 1257 unsigned long line_time_us;
1255 int line_count, line_size; 1258 int line_count, line_size;
@@ -1262,9 +1265,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1262 } 1265 }
1263 1266
1264 crtc = intel_get_crtc_for_plane(dev, plane); 1267 crtc = intel_get_crtc_for_plane(dev, plane);
1265 hdisplay = crtc->mode.hdisplay; 1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1266 htotal = crtc->mode.htotal; 1269 clock = adjusted_mode->crtc_clock;
1267 clock = crtc->mode.clock; 1270 htotal = adjusted_mode->htotal;
1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1268 pixel_size = crtc->fb->bits_per_pixel / 8; 1272 pixel_size = crtc->fb->bits_per_pixel / 8;
1269 1273
1270 line_time_us = (htotal * 1000) / clock; 1274 line_time_us = (htotal * 1000) / clock;
@@ -1303,7 +1307,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1303 if (!intel_crtc_active(crtc)) 1307 if (!intel_crtc_active(crtc))
1304 return false; 1308 return false;
1305 1309
1306 clock = crtc->mode.clock; /* VESA DOT Clock */ 1310 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1307 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ 1311 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1308 1312
1309 entries = (clock / 1000) * pixel_size; 1313 entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
1365 1369
1366#define single_plane_enabled(mask) is_power_of_2(mask) 1370#define single_plane_enabled(mask) is_power_of_2(mask)
1367 1371
1368static void valleyview_update_wm(struct drm_device *dev) 1372static void valleyview_update_wm(struct drm_crtc *crtc)
1369{ 1373{
1374 struct drm_device *dev = crtc->dev;
1370 static const int sr_latency_ns = 12000; 1375 static const int sr_latency_ns = 12000;
1371 struct drm_i915_private *dev_priv = dev->dev_private; 1376 struct drm_i915_private *dev_priv = dev->dev_private;
1372 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1377 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@ static void valleyview_update_wm(struct drm_device *dev)
1424 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1429 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1425} 1430}
1426 1431
1427static void g4x_update_wm(struct drm_device *dev) 1432static void g4x_update_wm(struct drm_crtc *crtc)
1428{ 1433{
1434 struct drm_device *dev = crtc->dev;
1429 static const int sr_latency_ns = 12000; 1435 static const int sr_latency_ns = 12000;
1430 struct drm_i915_private *dev_priv = dev->dev_private; 1436 struct drm_i915_private *dev_priv = dev->dev_private;
1431 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1437 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@ static void g4x_update_wm(struct drm_device *dev)
1476 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1482 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1477} 1483}
1478 1484
1479static void i965_update_wm(struct drm_device *dev) 1485static void i965_update_wm(struct drm_crtc *unused_crtc)
1480{ 1486{
1487 struct drm_device *dev = unused_crtc->dev;
1481 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct drm_i915_private *dev_priv = dev->dev_private;
1482 struct drm_crtc *crtc; 1489 struct drm_crtc *crtc;
1483 int srwm = 1; 1490 int srwm = 1;
@@ -1488,9 +1495,11 @@ static void i965_update_wm(struct drm_device *dev)
1488 if (crtc) { 1495 if (crtc) {
1489 /* self-refresh has much higher latency */ 1496 /* self-refresh has much higher latency */
1490 static const int sr_latency_ns = 12000; 1497 static const int sr_latency_ns = 12000;
1491 int clock = crtc->mode.clock; 1498 const struct drm_display_mode *adjusted_mode =
1492 int htotal = crtc->mode.htotal; 1499 &to_intel_crtc(crtc)->config.adjusted_mode;
1493 int hdisplay = crtc->mode.hdisplay; 1500 int clock = adjusted_mode->crtc_clock;
1501 int htotal = adjusted_mode->htotal;
1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1494 int pixel_size = crtc->fb->bits_per_pixel / 8; 1503 int pixel_size = crtc->fb->bits_per_pixel / 8;
1495 unsigned long line_time_us; 1504 unsigned long line_time_us;
1496 int entries; 1505 int entries;
@@ -1541,8 +1550,9 @@ static void i965_update_wm(struct drm_device *dev)
1541 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1550 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1542} 1551}
1543 1552
1544static void i9xx_update_wm(struct drm_device *dev) 1553static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1545{ 1554{
1555 struct drm_device *dev = unused_crtc->dev;
1546 struct drm_i915_private *dev_priv = dev->dev_private; 1556 struct drm_i915_private *dev_priv = dev->dev_private;
1547 const struct intel_watermark_params *wm_info; 1557 const struct intel_watermark_params *wm_info;
1548 uint32_t fwater_lo; 1558 uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1562 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1572 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1563 crtc = intel_get_crtc_for_plane(dev, 0); 1573 crtc = intel_get_crtc_for_plane(dev, 0);
1564 if (intel_crtc_active(crtc)) { 1574 if (intel_crtc_active(crtc)) {
1575 const struct drm_display_mode *adjusted_mode;
1565 int cpp = crtc->fb->bits_per_pixel / 8; 1576 int cpp = crtc->fb->bits_per_pixel / 8;
1566 if (IS_GEN2(dev)) 1577 if (IS_GEN2(dev))
1567 cpp = 4; 1578 cpp = 4;
1568 1579
1569 planea_wm = intel_calculate_wm(crtc->mode.clock, 1580 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1581 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1570 wm_info, fifo_size, cpp, 1582 wm_info, fifo_size, cpp,
1571 latency_ns); 1583 latency_ns);
1572 enabled = crtc; 1584 enabled = crtc;
@@ -1576,11 +1588,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1576 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1588 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1577 crtc = intel_get_crtc_for_plane(dev, 1); 1589 crtc = intel_get_crtc_for_plane(dev, 1);
1578 if (intel_crtc_active(crtc)) { 1590 if (intel_crtc_active(crtc)) {
1591 const struct drm_display_mode *adjusted_mode;
1579 int cpp = crtc->fb->bits_per_pixel / 8; 1592 int cpp = crtc->fb->bits_per_pixel / 8;
1580 if (IS_GEN2(dev)) 1593 if (IS_GEN2(dev))
1581 cpp = 4; 1594 cpp = 4;
1582 1595
1583 planeb_wm = intel_calculate_wm(crtc->mode.clock, 1596 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1597 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1584 wm_info, fifo_size, cpp, 1598 wm_info, fifo_size, cpp,
1585 latency_ns); 1599 latency_ns);
1586 if (enabled == NULL) 1600 if (enabled == NULL)
@@ -1607,9 +1621,11 @@ static void i9xx_update_wm(struct drm_device *dev)
1607 if (HAS_FW_BLC(dev) && enabled) { 1621 if (HAS_FW_BLC(dev) && enabled) {
1608 /* self-refresh has much higher latency */ 1622 /* self-refresh has much higher latency */
1609 static const int sr_latency_ns = 6000; 1623 static const int sr_latency_ns = 6000;
1610 int clock = enabled->mode.clock; 1624 const struct drm_display_mode *adjusted_mode =
1611 int htotal = enabled->mode.htotal; 1625 &to_intel_crtc(enabled)->config.adjusted_mode;
1612 int hdisplay = enabled->mode.hdisplay; 1626 int clock = adjusted_mode->crtc_clock;
1627 int htotal = adjusted_mode->htotal;
1628 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1613 int pixel_size = enabled->fb->bits_per_pixel / 8; 1629 int pixel_size = enabled->fb->bits_per_pixel / 8;
1614 unsigned long line_time_us; 1630 unsigned long line_time_us;
1615 int entries; 1631 int entries;
@@ -1658,10 +1674,12 @@ static void i9xx_update_wm(struct drm_device *dev)
1658 } 1674 }
1659} 1675}
1660 1676
1661static void i830_update_wm(struct drm_device *dev) 1677static void i830_update_wm(struct drm_crtc *unused_crtc)
1662{ 1678{
1679 struct drm_device *dev = unused_crtc->dev;
1663 struct drm_i915_private *dev_priv = dev->dev_private; 1680 struct drm_i915_private *dev_priv = dev->dev_private;
1664 struct drm_crtc *crtc; 1681 struct drm_crtc *crtc;
1682 const struct drm_display_mode *adjusted_mode;
1665 uint32_t fwater_lo; 1683 uint32_t fwater_lo;
1666 int planea_wm; 1684 int planea_wm;
1667 1685
@@ -1669,7 +1687,9 @@ static void i830_update_wm(struct drm_device *dev)
1669 if (crtc == NULL) 1687 if (crtc == NULL)
1670 return; 1688 return;
1671 1689
1672 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 1690 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1691 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1692 &i830_wm_info,
1673 dev_priv->display.get_fifo_size(dev, 0), 1693 dev_priv->display.get_fifo_size(dev, 0),
1674 4, latency_ns); 1694 4, latency_ns);
1675 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1695 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1741 int *fbc_wm, int *display_wm, int *cursor_wm) 1761 int *fbc_wm, int *display_wm, int *cursor_wm)
1742{ 1762{
1743 struct drm_crtc *crtc; 1763 struct drm_crtc *crtc;
1764 const struct drm_display_mode *adjusted_mode;
1744 unsigned long line_time_us; 1765 unsigned long line_time_us;
1745 int hdisplay, htotal, pixel_size, clock; 1766 int hdisplay, htotal, pixel_size, clock;
1746 int line_count, line_size; 1767 int line_count, line_size;
@@ -1753,9 +1774,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1753 } 1774 }
1754 1775
1755 crtc = intel_get_crtc_for_plane(dev, plane); 1776 crtc = intel_get_crtc_for_plane(dev, plane);
1756 hdisplay = crtc->mode.hdisplay; 1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1757 htotal = crtc->mode.htotal; 1778 clock = adjusted_mode->crtc_clock;
1758 clock = crtc->mode.clock; 1779 htotal = adjusted_mode->htotal;
1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1759 pixel_size = crtc->fb->bits_per_pixel / 8; 1781 pixel_size = crtc->fb->bits_per_pixel / 8;
1760 1782
1761 line_time_us = (htotal * 1000) / clock; 1783 line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1785 display, cursor); 1807 display, cursor);
1786} 1808}
1787 1809
1788static void ironlake_update_wm(struct drm_device *dev) 1810static void ironlake_update_wm(struct drm_crtc *crtc)
1789{ 1811{
1812 struct drm_device *dev = crtc->dev;
1790 struct drm_i915_private *dev_priv = dev->dev_private; 1813 struct drm_i915_private *dev_priv = dev->dev_private;
1791 int fbc_wm, plane_wm, cursor_wm; 1814 int fbc_wm, plane_wm, cursor_wm;
1792 unsigned int enabled; 1815 unsigned int enabled;
@@ -1868,8 +1891,9 @@ static void ironlake_update_wm(struct drm_device *dev)
1868 */ 1891 */
1869} 1892}
1870 1893
1871static void sandybridge_update_wm(struct drm_device *dev) 1894static void sandybridge_update_wm(struct drm_crtc *crtc)
1872{ 1895{
1896 struct drm_device *dev = crtc->dev;
1873 struct drm_i915_private *dev_priv = dev->dev_private; 1897 struct drm_i915_private *dev_priv = dev->dev_private;
1874 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ 1898 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1875 u32 val; 1899 u32 val;
@@ -1970,8 +1994,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
1970 cursor_wm); 1994 cursor_wm);
1971} 1995}
1972 1996
1973static void ivybridge_update_wm(struct drm_device *dev) 1997static void ivybridge_update_wm(struct drm_crtc *crtc)
1974{ 1998{
1999 struct drm_device *dev = crtc->dev;
1975 struct drm_i915_private *dev_priv = dev->dev_private; 2000 struct drm_i915_private *dev_priv = dev->dev_private;
1976 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ 2001 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1977 u32 val; 2002 u32 val;
@@ -2098,7 +2123,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2099 uint32_t pixel_rate; 2124 uint32_t pixel_rate;
2100 2125
2101 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2126 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
2102 2127
2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2128 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2104 * adjust the pixel_rate here. */ 2129 * adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2107 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 2132 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2108 uint32_t pfit_size = intel_crtc->config.pch_pfit.size; 2133 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
2109 2134
2110 pipe_w = intel_crtc->config.requested_mode.hdisplay; 2135 pipe_w = intel_crtc->config.pipe_src_w;
2111 pipe_h = intel_crtc->config.requested_mode.vdisplay; 2136 pipe_h = intel_crtc->config.pipe_src_h;
2112 pfit_w = (pfit_size >> 16) & 0xFFFF; 2137 pfit_w = (pfit_size >> 16) & 0xFFFF;
2113 pfit_h = pfit_size & 0xFFFF; 2138 pfit_h = pfit_size & 0xFFFF;
2114 if (pipe_w < pfit_w) 2139 if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@ struct hsw_wm_maximums {
2176 uint16_t fbc; 2201 uint16_t fbc;
2177}; 2202};
2178 2203
2179struct hsw_wm_values {
2180 uint32_t wm_pipe[3];
2181 uint32_t wm_lp[3];
2182 uint32_t wm_lp_spr[3];
2183 uint32_t wm_linetime[3];
2184 bool enable_fbc_wm;
2185};
2186
2187/* used in computing the new watermarks state */ 2204/* used in computing the new watermarks state */
2188struct intel_wm_config { 2205struct intel_wm_config {
2189 unsigned int num_pipes_active; 2206 unsigned int num_pipes_active;
2190 bool sprites_enabled; 2207 bool sprites_enabled;
2191 bool sprites_scaled; 2208 bool sprites_scaled;
2192 bool fbc_wm_enabled;
2193}; 2209};
2194 2210
2195/* 2211/*
2196 * For both WM_PIPE and WM_LP. 2212 * For both WM_PIPE and WM_LP.
2197 * mem_value must be in 0.1us units. 2213 * mem_value must be in 0.1us units.
2198 */ 2214 */
2199static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, 2215static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
2200 uint32_t mem_value, 2216 uint32_t mem_value,
2201 bool is_lp) 2217 bool is_lp)
2202{ 2218{
@@ -2225,7 +2241,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2225 * For both WM_PIPE and WM_LP. 2241 * For both WM_PIPE and WM_LP.
2226 * mem_value must be in 0.1us units. 2242 * mem_value must be in 0.1us units.
2227 */ 2243 */
2228static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params, 2244static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
2229 uint32_t mem_value) 2245 uint32_t mem_value)
2230{ 2246{
2231 uint32_t method1, method2; 2247 uint32_t method1, method2;
@@ -2248,7 +2264,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2248 * For both WM_PIPE and WM_LP. 2264 * For both WM_PIPE and WM_LP.
2249 * mem_value must be in 0.1us units. 2265 * mem_value must be in 0.1us units.
2250 */ 2266 */
2251static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params, 2267static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
2252 uint32_t mem_value) 2268 uint32_t mem_value)
2253{ 2269{
2254 if (!params->active || !params->cur.enabled) 2270 if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2262} 2278}
2263 2279
2264/* Only for WM_LP. */ 2280/* Only for WM_LP. */
2265static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params, 2281static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
2266 uint32_t pri_val) 2282 uint32_t pri_val)
2267{ 2283{
2268 if (!params->active || !params->pri.enabled) 2284 if (!params->active || !params->pri.enabled)
@@ -2275,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2275 2291
2276static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 2292static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2277{ 2293{
2278 if (INTEL_INFO(dev)->gen >= 7) 2294 if (INTEL_INFO(dev)->gen >= 8)
2295 return 3072;
2296 else if (INTEL_INFO(dev)->gen >= 7)
2279 return 768; 2297 return 768;
2280 else 2298 else
2281 return 512; 2299 return 512;
@@ -2320,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2320 } 2338 }
2321 2339
2322 /* clamp to max that the registers can hold */ 2340 /* clamp to max that the registers can hold */
2323 if (INTEL_INFO(dev)->gen >= 7) 2341 if (INTEL_INFO(dev)->gen >= 8)
2342 max = level == 0 ? 255 : 2047;
2343 else if (INTEL_INFO(dev)->gen >= 7)
2324 /* IVB/HSW primary/sprite plane watermarks */ 2344 /* IVB/HSW primary/sprite plane watermarks */
2325 max = level == 0 ? 127 : 1023; 2345 max = level == 0 ? 127 : 1023;
2326 else if (!is_sprite) 2346 else if (!is_sprite)
@@ -2350,27 +2370,30 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2350} 2370}
2351 2371
2352/* Calculate the maximum FBC watermark */ 2372/* Calculate the maximum FBC watermark */
2353static unsigned int ilk_fbc_wm_max(void) 2373static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
2354{ 2374{
2355 /* max that registers can hold */ 2375 /* max that registers can hold */
2356 return 15; 2376 if (INTEL_INFO(dev)->gen >= 8)
2377 return 31;
2378 else
2379 return 15;
2357} 2380}
2358 2381
2359static void ilk_wm_max(struct drm_device *dev, 2382static void ilk_compute_wm_maximums(struct drm_device *dev,
2360 int level, 2383 int level,
2361 const struct intel_wm_config *config, 2384 const struct intel_wm_config *config,
2362 enum intel_ddb_partitioning ddb_partitioning, 2385 enum intel_ddb_partitioning ddb_partitioning,
2363 struct hsw_wm_maximums *max) 2386 struct hsw_wm_maximums *max)
2364{ 2387{
2365 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 2388 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2366 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 2389 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2367 max->cur = ilk_cursor_wm_max(dev, level, config); 2390 max->cur = ilk_cursor_wm_max(dev, level, config);
2368 max->fbc = ilk_fbc_wm_max(); 2391 max->fbc = ilk_fbc_wm_max(dev);
2369} 2392}
2370 2393
2371static bool ilk_check_wm(int level, 2394static bool ilk_validate_wm_level(int level,
2372 const struct hsw_wm_maximums *max, 2395 const struct hsw_wm_maximums *max,
2373 struct intel_wm_level *result) 2396 struct intel_wm_level *result)
2374{ 2397{
2375 bool ret; 2398 bool ret;
2376 2399
@@ -2406,14 +2429,12 @@ static bool ilk_check_wm(int level,
2406 result->enable = true; 2429 result->enable = true;
2407 } 2430 }
2408 2431
2409 DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2410
2411 return ret; 2432 return ret;
2412} 2433}
2413 2434
2414static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 2435static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2415 int level, 2436 int level,
2416 struct hsw_pipe_wm_parameters *p, 2437 const struct hsw_pipe_wm_parameters *p,
2417 struct intel_wm_level *result) 2438 struct intel_wm_level *result)
2418{ 2439{
2419 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2440 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2455,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2434 result->enable = true; 2455 result->enable = true;
2435} 2456}
2436 2457
2437static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
2438 int level, struct hsw_wm_maximums *max,
2439 struct hsw_pipe_wm_parameters *params,
2440 struct intel_wm_level *result)
2441{
2442 enum pipe pipe;
2443 struct intel_wm_level res[3];
2444
2445 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
2446 ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
2447
2448 result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
2449 result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
2450 result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
2451 result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
2452 result->enable = true;
2453
2454 return ilk_check_wm(level, max, result);
2455}
2456
2457static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2458 enum pipe pipe,
2459 struct hsw_pipe_wm_parameters *params)
2460{
2461 uint32_t pri_val, cur_val, spr_val;
2462 /* WM0 latency values stored in 0.1us units */
2463 uint16_t pri_latency = dev_priv->wm.pri_latency[0];
2464 uint16_t spr_latency = dev_priv->wm.spr_latency[0];
2465 uint16_t cur_latency = dev_priv->wm.cur_latency[0];
2466
2467 pri_val = ilk_compute_pri_wm(params, pri_latency, false);
2468 spr_val = ilk_compute_spr_wm(params, spr_latency);
2469 cur_val = ilk_compute_cur_wm(params, cur_latency);
2470
2471 WARN(pri_val > 127,
2472 "Primary WM error, mode not supported for pipe %c\n",
2473 pipe_name(pipe));
2474 WARN(spr_val > 127,
2475 "Sprite WM error, mode not supported for pipe %c\n",
2476 pipe_name(pipe));
2477 WARN(cur_val > 63,
2478 "Cursor WM error, mode not supported for pipe %c\n",
2479 pipe_name(pipe));
2480
2481 return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2482 (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2483 cur_val;
2484}
2485
2486static uint32_t 2458static uint32_t
2487hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) 2459hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2488{ 2460{
@@ -2554,19 +2526,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2554 wm[3] *= 2; 2526 wm[3] *= 2;
2555} 2527}
2556 2528
2557static void intel_print_wm_latency(struct drm_device *dev, 2529static int ilk_wm_max_level(const struct drm_device *dev)
2558 const char *name,
2559 const uint16_t wm[5])
2560{ 2530{
2561 int level, max_level;
2562
2563 /* how many WM levels are we expecting */ 2531 /* how many WM levels are we expecting */
2564 if (IS_HASWELL(dev)) 2532 if (IS_HASWELL(dev))
2565 max_level = 4; 2533 return 4;
2566 else if (INTEL_INFO(dev)->gen >= 6) 2534 else if (INTEL_INFO(dev)->gen >= 6)
2567 max_level = 3; 2535 return 3;
2568 else 2536 else
2569 max_level = 2; 2537 return 2;
2538}
2539
2540static void intel_print_wm_latency(struct drm_device *dev,
2541 const char *name,
2542 const uint16_t wm[5])
2543{
2544 int level, max_level = ilk_wm_max_level(dev);
2570 2545
2571 for (level = 0; level <= max_level; level++) { 2546 for (level = 0; level <= max_level; level++) {
2572 unsigned int latency = wm[level]; 2547 unsigned int latency = wm[level];
@@ -2606,218 +2581,321 @@ static void intel_setup_wm_latency(struct drm_device *dev)
2606 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2581 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2607} 2582}
2608 2583
2609static void hsw_compute_wm_parameters(struct drm_device *dev, 2584static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2610 struct hsw_pipe_wm_parameters *params, 2585 struct hsw_pipe_wm_parameters *p,
2611 struct hsw_wm_maximums *lp_max_1_2, 2586 struct intel_wm_config *config)
2612 struct hsw_wm_maximums *lp_max_5_6)
2613{ 2587{
2614 struct drm_crtc *crtc; 2588 struct drm_device *dev = crtc->dev;
2589 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2590 enum pipe pipe = intel_crtc->pipe;
2615 struct drm_plane *plane; 2591 struct drm_plane *plane;
2616 enum pipe pipe;
2617 struct intel_wm_config config = {};
2618
2619 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2621 struct hsw_pipe_wm_parameters *p;
2622
2623 pipe = intel_crtc->pipe;
2624 p = &params[pipe];
2625
2626 p->active = intel_crtc_active(crtc);
2627 if (!p->active)
2628 continue;
2629
2630 config.num_pipes_active++;
2631 2592
2593 p->active = intel_crtc_active(crtc);
2594 if (p->active) {
2632 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; 2595 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2633 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2596 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2634 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2597 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2635 p->cur.bytes_per_pixel = 4; 2598 p->cur.bytes_per_pixel = 4;
2636 p->pri.horiz_pixels = 2599 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2637 intel_crtc->config.requested_mode.hdisplay;
2638 p->cur.horiz_pixels = 64; 2600 p->cur.horiz_pixels = 64;
2639 /* TODO: for now, assume primary and cursor planes are always enabled. */ 2601 /* TODO: for now, assume primary and cursor planes are always enabled. */
2640 p->pri.enabled = true; 2602 p->pri.enabled = true;
2641 p->cur.enabled = true; 2603 p->cur.enabled = true;
2642 } 2604 }
2643 2605
2606 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2607 config->num_pipes_active += intel_crtc_active(crtc);
2608
2644 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2609 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2645 struct intel_plane *intel_plane = to_intel_plane(plane); 2610 struct intel_plane *intel_plane = to_intel_plane(plane);
2646 struct hsw_pipe_wm_parameters *p;
2647 2611
2648 pipe = intel_plane->pipe; 2612 if (intel_plane->pipe == pipe)
2649 p = &params[pipe]; 2613 p->spr = intel_plane->wm;
2650 2614
2651 p->spr = intel_plane->wm; 2615 config->sprites_enabled |= intel_plane->wm.enabled;
2652 2616 config->sprites_scaled |= intel_plane->wm.scaled;
2653 config.sprites_enabled |= p->spr.enabled;
2654 config.sprites_scaled |= p->spr.scaled;
2655 } 2617 }
2618}
2619
2620/* Compute new watermarks for the pipe */
2621static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2622 const struct hsw_pipe_wm_parameters *params,
2623 struct intel_pipe_wm *pipe_wm)
2624{
2625 struct drm_device *dev = crtc->dev;
2626 struct drm_i915_private *dev_priv = dev->dev_private;
2627 int level, max_level = ilk_wm_max_level(dev);
2628 /* LP0 watermark maximums depend on this pipe alone */
2629 struct intel_wm_config config = {
2630 .num_pipes_active = 1,
2631 .sprites_enabled = params->spr.enabled,
2632 .sprites_scaled = params->spr.scaled,
2633 };
2634 struct hsw_wm_maximums max;
2656 2635
2657 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); 2636 /* LP0 watermarks always use 1/2 DDB partitioning */
2637 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2658 2638
2659 /* 5/6 split only in single pipe config on IVB+ */ 2639 for (level = 0; level <= max_level; level++)
2660 if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) 2640 ilk_compute_wm_level(dev_priv, level, params,
2661 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6); 2641 &pipe_wm->wm[level]);
2662 else 2642
2663 *lp_max_5_6 = *lp_max_1_2; 2643 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2644
2645 /* At least LP0 must be valid */
2646 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
2664} 2647}
2665 2648
2666static void hsw_compute_wm_results(struct drm_device *dev, 2649/*
2667 struct hsw_pipe_wm_parameters *params, 2650 * Merge the watermarks from all active pipes for a specific level.
2668 struct hsw_wm_maximums *lp_maximums, 2651 */
2669 struct hsw_wm_values *results) 2652static void ilk_merge_wm_level(struct drm_device *dev,
2653 int level,
2654 struct intel_wm_level *ret_wm)
2670{ 2655{
2671 struct drm_i915_private *dev_priv = dev->dev_private; 2656 const struct intel_crtc *intel_crtc;
2672 struct drm_crtc *crtc;
2673 struct intel_wm_level lp_results[4] = {};
2674 enum pipe pipe;
2675 int level, max_level, wm_lp;
2676 2657
2677 for (level = 1; level <= 4; level++) 2658 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2678 if (!hsw_compute_lp_wm(dev_priv, level, 2659 const struct intel_wm_level *wm =
2679 lp_maximums, params, 2660 &intel_crtc->wm.active.wm[level];
2680 &lp_results[level - 1])) 2661
2681 break; 2662 if (!wm->enable)
2682 max_level = level - 1; 2663 return;
2664
2665 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2666 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2667 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2668 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2669 }
2670
2671 ret_wm->enable = true;
2672}
2683 2673
2684 memset(results, 0, sizeof(*results)); 2674/*
2675 * Merge all low power watermarks for all active pipes.
2676 */
2677static void ilk_wm_merge(struct drm_device *dev,
2678 const struct hsw_wm_maximums *max,
2679 struct intel_pipe_wm *merged)
2680{
2681 int level, max_level = ilk_wm_max_level(dev);
2682
2683 merged->fbc_wm_enabled = true;
2685 2684
2686 /* The spec says it is preferred to disable FBC WMs instead of disabling 2685 /* merge each WM1+ level */
2687 * a WM level. */
2688 results->enable_fbc_wm = true;
2689 for (level = 1; level <= max_level; level++) { 2686 for (level = 1; level <= max_level; level++) {
2690 if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { 2687 struct intel_wm_level *wm = &merged->wm[level];
2691 results->enable_fbc_wm = false; 2688
2692 lp_results[level - 1].fbc_val = 0; 2689 ilk_merge_wm_level(dev, level, wm);
2690
2691 if (!ilk_validate_wm_level(level, max, wm))
2692 break;
2693
2694 /*
2695 * The spec says it is preferred to disable
2696 * FBC WMs instead of disabling a WM level.
2697 */
2698 if (wm->fbc_val > max->fbc) {
2699 merged->fbc_wm_enabled = false;
2700 wm->fbc_val = 0;
2693 } 2701 }
2694 } 2702 }
2703}
2695 2704
2705static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2706{
2707 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2708 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2709}
2710
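Unpacking that expression: the WM1/WM2/WM3 registers carry levels 1/2/3 on parts with four watermark levels, and levels 1/3/4 when a fifth level exists and is enabled (wm[4], as on HSW), so level 2 gets skipped. A sketch of the mapping; the WARN_ON checks are illustrative only and assume the struct intel_pipe_wm definition from elsewhere in this series:

	static void ilk_wm_lp_to_level_examples(void)
	{
		struct intel_pipe_wm four_levels = {};	/* wm[4].enable == false */
		struct intel_pipe_wm five_levels = { .wm[4].enable = true };

		/* four-level parts: WM1..WM3 hold levels 1, 2, 3 */
		WARN_ON(ilk_wm_lp_to_level(1, &four_levels) != 1);
		WARN_ON(ilk_wm_lp_to_level(2, &four_levels) != 2);
		WARN_ON(ilk_wm_lp_to_level(3, &four_levels) != 3);

		/* five-level parts: WM1..WM3 hold levels 1, 3, 4 */
		WARN_ON(ilk_wm_lp_to_level(1, &five_levels) != 1);
		WARN_ON(ilk_wm_lp_to_level(2, &five_levels) != 3);
		WARN_ON(ilk_wm_lp_to_level(3, &five_levels) != 4);
	}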
2711static void hsw_compute_wm_results(struct drm_device *dev,
2712 const struct intel_pipe_wm *merged,
2713 enum intel_ddb_partitioning partitioning,
2714 struct hsw_wm_values *results)
2715{
2716 struct intel_crtc *intel_crtc;
2717 int level, wm_lp;
2718
2719 results->enable_fbc_wm = merged->fbc_wm_enabled;
2720 results->partitioning = partitioning;
2721
2722 /* LP1+ register values */
2696 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2723 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2697 const struct intel_wm_level *r; 2724 const struct intel_wm_level *r;
2698 2725
2699 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; 2726 level = ilk_wm_lp_to_level(wm_lp, merged);
2700 if (level > max_level) 2727
2728 r = &merged->wm[level];
2729 if (!r->enable)
2701 break; 2730 break;
2702 2731
2703 r = &lp_results[level - 1]; 2732 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2704 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, 2733 ((level * 2) << WM1_LP_LATENCY_SHIFT) |
2705 r->fbc_val, 2734 (r->pri_val << WM1_LP_SR_SHIFT) |
2706 r->pri_val, 2735 r->cur_val;
2707 r->cur_val); 2736
2737 if (INTEL_INFO(dev)->gen >= 8)
2738 results->wm_lp[wm_lp - 1] |=
2739 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2740 else
2741 results->wm_lp[wm_lp - 1] |=
2742 r->fbc_val << WM1_LP_FBC_SHIFT;
2743
2708 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2744 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2709 } 2745 }
2710 2746
2711 for_each_pipe(pipe) 2747 /* LP0 register values */
2712 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe, 2748 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2713 &params[pipe]); 2749 enum pipe pipe = intel_crtc->pipe;
2750 const struct intel_wm_level *r =
2751 &intel_crtc->wm.active.wm[0];
2714 2752
2715 for_each_pipe(pipe) { 2753 if (WARN_ON(!r->enable))
2716 crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2754 continue;
2717 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc); 2755
2756 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2757
2758 results->wm_pipe[pipe] =
2759 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2760 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2761 r->cur_val;
2718 } 2762 }
2719} 2763}
2720 2764
2721/* Find the result with the highest level enabled. Check for enable_fbc_wm in 2765/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2722 * case both are at the same level. Prefer r1 in case they're the same. */ 2766 * case both are at the same level. Prefer r1 in case they're the same. */
2723static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, 2767static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
2724 struct hsw_wm_values *r2) 2768 struct intel_pipe_wm *r1,
2769 struct intel_pipe_wm *r2)
2725{ 2770{
2726 int i, val_r1 = 0, val_r2 = 0; 2771 int level, max_level = ilk_wm_max_level(dev);
2772 int level1 = 0, level2 = 0;
2727 2773
2728 for (i = 0; i < 3; i++) { 2774 for (level = 1; level <= max_level; level++) {
2729 if (r1->wm_lp[i] & WM3_LP_EN) 2775 if (r1->wm[level].enable)
2730 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK; 2776 level1 = level;
2731 if (r2->wm_lp[i] & WM3_LP_EN) 2777 if (r2->wm[level].enable)
2732 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK; 2778 level2 = level;
2733 } 2779 }
2734 2780
2735 if (val_r1 == val_r2) { 2781 if (level1 == level2) {
2736 if (r2->enable_fbc_wm && !r1->enable_fbc_wm) 2782 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2737 return r2; 2783 return r2;
2738 else 2784 else
2739 return r1; 2785 return r1;
2740 } else if (val_r1 > val_r2) { 2786 } else if (level1 > level2) {
2741 return r1; 2787 return r1;
2742 } else { 2788 } else {
2743 return r2; 2789 return r2;
2744 } 2790 }
2745} 2791}
2746 2792
2793/* dirty bits used to track which watermarks need changes */
2794#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2795#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2796#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2797#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2798#define WM_DIRTY_FBC (1 << 24)
2799#define WM_DIRTY_DDB (1 << 25)
2800
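Spelled out, the bit positions follow directly from the macro definitions above, packing per-pipe, per-LP-register and global state into one unsigned int:

	/*
	 * bits  0-2   WM_DIRTY_PIPE(PIPE_A..PIPE_C)      WM0 pipe values
	 * bits  8-10  WM_DIRTY_LINETIME(PIPE_A..PIPE_C)  pipe linetime values
	 * bits 16-18  WM_DIRTY_LP(1..3)                  LP1+ registers
	 * bit  24     WM_DIRTY_FBC                       FBC WM enable bit
	 * bit  25     WM_DIRTY_DDB                       DDB partitioning
	 *
	 * e.g. WM_DIRTY_LINETIME(PIPE_B) == (1 << 9),
	 *      WM_DIRTY_LP(2)            == (1 << 17)
	 */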
2801static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2802 const struct hsw_wm_values *old,
2803 const struct hsw_wm_values *new)
2804{
2805 unsigned int dirty = 0;
2806 enum pipe pipe;
2807 int wm_lp;
2808
2809 for_each_pipe(pipe) {
2810 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2811 dirty |= WM_DIRTY_LINETIME(pipe);
2812 /* Must disable LP1+ watermarks too */
2813 dirty |= WM_DIRTY_LP_ALL;
2814 }
2815
2816 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2817 dirty |= WM_DIRTY_PIPE(pipe);
2818 /* Must disable LP1+ watermarks too */
2819 dirty |= WM_DIRTY_LP_ALL;
2820 }
2821 }
2822
2823 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2824 dirty |= WM_DIRTY_FBC;
2825 /* Must disable LP1+ watermarks too */
2826 dirty |= WM_DIRTY_LP_ALL;
2827 }
2828
2829 if (old->partitioning != new->partitioning) {
2830 dirty |= WM_DIRTY_DDB;
2831 /* Must disable LP1+ watermarks too */
2832 dirty |= WM_DIRTY_LP_ALL;
2833 }
2834
2835 /* LP1+ watermarks already deemed dirty, no need to continue */
2836 if (dirty & WM_DIRTY_LP_ALL)
2837 return dirty;
2838
2839 /* Find the lowest numbered LP1+ watermark in need of an update... */
2840 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2841 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2842 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2843 break;
2844 }
2845
2846 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2847 for (; wm_lp <= 3; wm_lp++)
2848 dirty |= WM_DIRTY_LP(wm_lp);
2849
2850 return dirty;
2851}
2852
2747/* 2853/*
2748 * The spec says we shouldn't write when we don't need to, because every write 2854 * The spec says we shouldn't write when we don't need to, because every write
2749 * causes WMs to be re-evaluated, expending some power. 2855 * causes WMs to be re-evaluated, expending some power.
2750 */ 2856 */
2751static void hsw_write_wm_values(struct drm_i915_private *dev_priv, 2857static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2752 struct hsw_wm_values *results, 2858 struct hsw_wm_values *results)
2753 enum intel_ddb_partitioning partitioning)
2754{ 2859{
2755 struct hsw_wm_values previous; 2860 struct hsw_wm_values *previous = &dev_priv->wm.hw;
2861 unsigned int dirty;
2756 uint32_t val; 2862 uint32_t val;
2757 enum intel_ddb_partitioning prev_partitioning; 2863
2758 bool prev_enable_fbc_wm; 2864 dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
2759 2865 if (!dirty)
2760 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2761 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2762 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2763 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2764 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2765 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2766 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2767 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2768 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2769 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2770 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2771 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2772
2773 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2774 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2775
2776 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2777
2778 if (memcmp(results->wm_pipe, previous.wm_pipe,
2779 sizeof(results->wm_pipe)) == 0 &&
2780 memcmp(results->wm_lp, previous.wm_lp,
2781 sizeof(results->wm_lp)) == 0 &&
2782 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2783 sizeof(results->wm_lp_spr)) == 0 &&
2784 memcmp(results->wm_linetime, previous.wm_linetime,
2785 sizeof(results->wm_linetime)) == 0 &&
2786 partitioning == prev_partitioning &&
2787 results->enable_fbc_wm == prev_enable_fbc_wm)
2788 return; 2866 return;
2789 2867
2790 if (previous.wm_lp[2] != 0) 2868 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
2791 I915_WRITE(WM3_LP_ILK, 0); 2869 I915_WRITE(WM3_LP_ILK, 0);
2792 if (previous.wm_lp[1] != 0) 2870 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
2793 I915_WRITE(WM2_LP_ILK, 0); 2871 I915_WRITE(WM2_LP_ILK, 0);
2794 if (previous.wm_lp[0] != 0) 2872 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
2795 I915_WRITE(WM1_LP_ILK, 0); 2873 I915_WRITE(WM1_LP_ILK, 0);
2796 2874
2797 if (previous.wm_pipe[0] != results->wm_pipe[0]) 2875 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2798 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2876 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2799 if (previous.wm_pipe[1] != results->wm_pipe[1]) 2877 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2800 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2878 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2801 if (previous.wm_pipe[2] != results->wm_pipe[2]) 2879 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2802 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2880 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2803 2881
2804 if (previous.wm_linetime[0] != results->wm_linetime[0]) 2882 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2805 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2883 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2806 if (previous.wm_linetime[1] != results->wm_linetime[1]) 2884 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2807 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2885 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2808 if (previous.wm_linetime[2] != results->wm_linetime[2]) 2886 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2809 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2887 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2810 2888
2811 if (prev_partitioning != partitioning) { 2889 if (dirty & WM_DIRTY_DDB) {
2812 val = I915_READ(WM_MISC); 2890 val = I915_READ(WM_MISC);
2813 if (partitioning == INTEL_DDB_PART_1_2) 2891 if (results->partitioning == INTEL_DDB_PART_1_2)
2814 val &= ~WM_MISC_DATA_PARTITION_5_6; 2892 val &= ~WM_MISC_DATA_PARTITION_5_6;
2815 else 2893 else
2816 val |= WM_MISC_DATA_PARTITION_5_6; 2894 val |= WM_MISC_DATA_PARTITION_5_6;
2817 I915_WRITE(WM_MISC, val); 2895 I915_WRITE(WM_MISC, val);
2818 } 2896 }
2819 2897
2820 if (prev_enable_fbc_wm != results->enable_fbc_wm) { 2898 if (dirty & WM_DIRTY_FBC) {
2821 val = I915_READ(DISP_ARB_CTL); 2899 val = I915_READ(DISP_ARB_CTL);
2822 if (results->enable_fbc_wm) 2900 if (results->enable_fbc_wm)
2823 val &= ~DISP_FBC_WM_DIS; 2901 val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2904,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2826 I915_WRITE(DISP_ARB_CTL, val); 2904 I915_WRITE(DISP_ARB_CTL, val);
2827 } 2905 }
2828 2906
2829 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0]) 2907 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2830 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2908 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2831 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1]) 2909 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2832 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 2910 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2833 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2]) 2911 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2834 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2912 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2835 2913
2836 if (results->wm_lp[0] != 0) 2914 if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
2837 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2915 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2838 if (results->wm_lp[1] != 0) 2916 if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
2839 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2917 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2840 if (results->wm_lp[2] != 0) 2918 if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
2841 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2919 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2920
2921 dev_priv->wm.hw = *results;
2842} 2922}
2843 2923
2844static void haswell_update_wm(struct drm_device *dev) 2924static void haswell_update_wm(struct drm_crtc *crtc)
2845{ 2925{
2926 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2927 struct drm_device *dev = crtc->dev;
2846 struct drm_i915_private *dev_priv = dev->dev_private; 2928 struct drm_i915_private *dev_priv = dev->dev_private;
2847 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; 2929 struct hsw_wm_maximums max;
2848 struct hsw_pipe_wm_parameters params[3]; 2930 struct hsw_pipe_wm_parameters params = {};
2849 struct hsw_wm_values results_1_2, results_5_6, *best_results; 2931 struct hsw_wm_values results = {};
2850 enum intel_ddb_partitioning partitioning; 2932 enum intel_ddb_partitioning partitioning;
2933 struct intel_pipe_wm pipe_wm = {};
2934 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2935 struct intel_wm_config config = {};
2851 2936
2852 hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); 2937 hsw_compute_wm_parameters(crtc, &params, &config);
2938
2939 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2940
2941 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2942 return;
2853 2943
2854 hsw_compute_wm_results(dev, params, 2944 intel_crtc->wm.active = pipe_wm;
2855 &lp_max_1_2, &results_1_2); 2945
2856 if (lp_max_1_2.pri != lp_max_5_6.pri) { 2946 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2857 hsw_compute_wm_results(dev, params, 2947 ilk_wm_merge(dev, &max, &lp_wm_1_2);
2858 &lp_max_5_6, &results_5_6); 2948
2859 best_results = hsw_find_best_result(&results_1_2, &results_5_6); 2949 /* 5/6 split only in single pipe config on IVB+ */
2950 if (INTEL_INFO(dev)->gen >= 7 &&
2951 config.num_pipes_active == 1 && config.sprites_enabled) {
2952 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2953 ilk_wm_merge(dev, &max, &lp_wm_5_6);
2954
2955 best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2860 } else { 2956 } else {
2861 best_results = &results_1_2; 2957 best_lp_wm = &lp_wm_1_2;
2862 } 2958 }
2863 2959
2864 partitioning = (best_results == &results_1_2) ? 2960 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2865 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 2961 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2866 2962
2867 hsw_write_wm_values(dev_priv, best_results, partitioning); 2963 hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2964
2965 hsw_write_wm_values(dev_priv, &results);
2868} 2966}
2869 2967
2870static void haswell_update_sprite_wm(struct drm_plane *plane, 2968static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2977,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
2879 intel_plane->wm.horiz_pixels = sprite_width; 2977 intel_plane->wm.horiz_pixels = sprite_width;
2880 intel_plane->wm.bytes_per_pixel = pixel_size; 2978 intel_plane->wm.bytes_per_pixel = pixel_size;
2881 2979
2882 haswell_update_wm(plane->dev); 2980 haswell_update_wm(crtc);
2883} 2981}
2884 2982
2885static bool 2983static bool
@@ -2898,7 +2996,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2898 return false; 2996 return false;
2899 } 2997 }
2900 2998
2901 clock = crtc->mode.clock; 2999 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2902 3000
2903 /* Use the small buffer method to calculate the sprite watermark */ 3001 /* Use the small buffer method to calculate the sprite watermark */
2904 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 3002 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3031,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2933 } 3031 }
2934 3032
2935 crtc = intel_get_crtc_for_plane(dev, plane); 3033 crtc = intel_get_crtc_for_plane(dev, plane);
2936 clock = crtc->mode.clock; 3034 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2937 if (!clock) { 3035 if (!clock) {
2938 *sprite_wm = 0; 3036 *sprite_wm = 0;
2939 return false; 3037 return false;
@@ -3044,6 +3142,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3044 I915_WRITE(WM3S_LP_IVB, sprite_wm); 3142 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3045} 3143}
3046 3144
3145static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3146{
3147 struct drm_device *dev = crtc->dev;
3148 struct drm_i915_private *dev_priv = dev->dev_private;
3149 struct hsw_wm_values *hw = &dev_priv->wm.hw;
3150 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3151 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3152 enum pipe pipe = intel_crtc->pipe;
3153 static const unsigned int wm0_pipe_reg[] = {
3154 [PIPE_A] = WM0_PIPEA_ILK,
3155 [PIPE_B] = WM0_PIPEB_ILK,
3156 [PIPE_C] = WM0_PIPEC_IVB,
3157 };
3158
3159 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3160 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3161
3162 if (intel_crtc_active(crtc)) {
3163 u32 tmp = hw->wm_pipe[pipe];
3164
3165 /*
3166 * For active pipes LP0 watermark is marked as
3167 * enabled, and LP1+ watermarks as disabled since
3168 * we can't really reverse compute them in case
3169 * multiple pipes are active.
3170 */
3171 active->wm[0].enable = true;
3172 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3173 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3174 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3175 active->linetime = hw->wm_linetime[pipe];
3176 } else {
3177 int level, max_level = ilk_wm_max_level(dev);
3178
3179 /*
3180 * For inactive pipes, all watermark levels
3181 * should be marked as enabled but zeroed,
3182 * which is what we'd compute them to.
3183 */
3184 for (level = 0; level <= max_level; level++)
3185 active->wm[level].enable = true;
3186 }
3187}
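The readout above is plain mask-and-shift unpacking of the packed WM0 fields. A minimal sketch of the pattern; the field widths are chosen for illustration and are not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define PLANE_MASK   (0x7f << 16) /* hypothetical layout */
#define PLANE_SHIFT  16
#define SPRITE_MASK  (0x7f << 8)
#define SPRITE_SHIFT 8
#define CURSOR_MASK  0x3f

int main(void)
{
	uint32_t tmp = (0x20 << 16) | (0x10 << 8) | 0x06; /* fake WM0 readout */

	printf("pri=%u spr=%u cur=%u\n",
	       (tmp & PLANE_MASK) >> PLANE_SHIFT,   /* 32 */
	       (tmp & SPRITE_MASK) >> SPRITE_SHIFT, /* 16 */
	       tmp & CURSOR_MASK);                  /* 6 */
	return 0;
}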
3188
3189void ilk_wm_get_hw_state(struct drm_device *dev)
3190{
3191 struct drm_i915_private *dev_priv = dev->dev_private;
3192 struct hsw_wm_values *hw = &dev_priv->wm.hw;
3193 struct drm_crtc *crtc;
3194
3195 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3196 ilk_pipe_wm_get_hw_state(crtc);
3197
3198 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
3199 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
3200 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
3201
3202 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
3203 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3204 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3205
3206 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3207 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3208
3209 hw->enable_fbc_wm =
3210 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3211}
3212
3047/** 3213/**
3048 * intel_update_watermarks - update FIFO watermark values based on current modes 3214 * intel_update_watermarks - update FIFO watermark values based on current modes
3049 * 3215 *
@@ -3076,12 +3242,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3076 * We don't use the sprite, so we can ignore that. And on Crestline we have 3242 * We don't use the sprite, so we can ignore that. And on Crestline we have
3077 * to set the non-SR watermarks to 8. 3243 * to set the non-SR watermarks to 8.
3078 */ 3244 */
3079void intel_update_watermarks(struct drm_device *dev) 3245void intel_update_watermarks(struct drm_crtc *crtc)
3080{ 3246{
3081 struct drm_i915_private *dev_priv = dev->dev_private; 3247 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3082 3248
3083 if (dev_priv->display.update_wm) 3249 if (dev_priv->display.update_wm)
3084 dev_priv->display.update_wm(dev); 3250 dev_priv->display.update_wm(crtc);
3085} 3251}
3086 3252
3087void intel_update_sprite_watermarks(struct drm_plane *plane, 3253void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3453,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3287 return limits; 3453 return limits;
3288} 3454}
3289 3455
3456static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3457{
3458 int new_power;
3459
3460 new_power = dev_priv->rps.power;
3461 switch (dev_priv->rps.power) {
3462 case LOW_POWER:
3463 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3464 new_power = BETWEEN;
3465 break;
3466
3467 case BETWEEN:
3468 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3469 new_power = LOW_POWER;
3470 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3471 new_power = HIGH_POWER;
3472 break;
3473
3474 case HIGH_POWER:
3475 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3476 new_power = BETWEEN;
3477 break;
3478 }
3479 /* Max/min bins are special */
3480 if (val == dev_priv->rps.min_delay)
3481 new_power = LOW_POWER;
3482 if (val == dev_priv->rps.max_delay)
3483 new_power = HIGH_POWER;
3484 if (new_power == dev_priv->rps.power)
3485 return;
3486
3487 /* Note the units here are not exactly 1us, but 1280ns. */
3488 switch (new_power) {
3489 case LOW_POWER:
3490 /* Upclock if more than 95% busy over 16ms */
3491 I915_WRITE(GEN6_RP_UP_EI, 12500);
3492 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3493
3494 /* Downclock if less than 85% busy over 32ms */
3495 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3496 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3497
3498 I915_WRITE(GEN6_RP_CONTROL,
3499 GEN6_RP_MEDIA_TURBO |
3500 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3501 GEN6_RP_MEDIA_IS_GFX |
3502 GEN6_RP_ENABLE |
3503 GEN6_RP_UP_BUSY_AVG |
3504 GEN6_RP_DOWN_IDLE_AVG);
3505 break;
3506
3507 case BETWEEN:
3508 /* Upclock if more than 90% busy over 13ms */
3509 I915_WRITE(GEN6_RP_UP_EI, 10250);
3510 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3511
3512 /* Downclock if less than 75% busy over 32ms */
3513 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3514 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3515
3516 I915_WRITE(GEN6_RP_CONTROL,
3517 GEN6_RP_MEDIA_TURBO |
3518 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3519 GEN6_RP_MEDIA_IS_GFX |
3520 GEN6_RP_ENABLE |
3521 GEN6_RP_UP_BUSY_AVG |
3522 GEN6_RP_DOWN_IDLE_AVG);
3523 break;
3524
3525 case HIGH_POWER:
3526 /* Upclock if more than 85% busy over 10ms */
3527 I915_WRITE(GEN6_RP_UP_EI, 8000);
3528 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3529
3530 /* Downclock if less than 60% busy over 32ms */
3531 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3532 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3533
3534 I915_WRITE(GEN6_RP_CONTROL,
3535 GEN6_RP_MEDIA_TURBO |
3536 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3537 GEN6_RP_MEDIA_IS_GFX |
3538 GEN6_RP_ENABLE |
3539 GEN6_RP_UP_BUSY_AVG |
3540 GEN6_RP_DOWN_IDLE_AVG);
3541 break;
3542 }
3543
3544 dev_priv->rps.power = new_power;
3545 dev_priv->rps.last_adj = 0;
3546}
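Because the evaluation-interval registers count 1280 ns ticks, the LOW_POWER numbers above decode to a 16 ms window with a ~95% busy trigger (11800/12500 is 94.4%, which the comment rounds up). A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int up_ei = 12500, up_threshold = 11800;

	double window_ms = up_ei * 1280.0 / 1e6;        /* 16.0 ms */
	double busy_pct = 100.0 * up_threshold / up_ei; /* 94.4 %  */

	printf("window=%.1fms busy>=%.1f%%\n", window_ms, busy_pct);
	return 0;
}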
3547
3290void gen6_set_rps(struct drm_device *dev, u8 val) 3548void gen6_set_rps(struct drm_device *dev, u8 val)
3291{ 3549{
3292 struct drm_i915_private *dev_priv = dev->dev_private; 3550 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3557,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3299 if (val == dev_priv->rps.cur_delay) 3557 if (val == dev_priv->rps.cur_delay)
3300 return; 3558 return;
3301 3559
3560 gen6_set_rps_thresholds(dev_priv, val);
3561
3302 if (IS_HASWELL(dev)) 3562 if (IS_HASWELL(dev))
3303 I915_WRITE(GEN6_RPNSWREQ, 3563 I915_WRITE(GEN6_RPNSWREQ,
3304 HSW_FREQUENCY(val)); 3564 HSW_FREQUENCY(val));
@@ -3320,6 +3580,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3320 trace_intel_gpu_freq_change(val * 50); 3580 trace_intel_gpu_freq_change(val * 50);
3321} 3581}
3322 3582
3583void gen6_rps_idle(struct drm_i915_private *dev_priv)
3584{
3585 mutex_lock(&dev_priv->rps.hw_lock);
3586 if (dev_priv->rps.enabled) {
3587 if (dev_priv->info->is_valleyview)
3588 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3589 else
3590 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3591 dev_priv->rps.last_adj = 0;
3592 }
3593 mutex_unlock(&dev_priv->rps.hw_lock);
3594}
3595
3596void gen6_rps_boost(struct drm_i915_private *dev_priv)
3597{
3598 mutex_lock(&dev_priv->rps.hw_lock);
3599 if (dev_priv->rps.enabled) {
3600 if (dev_priv->info->is_valleyview)
3601 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3602 else
3603 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3604 dev_priv->rps.last_adj = 0;
3605 }
3606 mutex_unlock(&dev_priv->rps.hw_lock);
3607}
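Together these implement a simple clamp policy: park at the minimum bin when idle, jump straight to the maximum on a boost, and zero the incremental adjustment either way. A minimal sketch of just that policy, with the locking and the Valleyview split omitted:

#include <stdio.h>

struct rps { int cur, min, max, last_adj; };

static void rps_idle(struct rps *r)  { r->cur = r->min; r->last_adj = 0; }
static void rps_boost(struct rps *r) { r->cur = r->max; r->last_adj = 0; }

int main(void)
{
	struct rps r = { .cur = 8, .min = 3, .max = 17, .last_adj = 2 };

	rps_idle(&r);
	printf("idle  -> cur=%d last_adj=%d\n", r.cur, r.last_adj);  /* 3, 0 */
	rps_boost(&r);
	printf("boost -> cur=%d last_adj=%d\n", r.cur, r.last_adj); /* 17, 0 */
	return 0;
}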
3608
3323/* 3609/*
3324 * Wait until the previous freq change has completed, 3610 * Wait until the previous freq change has completed,
3325 * or the timeout elapsed, and then update our notion 3611 * or the timeout elapsed, and then update our notion
@@ -3415,6 +3701,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
3415 } 3701 }
3416} 3702}
3417 3703
3704static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3705{
3706 if (IS_GEN6(dev))
3707 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3708
3709 if (IS_HASWELL(dev))
3710 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3711
3712 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3713 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3714 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3715 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3716}
3717
3418int intel_enable_rc6(const struct drm_device *dev) 3718int intel_enable_rc6(const struct drm_device *dev)
3419{ 3719{
3420 /* No RC6 before Ironlake */ 3720 /* No RC6 before Ironlake */
@@ -3429,18 +3729,13 @@ int intel_enable_rc6(const struct drm_device *dev)
3429 if (INTEL_INFO(dev)->gen == 5) 3729 if (INTEL_INFO(dev)->gen == 5)
3430 return 0; 3730 return 0;
3431 3731
3432 if (IS_HASWELL(dev)) { 3732 if (IS_HASWELL(dev))
3433 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3434 return INTEL_RC6_ENABLE; 3733 return INTEL_RC6_ENABLE;
3435 }
3436 3734
3437 /* snb/ivb have more than one rc6 state. */ 3735 /* snb/ivb have more than one rc6 state. */
3438 if (INTEL_INFO(dev)->gen == 6) { 3736 if (INTEL_INFO(dev)->gen == 6)
3439 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3440 return INTEL_RC6_ENABLE; 3737 return INTEL_RC6_ENABLE;
3441 }
3442 3738
3443 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3444 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3739 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3445} 3740}
3446 3741
@@ -3467,6 +3762,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3467 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); 3762 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3468} 3763}
3469 3764
3765static void gen8_enable_rps(struct drm_device *dev)
3766{
3767 struct drm_i915_private *dev_priv = dev->dev_private;
3768 struct intel_ring_buffer *ring;
3769 uint32_t rc6_mask = 0, rp_state_cap;
3770 int unused;
3771
3772 /* 1a: Software RC state - RC0 */
3773 I915_WRITE(GEN6_RC_STATE, 0);
3774
3775 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3776 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
3777 gen6_gt_force_wake_get(dev_priv);
3778
3779 /* 2a: Disable RC states. */
3780 I915_WRITE(GEN6_RC_CONTROL, 0);
3781
3782 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3783
3784 /* 2b: Program RC6 thresholds.*/
3785 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3786 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3787 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3788 for_each_ring(ring, dev_priv, unused)
3789 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3790 I915_WRITE(GEN6_RC_SLEEP, 0);
3791 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3792
3793 /* 3: Enable RC6 */
3794 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3795 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3796 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3797 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3798 GEN6_RC_CTL_EI_MODE(1) |
3799 rc6_mask);
3800
3801 /* 4 Program defaults and thresholds for RPS*/
3802 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3803 I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3804 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3805 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3806
3807 /* Docs recommend 900MHz, and 300 MHz respectively */
3808 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3809 dev_priv->rps.max_delay << 24 |
3810 dev_priv->rps.min_delay << 16);
3811
3812 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3813 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3814 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3815 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3816
3817 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3818
3819 /* 5: Enable RPS */
3820 I915_WRITE(GEN6_RP_CONTROL,
3821 GEN6_RP_MEDIA_TURBO |
3822 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3823 GEN6_RP_MEDIA_IS_GFX |
3824 GEN6_RP_ENABLE |
3825 GEN6_RP_UP_BUSY_AVG |
3826 GEN6_RP_DOWN_IDLE_AVG);
3827
3828 /* 6: Ring frequency + overclocking (our driver does this later) */
3829
3830 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3831
3832 gen6_enable_rps_interrupts(dev);
3833
3834 gen6_gt_force_wake_put(dev_priv);
3835}
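The RP timeout and threshold writes in step 4 are in the same 1280 ns units, which is why "1 second" is programmed as 100000000 / 128. A quick check of the two values commented above:

#include <stdio.h>

int main(void)
{
	unsigned int down_timeout = 100000000 / 128; /* 781250 ticks */
	unsigned int up_threshold = 7600000 / 128;   /* 59375 ticks  */

	printf("down timeout = %.3f s\n", down_timeout * 1280.0 / 1e9);  /* 1.000 */
	printf("up threshold = %.1f ms\n", up_threshold * 1280.0 / 1e6); /* 76.0  */
	return 0;
}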
3836
3470static void gen6_enable_rps(struct drm_device *dev) 3837static void gen6_enable_rps(struct drm_device *dev)
3471{ 3838{
3472 struct drm_i915_private *dev_priv = dev->dev_private; 3839 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3501,7 +3868,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3501 3868
3502 /* In units of 50MHz */ 3869 /* In units of 50MHz */
3503 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3870 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3504 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; 3871 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3872 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3873 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3874 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3505 dev_priv->rps.cur_delay = 0; 3875 dev_priv->rps.cur_delay = 0;
3506 3876
3507 /* disable the counters and set deterministic thresholds */ 3877 /* disable the counters and set deterministic thresholds */
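RP_STATE_CAP packs the frequency bins as bytes in 50 MHz units, matching the extraction above: bits 7:0 hold RP0 (the hardware max), 15:8 RP1, and 23:16 RPn (the min). A decoding sketch with a made-up readout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rp_state_cap = 0x070b16; /* hypothetical register value */

	unsigned int max_delay = rp_state_cap & 0xff;         /* 22 */
	unsigned int rp1_delay = (rp_state_cap >> 8) & 0xff;  /* 11 */
	unsigned int min_delay = (rp_state_cap >> 16) & 0xff; /*  7 */

	printf("min=%uMHz rp1=%uMHz max=%uMHz\n",
	       min_delay * 50, rp1_delay * 50, max_delay * 50); /* 350/550/1100 */
	return 0;
}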
@@ -3539,48 +3909,16 @@ static void gen6_enable_rps(struct drm_device *dev)
3539 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3909 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3540 } 3910 }
3541 3911
3542 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3912 intel_print_rc6_info(dev, rc6_mask);
3543 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3544 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3545 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3546 3913
3547 I915_WRITE(GEN6_RC_CONTROL, 3914 I915_WRITE(GEN6_RC_CONTROL,
3548 rc6_mask | 3915 rc6_mask |
3549 GEN6_RC_CTL_EI_MODE(1) | 3916 GEN6_RC_CTL_EI_MODE(1) |
3550 GEN6_RC_CTL_HW_ENABLE); 3917 GEN6_RC_CTL_HW_ENABLE);
3551 3918
3552 if (IS_HASWELL(dev)) { 3919 /* Power down if completely idle for over 50ms */
3553 I915_WRITE(GEN6_RPNSWREQ, 3920 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3554 HSW_FREQUENCY(10));
3555 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3556 HSW_FREQUENCY(12));
3557 } else {
3558 I915_WRITE(GEN6_RPNSWREQ,
3559 GEN6_FREQUENCY(10) |
3560 GEN6_OFFSET(0) |
3561 GEN6_AGGRESSIVE_TURBO);
3562 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3563 GEN6_FREQUENCY(12));
3564 }
3565
3566 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3567 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3568 dev_priv->rps.max_delay << 24 |
3569 dev_priv->rps.min_delay << 16);
3570
3571 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3572 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3573 I915_WRITE(GEN6_RP_UP_EI, 66000);
3574 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3575
3576 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3921 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3577 I915_WRITE(GEN6_RP_CONTROL,
3578 GEN6_RP_MEDIA_TURBO |
3579 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3580 GEN6_RP_MEDIA_IS_GFX |
3581 GEN6_RP_ENABLE |
3582 GEN6_RP_UP_BUSY_AVG |
3583 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3584 3922
3585 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3923 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3586 if (!ret) { 3924 if (!ret) {
@@ -3596,7 +3934,8 @@ static void gen6_enable_rps(struct drm_device *dev)
3596 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3934 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3597 } 3935 }
3598 3936
3599 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3937 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3938 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3600 3939
3601 gen6_enable_rps_interrupts(dev); 3940 gen6_enable_rps_interrupts(dev);
3602 3941
@@ -3624,23 +3963,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
3624 unsigned int gpu_freq; 3963 unsigned int gpu_freq;
3625 unsigned int max_ia_freq, min_ring_freq; 3964 unsigned int max_ia_freq, min_ring_freq;
3626 int scaling_factor = 180; 3965 int scaling_factor = 180;
3966 struct cpufreq_policy *policy;
3627 3967
3628 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3968 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3629 3969
3630 max_ia_freq = cpufreq_quick_get_max(0); 3970 policy = cpufreq_cpu_get(0);
3631 /* 3971 if (policy) {
3632 * Default to measured freq if none found, PCU will ensure we don't go 3972 max_ia_freq = policy->cpuinfo.max_freq;
3633 * over 3973 cpufreq_cpu_put(policy);
3634 */ 3974 } else {
3635 if (!max_ia_freq) 3975 /*
3976 * Default to measured freq if none found, PCU will ensure we
3977 * don't go over
3978 */
3636 max_ia_freq = tsc_khz; 3979 max_ia_freq = tsc_khz;
3980 }
3637 3981
3638 /* Convert from kHz to MHz */ 3982 /* Convert from kHz to MHz */
3639 max_ia_freq /= 1000; 3983 max_ia_freq /= 1000;
3640 3984
3641 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); 3985 min_ring_freq = I915_READ(DCLK) & 0xf;
3642 /* convert DDR frequency from units of 133.3MHz to bandwidth */ 3986 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3643 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; 3987 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3644 3988
3645 /* 3989 /*
3646 * For each potential GPU frequency, load a ring frequency we'd like 3990 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3652,8 +3996,11 @@ void gen6_update_ring_freq(struct drm_device *dev)
3652 int diff = dev_priv->rps.max_delay - gpu_freq; 3996 int diff = dev_priv->rps.max_delay - gpu_freq;
3653 unsigned int ia_freq = 0, ring_freq = 0; 3997 unsigned int ia_freq = 0, ring_freq = 0;
3654 3998
3655 if (IS_HASWELL(dev)) { 3999 if (INTEL_INFO(dev)->gen >= 8) {
3656 ring_freq = (gpu_freq * 5 + 3) / 4; 4000 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4001 ring_freq = max(min_ring_freq, gpu_freq);
4002 } else if (IS_HASWELL(dev)) {
4003 ring_freq = mult_frac(gpu_freq, 5, 4);
3657 ring_freq = max(min_ring_freq, ring_freq); 4004 ring_freq = max(min_ring_freq, ring_freq);
3658 /* leave ia_freq as the default, chosen by cpufreq */ 4005 /* leave ia_freq as the default, chosen by cpufreq */
3659 } else { 4006 } else {
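mult_frac(x, n, d) evaluates x*n/d without overflowing the intermediate product, and combined with the max() floor it yields the Haswell ring request of 1.25x the GPU frequency, never below the DDR-derived minimum. A sketch with illustrative numbers; the macro below mirrors the kernel's semantics:

#include <stdio.h>

#define mult_frac(x, n, d) ((x) / (d) * (n) + (x) % (d) * (n) / (d))

int main(void)
{
	unsigned int min_ring_freq = mult_frac(6, 8, 3);    /* DCLK field 6 -> 16 */
	unsigned int gpu_freq = 20;                         /* 50 MHz units       */
	unsigned int ring_freq = mult_frac(gpu_freq, 5, 4); /* 25                 */

	if (ring_freq < min_ring_freq)
		ring_freq = min_ring_freq;

	printf("ring_freq = %u\n", ring_freq); /* prints 25 */
	return 0;
}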
@@ -3709,24 +4056,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3709 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 4056 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3710} 4057}
3711 4058
3712static void vlv_rps_timer_work(struct work_struct *work)
3713{
3714 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3715 rps.vlv_work.work);
3716
3717 /*
3718 * Timer fired, we must be idle. Drop to min voltage state.
3719 * Note: we use RPe here since it should match the
3720 * Vmin we were shooting for. That should give us better
3721 * perf when we come back out of RC6 than if we used the
3722 * min freq available.
3723 */
3724 mutex_lock(&dev_priv->rps.hw_lock);
3725 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3726 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3727 mutex_unlock(&dev_priv->rps.hw_lock);
3728}
3729
3730static void valleyview_setup_pctx(struct drm_device *dev) 4059static void valleyview_setup_pctx(struct drm_device *dev)
3731{ 4060{
3732 struct drm_i915_private *dev_priv = dev->dev_private; 4061 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4102,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
3773{ 4102{
3774 struct drm_i915_private *dev_priv = dev->dev_private; 4103 struct drm_i915_private *dev_priv = dev->dev_private;
3775 struct intel_ring_buffer *ring; 4104 struct intel_ring_buffer *ring;
3776 u32 gtfifodbg, val; 4105 u32 gtfifodbg, val, rc6_mode = 0;
3777 int i; 4106 int i;
3778 4107
3779 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4108 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3780 4109
3781 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 4110 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3782 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 4111 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4112 gtfifodbg);
3783 I915_WRITE(GTFIFODBG, gtfifodbg); 4113 I915_WRITE(GTFIFODBG, gtfifodbg);
3784 } 4114 }
3785 4115
@@ -3812,9 +4142,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
3812 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); 4142 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3813 4143
3814 /* allows RC6 residency counter to work */ 4144 /* allows RC6 residency counter to work */
3815 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3)); 4145 I915_WRITE(VLV_COUNTER_CONTROL,
3816 I915_WRITE(GEN6_RC_CONTROL, 4146 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3817 GEN7_RC_CTL_TO_MODE); 4147 VLV_MEDIA_RC6_COUNT_EN |
4148 VLV_RENDER_RC6_COUNT_EN));
4149 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4150 rc6_mode = GEN7_RC_CTL_TO_MODE;
4151
4152 intel_print_rc6_info(dev, rc6_mode);
4153
4154 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3818 4155
3819 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4156 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3820 switch ((val >> 6) & 3) { 4157 switch ((val >> 6) & 3) {
@@ -3985,6 +4322,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
3985 4322
3986 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 4323 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3987 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4324 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4325
4326 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3988} 4327}
3989 4328
3990static unsigned long intel_pxfreq(u32 vidfreq) 4329static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4942,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
4603 } else if (INTEL_INFO(dev)->gen >= 6) { 4942 } else if (INTEL_INFO(dev)->gen >= 6) {
4604 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 4943 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4605 cancel_work_sync(&dev_priv->rps.work); 4944 cancel_work_sync(&dev_priv->rps.work);
4606 if (IS_VALLEYVIEW(dev))
4607 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4608 mutex_lock(&dev_priv->rps.hw_lock); 4945 mutex_lock(&dev_priv->rps.hw_lock);
4609 if (IS_VALLEYVIEW(dev)) 4946 if (IS_VALLEYVIEW(dev))
4610 valleyview_disable_rps(dev); 4947 valleyview_disable_rps(dev);
4611 else 4948 else
4612 gen6_disable_rps(dev); 4949 gen6_disable_rps(dev);
4950 dev_priv->rps.enabled = false;
4613 mutex_unlock(&dev_priv->rps.hw_lock); 4951 mutex_unlock(&dev_priv->rps.hw_lock);
4614 } 4952 }
4615} 4953}
@@ -4625,10 +4963,14 @@ static void intel_gen6_powersave_work(struct work_struct *work)
4625 4963
4626 if (IS_VALLEYVIEW(dev)) { 4964 if (IS_VALLEYVIEW(dev)) {
4627 valleyview_enable_rps(dev); 4965 valleyview_enable_rps(dev);
4966 } else if (IS_BROADWELL(dev)) {
4967 gen8_enable_rps(dev);
4968 gen6_update_ring_freq(dev);
4628 } else { 4969 } else {
4629 gen6_enable_rps(dev); 4970 gen6_enable_rps(dev);
4630 gen6_update_ring_freq(dev); 4971 gen6_update_ring_freq(dev);
4631 } 4972 }
4973 dev_priv->rps.enabled = true;
4632 mutex_unlock(&dev_priv->rps.hw_lock); 4974 mutex_unlock(&dev_priv->rps.hw_lock);
4633} 4975}
4634 4976
@@ -4672,7 +5014,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
4672 I915_WRITE(DSPCNTR(pipe), 5014 I915_WRITE(DSPCNTR(pipe),
4673 I915_READ(DSPCNTR(pipe)) | 5015 I915_READ(DSPCNTR(pipe)) |
4674 DISPPLANE_TRICKLE_FEED_DISABLE); 5016 DISPPLANE_TRICKLE_FEED_DISABLE);
4675 intel_flush_display_plane(dev_priv, pipe); 5017 intel_flush_primary_plane(dev_priv, pipe);
4676 } 5018 }
4677} 5019}
4678 5020
@@ -4932,6 +5274,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
4932 } 5274 }
4933} 5275}
4934 5276
5277static void gen8_init_clock_gating(struct drm_device *dev)
5278{
5279 struct drm_i915_private *dev_priv = dev->dev_private;
5280 enum pipe i;
5281
5282 I915_WRITE(WM3_LP_ILK, 0);
5283 I915_WRITE(WM2_LP_ILK, 0);
5284 I915_WRITE(WM1_LP_ILK, 0);
5285
5286 /* FIXME(BDW): Check all the w/a, some might only apply to
5287 * pre-production hw. */
5288
5289 WARN(!i915_preliminary_hw_support,
5290 "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
5291 I915_WRITE(HALF_SLICE_CHICKEN3,
5292 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5293 I915_WRITE(HALF_SLICE_CHICKEN3,
5294 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5295 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5296
5297 I915_WRITE(_3D_CHICKEN3,
5298 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
5299
5300 I915_WRITE(COMMON_SLICE_CHICKEN2,
5301 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5302
5303 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5304 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5305
5306 /* WaSwitchSolVfFArbitrationPriority */
5307 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5308
5309 /* WaPsrDPAMaskVBlankInSRD */
5310 I915_WRITE(CHICKEN_PAR1_1,
5311 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5312
5313 /* WaPsrDPRSUnmaskVBlankInSRD */
5314 for_each_pipe(i) {
5315 I915_WRITE(CHICKEN_PIPESL_1(i),
5316 I915_READ(CHICKEN_PIPESL_1(i) |
5317 DPRS_MASK_VBLANK_SRD));
5318 }
5319}
5320
4935static void haswell_init_clock_gating(struct drm_device *dev) 5321static void haswell_init_clock_gating(struct drm_device *dev)
4936{ 5322{
4937 struct drm_i915_private *dev_priv = dev->dev_private; 5323 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5255,6 +5641,25 @@ void intel_suspend_hw(struct drm_device *dev)
5255 lpt_suspend_hw(dev); 5641 lpt_suspend_hw(dev);
5256} 5642}
5257 5643
5644static bool is_always_on_power_domain(struct drm_device *dev,
5645 enum intel_display_power_domain domain)
5646{
5647 unsigned long always_on_domains;
5648
5649 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
5650
5651 if (IS_BROADWELL(dev)) {
5652 always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
5653 } else if (IS_HASWELL(dev)) {
5654 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
5655 } else {
5656 WARN_ON(1);
5657 return true;
5658 }
5659
5660 return BIT(domain) & always_on_domains;
5661}
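The always-on test reduces to a bitmask lookup keyed by the domain enum. A minimal sketch, with a hypothetical mask standing in for HSW_ALWAYS_ON_POWER_DOMAINS:

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum power_domain { DOM_PIPE_A, DOM_PIPE_B, DOM_TRANSCODER_EDP };

#define ALWAYS_ON (BIT(DOM_PIPE_A) | BIT(DOM_TRANSCODER_EDP)) /* hypothetical */

static int is_always_on(enum power_domain d)
{
	return (BIT(d) & ALWAYS_ON) != 0;
}

int main(void)
{
	printf("PIPE_A=%d PIPE_B=%d\n",
	       is_always_on(DOM_PIPE_A), is_always_on(DOM_PIPE_B)); /* 1 0 */
	return 0;
}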
5662
5258/** 5663/**
5259 * We should only use the power well if we explicitly asked the hardware to 5664 * We should only use the power well if we explicitly asked the hardware to
5260 * enable it, so check if it's enabled and also check if we've requested it to 5665 * enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5673,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
5268 if (!HAS_POWER_WELL(dev)) 5673 if (!HAS_POWER_WELL(dev))
5269 return true; 5674 return true;
5270 5675
5271 switch (domain) { 5676 if (is_always_on_power_domain(dev, domain))
5272 case POWER_DOMAIN_PIPE_A:
5273 case POWER_DOMAIN_TRANSCODER_EDP:
5274 return true; 5677 return true;
5275 case POWER_DOMAIN_PIPE_B: 5678
5276 case POWER_DOMAIN_PIPE_C: 5679 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5277 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5278 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5279 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5280 case POWER_DOMAIN_TRANSCODER_A:
5281 case POWER_DOMAIN_TRANSCODER_B:
5282 case POWER_DOMAIN_TRANSCODER_C:
5283 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5284 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5680 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5285 default:
5286 BUG();
5287 }
5288} 5681}
5289 5682
5290static void __intel_set_power_well(struct drm_device *dev, bool enable) 5683static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5721,136 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5328 spin_lock_irqsave(&dev->vbl_lock, irqflags); 5721 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5329 for_each_pipe(p) 5722 for_each_pipe(p)
5330 if (p != PIPE_A) 5723 if (p != PIPE_A)
5331 dev->last_vblank[p] = 0; 5724 dev->vblank[p].last = 0;
5332 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 5725 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5333 } 5726 }
5334 } 5727 }
5335} 5728}
5336 5729
5337static struct i915_power_well *hsw_pwr; 5730static void __intel_power_well_get(struct drm_device *dev,
5731 struct i915_power_well *power_well)
5732{
5733 if (!power_well->count++)
5734 __intel_set_power_well(dev, true);
5735}
5736
5737static void __intel_power_well_put(struct drm_device *dev,
5738 struct i915_power_well *power_well)
5739{
5740 WARN_ON(!power_well->count);
5741 if (!--power_well->count && i915_disable_power_well)
5742 __intel_set_power_well(dev, false);
5743}
5744
5745void intel_display_power_get(struct drm_device *dev,
5746 enum intel_display_power_domain domain)
5747{
5748 struct drm_i915_private *dev_priv = dev->dev_private;
5749 struct i915_power_domains *power_domains;
5750
5751 if (!HAS_POWER_WELL(dev))
5752 return;
5753
5754 if (is_always_on_power_domain(dev, domain))
5755 return;
5756
5757 power_domains = &dev_priv->power_domains;
5758
5759 mutex_lock(&power_domains->lock);
5760 __intel_power_well_get(dev, &power_domains->power_wells[0]);
5761 mutex_unlock(&power_domains->lock);
5762}
5763
5764void intel_display_power_put(struct drm_device *dev,
5765 enum intel_display_power_domain domain)
5766{
5767 struct drm_i915_private *dev_priv = dev->dev_private;
5768 struct i915_power_domains *power_domains;
5769
5770 if (!HAS_POWER_WELL(dev))
5771 return;
5772
5773 if (is_always_on_power_domain(dev, domain))
5774 return;
5775
5776 power_domains = &dev_priv->power_domains;
5777
5778 mutex_lock(&power_domains->lock);
5779 __intel_power_well_put(dev, &power_domains->power_wells[0]);
5780 mutex_unlock(&power_domains->lock);
5781}
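The get/put helpers refcount the well so only the 0->1 and 1->0 transitions touch hardware, which lets callers nest freely. A minimal sketch of the pattern; set_power() stands in for __intel_set_power_well(), and the i915_disable_power_well modparam check is left out:

#include <stdio.h>

static int count;

static void set_power(int on) { printf("power %s\n", on ? "on" : "off"); }

static void power_get(void) { if (!count++) set_power(1); }
static void power_put(void) { if (!--count) set_power(0); }

int main(void)
{
	power_get(); /* prints "power on"  */
	power_get(); /* refcount only      */
	power_put(); /* refcount only      */
	power_put(); /* prints "power off" */
	return 0;
}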
5782
5783static struct i915_power_domains *hsw_pwr;
5338 5784
5339/* Display audio driver power well request */ 5785/* Display audio driver power well request */
5340void i915_request_power_well(void) 5786void i915_request_power_well(void)
5341{ 5787{
5788 struct drm_i915_private *dev_priv;
5789
5342 if (WARN_ON(!hsw_pwr)) 5790 if (WARN_ON(!hsw_pwr))
5343 return; 5791 return;
5344 5792
5345 spin_lock_irq(&hsw_pwr->lock); 5793 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5346 if (!hsw_pwr->count++ && 5794 power_domains);
5347 !hsw_pwr->i915_request) 5795
5348 __intel_set_power_well(hsw_pwr->device, true); 5796 mutex_lock(&hsw_pwr->lock);
5349 spin_unlock_irq(&hsw_pwr->lock); 5797 __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
5798 mutex_unlock(&hsw_pwr->lock);
5350} 5799}
5351EXPORT_SYMBOL_GPL(i915_request_power_well); 5800EXPORT_SYMBOL_GPL(i915_request_power_well);
5352 5801
5353/* Display audio driver power well release */ 5802/* Display audio driver power well release */
5354void i915_release_power_well(void) 5803void i915_release_power_well(void)
5355{ 5804{
5805 struct drm_i915_private *dev_priv;
5806
5356 if (WARN_ON(!hsw_pwr)) 5807 if (WARN_ON(!hsw_pwr))
5357 return; 5808 return;
5358 5809
5359 spin_lock_irq(&hsw_pwr->lock); 5810 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5360 WARN_ON(!hsw_pwr->count); 5811 power_domains);
5361 if (!--hsw_pwr->count && 5812
5362 !hsw_pwr->i915_request) 5813 mutex_lock(&hsw_pwr->lock);
5363 __intel_set_power_well(hsw_pwr->device, false); 5814 __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
5364 spin_unlock_irq(&hsw_pwr->lock); 5815 mutex_unlock(&hsw_pwr->lock);
5365} 5816}
5366EXPORT_SYMBOL_GPL(i915_release_power_well); 5817EXPORT_SYMBOL_GPL(i915_release_power_well);
5367 5818
5368int i915_init_power_well(struct drm_device *dev) 5819int intel_power_domains_init(struct drm_device *dev)
5369{ 5820{
5370 struct drm_i915_private *dev_priv = dev->dev_private; 5821 struct drm_i915_private *dev_priv = dev->dev_private;
5822 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5823 struct i915_power_well *power_well;
5371 5824
5372 hsw_pwr = &dev_priv->power_well; 5825 mutex_init(&power_domains->lock);
5826 hsw_pwr = power_domains;
5373 5827
5374 hsw_pwr->device = dev; 5828 power_well = &power_domains->power_wells[0];
5375 spin_lock_init(&hsw_pwr->lock); 5829 power_well->count = 0;
5376 hsw_pwr->count = 0;
5377 5830
5378 return 0; 5831 return 0;
5379} 5832}
5380 5833
5381void i915_remove_power_well(struct drm_device *dev) 5834void intel_power_domains_remove(struct drm_device *dev)
5382{ 5835{
5383 hsw_pwr = NULL; 5836 hsw_pwr = NULL;
5384} 5837}
5385 5838
5386void intel_set_power_well(struct drm_device *dev, bool enable) 5839static void intel_power_domains_resume(struct drm_device *dev)
5387{ 5840{
5388 struct drm_i915_private *dev_priv = dev->dev_private; 5841 struct drm_i915_private *dev_priv = dev->dev_private;
5389 struct i915_power_well *power_well = &dev_priv->power_well; 5842 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5843 struct i915_power_well *power_well;
5390 5844
5391 if (!HAS_POWER_WELL(dev)) 5845 if (!HAS_POWER_WELL(dev))
5392 return; 5846 return;
5393 5847
5394 if (!i915_disable_power_well && !enable) 5848 mutex_lock(&power_domains->lock);
5395 return;
5396 5849
5397 spin_lock_irq(&power_well->lock); 5850 power_well = &power_domains->power_wells[0];
5398 power_well->i915_request = enable; 5851 __intel_set_power_well(dev, power_well->count > 0);
5399 5852
5400 /* only reject "disable" power well request */ 5853 mutex_unlock(&power_domains->lock);
5401 if (power_well->count && !enable) {
5402 spin_unlock_irq(&power_well->lock);
5403 return;
5404 }
5405
5406 __intel_set_power_well(dev, enable);
5407 spin_unlock_irq(&power_well->lock);
5408} 5854}
5409 5855
5410/* 5856/*
@@ -5413,7 +5859,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
5413 * to be enabled, and it will only be disabled if none of the registers is 5859 * to be enabled, and it will only be disabled if none of the registers is
5414 * requesting it to be enabled. 5860 * requesting it to be enabled.
5415 */ 5861 */
5416void intel_init_power_well(struct drm_device *dev) 5862void intel_power_domains_init_hw(struct drm_device *dev)
5417{ 5863{
5418 struct drm_i915_private *dev_priv = dev->dev_private; 5864 struct drm_i915_private *dev_priv = dev->dev_private;
5419 5865
@@ -5421,7 +5867,8 @@ void intel_init_power_well(struct drm_device *dev)
5421 return; 5867 return;
5422 5868
5423 /* For now, we need the power well to be always enabled. */ 5869 /* For now, we need the power well to be always enabled. */
5424 intel_set_power_well(dev, true); 5870 intel_display_set_init_power(dev, true);
5871 intel_power_domains_resume(dev);
5425 5872
5426 /* We're taking over the BIOS, so clear any requests made by it since 5873 /* We're taking over the BIOS, so clear any requests made by it since
5427 * the driver is in charge now. */ 5874 * the driver is in charge now. */
@@ -5525,6 +5972,8 @@ void intel_init_pm(struct drm_device *dev)
5525 dev_priv->display.update_wm = NULL; 5972 dev_priv->display.update_wm = NULL;
5526 } 5973 }
5527 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 5974 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5975 } else if (INTEL_INFO(dev)->gen == 8) {
5976 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
5528 } else 5977 } else
5529 dev_priv->display.update_wm = NULL; 5978 dev_priv->display.update_wm = NULL;
5530 } else if (IS_VALLEYVIEW(dev)) { 5979 } else if (IS_VALLEYVIEW(dev)) {
@@ -5686,7 +6135,4 @@ void intel_pm_init(struct drm_device *dev)
5686 6135
5687 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6136 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5688 intel_gen6_powersave_work); 6137 intel_gen6_powersave_work);
5689
5690 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
5691} 6138}
5692