Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 476
 1 file changed, 361 insertions(+), 115 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ce00e6994eeb..1a45385f4d66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,6 +49,9 @@
  * present for a given platform.
  */
 
+#define GEN9_ENABLE_DC5(dev) 0
+#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
+
 #define for_each_power_well(i, power_well, domain_mask, power_domains)	\
 	for (i = 0;							\
 	     i < (power_domains)->power_well_count &&			\
@@ -62,6 +65,9 @@
 	     i--)							\
 		if ((power_well)->domains & (domain_mask))
 
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+				    int power_well_id);
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -308,7 +314,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
-	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
 	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
@@ -319,9 +327,246 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
 	BIT(POWER_DOMAIN_INIT))
 
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_B) |			\
+	BIT(POWER_DOMAIN_AUX_C) |			\
+	BIT(POWER_DOMAIN_AUDIO) |			\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PIPE_A) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
+	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
+	BIT(POWER_DOMAIN_INIT))
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
+	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+		"DC9 already programmed to be enabled.\n");
+	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		"DC5 still not disabled to enable DC9.\n");
+	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+
+	/*
+	 * TODO: check for the following to verify the conditions to enter DC9
+	 * state are satisfied:
+	 * 1] Check relevant display engine registers to verify if mode set
+	 * disable sequence was followed.
+	 * 2] Check if display uninitialize sequence is initialized.
+	 */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+		"DC9 already programmed to be disabled.\n");
+	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		"DC5 still not disabled.\n");
+
+	/*
+	 * TODO: check for the following to verify DC9 state was indeed
+	 * entered before programming to disable it:
+	 * 1] Check relevant display engine registers to verify if mode
+	 * set disable sequence was followed.
+	 * 2] Check if display uninitialize sequence is initialized.
+	 */
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC9\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val |= DC_STATE_EN_DC9;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc9(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC9\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_DC9;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_set_dc_state_debugmask_memory_up(
+			struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	/* The below bit doesn't need to be cleared ever afterwards */
+	val = I915_READ(DC_STATE_DEBUG);
+	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
+		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+		I915_WRITE(DC_STATE_DEBUG, val);
+		POSTING_READ(DC_STATE_DEBUG);
+	}
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+					SKL_DISP_PW_2);
+
+	WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+	WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+	WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+		"DC5 already programmed to be enabled.\n");
+	WARN(dev_priv->pm.suspended,
+		"DC5 cannot be enabled, if platform is runtime-suspended.\n");
+
+	assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
+{
+	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+					SKL_DISP_PW_2);
+	/*
+	 * During initialization, the firmware may not be loaded yet.
+	 * We still want to make sure that the DC enabling flag is cleared.
+	 */
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
+	WARN(dev_priv->pm.suspended,
+		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
+}
+
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc5(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC5\n");
+
+	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+	val |= DC_STATE_EN_UPTO_DC5;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc5(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC5\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+	WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+		"Backlight is not disabled.\n");
+	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+		"DC6 already programmed to be enabled.\n");
+
+	assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * During initialization, the firmware may not be loaded yet.
+	 * We still want to make sure that the DC enabling flag is cleared.
+	 */
+	if (dev_priv->power_domains.initializing)
+		return;
+
+	assert_csr_loaded(dev_priv);
+	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+		"DC6 already programmed to be disabled.\n");
+}
+
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_enable_dc6(dev_priv);
+
+	DRM_DEBUG_KMS("Enabling DC6\n");
+
+	gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+	val |= DC_STATE_EN_UPTO_DC6;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
+static void skl_disable_dc6(struct drm_i915_private *dev_priv)
+{
+	uint32_t val;
+
+	assert_can_disable_dc6(dev_priv);
+
+	DRM_DEBUG_KMS("Disabling DC6\n");
+
+	val = I915_READ(DC_STATE_EN);
+	val &= ~DC_STATE_EN_UPTO_DC6;
+	I915_WRITE(DC_STATE_EN, val);
+	POSTING_READ(DC_STATE_EN);
+}
+
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
 			struct i915_power_well *power_well, bool enable)
 {
+	struct drm_device *dev = dev_priv->dev;
 	uint32_t tmp, fuse_status;
 	uint32_t req_mask, state_mask;
 	bool is_enabled, enable_requested, check_fuse_status = false;
@@ -361,6 +606,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
 	if (enable) {
 		if (!enable_requested) {
+			WARN((tmp & state_mask) &&
+				!I915_READ(HSW_PWR_WELL_BIOS),
+				"Invalid for power well status to be enabled, unless done by the BIOS, \
+				when request is to disable!\n");
+			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+				power_well->data == SKL_DISP_PW_2) {
+				if (SKL_ENABLE_DC6(dev)) {
+					skl_disable_dc6(dev_priv);
+					/*
+					 * DDI buffer programming unnecessary during driver-load/resume
+					 * as it's already done during modeset initialization then.
+					 * It's also invalid here as encoder list is still uninitialized.
+					 */
+					if (!dev_priv->power_domains.initializing)
+						intel_prepare_ddi(dev);
+				} else {
+					gen9_disable_dc5(dev_priv);
+				}
+			}
 			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
 		}
 
@@ -377,6 +641,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 			I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+
+			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+				power_well->data == SKL_DISP_PW_2) {
+				enum csr_state state;
+				/* TODO: wait for a completion event or
+				 * similar here instead of busy
+				 * waiting using wait_for function.
+				 */
+				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
+						FW_UNINITIALIZED, 1000);
+				if (state != FW_LOADED)
+					DRM_ERROR("CSR firmware not ready (%d)\n",
+							state);
+				else
+					if (SKL_ENABLE_DC6(dev))
+						skl_enable_dc6(dev_priv);
+					else
+						gen9_enable_dc5(dev_priv);
+			}
 		}
 	}
 
@@ -488,7 +771,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
 	if (wait_for(COND, 100))
-		DRM_ERROR("timout setting power well state %08x (%08x)\n",
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
 			  state,
 			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
@@ -666,8 +949,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
 		DRM_ERROR("Display PHY %d is not power up\n", phy);
 
-	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
-		   PHY_COM_LANE_RESET_DEASSERT(phy));
+	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 }
 
 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -687,8 +970,8 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 		assert_pll_disabled(dev_priv, PIPE_C);
 	}
 
-	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
-		   ~PHY_COM_LANE_RESET_DEASSERT(phy));
+	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
 	vlv_set_power_well(dev_priv, power_well, false);
 }
@@ -746,7 +1029,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
 
 	if (wait_for(COND, 100))
-		DRM_ERROR("timout setting power well state %08x (%08x)\n",
+		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
 			  state,
 			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
 
@@ -950,18 +1233,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
-#define CHV_PIPE_A_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PIPE_A) |	\
-	BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PIPE_B) |	\
-	BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS (	\
-	BIT(POWER_DOMAIN_PIPE_C) |	\
-	BIT(POWER_DOMAIN_INIT))
-
 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
 	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
@@ -977,17 +1248,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_D) |		\
 	BIT(POWER_DOMAIN_INIT))
 
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
-	BIT(POWER_DOMAIN_AUX_D) |			\
-	BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (		\
-	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
-	BIT(POWER_DOMAIN_AUX_D) |			\
-	BIT(POWER_DOMAIN_INIT))
-
 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
 	.sync_hw = i9xx_always_on_power_well_noop,
 	.enable = i9xx_always_on_power_well_noop,
@@ -1145,110 +1405,33 @@ static struct i915_power_well chv_power_wells[] = {
 		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
 		.ops = &i9xx_always_on_power_well_ops,
 	},
-#if 0
 	{
 		.name = "display",
-		.domains = VLV_DISPLAY_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DISP2D,
-		.ops = &vlv_display_power_well_ops,
-	},
-#endif
-	{
-		.name = "pipe-a",
 		/*
-		 * FIXME: pipe A power well seems to be the new disp2d well.
-		 * At least all registers seem to be housed there. Figure
-		 * out if this a a temporary situation in pre-production
-		 * hardware or a permanent state of affairs.
+		 * Pipe A power well is the new disp2d well. Pipe B and C
+		 * power wells don't actually exist. Pipe A power well is
+		 * required for any pipe to work.
 		 */
-		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+		.domains = VLV_DISPLAY_POWER_DOMAINS,
 		.data = PIPE_A,
 		.ops = &chv_pipe_power_well_ops,
 	},
-#if 0
-	{
-		.name = "pipe-b",
-		.domains = CHV_PIPE_B_POWER_DOMAINS,
-		.data = PIPE_B,
-		.ops = &chv_pipe_power_well_ops,
-	},
-	{
-		.name = "pipe-c",
-		.domains = CHV_PIPE_C_POWER_DOMAINS,
-		.data = PIPE_C,
-		.ops = &chv_pipe_power_well_ops,
-	},
-#endif
 	{
 		.name = "dpio-common-bc",
-		/*
-		 * XXX: cmnreset for one PHY seems to disturb the other.
-		 * As a workaround keep both powered on at the same
-		 * time for now.
-		 */
-		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
 		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
 		.ops = &chv_dpio_cmn_power_well_ops,
 	},
 	{
 		.name = "dpio-common-d",
-		/*
-		 * XXX: cmnreset for one PHY seems to disturb the other.
-		 * As a workaround keep both powered on at the same
-		 * time for now.
-		 */
-		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
 		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
 		.ops = &chv_dpio_cmn_power_well_ops,
 	},
-#if 0
-	{
-		.name = "dpio-tx-b-01",
-		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-	},
-	{
-		.name = "dpio-tx-b-23",
-		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-	},
-	{
-		.name = "dpio-tx-c-01",
-		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-	},
-	{
-		.name = "dpio-tx-c-23",
-		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-	},
-	{
-		.name = "dpio-tx-d-01",
-		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
-	},
-	{
-		.name = "dpio-tx-d-23",
-		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
-	},
-#endif
 };
 
 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
-						 enum punit_power_well power_well_id)
+						 int power_well_id)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *power_well;
@@ -1262,6 +1445,18 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
 	return NULL;
 }
 
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+				    int power_well_id)
+{
+	struct i915_power_well *power_well;
+	bool ret;
+
+	power_well = lookup_power_well(dev_priv, power_well_id);
+	ret = power_well->ops->is_enabled(dev_priv, power_well);
+
+	return ret;
+}
+
 static struct i915_power_well skl_power_wells[] = {
 	{
 		.name = "always-on",
@@ -1313,6 +1508,27 @@ static struct i915_power_well skl_power_wells[] = {
 	},
 };
 
+static struct i915_power_well bxt_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "power well 1",
+		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_1,
+	},
+	{
+		.name = "power well 2",
+		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_2,
+	}
+};
+
 #define set_power_wells(power_domains, __power_wells) ({		\
 	(power_domains)->power_wells = (__power_wells);			\
 	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
@@ -1341,6 +1557,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 		set_power_wells(power_domains, bdw_power_wells);
 	} else if (IS_SKYLAKE(dev_priv->dev)) {
 		set_power_wells(power_domains, skl_power_wells);
+	} else if (IS_BROXTON(dev_priv->dev)) {
+		set_power_wells(power_domains, bxt_power_wells);
 	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
 		set_power_wells(power_domains, chv_power_wells);
 	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1401,6 +1619,32 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn_bc =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *cmn_d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+
+	/*
+	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+	 * workaround never ever read DISPLAY_PHY_CONTROL, and
+	 * instead maintain a shadow copy ourselves. Use the actual
+	 * power well state to reconstruct the expected initial
+	 * value.
+	 */
+	dev_priv->chv_phy_control =
+		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
+		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
+		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
+	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
+		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+	if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
+		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+}
+
 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_well *cmn =
@@ -1443,7 +1687,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 
 	power_domains->initializing = true;
 
-	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev)) {
+		chv_phy_control_init(dev_priv);
+	} else if (IS_VALLEYVIEW(dev)) {
 		mutex_lock(&power_domains->lock);
 		vlv_cmnlane_wa(dev_priv);
 		mutex_unlock(&power_domains->lock);