aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2014-09-30 04:56:38 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-10-01 04:52:58 -0400
commit9c065a7d5b679e7fabe3cace4faadb283f2b0c1f (patch)
tree9f3fbac08094e2524ed65ac8b1f888c5be533171
parent970104fac6ca0cfdfbaa1a23c70c06a71208e2ac (diff)
drm/i915: Extract intel_runtime_pm.c
Geez is the audio hack ugly. Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Reviewed-by: Imre Deak <imre.deak@intel.com> [danvet: Rebased on top of the skl patches.] Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h39
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1160
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1190
4 files changed, 1214 insertions, 1179 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2d8317d36e09..3a6bce047f6f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
11 i915_params.o \ 11 i915_params.o \
12 i915_suspend.o \ 12 i915_suspend.o \
13 i915_sysfs.o \ 13 i915_sysfs.o \
14 intel_pm.o 14 intel_pm.o \
15 intel_runtime_pm.o
16
15i915-$(CONFIG_COMPAT) += i915_ioc32.o 17i915-$(CONFIG_COMPAT) += i915_ioc32.o
16i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o 18i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
17 19
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f14f456e08f9..9d2ee70d51e4 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1081,6 +1081,27 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1081 struct drm_display_mode *fixed_mode, 1081 struct drm_display_mode *fixed_mode,
1082 struct drm_connector *connector); 1082 struct drm_connector *connector);
1083 1083
1084/* intel_runtime_pm.c */
1085int intel_power_domains_init(struct drm_i915_private *);
1086void intel_power_domains_remove(struct drm_i915_private *);
1087void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1088void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
1089void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
1090
1091bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
1092 enum intel_display_power_domain domain);
1093bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
1094 enum intel_display_power_domain domain);
1095void intel_display_power_get(struct drm_i915_private *dev_priv,
1096 enum intel_display_power_domain domain);
1097void intel_display_power_put(struct drm_i915_private *dev_priv,
1098 enum intel_display_power_domain domain);
1099void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1100void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1101void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1102void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1103void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1104
1084/* intel_pm.c */ 1105/* intel_pm.c */
1085void intel_init_clock_gating(struct drm_device *dev); 1106void intel_init_clock_gating(struct drm_device *dev);
1086void intel_suspend_hw(struct drm_device *dev); 1107void intel_suspend_hw(struct drm_device *dev);
@@ -1098,17 +1119,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
1098void intel_update_fbc(struct drm_device *dev); 1119void intel_update_fbc(struct drm_device *dev);
1099void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 1120void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1100void intel_gpu_ips_teardown(void); 1121void intel_gpu_ips_teardown(void);
1101int intel_power_domains_init(struct drm_i915_private *);
1102void intel_power_domains_remove(struct drm_i915_private *);
1103bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
1104 enum intel_display_power_domain domain);
1105bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
1106 enum intel_display_power_domain domain);
1107void intel_display_power_get(struct drm_i915_private *dev_priv,
1108 enum intel_display_power_domain domain);
1109void intel_display_power_put(struct drm_i915_private *dev_priv,
1110 enum intel_display_power_domain domain);
1111void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1112void intel_init_gt_powersave(struct drm_device *dev); 1122void intel_init_gt_powersave(struct drm_device *dev);
1113void intel_cleanup_gt_powersave(struct drm_device *dev); 1123void intel_cleanup_gt_powersave(struct drm_device *dev);
1114void intel_enable_gt_powersave(struct drm_device *dev); 1124void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1119,13 +1129,6 @@ void ironlake_teardown_rc6(struct drm_device *dev);
1119void gen6_update_ring_freq(struct drm_device *dev); 1129void gen6_update_ring_freq(struct drm_device *dev);
1120void gen6_rps_idle(struct drm_i915_private *dev_priv); 1130void gen6_rps_idle(struct drm_i915_private *dev_priv);
1121void gen6_rps_boost(struct drm_i915_private *dev_priv); 1131void gen6_rps_boost(struct drm_i915_private *dev_priv);
1122void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1123void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1124void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1125void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1126void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1127void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
1128void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
1129void ilk_wm_get_hw_state(struct drm_device *dev); 1132void ilk_wm_get_hw_state(struct drm_device *dev);
1130 1133
1131 1134
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 043c5a8eae20..95006e0e982b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vgaarb.h>
34#include <drm/i915_powerwell.h>
35#include <linux/pm_runtime.h>
36 33
37/** 34/**
38 * RC6 is a special power stage which allows the GPU to enter an very 35 * RC6 is a special power stage which allows the GPU to enter an very
@@ -6230,1163 +6227,6 @@ void intel_suspend_hw(struct drm_device *dev)
6230 lpt_suspend_hw(dev); 6227 lpt_suspend_hw(dev);
6231} 6228}
6232 6229
6233#define for_each_power_well(i, power_well, domain_mask, power_domains) \
6234 for (i = 0; \
6235 i < (power_domains)->power_well_count && \
6236 ((power_well) = &(power_domains)->power_wells[i]); \
6237 i++) \
6238 if ((power_well)->domains & (domain_mask))
6239
6240#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6241 for (i = (power_domains)->power_well_count - 1; \
6242 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6243 i--) \
6244 if ((power_well)->domains & (domain_mask))
6245
6246/**
6247 * We should only use the power well if we explicitly asked the hardware to
6248 * enable it, so check if it's enabled and also check if we've requested it to
6249 * be enabled.
6250 */
6251static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6252 struct i915_power_well *power_well)
6253{
6254 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6255 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
6256}
6257
6258bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
6259 enum intel_display_power_domain domain)
6260{
6261 struct i915_power_domains *power_domains;
6262 struct i915_power_well *power_well;
6263 bool is_enabled;
6264 int i;
6265
6266 if (dev_priv->pm.suspended)
6267 return false;
6268
6269 power_domains = &dev_priv->power_domains;
6270
6271 is_enabled = true;
6272
6273 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6274 if (power_well->always_on)
6275 continue;
6276
6277 if (!power_well->hw_enabled) {
6278 is_enabled = false;
6279 break;
6280 }
6281 }
6282
6283 return is_enabled;
6284}
6285
6286bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
6287 enum intel_display_power_domain domain)
6288{
6289 struct i915_power_domains *power_domains;
6290 bool ret;
6291
6292 power_domains = &dev_priv->power_domains;
6293
6294 mutex_lock(&power_domains->lock);
6295 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
6296 mutex_unlock(&power_domains->lock);
6297
6298 return ret;
6299}
6300
6301/*
6302 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6303 * when not needed anymore. We have 4 registers that can request the power well
6304 * to be enabled, and it will only be disabled if none of the registers is
6305 * requesting it to be enabled.
6306 */
6307static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6308{
6309 struct drm_device *dev = dev_priv->dev;
6310
6311 /*
6312 * After we re-enable the power well, if we touch VGA register 0x3d5
6313 * we'll get unclaimed register interrupts. This stops after we write
6314 * anything to the VGA MSR register. The vgacon module uses this
6315 * register all the time, so if we unbind our driver and, as a
6316 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6317 * console_unlock(). So make here we touch the VGA MSR register, making
6318 * sure vgacon can keep working normally without triggering interrupts
6319 * and error messages.
6320 */
6321 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6322 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6323 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6324
6325 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
6326 gen8_irq_power_well_post_enable(dev_priv);
6327}
6328
6329static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6330 struct i915_power_well *power_well, bool enable)
6331{
6332 bool is_enabled, enable_requested;
6333 uint32_t tmp;
6334
6335 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6336 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6337 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6338
6339 if (enable) {
6340 if (!enable_requested)
6341 I915_WRITE(HSW_PWR_WELL_DRIVER,
6342 HSW_PWR_WELL_ENABLE_REQUEST);
6343
6344 if (!is_enabled) {
6345 DRM_DEBUG_KMS("Enabling power well\n");
6346 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6347 HSW_PWR_WELL_STATE_ENABLED), 20))
6348 DRM_ERROR("Timeout enabling power well\n");
6349 }
6350
6351 hsw_power_well_post_enable(dev_priv);
6352 } else {
6353 if (enable_requested) {
6354 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6355 POSTING_READ(HSW_PWR_WELL_DRIVER);
6356 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6357 }
6358 }
6359}
6360
6361static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6362 struct i915_power_well *power_well)
6363{
6364 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6365
6366 /*
6367 * We're taking over the BIOS, so clear any requests made by it since
6368 * the driver is in charge now.
6369 */
6370 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6371 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6372}
6373
6374static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6375 struct i915_power_well *power_well)
6376{
6377 hsw_set_power_well(dev_priv, power_well, true);
6378}
6379
6380static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6381 struct i915_power_well *power_well)
6382{
6383 hsw_set_power_well(dev_priv, power_well, false);
6384}
6385
6386static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6387 struct i915_power_well *power_well)
6388{
6389}
6390
6391static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6392 struct i915_power_well *power_well)
6393{
6394 return true;
6395}
6396
6397static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6398 struct i915_power_well *power_well, bool enable)
6399{
6400 enum punit_power_well power_well_id = power_well->data;
6401 u32 mask;
6402 u32 state;
6403 u32 ctrl;
6404
6405 mask = PUNIT_PWRGT_MASK(power_well_id);
6406 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6407 PUNIT_PWRGT_PWR_GATE(power_well_id);
6408
6409 mutex_lock(&dev_priv->rps.hw_lock);
6410
6411#define COND \
6412 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6413
6414 if (COND)
6415 goto out;
6416
6417 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6418 ctrl &= ~mask;
6419 ctrl |= state;
6420 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6421
6422 if (wait_for(COND, 100))
6423 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6424 state,
6425 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6426
6427#undef COND
6428
6429out:
6430 mutex_unlock(&dev_priv->rps.hw_lock);
6431}
6432
6433static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6434 struct i915_power_well *power_well)
6435{
6436 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6437}
6438
6439static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6440 struct i915_power_well *power_well)
6441{
6442 vlv_set_power_well(dev_priv, power_well, true);
6443}
6444
6445static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6446 struct i915_power_well *power_well)
6447{
6448 vlv_set_power_well(dev_priv, power_well, false);
6449}
6450
6451static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6452 struct i915_power_well *power_well)
6453{
6454 int power_well_id = power_well->data;
6455 bool enabled = false;
6456 u32 mask;
6457 u32 state;
6458 u32 ctrl;
6459
6460 mask = PUNIT_PWRGT_MASK(power_well_id);
6461 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6462
6463 mutex_lock(&dev_priv->rps.hw_lock);
6464
6465 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6466 /*
6467 * We only ever set the power-on and power-gate states, anything
6468 * else is unexpected.
6469 */
6470 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6471 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6472 if (state == ctrl)
6473 enabled = true;
6474
6475 /*
6476 * A transient state at this point would mean some unexpected party
6477 * is poking at the power controls too.
6478 */
6479 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6480 WARN_ON(ctrl != state);
6481
6482 mutex_unlock(&dev_priv->rps.hw_lock);
6483
6484 return enabled;
6485}
6486
6487static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6488 struct i915_power_well *power_well)
6489{
6490 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6491
6492 vlv_set_power_well(dev_priv, power_well, true);
6493
6494 spin_lock_irq(&dev_priv->irq_lock);
6495 valleyview_enable_display_irqs(dev_priv);
6496 spin_unlock_irq(&dev_priv->irq_lock);
6497
6498 /*
6499 * During driver initialization/resume we can avoid restoring the
6500 * part of the HW/SW state that will be inited anyway explicitly.
6501 */
6502 if (dev_priv->power_domains.initializing)
6503 return;
6504
6505 intel_hpd_init(dev_priv->dev);
6506
6507 i915_redisable_vga_power_on(dev_priv->dev);
6508}
6509
6510static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6511 struct i915_power_well *power_well)
6512{
6513 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6514
6515 spin_lock_irq(&dev_priv->irq_lock);
6516 valleyview_disable_display_irqs(dev_priv);
6517 spin_unlock_irq(&dev_priv->irq_lock);
6518
6519 vlv_set_power_well(dev_priv, power_well, false);
6520
6521 vlv_power_sequencer_reset(dev_priv);
6522}
6523
6524static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6525 struct i915_power_well *power_well)
6526{
6527 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6528
6529 /*
6530 * Enable the CRI clock source so we can get at the
6531 * display and the reference clock for VGA
6532 * hotplug / manual detection.
6533 */
6534 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6535 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6536 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6537
6538 vlv_set_power_well(dev_priv, power_well, true);
6539
6540 /*
6541 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6542 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6543 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6544 * b. The other bits such as sfr settings / modesel may all
6545 * be set to 0.
6546 *
6547 * This should only be done on init and resume from S3 with
6548 * both PLLs disabled, or we risk losing DPIO and PLL
6549 * synchronization.
6550 */
6551 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6552}
6553
6554static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6555 struct i915_power_well *power_well)
6556{
6557 enum pipe pipe;
6558
6559 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6560
6561 for_each_pipe(dev_priv, pipe)
6562 assert_pll_disabled(dev_priv, pipe);
6563
6564 /* Assert common reset */
6565 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6566
6567 vlv_set_power_well(dev_priv, power_well, false);
6568}
6569
6570static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6571 struct i915_power_well *power_well)
6572{
6573 enum dpio_phy phy;
6574
6575 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6576 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6577
6578 /*
6579 * Enable the CRI clock source so we can get at the
6580 * display and the reference clock for VGA
6581 * hotplug / manual detection.
6582 */
6583 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6584 phy = DPIO_PHY0;
6585 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6586 DPLL_REFA_CLK_ENABLE_VLV);
6587 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6588 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6589 } else {
6590 phy = DPIO_PHY1;
6591 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6592 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6593 }
6594 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6595 vlv_set_power_well(dev_priv, power_well, true);
6596
6597 /* Poll for phypwrgood signal */
6598 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
6599 DRM_ERROR("Display PHY %d is not power up\n", phy);
6600
6601 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
6602 PHY_COM_LANE_RESET_DEASSERT(phy));
6603}
6604
6605static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6606 struct i915_power_well *power_well)
6607{
6608 enum dpio_phy phy;
6609
6610 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6611 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6612
6613 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6614 phy = DPIO_PHY0;
6615 assert_pll_disabled(dev_priv, PIPE_A);
6616 assert_pll_disabled(dev_priv, PIPE_B);
6617 } else {
6618 phy = DPIO_PHY1;
6619 assert_pll_disabled(dev_priv, PIPE_C);
6620 }
6621
6622 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
6623 ~PHY_COM_LANE_RESET_DEASSERT(phy));
6624
6625 vlv_set_power_well(dev_priv, power_well, false);
6626}
6627
6628static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6629 struct i915_power_well *power_well)
6630{
6631 enum pipe pipe = power_well->data;
6632 bool enabled;
6633 u32 state, ctrl;
6634
6635 mutex_lock(&dev_priv->rps.hw_lock);
6636
6637 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6638 /*
6639 * We only ever set the power-on and power-gate states, anything
6640 * else is unexpected.
6641 */
6642 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6643 enabled = state == DP_SSS_PWR_ON(pipe);
6644
6645 /*
6646 * A transient state at this point would mean some unexpected party
6647 * is poking at the power controls too.
6648 */
6649 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6650 WARN_ON(ctrl << 16 != state);
6651
6652 mutex_unlock(&dev_priv->rps.hw_lock);
6653
6654 return enabled;
6655}
6656
6657static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6658 struct i915_power_well *power_well,
6659 bool enable)
6660{
6661 enum pipe pipe = power_well->data;
6662 u32 state;
6663 u32 ctrl;
6664
6665 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6666
6667 mutex_lock(&dev_priv->rps.hw_lock);
6668
6669#define COND \
6670 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
6671
6672 if (COND)
6673 goto out;
6674
6675 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6676 ctrl &= ~DP_SSC_MASK(pipe);
6677 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6678 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6679
6680 if (wait_for(COND, 100))
6681 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6682 state,
6683 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6684
6685#undef COND
6686
6687out:
6688 mutex_unlock(&dev_priv->rps.hw_lock);
6689}
6690
6691static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6692 struct i915_power_well *power_well)
6693{
6694 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6695}
6696
6697static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6698 struct i915_power_well *power_well)
6699{
6700 WARN_ON_ONCE(power_well->data != PIPE_A &&
6701 power_well->data != PIPE_B &&
6702 power_well->data != PIPE_C);
6703
6704 chv_set_pipe_power_well(dev_priv, power_well, true);
6705}
6706
6707static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6708 struct i915_power_well *power_well)
6709{
6710 WARN_ON_ONCE(power_well->data != PIPE_A &&
6711 power_well->data != PIPE_B &&
6712 power_well->data != PIPE_C);
6713
6714 chv_set_pipe_power_well(dev_priv, power_well, false);
6715}
6716
6717static void check_power_well_state(struct drm_i915_private *dev_priv,
6718 struct i915_power_well *power_well)
6719{
6720 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6721
6722 if (power_well->always_on || !i915.disable_power_well) {
6723 if (!enabled)
6724 goto mismatch;
6725
6726 return;
6727 }
6728
6729 if (enabled != (power_well->count > 0))
6730 goto mismatch;
6731
6732 return;
6733
6734mismatch:
6735 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
6736 power_well->name, power_well->always_on, enabled,
6737 power_well->count, i915.disable_power_well);
6738}
6739
6740void intel_display_power_get(struct drm_i915_private *dev_priv,
6741 enum intel_display_power_domain domain)
6742{
6743 struct i915_power_domains *power_domains;
6744 struct i915_power_well *power_well;
6745 int i;
6746
6747 intel_runtime_pm_get(dev_priv);
6748
6749 power_domains = &dev_priv->power_domains;
6750
6751 mutex_lock(&power_domains->lock);
6752
6753 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6754 if (!power_well->count++) {
6755 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6756 power_well->ops->enable(dev_priv, power_well);
6757 power_well->hw_enabled = true;
6758 }
6759
6760 check_power_well_state(dev_priv, power_well);
6761 }
6762
6763 power_domains->domain_use_count[domain]++;
6764
6765 mutex_unlock(&power_domains->lock);
6766}
6767
6768void intel_display_power_put(struct drm_i915_private *dev_priv,
6769 enum intel_display_power_domain domain)
6770{
6771 struct i915_power_domains *power_domains;
6772 struct i915_power_well *power_well;
6773 int i;
6774
6775 power_domains = &dev_priv->power_domains;
6776
6777 mutex_lock(&power_domains->lock);
6778
6779 WARN_ON(!power_domains->domain_use_count[domain]);
6780 power_domains->domain_use_count[domain]--;
6781
6782 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6783 WARN_ON(!power_well->count);
6784
6785 if (!--power_well->count && i915.disable_power_well) {
6786 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6787 power_well->hw_enabled = false;
6788 power_well->ops->disable(dev_priv, power_well);
6789 }
6790
6791 check_power_well_state(dev_priv, power_well);
6792 }
6793
6794 mutex_unlock(&power_domains->lock);
6795
6796 intel_runtime_pm_put(dev_priv);
6797}
6798
6799static struct i915_power_domains *hsw_pwr;
6800
6801/* Display audio driver power well request */
6802int i915_request_power_well(void)
6803{
6804 struct drm_i915_private *dev_priv;
6805
6806 if (!hsw_pwr)
6807 return -ENODEV;
6808
6809 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6810 power_domains);
6811 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6812 return 0;
6813}
6814EXPORT_SYMBOL_GPL(i915_request_power_well);
6815
6816/* Display audio driver power well release */
6817int i915_release_power_well(void)
6818{
6819 struct drm_i915_private *dev_priv;
6820
6821 if (!hsw_pwr)
6822 return -ENODEV;
6823
6824 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6825 power_domains);
6826 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6827 return 0;
6828}
6829EXPORT_SYMBOL_GPL(i915_release_power_well);
6830
6831/*
6832 * Private interface for the audio driver to get CDCLK in kHz.
6833 *
6834 * Caller must request power well using i915_request_power_well() prior to
6835 * making the call.
6836 */
6837int i915_get_cdclk_freq(void)
6838{
6839 struct drm_i915_private *dev_priv;
6840
6841 if (!hsw_pwr)
6842 return -ENODEV;
6843
6844 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6845 power_domains);
6846
6847 return intel_ddi_get_cdclk_freq(dev_priv);
6848}
6849EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6850
6851
6852#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6853
6854#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6855 BIT(POWER_DOMAIN_PIPE_A) | \
6856 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6857 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6858 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6859 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6860 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6861 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6862 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6863 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6864 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6865 BIT(POWER_DOMAIN_PORT_CRT) | \
6866 BIT(POWER_DOMAIN_PLLS) | \
6867 BIT(POWER_DOMAIN_INIT))
6868#define HSW_DISPLAY_POWER_DOMAINS ( \
6869 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6870 BIT(POWER_DOMAIN_INIT))
6871
6872#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6873 HSW_ALWAYS_ON_POWER_DOMAINS | \
6874 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6875#define BDW_DISPLAY_POWER_DOMAINS ( \
6876 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6877 BIT(POWER_DOMAIN_INIT))
6878
6879#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6880#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6881
6882#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6883 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6884 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6885 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6886 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6887 BIT(POWER_DOMAIN_PORT_CRT) | \
6888 BIT(POWER_DOMAIN_INIT))
6889
6890#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6891 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6892 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6893 BIT(POWER_DOMAIN_INIT))
6894
6895#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6896 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6897 BIT(POWER_DOMAIN_INIT))
6898
6899#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6900 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6901 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6902 BIT(POWER_DOMAIN_INIT))
6903
6904#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6905 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6906 BIT(POWER_DOMAIN_INIT))
6907
6908#define CHV_PIPE_A_POWER_DOMAINS ( \
6909 BIT(POWER_DOMAIN_PIPE_A) | \
6910 BIT(POWER_DOMAIN_INIT))
6911
6912#define CHV_PIPE_B_POWER_DOMAINS ( \
6913 BIT(POWER_DOMAIN_PIPE_B) | \
6914 BIT(POWER_DOMAIN_INIT))
6915
6916#define CHV_PIPE_C_POWER_DOMAINS ( \
6917 BIT(POWER_DOMAIN_PIPE_C) | \
6918 BIT(POWER_DOMAIN_INIT))
6919
6920#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6921 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6922 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6923 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6924 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6925 BIT(POWER_DOMAIN_INIT))
6926
6927#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6928 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6929 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6930 BIT(POWER_DOMAIN_INIT))
6931
6932#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6933 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6934 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6935 BIT(POWER_DOMAIN_INIT))
6936
6937#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6938 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6939 BIT(POWER_DOMAIN_INIT))
6940
6941static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6942 .sync_hw = i9xx_always_on_power_well_noop,
6943 .enable = i9xx_always_on_power_well_noop,
6944 .disable = i9xx_always_on_power_well_noop,
6945 .is_enabled = i9xx_always_on_power_well_enabled,
6946};
6947
6948static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6949 .sync_hw = chv_pipe_power_well_sync_hw,
6950 .enable = chv_pipe_power_well_enable,
6951 .disable = chv_pipe_power_well_disable,
6952 .is_enabled = chv_pipe_power_well_enabled,
6953};
6954
6955static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6956 .sync_hw = vlv_power_well_sync_hw,
6957 .enable = chv_dpio_cmn_power_well_enable,
6958 .disable = chv_dpio_cmn_power_well_disable,
6959 .is_enabled = vlv_power_well_enabled,
6960};
6961
6962static struct i915_power_well i9xx_always_on_power_well[] = {
6963 {
6964 .name = "always-on",
6965 .always_on = 1,
6966 .domains = POWER_DOMAIN_MASK,
6967 .ops = &i9xx_always_on_power_well_ops,
6968 },
6969};
6970
6971static const struct i915_power_well_ops hsw_power_well_ops = {
6972 .sync_hw = hsw_power_well_sync_hw,
6973 .enable = hsw_power_well_enable,
6974 .disable = hsw_power_well_disable,
6975 .is_enabled = hsw_power_well_enabled,
6976};
6977
6978static struct i915_power_well hsw_power_wells[] = {
6979 {
6980 .name = "always-on",
6981 .always_on = 1,
6982 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6983 .ops = &i9xx_always_on_power_well_ops,
6984 },
6985 {
6986 .name = "display",
6987 .domains = HSW_DISPLAY_POWER_DOMAINS,
6988 .ops = &hsw_power_well_ops,
6989 },
6990};
6991
6992static struct i915_power_well bdw_power_wells[] = {
6993 {
6994 .name = "always-on",
6995 .always_on = 1,
6996 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6997 .ops = &i9xx_always_on_power_well_ops,
6998 },
6999 {
7000 .name = "display",
7001 .domains = BDW_DISPLAY_POWER_DOMAINS,
7002 .ops = &hsw_power_well_ops,
7003 },
7004};
7005
7006static const struct i915_power_well_ops vlv_display_power_well_ops = {
7007 .sync_hw = vlv_power_well_sync_hw,
7008 .enable = vlv_display_power_well_enable,
7009 .disable = vlv_display_power_well_disable,
7010 .is_enabled = vlv_power_well_enabled,
7011};
7012
7013static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
7014 .sync_hw = vlv_power_well_sync_hw,
7015 .enable = vlv_dpio_cmn_power_well_enable,
7016 .disable = vlv_dpio_cmn_power_well_disable,
7017 .is_enabled = vlv_power_well_enabled,
7018};
7019
7020static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
7021 .sync_hw = vlv_power_well_sync_hw,
7022 .enable = vlv_power_well_enable,
7023 .disable = vlv_power_well_disable,
7024 .is_enabled = vlv_power_well_enabled,
7025};
7026
/*
 * Valleyview power wells, in enable order.
 *
 * NOTE(review): each dpio-tx well lists the union of all B/C lane
 * domains, so any lane domain reference keeps every TX well on —
 * presumably a hardware requirement; verify against the VLV docs.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		/* Listed last: disabled first, after the TX lane wells. */
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
7083
/*
 * Cherryview power wells. Most entries are compiled out with #if 0:
 * only the always-on well and the two DPIO common-lane wells are
 * currently live. Do not delete the disabled entries — they document
 * the intended final layout.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
7184
7185static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
7186 enum punit_power_well power_well_id)
7187{
7188 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7189 struct i915_power_well *power_well;
7190 int i;
7191
7192 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
7193 if (power_well->data == power_well_id)
7194 return power_well;
7195 }
7196
7197 return NULL;
7198}
7199
/*
 * Point @power_domains at the given static well table. Must be passed a
 * real array (not a pointer) so ARRAY_SIZE yields the element count.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
7204
/*
 * Select the platform-specific power well table and initialize the lock
 * protecting it. Always returns 0 (there are no failure paths yet).
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		/* hsw_pwr is the HSW/BDW handle handed to the audio hack. */
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/*
		 * CHV is tested before VLV: CHV also satisfies
		 * IS_VALLEYVIEW (see intel_power_domains_init_hw).
		 */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		/* Everything else gets a single dummy always-on well. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
7231
/* Tear-down counterpart of intel_power_domains_init: drop the audio handle. */
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
7236
/*
 * Sync every well's hardware state with its software refcount and cache
 * the resulting on/off state in ->hw_enabled. Called with wells in an
 * unknown hardware state (driver load / resume).
 */
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
7251
/*
 * VLV workaround: force a common-lane power cycle so the PHY gets a
 * proper reset, unless the display is already up and running (in which
 * case toggling cmnreset would disturb it). Caller holds the power
 * domains lock (see intel_power_domains_init_hw).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
7282
/*
 * Bring the power wells into a known state at driver load / resume:
 * apply the VLV cmnlane workaround, grab the init power reference and
 * sync software refcounts with the hardware.
 *
 * ->initializing suppresses the HPD/VGA re-init normally done when the
 * display well comes up (see vlv_display_power_well_enable).
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* VLV only — IS_VALLEYVIEW is also true on CHV, hence the exclusion. */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
7301
/* Thin wrapper for external (e.g. audio) users: take a runtime PM ref. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}
7306
/* Release the reference taken by intel_aux_display_runtime_get. */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
7311
/*
 * Take a runtime PM reference, resuming the device synchronously if it
 * was suspended. No-op on platforms without runtime PM support.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	/* After a sync get the device must be awake. */
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
7323
/*
 * Take a runtime PM reference without resuming the device. Only legal
 * while the device is already awake — hence the WARN *before* the get.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
7335
/*
 * Drop a runtime PM reference; the device autosuspends after the delay
 * configured in intel_init_runtime_pm once the last ref is gone.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
7347
/*
 * Enable runtime PM for the device: mark it active, configure a 10s
 * autosuspend delay and drop the initial reference so it can suspend.
 *
 * Bails out (leaving the device permanently active) when RC6 is off,
 * since RC6 is what saves/restores the GT context across suspend.
 */
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	/* Balances the implicit initial reference; allows autosuspend. */
	pm_runtime_put_autosuspend(device);
}
7373
/*
 * Undo intel_init_runtime_pm: wake the device and disable runtime PM.
 * The RC6 check mirrors init — if RC6 was off, runtime PM was never
 * armed and there is nothing to undo.
 */
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
7389
7390static void intel_init_fbc(struct drm_i915_private *dev_priv) 6230static void intel_init_fbc(struct drm_i915_private *dev_priv)
7391{ 6231{
7392 if (!HAS_FBC(dev_priv)) { 6232 if (!HAS_FBC(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 000000000000..2344ecfc27cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1190 @@
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include <drm/i915_powerwell.h>
35
/*
 * NOTE(review): set only on HSW/BDW in intel_power_domains_init; the
 * consumer (the audio-driver hack mentioned in the commit message) is
 * not visible in this file chunk.
 */
static struct i915_power_domains *hsw_pwr;

/* Iterate wells, forward order, skipping those outside domain_mask. */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

/* Same, reverse order — used for disabling (see intel_display_power_put). */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
50
/**
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled (the exact-equality test covers both bits at once).
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
62
63bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
64 enum intel_display_power_domain domain)
65{
66 struct i915_power_domains *power_domains;
67 struct i915_power_well *power_well;
68 bool is_enabled;
69 int i;
70
71 if (dev_priv->pm.suspended)
72 return false;
73
74 power_domains = &dev_priv->power_domains;
75
76 is_enabled = true;
77
78 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
79 if (power_well->always_on)
80 continue;
81
82 if (!power_well->hw_enabled) {
83 is_enabled = false;
84 break;
85 }
86 }
87
88 return is_enabled;
89}
90
91bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
92 enum intel_display_power_domain domain)
93{
94 struct i915_power_domains *power_domains;
95 bool ret;
96
97 power_domains = &dev_priv->power_domains;
98
99 mutex_lock(&power_domains->lock);
100 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
101 mutex_unlock(&power_domains->lock);
102
103 return ret;
104}
105
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* BDW+ need their display interrupts re-armed after the well cycles. */
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv);
}
133
/*
 * Request the HSW display power well on or off via HSW_PWR_WELL_DRIVER.
 * Enabling waits (20ms) for the state bit and then runs the VGA/irq
 * post-enable fixups; disabling is just a request — the well only goes
 * down once no requester (BIOS/driver/KVMr/debug) wants it.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
165
/*
 * Align the hardware with the current software refcount, then drop any
 * leftover BIOS request so the driver is the sole owner of the well.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
178
/* ops->enable hook: request the HSW well on. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

/* ops->disable hook: withdraw the driver's enable request. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
190
/* Always-on wells have no hardware control: sync/enable/disable are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

/* ...and they always report enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
201
202static void vlv_set_power_well(struct drm_i915_private *dev_priv,
203 struct i915_power_well *power_well, bool enable)
204{
205 enum punit_power_well power_well_id = power_well->data;
206 u32 mask;
207 u32 state;
208 u32 ctrl;
209
210 mask = PUNIT_PWRGT_MASK(power_well_id);
211 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
212 PUNIT_PWRGT_PWR_GATE(power_well_id);
213
214 mutex_lock(&dev_priv->rps.hw_lock);
215
216#define COND \
217 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
218
219 if (COND)
220 goto out;
221
222 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
223 ctrl &= ~mask;
224 ctrl |= state;
225 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
226
227 if (wait_for(COND, 100))
228 DRM_ERROR("timout setting power well state %08x (%08x)\n",
229 state,
230 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
231
232#undef COND
233
234out:
235 mutex_unlock(&dev_priv->rps.hw_lock);
236}
237
/* Align hardware with the current software refcount. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

/* ops->enable hook for plain Punit wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

/* ops->disable hook for plain Punit wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
255
/*
 * Read the Punit status for this well and sanity-check it: the state
 * must be exactly power-on or power-gate, and the control register must
 * agree with the status (no transition in flight, no third party).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
291
/*
 * Enable the VLV DISP2D well, then re-enable display interrupts and —
 * unless we're inside init/resume, which redoes this explicitly —
 * re-run hotplug detection and restore VGA state lost with the well.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}
314
/*
 * Mirror of the enable path: quiesce display interrupts first, drop the
 * well, then reset the panel power sequencer bookkeeping whose hardware
 * state is lost with the well.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
328
/*
 * Power up the VLV common-lane well: CRI clock on first, then the well,
 * then de-assert cmnreset. The ordering follows the VLV DPIO driver
 * notes quoted below and must not be changed.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
358
/*
 * Power down the common-lane well. All PLLs must already be off (we
 * assert this), then cmnreset is asserted before the well drops.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
374
/*
 * CHV variant: pick PHY0 (BC well, pipe B DPLL) or PHY1 (D well, pipe C
 * DPLL), enable the CRI clock, power the well, poll for phypwrgood
 * (1ms) and finally release the PHY common-lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		/* NOTE(review): two-step write — REFA clock alone, then CRI
		 * on top; presumably a required ordering, verify vs. spec. */
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
409
/*
 * Reverse of chv_dpio_cmn_power_well_enable: verify the PLLs feeding
 * this PHY are off, re-assert the common-lane reset, drop the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
432
/*
 * Read the CHV per-pipe power state from PUNIT_REG_DSPFREQ, with the
 * same sanity checks as vlv_power_well_enabled: the status must be
 * exactly on/gated and the control field must already match it.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* control bits sit 16 below the status bits, hence the shift */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
461
462static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
463 struct i915_power_well *power_well,
464 bool enable)
465{
466 enum pipe pipe = power_well->data;
467 u32 state;
468 u32 ctrl;
469
470 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
471
472 mutex_lock(&dev_priv->rps.hw_lock);
473
474#define COND \
475 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
476
477 if (COND)
478 goto out;
479
480 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
481 ctrl &= ~DP_SSC_MASK(pipe);
482 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
483 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
484
485 if (wait_for(COND, 100))
486 DRM_ERROR("timout setting power well state %08x (%08x)\n",
487 state,
488 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
489
490#undef COND
491
492out:
493 mutex_unlock(&dev_priv->rps.hw_lock);
494}
495
/* Align the pipe well with the current software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

/* ops->enable hook: ->data must be one of the three pipes. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

/* ops->disable hook: same pipe-id check as enable. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
521
522static void check_power_well_state(struct drm_i915_private *dev_priv,
523 struct i915_power_well *power_well)
524{
525 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
526
527 if (power_well->always_on || !i915.disable_power_well) {
528 if (!enabled)
529 goto mismatch;
530
531 return;
532 }
533
534 if (enabled != (power_well->count > 0))
535 goto mismatch;
536
537 return;
538
539mismatch:
540 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
541 power_well->name, power_well->always_on, enabled,
542 power_well->count, i915.disable_power_well);
543}
544
/*
 * Grab a reference on @domain: takes a runtime PM ref first (the device
 * must be awake before any well is touched), then enables every well
 * covering the domain whose refcount goes 0 -> 1, in forward order.
 * Pair with intel_display_power_put.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
572
/*
 * Drop a reference on @domain: wells are walked in reverse order and
 * disabled on 1 -> 0 only when the disable_power_well modparam allows
 * it. The runtime PM ref taken in _get is released last.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
603
/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/* HSW/BDW: domains that never lose power; "display" gets the rest. */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW adds the pipe A panel fitter to the always-on set. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* VLV/CHV: only INIT is always-on; DISP2D covers everything. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair masks; the 4-lane domain needs both pairs. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV-only per-pipe and PHY-D masks. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
/*
 * Ops for the artificial "always-on" well: all four hooks are the same
 * helper (presumably a no-op for sync/enable/disable and a constant-true
 * is_enabled -- bodies not visible here, confirm in their definitions).
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the (currently #if 0'd) CHV per-pipe wells. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV common-lane PHY wells share the generic VLV sync/is_enabled hooks. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
713
/*
 * Fallback well list for platforms without real power wells: one
 * always-on well covering every power domain.
 */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

/* Ops for the single switchable display well on HSW/BDW. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
729
/* HSW: an always-on well plus the single switchable "display" well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, only the domain split differs. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
757
/* VLV Disp2D well: dedicated enable/disable, generic VLV sync/is_enabled. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV common-lane PHY well: dedicated enable/disable hooks. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV punit well ops, used for the per-lane-pair TX wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
778
/*
 * VLV power well list.  Note that every dpio-tx-* well below claims the
 * identical union of all B and C lane domains, so using any lane powers
 * up all four TX wells together.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,	/* .data = punit well id */
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		/* Common lanes last: enabled after, disabled before the TX wells. */
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
835
/*
 * CHV power well list.  Several entries are compiled out with #if 0 --
 * only the always-on well and the two common-lane PHY wells are live.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
936
937static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
938 enum punit_power_well power_well_id)
939{
940 struct i915_power_domains *power_domains = &dev_priv->power_domains;
941 struct i915_power_well *power_well;
942 int i;
943
944 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
945 if (power_well->data == power_well_id)
946 return power_well;
947 }
948
949 return NULL;
950}
951
/*
 * Point @power_domains at a platform's power well array.  Kept as a macro
 * (GNU statement expression) so ARRAY_SIZE() sees the real array type.
 */
#define set_power_wells(power_domains, __power_wells) ({ \
	(power_domains)->power_wells = (__power_wells); \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
956
957int intel_power_domains_init(struct drm_i915_private *dev_priv)
958{
959 struct i915_power_domains *power_domains = &dev_priv->power_domains;
960
961 mutex_init(&power_domains->lock);
962
963 /*
964 * The enabling order will be from lower to higher indexed wells,
965 * the disabling order is reversed.
966 */
967 if (IS_HASWELL(dev_priv->dev)) {
968 set_power_wells(power_domains, hsw_power_wells);
969 hsw_pwr = power_domains;
970 } else if (IS_BROADWELL(dev_priv->dev)) {
971 set_power_wells(power_domains, bdw_power_wells);
972 hsw_pwr = power_domains;
973 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
974 set_power_wells(power_domains, chv_power_wells);
975 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
976 set_power_wells(power_domains, vlv_power_wells);
977 } else {
978 set_power_wells(power_domains, i9xx_always_on_power_well);
979 }
980
981 return 0;
982}
983
/*
 * Undo intel_power_domains_init(): clear the file-scope hsw_pwr pointer
 * so the exported audio driver hooks return -ENODEV again.
 */
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
988
989static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
990{
991 struct i915_power_domains *power_domains = &dev_priv->power_domains;
992 struct i915_power_well *power_well;
993 int i;
994
995 mutex_lock(&power_domains->lock);
996 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
997 power_well->ops->sync_hw(dev_priv, power_well);
998 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
999 power_well);
1000 }
1001 mutex_unlock(&power_domains->lock);
1002}
1003
/*
 * VLV workaround: if the common lane (DPIO PHY BC) well was left powered
 * (e.g. by the BIOS) without the display actively holding it, force a full
 * PHY side reset by power gating the common lanes once.  The well is
 * expected to be powered back up later through the normal power domain
 * handling (not done here).  Caller holds power_domains->lock.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
1034
/*
 * Bring power domain hardware to a known state at driver load/resume:
 * apply the VLV common-lane workaround (VLV only -- CHV also matches
 * IS_VALLEYVIEW, hence the explicit exclusion), force the init power
 * domain on, then sync software state with the hardware.
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* NOTE(review): flag presumably consulted by the power well code
	 * to relax checks during bring-up -- confirm at its readers. */
	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
1053
/*
 * Grab a runtime PM reference for aux display usage; currently a thin
 * wrapper around intel_runtime_pm_get().
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}
1058
/*
 * Drop the runtime PM reference taken by intel_aux_display_runtime_get();
 * currently a thin wrapper around intel_runtime_pm_put().
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
1063
1064void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1065{
1066 struct drm_device *dev = dev_priv->dev;
1067 struct device *device = &dev->pdev->dev;
1068
1069 if (!HAS_RUNTIME_PM(dev))
1070 return;
1071
1072 pm_runtime_get_sync(device);
1073 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1074}
1075
1076void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1077{
1078 struct drm_device *dev = dev_priv->dev;
1079 struct device *device = &dev->pdev->dev;
1080
1081 if (!HAS_RUNTIME_PM(dev))
1082 return;
1083
1084 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1085 pm_runtime_get_noresume(device);
1086}
1087
1088void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1089{
1090 struct drm_device *dev = dev_priv->dev;
1091 struct device *device = &dev->pdev->dev;
1092
1093 if (!HAS_RUNTIME_PM(dev))
1094 return;
1095
1096 pm_runtime_mark_last_busy(device);
1097 pm_runtime_put_autosuspend(device);
1098}
1099
/*
 * Set up runtime PM at driver load: mark the device active and, when RC6
 * is available, arm a 10 s autosuspend and drop a usage reference so the
 * device may suspend when idle.
 */
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		/* NOTE(review): returning before pm_runtime_put_autosuspend()
		 * presumably keeps the usage count elevated so the device
		 * never runtime-suspends -- confirm against the matching
		 * reference holder. */
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	/* Drop a reference so the autosuspend machinery can kick in. */
	pm_runtime_put_autosuspend(device);
}
1125
/*
 * Tear down runtime PM at driver unload.  Mirrors the early-out conditions
 * of intel_init_runtime_pm(): bail if runtime PM was never enabled (no HW
 * support or RC6 disabled).  Otherwise resume the device and disable the
 * runtime PM callbacks.
 */
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
1141
/*
 * Display audio driver power well request.
 *
 * Exported hook for the HDA audio driver: grabs the AUDIO power domain
 * through the file-scope hsw_pwr pointer, which is only set for HSW/BDW
 * in intel_power_domains_init().  Returns -ENODEV on other platforms or
 * before init; 0 on success.  Pair with i915_release_power_well().
 */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
1156
/*
 * Display audio driver power well release.
 *
 * Counterpart to i915_request_power_well(): drops the AUDIO power domain
 * reference.  Returns -ENODEV when hsw_pwr is unset (non-HSW/BDW or not
 * yet initialized); 0 on success.
 */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
1171
/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.  Returns -ENODEV when hsw_pwr is unset (non-HSW/BDW or
 * not yet initialized).
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);