Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
 drivers/gpu/drm/i915/intel_runtime_pm.c | 190
 1 file changed, 169 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index bbca527184d0..4172e73212cd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 						1 << PIPE_C | 1 << PIPE_B);
 }
 
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
+{
+	if (IS_BROADWELL(dev_priv))
+		gen8_irq_power_well_pre_disable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+}
+
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
@@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 	}
 }
 
+static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
+				       struct i915_power_well *power_well)
+{
+	if (power_well->data == SKL_DISP_PW_2)
+		gen8_irq_power_well_pre_disable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+}
+
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			       struct i915_power_well *power_well, bool enable)
 {
@@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 
 	} else {
 		if (enable_requested) {
+			hsw_power_well_pre_disable(dev_priv);
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
@@ -456,20 +472,61 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 	 */
 }
 
-static void gen9_set_dc_state_debugmask_memory_up(
-			struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
+	uint32_t val, mask;
+
+	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+	if (IS_BROXTON(dev_priv))
+		mask |= DC_STATE_DEBUG_MASK_CORES;
 
 	/* The below bit doesn't need to be cleared ever afterwards */
 	val = I915_READ(DC_STATE_DEBUG);
-	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
-		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+	if ((val & mask) != mask) {
+		val |= mask;
 		I915_WRITE(DC_STATE_DEBUG, val);
 		POSTING_READ(DC_STATE_DEBUG);
 	}
 }
 
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+				u32 state)
+{
+	int rewrites = 0;
+	int rereads = 0;
+	u32 v;
+
+	I915_WRITE(DC_STATE_EN, state);
+
+	/* It has been observed that disabling the dc6 state sometimes
+	 * doesn't stick and dmc keeps returning old value. Make sure
+	 * the write really sticks enough times and also force rewrite until
+	 * we are confident that state is exactly what we want.
+	 */
+	do {
+		v = I915_READ(DC_STATE_EN);
+
+		if (v != state) {
+			I915_WRITE(DC_STATE_EN, state);
+			rewrites++;
+			rereads = 0;
+		} else if (rereads++ > 5) {
+			break;
+		}
+
+	} while (rewrites < 100);
+
+	if (v != state)
+		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+			  state, v);
+
+	/* Most of the times we need one retry, avoid spam */
+	if (rewrites > 1)
+		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+			      state, rewrites);
+}
+
 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 {
 	uint32_t val;
@@ -488,16 +545,21 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
 		state = DC_STATE_EN_UPTO_DC5;
 
-	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
-		gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
 	val = I915_READ(DC_STATE_EN);
 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
 		      val & mask, state);
+
+	/* Check if DMC is ignoring our DC state requests */
+	if ((val & mask) != dev_priv->csr.dc_state)
+		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+			  dev_priv->csr.dc_state, val & mask);
+
 	val &= ~mask;
 	val |= state;
-	I915_WRITE(DC_STATE_EN, val);
-	POSTING_READ(DC_STATE_EN);
+
+	gen9_write_dc_state(dev_priv, val);
+
+	dev_priv->csr.dc_state = val & mask;
 }
 
 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -663,6 +725,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 	state_mask = SKL_POWER_WELL_STATE(power_well->data);
 	is_enabled = tmp & state_mask;
 
+	if (!enable && enable_requested)
+		skl_power_well_pre_disable(dev_priv, power_well);
+
 	if (enable) {
 		if (!enable_requested) {
 			WARN((tmp & state_mask) &&
@@ -941,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 	valleyview_disable_display_irqs(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
+	/* make sure we're done processing display irqs */
+	synchronize_irq(dev_priv->dev->irq);
+
 	vlv_power_sequencer_reset(dev_priv);
 }
 
@@ -1435,6 +1503,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 	chv_set_pipe_power_well(dev_priv, power_well, false);
 }
 
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+				 enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	for_each_power_well(i, power_well, BIT(domain), power_domains) {
+		if (!power_well->count++)
+			intel_power_well_enable(dev_priv, power_well);
+	}
+
+	power_domains->domain_use_count[domain]++;
+}
+
 /**
  * intel_display_power_get - grab a power domain reference
  * @dev_priv: i915 device instance
@@ -1450,24 +1534,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 void intel_display_power_get(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain)
 {
-	struct i915_power_domains *power_domains;
-	struct i915_power_well *power_well;
-	int i;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	intel_runtime_pm_get(dev_priv);
 
-	power_domains = &dev_priv->power_domains;
+	mutex_lock(&power_domains->lock);
+
+	__intel_display_power_get_domain(dev_priv, domain);
+
+	mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+					enum intel_display_power_domain domain)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	bool is_enabled;
+
+	if (!intel_runtime_pm_get_if_in_use(dev_priv))
+		return false;
 
 	mutex_lock(&power_domains->lock);
 
-	for_each_power_well(i, power_well, BIT(domain), power_domains) {
-		if (!power_well->count++)
-			intel_power_well_enable(dev_priv, power_well);
+	if (__intel_display_power_is_enabled(dev_priv, domain)) {
+		__intel_display_power_get_domain(dev_priv, domain);
+		is_enabled = true;
+	} else {
+		is_enabled = false;
 	}
 
-	power_domains->domain_use_count[domain]++;
-
 	mutex_unlock(&power_domains->lock);
+
+	if (!is_enabled)
+		intel_runtime_pm_put(dev_priv);
+
+	return is_enabled;
 }
 
 /**
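
The new intel_display_power_get_if_enabled() helper only hands out a reference when the domain is already powered, so callers have to check its return value and balance a successful get with intel_display_power_put(). A minimal usage sketch, not part of this diff; the surrounding caller and the power_domain variable are hypothetical:

	/* Hypothetical caller: only touch the hardware if the well is up. */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;	/* domain is powered down, nothing to do */

	/* ... registers behind this power well may be accessed here ... */

	intel_display_power_put(dev_priv, power_domain);	/* balance the get */
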
@@ -2028,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
 	skl_init_cdclk(dev_priv);
 
-	if (dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
+		gen9_set_dc_state_debugmask(dev_priv);
 }
 
 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -2239,6 +2352,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 }
 
 /**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (IS_ENABLED(CONFIG_PM)) {
+		int ret = pm_runtime_get_if_in_use(device);
+
+		/*
+		 * In cases runtime PM is disabled by the RPM core and we get
+		 * an -EINVAL return value we are not supposed to call this
+		 * function, since the power state is undefined. This applies
+		 * atm to the late/early system suspend/resume handlers.
+		 */
+		WARN_ON_ONCE(ret < 0);
+		if (ret <= 0)
+			return false;
+	}
+
+	atomic_inc(&dev_priv->pm.wakeref_count);
+	assert_rpm_wakelock_held(dev_priv);
+
+	return true;
+}
+
+/**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @dev_priv: i915 device instance
  *
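
Likewise, intel_runtime_pm_get_if_in_use() only takes a wakeref when the device is already awake, so a caller must bail out on a false return and pair a true return with intel_runtime_pm_put(). A rough sketch of the intended calling pattern, with a hypothetical caller that is not part of this diff:

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;		/* device is runtime suspended, skip the access */

	/* ... hardware access that requires the device to be powered ... */

	intel_runtime_pm_put(dev_priv);	/* drop the wakeref taken above */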