diff options
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 464 |
1 files changed, 290 insertions, 174 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 72f41aaa71ff..0cbc0e6402b4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -1468,9 +1468,12 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1468 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 1468 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
1469 | crtc = intel_get_crtc_for_plane(dev, 0); | 1469 | crtc = intel_get_crtc_for_plane(dev, 0); |
1470 | if (crtc->enabled && crtc->fb) { | 1470 | if (crtc->enabled && crtc->fb) { |
1471 | int cpp = crtc->fb->bits_per_pixel / 8; | ||
1472 | if (IS_GEN2(dev)) | ||
1473 | cpp = 4; | ||
1474 | |||
1471 | planea_wm = intel_calculate_wm(crtc->mode.clock, | 1475 | planea_wm = intel_calculate_wm(crtc->mode.clock, |
1472 | wm_info, fifo_size, | 1476 | wm_info, fifo_size, cpp, |
1473 | crtc->fb->bits_per_pixel / 8, | ||
1474 | latency_ns); | 1477 | latency_ns); |
1475 | enabled = crtc; | 1478 | enabled = crtc; |
1476 | } else | 1479 | } else |
@@ -1479,9 +1482,12 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1479 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 1482 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
1480 | crtc = intel_get_crtc_for_plane(dev, 1); | 1483 | crtc = intel_get_crtc_for_plane(dev, 1); |
1481 | if (crtc->enabled && crtc->fb) { | 1484 | if (crtc->enabled && crtc->fb) { |
1485 | int cpp = crtc->fb->bits_per_pixel / 8; | ||
1486 | if (IS_GEN2(dev)) | ||
1487 | cpp = 4; | ||
1488 | |||
1482 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | 1489 | planeb_wm = intel_calculate_wm(crtc->mode.clock, |
1483 | wm_info, fifo_size, | 1490 | wm_info, fifo_size, cpp, |
1484 | crtc->fb->bits_per_pixel / 8, | ||
1485 | latency_ns); | 1491 | latency_ns); |
1486 | if (enabled == NULL) | 1492 | if (enabled == NULL) |
1487 | enabled = crtc; | 1493 | enabled = crtc; |
@@ -1571,8 +1577,7 @@ static void i830_update_wm(struct drm_device *dev) | |||
1571 | 1577 | ||
1572 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, | 1578 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
1573 | dev_priv->display.get_fifo_size(dev, 0), | 1579 | dev_priv->display.get_fifo_size(dev, 0), |
1574 | crtc->fb->bits_per_pixel / 8, | 1580 | 4, latency_ns); |
1575 | latency_ns); | ||
1576 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 1581 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
1577 | fwater_lo |= (3<<8) | planea_wm; | 1582 | fwater_lo |= (3<<8) | planea_wm; |
1578 | 1583 | ||
@@ -2323,7 +2328,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
2323 | struct drm_i915_private *dev_priv = dev->dev_private; | 2328 | struct drm_i915_private *dev_priv = dev->dev_private; |
2324 | u32 limits = gen6_rps_limits(dev_priv, &val); | 2329 | u32 limits = gen6_rps_limits(dev_priv, &val); |
2325 | 2330 | ||
2326 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2331 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2327 | WARN_ON(val > dev_priv->rps.max_delay); | 2332 | WARN_ON(val > dev_priv->rps.max_delay); |
2328 | WARN_ON(val < dev_priv->rps.min_delay); | 2333 | WARN_ON(val < dev_priv->rps.min_delay); |
2329 | 2334 | ||
@@ -2404,12 +2409,12 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2404 | struct intel_ring_buffer *ring; | 2409 | struct intel_ring_buffer *ring; |
2405 | u32 rp_state_cap; | 2410 | u32 rp_state_cap; |
2406 | u32 gt_perf_status; | 2411 | u32 gt_perf_status; |
2407 | u32 pcu_mbox, rc6_mask = 0; | 2412 | u32 rc6vids, pcu_mbox, rc6_mask = 0; |
2408 | u32 gtfifodbg; | 2413 | u32 gtfifodbg; |
2409 | int rc6_mode; | 2414 | int rc6_mode; |
2410 | int i; | 2415 | int i, ret; |
2411 | 2416 | ||
2412 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2417 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2413 | 2418 | ||
2414 | /* Here begins a magic sequence of register writes to enable | 2419 | /* Here begins a magic sequence of register writes to enable |
2415 | * auto-downclocking. | 2420 | * auto-downclocking. |
@@ -2503,30 +2508,16 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2503 | GEN6_RP_UP_BUSY_AVG | | 2508 | GEN6_RP_UP_BUSY_AVG | |
2504 | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); | 2509 | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); |
2505 | 2510 | ||
2506 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | 2511 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); |
2507 | 500)) | 2512 | if (!ret) { |
2508 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | 2513 | pcu_mbox = 0; |
2509 | 2514 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | |
2510 | I915_WRITE(GEN6_PCODE_DATA, 0); | 2515 | if (ret && pcu_mbox & (1<<31)) { /* OC supported */ |
2511 | I915_WRITE(GEN6_PCODE_MAILBOX, | 2516 | dev_priv->rps.max_delay = pcu_mbox & 0xff; |
2512 | GEN6_PCODE_READY | | 2517 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
2513 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | 2518 | } |
2514 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | 2519 | } else { |
2515 | 500)) | 2520 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); |
2516 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
2517 | |||
2518 | /* Check for overclock support */ | ||
2519 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
2520 | 500)) | ||
2521 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
2522 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
2523 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
2524 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
2525 | 500)) | ||
2526 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
2527 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
2528 | dev_priv->rps.max_delay = pcu_mbox & 0xff; | ||
2529 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); | ||
2530 | } | 2521 | } |
2531 | 2522 | ||
2532 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); | 2523 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); |
@@ -2540,6 +2531,20 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2540 | /* enable all PM interrupts */ | 2531 | /* enable all PM interrupts */ |
2541 | I915_WRITE(GEN6_PMINTRMSK, 0); | 2532 | I915_WRITE(GEN6_PMINTRMSK, 0); |
2542 | 2533 | ||
2534 | rc6vids = 0; | ||
2535 | ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | ||
2536 | if (IS_GEN6(dev) && ret) { | ||
2537 | DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); | ||
2538 | } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { | ||
2539 | DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", | ||
2540 | GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); | ||
2541 | rc6vids &= 0xffff00; | ||
2542 | rc6vids |= GEN6_ENCODE_RC6_VID(450); | ||
2543 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); | ||
2544 | if (ret) | ||
2545 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); | ||
2546 | } | ||
2547 | |||
2543 | gen6_gt_force_wake_put(dev_priv); | 2548 | gen6_gt_force_wake_put(dev_priv); |
2544 | } | 2549 | } |
2545 | 2550 | ||
@@ -2550,7 +2555,7 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2550 | int gpu_freq, ia_freq, max_ia_freq; | 2555 | int gpu_freq, ia_freq, max_ia_freq; |
2551 | int scaling_factor = 180; | 2556 | int scaling_factor = 180; |
2552 | 2557 | ||
2553 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2558 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2554 | 2559 | ||
2555 | max_ia_freq = cpufreq_quick_get_max(0); | 2560 | max_ia_freq = cpufreq_quick_get_max(0); |
2556 | /* | 2561 | /* |
@@ -2581,17 +2586,11 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2581 | else | 2586 | else |
2582 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | 2587 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); |
2583 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | 2588 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); |
2589 | ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; | ||
2584 | 2590 | ||
2585 | I915_WRITE(GEN6_PCODE_DATA, | 2591 | sandybridge_pcode_write(dev_priv, |
2586 | (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | | 2592 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE, |
2587 | gpu_freq); | 2593 | ia_freq | gpu_freq); |
2588 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | ||
2589 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
2590 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & | ||
2591 | GEN6_PCODE_READY) == 0, 10)) { | ||
2592 | DRM_ERROR("pcode write of freq table timed out\n"); | ||
2593 | continue; | ||
2594 | } | ||
2595 | } | 2594 | } |
2596 | } | 2595 | } |
2597 | 2596 | ||
@@ -2599,16 +2598,16 @@ void ironlake_teardown_rc6(struct drm_device *dev) | |||
2599 | { | 2598 | { |
2600 | struct drm_i915_private *dev_priv = dev->dev_private; | 2599 | struct drm_i915_private *dev_priv = dev->dev_private; |
2601 | 2600 | ||
2602 | if (dev_priv->renderctx) { | 2601 | if (dev_priv->ips.renderctx) { |
2603 | i915_gem_object_unpin(dev_priv->renderctx); | 2602 | i915_gem_object_unpin(dev_priv->ips.renderctx); |
2604 | drm_gem_object_unreference(&dev_priv->renderctx->base); | 2603 | drm_gem_object_unreference(&dev_priv->ips.renderctx->base); |
2605 | dev_priv->renderctx = NULL; | 2604 | dev_priv->ips.renderctx = NULL; |
2606 | } | 2605 | } |
2607 | 2606 | ||
2608 | if (dev_priv->pwrctx) { | 2607 | if (dev_priv->ips.pwrctx) { |
2609 | i915_gem_object_unpin(dev_priv->pwrctx); | 2608 | i915_gem_object_unpin(dev_priv->ips.pwrctx); |
2610 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | 2609 | drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); |
2611 | dev_priv->pwrctx = NULL; | 2610 | dev_priv->ips.pwrctx = NULL; |
2612 | } | 2611 | } |
2613 | } | 2612 | } |
2614 | 2613 | ||
@@ -2634,14 +2633,14 @@ static int ironlake_setup_rc6(struct drm_device *dev) | |||
2634 | { | 2633 | { |
2635 | struct drm_i915_private *dev_priv = dev->dev_private; | 2634 | struct drm_i915_private *dev_priv = dev->dev_private; |
2636 | 2635 | ||
2637 | if (dev_priv->renderctx == NULL) | 2636 | if (dev_priv->ips.renderctx == NULL) |
2638 | dev_priv->renderctx = intel_alloc_context_page(dev); | 2637 | dev_priv->ips.renderctx = intel_alloc_context_page(dev); |
2639 | if (!dev_priv->renderctx) | 2638 | if (!dev_priv->ips.renderctx) |
2640 | return -ENOMEM; | 2639 | return -ENOMEM; |
2641 | 2640 | ||
2642 | if (dev_priv->pwrctx == NULL) | 2641 | if (dev_priv->ips.pwrctx == NULL) |
2643 | dev_priv->pwrctx = intel_alloc_context_page(dev); | 2642 | dev_priv->ips.pwrctx = intel_alloc_context_page(dev); |
2644 | if (!dev_priv->pwrctx) { | 2643 | if (!dev_priv->ips.pwrctx) { |
2645 | ironlake_teardown_rc6(dev); | 2644 | ironlake_teardown_rc6(dev); |
2646 | return -ENOMEM; | 2645 | return -ENOMEM; |
2647 | } | 2646 | } |
@@ -2679,7 +2678,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) | |||
2679 | 2678 | ||
2680 | intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | 2679 | intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
2681 | intel_ring_emit(ring, MI_SET_CONTEXT); | 2680 | intel_ring_emit(ring, MI_SET_CONTEXT); |
2682 | intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | | 2681 | intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | |
2683 | MI_MM_SPACE_GTT | | 2682 | MI_MM_SPACE_GTT | |
2684 | MI_SAVE_EXT_STATE_EN | | 2683 | MI_SAVE_EXT_STATE_EN | |
2685 | MI_RESTORE_EXT_STATE_EN | | 2684 | MI_RESTORE_EXT_STATE_EN | |
@@ -2701,7 +2700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) | |||
2701 | return; | 2700 | return; |
2702 | } | 2701 | } |
2703 | 2702 | ||
2704 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | 2703 | I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); |
2705 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 2704 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
2706 | } | 2705 | } |
2707 | 2706 | ||
@@ -3310,37 +3309,72 @@ static void intel_init_emon(struct drm_device *dev) | |||
3310 | 3309 | ||
3311 | void intel_disable_gt_powersave(struct drm_device *dev) | 3310 | void intel_disable_gt_powersave(struct drm_device *dev) |
3312 | { | 3311 | { |
3312 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3313 | |||
3313 | if (IS_IRONLAKE_M(dev)) { | 3314 | if (IS_IRONLAKE_M(dev)) { |
3314 | ironlake_disable_drps(dev); | 3315 | ironlake_disable_drps(dev); |
3315 | ironlake_disable_rc6(dev); | 3316 | ironlake_disable_rc6(dev); |
3316 | } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { | 3317 | } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { |
3318 | cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); | ||
3319 | mutex_lock(&dev_priv->rps.hw_lock); | ||
3317 | gen6_disable_rps(dev); | 3320 | gen6_disable_rps(dev); |
3321 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
3318 | } | 3322 | } |
3319 | } | 3323 | } |
3320 | 3324 | ||
3325 | static void intel_gen6_powersave_work(struct work_struct *work) | ||
3326 | { | ||
3327 | struct drm_i915_private *dev_priv = | ||
3328 | container_of(work, struct drm_i915_private, | ||
3329 | rps.delayed_resume_work.work); | ||
3330 | struct drm_device *dev = dev_priv->dev; | ||
3331 | |||
3332 | mutex_lock(&dev_priv->rps.hw_lock); | ||
3333 | gen6_enable_rps(dev); | ||
3334 | gen6_update_ring_freq(dev); | ||
3335 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
3336 | } | ||
3337 | |||
3321 | void intel_enable_gt_powersave(struct drm_device *dev) | 3338 | void intel_enable_gt_powersave(struct drm_device *dev) |
3322 | { | 3339 | { |
3340 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3341 | |||
3323 | if (IS_IRONLAKE_M(dev)) { | 3342 | if (IS_IRONLAKE_M(dev)) { |
3324 | ironlake_enable_drps(dev); | 3343 | ironlake_enable_drps(dev); |
3325 | ironlake_enable_rc6(dev); | 3344 | ironlake_enable_rc6(dev); |
3326 | intel_init_emon(dev); | 3345 | intel_init_emon(dev); |
3327 | } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { | 3346 | } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { |
3328 | gen6_enable_rps(dev); | 3347 | /* |
3329 | gen6_update_ring_freq(dev); | 3348 | * PCU communication is slow and this doesn't need to be |
3349 | * done at any specific time, so do this out of our fast path | ||
3350 | * to make resume and init faster. | ||
3351 | */ | ||
3352 | schedule_delayed_work(&dev_priv->rps.delayed_resume_work, | ||
3353 | round_jiffies_up_relative(HZ)); | ||
3330 | } | 3354 | } |
3331 | } | 3355 | } |
3332 | 3356 | ||
3357 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
3358 | { | ||
3359 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3360 | |||
3361 | /* | ||
3362 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3363 | * gating for the panel power sequencer or it will fail to | ||
3364 | * start up when no ports are active. | ||
3365 | */ | ||
3366 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3367 | } | ||
3368 | |||
3333 | static void ironlake_init_clock_gating(struct drm_device *dev) | 3369 | static void ironlake_init_clock_gating(struct drm_device *dev) |
3334 | { | 3370 | { |
3335 | struct drm_i915_private *dev_priv = dev->dev_private; | 3371 | struct drm_i915_private *dev_priv = dev->dev_private; |
3336 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 3372 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
3337 | 3373 | ||
3338 | /* Required for FBC */ | 3374 | /* Required for FBC */ |
3339 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | | 3375 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | |
3340 | DPFCRUNIT_CLOCK_GATE_DISABLE | | 3376 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | |
3341 | DPFDUNIT_CLOCK_GATE_DISABLE; | 3377 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; |
3342 | /* Required for CxSR */ | ||
3343 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | ||
3344 | 3378 | ||
3345 | I915_WRITE(PCH_3DCGDIS0, | 3379 | I915_WRITE(PCH_3DCGDIS0, |
3346 | MARIUNIT_CLOCK_GATE_DISABLE | | 3380 | MARIUNIT_CLOCK_GATE_DISABLE | |
@@ -3348,8 +3382,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3348 | I915_WRITE(PCH_3DCGDIS1, | 3382 | I915_WRITE(PCH_3DCGDIS1, |
3349 | VFMUNIT_CLOCK_GATE_DISABLE); | 3383 | VFMUNIT_CLOCK_GATE_DISABLE); |
3350 | 3384 | ||
3351 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3352 | |||
3353 | /* | 3385 | /* |
3354 | * According to the spec the following bits should be set in | 3386 | * According to the spec the following bits should be set in |
3355 | * order to enable memory self-refresh | 3387 | * order to enable memory self-refresh |
@@ -3360,9 +3392,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3360 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3392 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3361 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | 3393 | (I915_READ(ILK_DISPLAY_CHICKEN2) | |
3362 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | 3394 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); |
3363 | I915_WRITE(ILK_DSPCLK_GATE, | 3395 | dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; |
3364 | (I915_READ(ILK_DSPCLK_GATE) | | ||
3365 | ILK_DPARB_CLK_GATE)); | ||
3366 | I915_WRITE(DISP_ARB_CTL, | 3396 | I915_WRITE(DISP_ARB_CTL, |
3367 | (I915_READ(DISP_ARB_CTL) | | 3397 | (I915_READ(DISP_ARB_CTL) | |
3368 | DISP_FBC_WM_DIS)); | 3398 | DISP_FBC_WM_DIS)); |
@@ -3384,28 +3414,51 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3384 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3414 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3385 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3415 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3386 | ILK_DPARB_GATE); | 3416 | ILK_DPARB_GATE); |
3387 | I915_WRITE(ILK_DSPCLK_GATE, | ||
3388 | I915_READ(ILK_DSPCLK_GATE) | | ||
3389 | ILK_DPFC_DIS1 | | ||
3390 | ILK_DPFC_DIS2 | | ||
3391 | ILK_CLK_FBC); | ||
3392 | } | 3417 | } |
3393 | 3418 | ||
3419 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | ||
3420 | |||
3394 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3421 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3395 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3422 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3396 | ILK_ELPIN_409_SELECT); | 3423 | ILK_ELPIN_409_SELECT); |
3397 | I915_WRITE(_3D_CHICKEN2, | 3424 | I915_WRITE(_3D_CHICKEN2, |
3398 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | 3425 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | |
3399 | _3D_CHICKEN2_WM_READ_PIPELINED); | 3426 | _3D_CHICKEN2_WM_READ_PIPELINED); |
3427 | |||
3428 | /* WaDisableRenderCachePipelinedFlush */ | ||
3429 | I915_WRITE(CACHE_MODE_0, | ||
3430 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | ||
3431 | |||
3432 | ibx_init_clock_gating(dev); | ||
3433 | } | ||
3434 | |||
3435 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
3436 | { | ||
3437 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3438 | int pipe; | ||
3439 | |||
3440 | /* | ||
3441 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3442 | * gating for the panel power sequencer or it will fail to | ||
3443 | * start up when no ports are active. | ||
3444 | */ | ||
3445 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3446 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
3447 | DPLS_EDP_PPS_FIX_DIS); | ||
3448 | /* WADP0ClockGatingDisable */ | ||
3449 | for_each_pipe(pipe) { | ||
3450 | I915_WRITE(TRANS_CHICKEN1(pipe), | ||
3451 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | ||
3452 | } | ||
3400 | } | 3453 | } |
3401 | 3454 | ||
3402 | static void gen6_init_clock_gating(struct drm_device *dev) | 3455 | static void gen6_init_clock_gating(struct drm_device *dev) |
3403 | { | 3456 | { |
3404 | struct drm_i915_private *dev_priv = dev->dev_private; | 3457 | struct drm_i915_private *dev_priv = dev->dev_private; |
3405 | int pipe; | 3458 | int pipe; |
3406 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 3459 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
3407 | 3460 | ||
3408 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 3461 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
3409 | 3462 | ||
3410 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3463 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3411 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3464 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
@@ -3460,10 +3513,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
3460 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3513 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3461 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3514 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3462 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | 3515 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); |
3463 | I915_WRITE(ILK_DSPCLK_GATE, | 3516 | I915_WRITE(ILK_DSPCLK_GATE_D, |
3464 | I915_READ(ILK_DSPCLK_GATE) | | 3517 | I915_READ(ILK_DSPCLK_GATE_D) | |
3465 | ILK_DPARB_CLK_GATE | | 3518 | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | |
3466 | ILK_DPFD_CLK_GATE); | 3519 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); |
3467 | 3520 | ||
3468 | I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | | 3521 | I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
3469 | GEN6_MBCTL_ENABLE_BOOT_FETCH); | 3522 | GEN6_MBCTL_ENABLE_BOOT_FETCH); |
@@ -3479,6 +3532,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
3479 | * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ | 3532 | * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ |
3480 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); | 3533 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); |
3481 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); | 3534 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); |
3535 | |||
3536 | cpt_init_clock_gating(dev); | ||
3482 | } | 3537 | } |
3483 | 3538 | ||
3484 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | 3539 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
@@ -3497,9 +3552,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) | |||
3497 | { | 3552 | { |
3498 | struct drm_i915_private *dev_priv = dev->dev_private; | 3553 | struct drm_i915_private *dev_priv = dev->dev_private; |
3499 | int pipe; | 3554 | int pipe; |
3500 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3501 | |||
3502 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3503 | 3555 | ||
3504 | I915_WRITE(WM3_LP_ILK, 0); | 3556 | I915_WRITE(WM3_LP_ILK, 0); |
3505 | I915_WRITE(WM2_LP_ILK, 0); | 3557 | I915_WRITE(WM2_LP_ILK, 0); |
@@ -3510,12 +3562,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) | |||
3510 | */ | 3562 | */ |
3511 | I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | 3563 | I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); |
3512 | 3564 | ||
3513 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||
3514 | |||
3515 | I915_WRITE(IVB_CHICKEN3, | ||
3516 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | ||
3517 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | ||
3518 | |||
3519 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3565 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3520 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3566 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3521 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3567 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
@@ -3559,21 +3605,31 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3559 | { | 3605 | { |
3560 | struct drm_i915_private *dev_priv = dev->dev_private; | 3606 | struct drm_i915_private *dev_priv = dev->dev_private; |
3561 | int pipe; | 3607 | int pipe; |
3562 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3563 | uint32_t snpcr; | 3608 | uint32_t snpcr; |
3564 | 3609 | ||
3565 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3566 | |||
3567 | I915_WRITE(WM3_LP_ILK, 0); | 3610 | I915_WRITE(WM3_LP_ILK, 0); |
3568 | I915_WRITE(WM2_LP_ILK, 0); | 3611 | I915_WRITE(WM2_LP_ILK, 0); |
3569 | I915_WRITE(WM1_LP_ILK, 0); | 3612 | I915_WRITE(WM1_LP_ILK, 0); |
3570 | 3613 | ||
3571 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 3614 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
3615 | |||
3616 | /* WaDisableEarlyCull */ | ||
3617 | I915_WRITE(_3D_CHICKEN3, | ||
3618 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | ||
3572 | 3619 | ||
3620 | /* WaDisableBackToBackFlipFix */ | ||
3573 | I915_WRITE(IVB_CHICKEN3, | 3621 | I915_WRITE(IVB_CHICKEN3, |
3574 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | 3622 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
3575 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | 3623 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
3576 | 3624 | ||
3625 | /* WaDisablePSDDualDispatchEnable */ | ||
3626 | if (IS_IVB_GT1(dev)) | ||
3627 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | ||
3628 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3629 | else | ||
3630 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, | ||
3631 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3632 | |||
3577 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3633 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3578 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3634 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3579 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3635 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
@@ -3582,7 +3638,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3582 | I915_WRITE(GEN7_L3CNTLREG1, | 3638 | I915_WRITE(GEN7_L3CNTLREG1, |
3583 | GEN7_WA_FOR_GEN7_L3_CONTROL); | 3639 | GEN7_WA_FOR_GEN7_L3_CONTROL); |
3584 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | 3640 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, |
3585 | GEN7_WA_L3_CHICKEN_MODE); | 3641 | GEN7_WA_L3_CHICKEN_MODE); |
3642 | if (IS_IVB_GT1(dev)) | ||
3643 | I915_WRITE(GEN7_ROW_CHICKEN2, | ||
3644 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3645 | else | ||
3646 | I915_WRITE(GEN7_ROW_CHICKEN2_GT2, | ||
3647 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3648 | |||
3649 | |||
3650 | /* WaForceL3Serialization */ | ||
3651 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3652 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3586 | 3653 | ||
3587 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | 3654 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock |
3588 | * gating disable must be set. Failure to set it results in | 3655 | * gating disable must be set. Failure to set it results in |
@@ -3626,34 +3693,53 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3626 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | 3693 | snpcr &= ~GEN6_MBC_SNPCR_MASK; |
3627 | snpcr |= GEN6_MBC_SNPCR_MED; | 3694 | snpcr |= GEN6_MBC_SNPCR_MED; |
3628 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | 3695 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
3696 | |||
3697 | cpt_init_clock_gating(dev); | ||
3629 | } | 3698 | } |
3630 | 3699 | ||
3631 | static void valleyview_init_clock_gating(struct drm_device *dev) | 3700 | static void valleyview_init_clock_gating(struct drm_device *dev) |
3632 | { | 3701 | { |
3633 | struct drm_i915_private *dev_priv = dev->dev_private; | 3702 | struct drm_i915_private *dev_priv = dev->dev_private; |
3634 | int pipe; | 3703 | int pipe; |
3635 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3636 | |||
3637 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3638 | 3704 | ||
3639 | I915_WRITE(WM3_LP_ILK, 0); | 3705 | I915_WRITE(WM3_LP_ILK, 0); |
3640 | I915_WRITE(WM2_LP_ILK, 0); | 3706 | I915_WRITE(WM2_LP_ILK, 0); |
3641 | I915_WRITE(WM1_LP_ILK, 0); | 3707 | I915_WRITE(WM1_LP_ILK, 0); |
3642 | 3708 | ||
3643 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 3709 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
3710 | |||
3711 | /* WaDisableEarlyCull */ | ||
3712 | I915_WRITE(_3D_CHICKEN3, | ||
3713 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | ||
3644 | 3714 | ||
3715 | /* WaDisableBackToBackFlipFix */ | ||
3645 | I915_WRITE(IVB_CHICKEN3, | 3716 | I915_WRITE(IVB_CHICKEN3, |
3646 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | 3717 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
3647 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | 3718 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
3648 | 3719 | ||
3720 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | ||
3721 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3722 | |||
3649 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3723 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3650 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3724 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3651 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3725 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
3652 | 3726 | ||
3653 | /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ | 3727 | /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ |
3654 | I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); | 3728 | I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); |
3655 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); | 3729 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); |
3656 | 3730 | ||
3731 | /* WaForceL3Serialization */ | ||
3732 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3733 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3734 | |||
3735 | /* WaDisableDopClockGating */ | ||
3736 | I915_WRITE(GEN7_ROW_CHICKEN2, | ||
3737 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3738 | |||
3739 | /* WaForceL3Serialization */ | ||
3740 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3741 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3742 | |||
3657 | /* This is required by WaCatErrorRejectionIssue */ | 3743 | /* This is required by WaCatErrorRejectionIssue */ |
3658 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | 3744 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
3659 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | 3745 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | |
@@ -3710,6 +3796,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | |||
3710 | PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | | 3796 | PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | |
3711 | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | | 3797 | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | |
3712 | PLANEA_FLIPDONE_INT_EN); | 3798 | PLANEA_FLIPDONE_INT_EN); |
3799 | |||
3800 | /* | ||
3801 | * WaDisableVLVClockGating_VBIIssue | ||
3802 | * Disable clock gating on the GCFG unit to prevent a delay | ||
3803 | * in the reporting of vblank events. | ||
3804 | */ | ||
3805 | I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); | ||
3713 | } | 3806 | } |
3714 | 3807 | ||
3715 | static void g4x_init_clock_gating(struct drm_device *dev) | 3808 | static void g4x_init_clock_gating(struct drm_device *dev) |
@@ -3728,6 +3821,10 @@ static void g4x_init_clock_gating(struct drm_device *dev) | |||
3728 | if (IS_GM45(dev)) | 3821 | if (IS_GM45(dev)) |
3729 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | 3822 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
3730 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | 3823 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); |
3824 | |||
3825 | /* WaDisableRenderCachePipelinedFlush */ | ||
3826 | I915_WRITE(CACHE_MODE_0, | ||
3827 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | ||
3731 | } | 3828 | } |
3732 | 3829 | ||
3733 | static void crestline_init_clock_gating(struct drm_device *dev) | 3830 | static void crestline_init_clock_gating(struct drm_device *dev) |
@@ -3783,44 +3880,11 @@ static void i830_init_clock_gating(struct drm_device *dev) | |||
3783 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 3880 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
3784 | } | 3881 | } |
3785 | 3882 | ||
3786 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
3787 | { | ||
3788 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3789 | |||
3790 | /* | ||
3791 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3792 | * gating for the panel power sequencer or it will fail to | ||
3793 | * start up when no ports are active. | ||
3794 | */ | ||
3795 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3796 | } | ||
3797 | |||
3798 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
3799 | { | ||
3800 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3801 | int pipe; | ||
3802 | |||
3803 | /* | ||
3804 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3805 | * gating for the panel power sequencer or it will fail to | ||
3806 | * start up when no ports are active. | ||
3807 | */ | ||
3808 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3809 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
3810 | DPLS_EDP_PPS_FIX_DIS); | ||
3811 | /* Without this, mode sets may fail silently on FDI */ | ||
3812 | for_each_pipe(pipe) | ||
3813 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
3814 | } | ||
3815 | |||
3816 | void intel_init_clock_gating(struct drm_device *dev) | 3883 | void intel_init_clock_gating(struct drm_device *dev) |
3817 | { | 3884 | { |
3818 | struct drm_i915_private *dev_priv = dev->dev_private; | 3885 | struct drm_i915_private *dev_priv = dev->dev_private; |
3819 | 3886 | ||
3820 | dev_priv->display.init_clock_gating(dev); | 3887 | dev_priv->display.init_clock_gating(dev); |
3821 | |||
3822 | if (dev_priv->display.init_pch_clock_gating) | ||
3823 | dev_priv->display.init_pch_clock_gating(dev); | ||
3824 | } | 3888 | } |
3825 | 3889 | ||
3826 | /* Starting with Haswell, we have different power wells for | 3890 | /* Starting with Haswell, we have different power wells for |
@@ -3846,7 +3910,7 @@ void intel_init_power_wells(struct drm_device *dev) | |||
3846 | 3910 | ||
3847 | if ((well & HSW_PWR_WELL_STATE) == 0) { | 3911 | if ((well & HSW_PWR_WELL_STATE) == 0) { |
3848 | I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); | 3912 | I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); |
3849 | if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) | 3913 | if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) |
3850 | DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); | 3914 | DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); |
3851 | } | 3915 | } |
3852 | } | 3916 | } |
@@ -3884,11 +3948,6 @@ void intel_init_pm(struct drm_device *dev) | |||
3884 | 3948 | ||
3885 | /* For FIFO watermark updates */ | 3949 | /* For FIFO watermark updates */ |
3886 | if (HAS_PCH_SPLIT(dev)) { | 3950 | if (HAS_PCH_SPLIT(dev)) { |
3887 | if (HAS_PCH_IBX(dev)) | ||
3888 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | ||
3889 | else if (HAS_PCH_CPT(dev)) | ||
3890 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; | ||
3891 | |||
3892 | if (IS_GEN5(dev)) { | 3951 | if (IS_GEN5(dev)) { |
3893 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) | 3952 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
3894 | dev_priv->display.update_wm = ironlake_update_wm; | 3953 | dev_priv->display.update_wm = ironlake_update_wm; |
@@ -3999,6 +4058,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) | |||
3999 | DRM_ERROR("GT thread status wait timed out\n"); | 4058 | DRM_ERROR("GT thread status wait timed out\n"); |
4000 | } | 4059 | } |
4001 | 4060 | ||
4061 | static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) | ||
4062 | { | ||
4063 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
4064 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
4065 | } | ||
4066 | |||
4002 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 4067 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
4003 | { | 4068 | { |
4004 | u32 forcewake_ack; | 4069 | u32 forcewake_ack; |
@@ -4012,7 +4077,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
4012 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4077 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4013 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4078 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4014 | 4079 | ||
4015 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | 4080 | I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); |
4016 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4081 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
4017 | 4082 | ||
4018 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), | 4083 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
@@ -4022,6 +4087,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
4022 | __gen6_gt_wait_for_thread_c0(dev_priv); | 4087 | __gen6_gt_wait_for_thread_c0(dev_priv); |
4023 | } | 4088 | } |
4024 | 4089 | ||
4090 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | ||
4091 | { | ||
4092 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); | ||
4093 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
4094 | } | ||
4095 | |||
4025 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | 4096 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) |
4026 | { | 4097 | { |
4027 | u32 forcewake_ack; | 4098 | u32 forcewake_ack; |
@@ -4035,7 +4106,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | |||
4035 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4106 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4036 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4107 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4037 | 4108 | ||
4038 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); | 4109 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
4039 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4110 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
4040 | 4111 | ||
4041 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), | 4112 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
@@ -4079,7 +4150,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
4079 | 4150 | ||
4080 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 4151 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
4081 | { | 4152 | { |
4082 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); | 4153 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4083 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ | 4154 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ |
4084 | gen6_gt_check_fifodbg(dev_priv); | 4155 | gen6_gt_check_fifodbg(dev_priv); |
4085 | } | 4156 | } |
@@ -4117,13 +4188,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
4117 | return ret; | 4188 | return ret; |
4118 | } | 4189 | } |
4119 | 4190 | ||
4191 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | ||
4192 | { | ||
4193 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); | ||
4194 | } | ||
4195 | |||
4120 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | 4196 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) |
4121 | { | 4197 | { |
4122 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, | 4198 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, |
4123 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4199 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4124 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4200 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4125 | 4201 | ||
4126 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); | 4202 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
4127 | 4203 | ||
4128 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), | 4204 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), |
4129 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4205 | FORCEWAKE_ACK_TIMEOUT_MS)) |
@@ -4134,49 +4210,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | |||
4134 | 4210 | ||
4135 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | 4211 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) |
4136 | { | 4212 | { |
4137 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); | 4213 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4138 | /* The below doubles as a POSTING_READ */ | 4214 | /* The below doubles as a POSTING_READ */ |
4139 | gen6_gt_check_fifodbg(dev_priv); | 4215 | gen6_gt_check_fifodbg(dev_priv); |
4140 | } | 4216 | } |
4141 | 4217 | ||
4218 | void intel_gt_reset(struct drm_device *dev) | ||
4219 | { | ||
4220 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4221 | |||
4222 | if (IS_VALLEYVIEW(dev)) { | ||
4223 | vlv_force_wake_reset(dev_priv); | ||
4224 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
4225 | __gen6_gt_force_wake_reset(dev_priv); | ||
4226 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | ||
4227 | __gen6_gt_force_wake_mt_reset(dev_priv); | ||
4228 | } | ||
4229 | } | ||
4230 | |||
4142 | void intel_gt_init(struct drm_device *dev) | 4231 | void intel_gt_init(struct drm_device *dev) |
4143 | { | 4232 | { |
4144 | struct drm_i915_private *dev_priv = dev->dev_private; | 4233 | struct drm_i915_private *dev_priv = dev->dev_private; |
4145 | 4234 | ||
4146 | spin_lock_init(&dev_priv->gt_lock); | 4235 | spin_lock_init(&dev_priv->gt_lock); |
4147 | 4236 | ||
4237 | intel_gt_reset(dev); | ||
4238 | |||
4148 | if (IS_VALLEYVIEW(dev)) { | 4239 | if (IS_VALLEYVIEW(dev)) { |
4149 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 4240 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
4150 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 4241 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
4151 | } else if (INTEL_INFO(dev)->gen >= 6) { | 4242 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
4243 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; | ||
4244 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; | ||
4245 | } else if (IS_GEN6(dev)) { | ||
4152 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | 4246 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
4153 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | 4247 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
4248 | } | ||
4249 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | ||
4250 | intel_gen6_powersave_work); | ||
4251 | } | ||
4154 | 4252 | ||
4155 | /* IVB configs may use multi-threaded forcewake */ | 4253 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) |
4156 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | 4254 | { |
4157 | u32 ecobus; | 4255 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
4158 | 4256 | ||
4159 | /* A small trick here - if the bios hasn't configured | 4257 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { |
4160 | * MT forcewake, and if the device is in RC6, then | 4258 | DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); |
4161 | * force_wake_mt_get will not wake the device and the | 4259 | return -EAGAIN; |
4162 | * ECOBUS read will return zero. Which will be | ||
4163 | * (correctly) interpreted by the test below as MT | ||
4164 | * forcewake being disabled. | ||
4165 | */ | ||
4166 | mutex_lock(&dev->struct_mutex); | ||
4167 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
4168 | ecobus = I915_READ_NOTRACE(ECOBUS); | ||
4169 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
4170 | mutex_unlock(&dev->struct_mutex); | ||
4171 | |||
4172 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
4173 | DRM_DEBUG_KMS("Using MT version of forcewake\n"); | ||
4174 | dev_priv->gt.force_wake_get = | ||
4175 | __gen6_gt_force_wake_mt_get; | ||
4176 | dev_priv->gt.force_wake_put = | ||
4177 | __gen6_gt_force_wake_mt_put; | ||
4178 | } | ||
4179 | } | ||
4180 | } | 4260 | } |
4261 | |||
4262 | I915_WRITE(GEN6_PCODE_DATA, *val); | ||
4263 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | ||
4264 | |||
4265 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
4266 | 500)) { | ||
4267 | DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); | ||
4268 | return -ETIMEDOUT; | ||
4269 | } | ||
4270 | |||
4271 | *val = I915_READ(GEN6_PCODE_DATA); | ||
4272 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
4273 | |||
4274 | return 0; | ||
4181 | } | 4275 | } |
4182 | 4276 | ||
4277 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) | ||
4278 | { | ||
4279 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | ||
4280 | |||
4281 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { | ||
4282 | DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); | ||
4283 | return -EAGAIN; | ||
4284 | } | ||
4285 | |||
4286 | I915_WRITE(GEN6_PCODE_DATA, val); | ||
4287 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | ||
4288 | |||
4289 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
4290 | 500)) { | ||
4291 | DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); | ||
4292 | return -ETIMEDOUT; | ||
4293 | } | ||
4294 | |||
4295 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
4296 | |||
4297 | return 0; | ||
4298 | } | ||