author    Daniel Vetter <daniel.vetter@ffwll.ch>	2014-09-29 09:07:19 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>	2014-09-29 09:08:56 -0400
commit    7526ed79b014cbd943cd48b8dda63e74391ddd48
tree      ebdef8dac8d3e48d4f3ad8945570131fad902f65
parent    d37cf5f7e1b315585940a735a8508d955ffc0f16
Revert "drm/i915/bdw: BDW Software Turbo"

This reverts commit c76bb61a71083b2d90504cc6d0dda2047c5d63ca.

It's apparently broken enough that Rodrigo submitted a patch to add a
config option for it. Given that the design is also ... suboptimal, and
that I've only merged this to get lead engineers and managers off my
back for one second, let's just revert it.

/me puts on combat gear again

It was worth a shot ...

References: http://mid.mail-archive.com/1411686380-1953-1-git-send-email-rodrigo.vivi@intel.com
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Daisy Sun <daisy.sun@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		|  22
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c		|  21
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h		|   4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	|   3
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c		| 230
5 files changed, 39 insertions(+), 241 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17dfce0f4e68..32180ac92770 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -945,23 +945,6 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-	u32 it_threshold_pct; /* interrupt, in percentage */
-	u32 eval_interval; /* evaluation interval, in us */
-	u32 last_ts;
-	u32 last_c0;
-	bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-	struct intel_rps_bdw_cal up;
-	struct intel_rps_bdw_cal down;
-	struct timer_list flip_timer;
-	u32 timeout;
-	atomic_t flip_received;
-	struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -995,9 +978,6 @@ struct intel_gen6_power_mgmt {
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
-	bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
-	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2828,8 +2808,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c96ddc953531..3201986bf25e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1979,27 +1979,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 			 res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
-	if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-	}
-	else {
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	}
-
-	bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
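
For context: the dropped gen8_flip_interrupt() is a re-armed watchdog. Every page flip pushes a one-shot timer another 200ms into the future, so the timer only fires once flips stop arriving, at which point flip_active_timeout_handler() (removed from intel_pm.c below) clears flip_received and queues the work item that bumps the GPU to RP0. A minimal sketch of that pattern against the 3.17-era timer API; the names (flip_watchdog, note_flip) are illustrative, not driver symbols:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

#define FLIP_TIMEOUT_US	(200 * 1000)	/* 200ms, as in the reverted code */

static struct timer_list flip_watchdog;		/* illustrative name */
static atomic_t flips_active = ATOMIC_INIT(0);

/* Runs only when no flip has arrived for FLIP_TIMEOUT_US: declare idle. */
static void flip_watchdog_fn(unsigned long data)
{
	atomic_set(&flips_active, 0);
}

static void flip_watchdog_init(void)
{
	init_timer(&flip_watchdog);
	flip_watchdog.function = flip_watchdog_fn;
	flip_watchdog.data = 0;
}

/* Call on every page flip. */
static void note_flip(void)
{
	if (atomic_read(&flips_active)) {
		/* Still busy: push the deadline out again. */
		mod_timer(&flip_watchdog,
			  jiffies + usecs_to_jiffies(FLIP_TIMEOUT_US));
	} else {
		/* First flip after idle: arm the watchdog. */
		flip_watchdog.expires =
			jiffies + usecs_to_jiffies(FLIP_TIMEOUT_US);
		add_timer(&flip_watchdog);
		atomic_set(&flips_active, 1);
	}
}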
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ad8179b40d19..e887d4c13ca1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5585,10 +5585,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6				0x9430
 #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE		(1<<14)
 
-#define TIMESTAMP_CTR				0x44070
-#define FREQ_1_28_US(us)			(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0				(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
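
For reference, the dropped FREQ_1_28_US() macro converts a microsecond value into the 1.28us units that TIMESTAMP_CTR and the RP evaluation-interval registers count in: us / 1.28 == (us * 100) / 128, with the divide written as a shift. A small standalone check of the arithmetic (illustrative, not driver code):

#include <stdio.h>

#define FREQ_1_28_US(us) (((us) * 100) >> 7)	/* us -> 1.28us ticks */

int main(void)
{
	/* 1 second; matches the literal 100000000 / 128 used after the revert. */
	printf("%d\n", FREQ_1_28_US(1000000));	/* prints 781250 */
	/* The 84.48ms up evaluation interval from the reverted code. */
	printf("%d\n", FREQ_1_28_US(84480));	/* prints 66000, i.e. GEN6_RP_UP_EI */
	return 0;
}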
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c5079f2c49f3..2d4258038ef2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9926,9 +9926,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int ret;
 
-	//trigger software GT busyness calculation
-	gen8_flip_interrupt(dev);
-
 	/*
 	 * drm_mode_page_flip_ioctl() should already catch this, but double
 	 * check to be safe.  In the future we may enable pageflipping from
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 675e8a2ce988..45f2aa0b8fe5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2285,6 +2285,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
 	else
 		return 2;
 }
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
@@ -3253,9 +3254,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
 	int new_power;
 
-	if (dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
@@ -3463,11 +3461,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3481,11 +3476,8 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3520,26 +3512,21 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 static void gen8_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
-		if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
-			del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv-> rps.is_bdw_sw_turbo = false;
-	} else {
-		I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-						~dev_priv->pm_rps_events);
-		/* Complete PM interrupt masking here doesn't race with the rps work
-		 * item again unmasking PM interrupts because that is using a different
-		 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-		 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-		 * gen8_enable_rps will clean up. */
 
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->rps.pm_iir = 0;
-		spin_unlock_irq(&dev_priv->irq_lock);
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+		   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
 
-		I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	}
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3697,111 +3684,13 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
 	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 }
 
-static void bdw_sw_calculate_freq(struct drm_device *dev,
-		struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 busy = 0;
-	u32 busyness_pct = 0;
-	u32 elapsed_time = 0;
-	u16 new_freq = 0;
-
-	if (!c || !cur_time || !c0)
-		return;
-
-	if (0 == c->last_c0)
-		goto out;
-
-	/* Check Evaluation interval */
-	elapsed_time = *cur_time - c->last_ts;
-	if (elapsed_time < c->eval_interval)
-		return;
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
-	 * Whole busyness_pct calculation should be
-	 *     busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
-	 *     busyness_pct = (u32)(busy * 100 / elapsed_time);
-	 * The final formula is to simplify CPU calculation
-	 */
-	busy = (u64)(*c0 - c->last_c0) << 12;
-	do_div(busy, elapsed_time);
-	busyness_pct = (u32)busy;
-
-	if (c->is_up && busyness_pct >= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq + 3;
-	if (!c->is_up && busyness_pct <= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq - 1;
-
-	/* Adjust to new frequency busyness and compare with threshold */
-	if (0 != new_freq) {
-		if (new_freq > dev_priv->rps.max_freq_softlimit)
-			new_freq = dev_priv->rps.max_freq_softlimit;
-		else if (new_freq < dev_priv->rps.min_freq_softlimit)
-			new_freq = dev_priv->rps.min_freq_softlimit;
-
-		gen6_set_rps(dev, new_freq);
-	}
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-out:
-	c->last_c0 = *c0;
-	c->last_ts = *cur_time;
-}
-
-static void gen8_set_frequency_RP0(struct work_struct *work)
-{
-	struct intel_rps_bdw_turbo *p_bdw_turbo =
-		container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
-	struct intel_gen6_power_mgmt *p_power_mgmt =
-		container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
-	struct drm_i915_private *dev_priv =
-		container_of(p_power_mgmt, struct drm_i915_private, rps);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void flip_active_timeout_handler(unsigned long var)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
-
-	del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-	atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
-
-	queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
-}
-
-void bdw_software_turbo(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
-	u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
-
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
-			&current_time, &current_c0);
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
-			&current_time, &current_c0);
-}
-
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0, rp_state_cap;
-	uint32_t threshold_up_pct, threshold_down_pct;
-	uint32_t ei_up, ei_down; /* up and down evaluation interval */
-	u32 rp_ctl_flag;
 	int unused;
 
-	/* Use software Turbo for BDW */
-	dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
-
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
 
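
The removed bdw_sw_calculate_freq() above derives busyness from the MCHBAR C0 residency counter, whose tick is 32 * 1.28us = 40.96us. Its comment spells out the shortcut: busy_us = delta_c0 * 4096 / 100 and busyness_pct = busy_us * 100 / elapsed_us cancel to (delta_c0 << 12) / elapsed_us. A standalone sketch of just that arithmetic, with an invented sample value (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* delta_c0 in 40.96us ticks, elapsed_us in plain microseconds. */
static uint32_t busyness_pct(uint32_t delta_c0, uint32_t elapsed_us)
{
	return (uint32_t)(((uint64_t)delta_c0 << 12) / elapsed_us);
}

int main(void)
{
	/* 1856 ticks * 40.96us ~= 76ms busy over the 84480us up interval:
	 * just under the 90% up threshold (integer math prints 89). */
	printf("%u%%\n", busyness_pct(1856, 84480));
	return 0;
}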
@@ -3845,74 +3734,35 @@ static void gen8_enable_rps(struct drm_device *dev)
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
-	ei_up = 84480; /* 84.48ms */
-	ei_down = 448000;
-	threshold_up_pct = 90; /* x percent busy */
-	threshold_down_pct = 70;
-
-	if (dev_priv->rps.is_bdw_sw_turbo) {
-		dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
-		dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
-		dev_priv->rps.sw_turbo.up.is_up = true;
-		dev_priv->rps.sw_turbo.up.last_ts = 0;
-		dev_priv->rps.sw_turbo.up.last_c0 = 0;
-
-		dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
-		dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
-		dev_priv->rps.sw_turbo.down.is_up = false;
-		dev_priv->rps.sw_turbo.down.last_ts = 0;
-		dev_priv->rps.sw_turbo.down.last_c0 = 0;
-
-		/* Start the timer to track if flip comes*/
-		dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
-
-		init_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
-		dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
-
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	} else {
-		/* NB: Docs say 1s, and 1000000 - which aren't equivalent
-		 * 1 second timeout*/
-		I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
 	/* Docs recommend 900MHz, and 300 MHz respectively */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 		   dev_priv->rps.max_freq_softlimit << 24 |
 		   dev_priv->rps.min_freq_softlimit << 16);
 
-	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		FREQ_1_28_US(ei_up * threshold_up_pct / 100));
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		FREQ_1_28_US(ei_down * threshold_down_pct / 100));
-	I915_WRITE(GEN6_RP_UP_EI,
-		FREQ_1_28_US(ei_up));
-	I915_WRITE(GEN6_RP_DOWN_EI,
-		FREQ_1_28_US(ei_down));
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	}
 
 	/* 5: Enable RPS */
-	rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
-			GEN6_RP_MEDIA_HW_NORMAL_MODE |
-			GEN6_RP_MEDIA_IS_GFX |
-			GEN6_RP_UP_BUSY_AVG |
-			GEN6_RP_DOWN_IDLE_AVG;
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		rp_ctl_flag |= GEN6_RP_ENABLE;
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
 
-	I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);
+	/* 6: Ring frequency + overclocking (our driver does this later */
 
-	/* 6: Ring frequency + overclocking
-	 * (our driver does this later */
 	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		gen8_enable_rps_interrupts(dev);
+
+	gen8_enable_rps_interrupts(dev);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
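
The magic numbers this hunk restores are the same 1.28us conversion inlined: the RP threshold registers hold busy time per evaluation interval in 1.28us ticks, with 76ms ~= 90% of the 84.48ms up EI and 313ms ~= 70% of the 448ms down EI. A quick check of the literals (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	/* GEN6_RP_UP_THRESHOLD: 76ms busy, in 1.28us ticks. */
	printf("%d\n", 7600000 / 128);		/* prints 59375 */
	/* GEN6_RP_DOWN_THRESHOLD: 313ms busy. */
	printf("%d\n", 31300000 / 128);		/* prints 244531 */
	/* The EIs themselves: 66000 * 1.28us = 84.48ms, 350000 * 1.28us = 448ms. */
	return 0;
}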
@@ -5386,8 +5236,6 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 						rps.delayed_resume_work.work);
 	struct drm_device *dev = dev_priv->dev;
 
-	dev_priv->rps.is_bdw_sw_turbo = false;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_CHERRYVIEW(dev)) {