Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 118
 1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd631d19b2bd..3db7c40cc9ae 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2907,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	limits = dev_priv->rps.max_delay << 24;
-	if (val <= dev_priv->rps.min_delay)
-		limits |= dev_priv->rps.min_delay << 16;
+	limits = dev_priv->rps.max_freq_softlimit << 24;
+	if (val <= dev_priv->rps.min_freq_softlimit)
+		limits |= dev_priv->rps.min_freq_softlimit << 16;
 
 	return limits;
 }
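
Note (illustrative, not part of the patch): the value built by gen6_rps_limits() above is what gen6_set_rps() later writes to GEN6_RP_INTERRUPT_LIMITS, with the up limit in bits 31:24 and the down limit in bits 23:16. A minimal standalone sketch of that packing, using hypothetical parameter names in place of the dev_priv fields:

	/* Hypothetical helper mirroring the shifts in gen6_rps_limits() above. */
	static u32 pack_rps_interrupt_limits(u8 max_softlimit, u8 min_softlimit, u8 val)
	{
		/* up-threshold limit lives in bits 31:24 */
		u32 limits = (u32)max_softlimit << 24;

		/* the down limit is only armed once we are at/below the soft minimum */
		if (val <= min_softlimit)
			limits |= (u32)min_softlimit << 16;	/* bits 23:16 */

		return limits;
	}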
@@ -2921,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
 	/* Max/min bins are special */
-	if (val == dev_priv->rps.min_delay)
+	if (val == dev_priv->rps.min_freq_softlimit)
 		new_power = LOW_POWER;
-	if (val == dev_priv->rps.max_delay)
+	if (val == dev_priv->rps.max_freq_softlimit)
 		new_power = HIGH_POWER;
 	if (new_power == dev_priv->rps.power)
 		return;
@@ -3014,10 +3014,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	if (val == dev_priv->rps.cur_delay) {
+	if (val == dev_priv->rps.cur_freq) {
 		/* min/max delay may still have been modified so be sure to
 		 * write the limits value */
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -3045,7 +3045,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(val * 50);
 }
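
Note on units (inferred from the code, not stated by the patch): on this non-Valleyview path the requested value is an opcode in 50 MHz steps, which is why the tracepoint above reports val * 50. Hypothetical conversion helpers as a sketch:

	/* Hypothetical helpers; the 50 MHz step is inferred from "val * 50" above. */
	static inline unsigned int rps_code_to_mhz(u8 val)
	{
		return val * 50;	/* e.g. val == 18  ->  900 MHz */
	}

	static inline u8 mhz_to_rps_code(unsigned int mhz)
	{
		return mhz / 50;	/* e.g. 300 MHz  ->  val == 6 */
	}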
@@ -3065,7 +3065,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	 * When we are idle. Drop to min voltage state.
 	 */
 
-	if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
 		return;
 
 	/* Mask turbo interrupt so that they will not come in between */
@@ -3082,10 +3082,10 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 		return;
 	}
 
-	dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-			dev_priv->rps.min_delay);
+			dev_priv->rps.min_freq_softlimit);
 
 	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
 			& GENFREQSTATUS) == 0, 5))
@@ -3099,7 +3099,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	/* Unmask Up interrupts */
 	dev_priv->rps.rp_up_masked = true;
 	gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
-				dev_priv->rps.min_delay);
+				dev_priv->rps.min_freq_softlimit);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3111,7 +3111,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 		if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
 		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3124,9 +3124,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3137,20 +3137,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay,
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq,
 			 vlv_gpu_freq(dev_priv, val), val);
 
-	if (val == dev_priv->rps.cur_delay)
+	if (val == dev_priv->rps.cur_freq)
 		return;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
@@ -3292,8 +3292,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 	/* Docs recommend 900MHz, and 300 MHz respectively */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
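
Note (an assumption, not something this hunk states): if the softlimit fields written here use the same 50 MHz encoding as the rest of this file, the documented 900 MHz and 300 MHz correspond to field values of 900 / 50 = 18 in bits 31:24 and 300 / 50 = 6 in bits 23:16.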
@@ -3352,20 +3352,22 @@ static void gen6_enable_rps(struct drm_device *dev)
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* In units of 50MHz */
-	dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = hw_max = rp_state_cap & 0xff;
+	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	dev_priv->rps.min_freq = hw_min = (rp_state_cap >> 16) & 0xff;
-	dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
-	dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
-	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
-	dev_priv->rps.cur_delay = 0;
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
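
Note (illustrative, not part of the patch): the added lines above unpack GEN6_RP_STATE_CAP into the newly named frequencies, all in 50 MHz units per the comment. A minimal sketch with hypothetical struct and helper names, following the shifts shown:

	/* Hypothetical container and decoder for the RP_STATE_CAP fields read above. */
	struct rps_hw_caps {
		u8 rp0;	/* bits  7:0  -> dev_priv->rps.rp0_freq */
		u8 rp1;	/* bits 15:8  -> rp1_freq, also used as efficient_freq */
		u8 rpn;	/* bits 23:16 -> min_freq */
	};

	static struct rps_hw_caps decode_rp_state_cap(u32 rp_state_cap)
	{
		struct rps_hw_caps caps = {
			.rp0 = (rp_state_cap >> 0) & 0xff,
			.rp1 = (rp_state_cap >> 8) & 0xff,
			.rpn = (rp_state_cap >> 16) & 0xff,
		};

		return caps;
	}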
@@ -3420,13 +3422,13 @@ static void gen6_enable_rps(struct drm_device *dev)
 	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
 	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
 		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-				 (dev_priv->rps.max_delay & 0xff) * 50,
+				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
 				 (pcu_mbox & 0xff) * 50);
-		dev_priv->rps.hw_max = pcu_mbox & 0xff;
+		dev_priv->rps.max_freq = pcu_mbox & 0xff;
 	}
 
 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3482,9 +3484,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
 	     gpu_freq--) {
-		int diff = dev_priv->rps.max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (INTEL_INFO(dev)->gen >= 8) {
@@ -3650,20 +3652,20 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
 
-	dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.max_freq = hw_max = valleyview_rps_max_freq(dev_priv);
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, hw_max),
 			 hw_max);
 
-	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
 	hw_min = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
@@ -3671,17 +3673,17 @@ static void valleyview_enable_rps(struct drm_device *dev)
 			 hw_min);
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
-	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
 	dev_priv->rps.rp_up_masked = false;
 	dev_priv->rps.rp_down_masked = false;
@@ -4122,7 +4124,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 