 drivers/gpu/drm/i915/i915_debugfs.c |  26 +-
 drivers/gpu/drm/i915/i915_drv.h     |  26 +-
 drivers/gpu/drm/i915/i915_irq.c     |  25 +-
 drivers/gpu/drm/i915/i915_sysfs.c   |  32 +-
 drivers/gpu/drm/i915/intel_pm.c     | 118 +-
 5 files changed, 120 insertions(+), 107 deletions(-)
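The change is almost entirely a mechanical rename of the RPS frequency bookkeeping in struct intel_gen6_power_mgmt (plus new comments and a reordered init block in gen6_enable_rps):

	cur_delay  -> cur_freq
	min_delay  -> min_freq_softlimit
	max_delay  -> max_freq_softlimit
	hw_max     -> max_freq
	rpe_delay  -> efficient_freq
	rp1_delay  -> rp1_freq
	rp0_delay  -> rp0_freq

min_freq (the RPn hard minimum) keeps its name.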
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 603791380cdf..d1e0a360558f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1026,7 +1026,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
 		u32 freq_sts, val;
 
@@ -1498,8 +1498,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_delay;
-	     gpu_freq <= dev_priv->rps.max_delay;
+	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
 	     gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
@@ -3449,9 +3449,9 @@ i915_max_freq_get(void *data, u64 *val)
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
 	else
-		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3488,16 +3488,16 @@ i915_max_freq_set(void *data, u64 val)
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
 
-	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.max_delay = val;
+	dev_priv->rps.max_freq_softlimit = val;
 
 	if (IS_VALLEYVIEW(dev))
 		valleyview_set_rps(dev, val);
@@ -3530,9 +3530,9 @@ i915_min_freq_get(void *data, u64 *val)
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
 	else
-		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3569,16 +3569,16 @@ i915_min_freq_set(void *data, u64 val)
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_delay = val;
+	dev_priv->rps.min_freq_softlimit = val;
 
 	if (IS_VALLEYVIEW(dev))
 		valleyview_set_rps(dev, val);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 241f5e16cee6..c5c57608460c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -980,14 +980,24 @@ struct intel_gen6_power_mgmt {
 	struct work_struct work;
 	u32 pm_iir;
 
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 rpe_delay;
-	u8 rp1_delay;
-	u8 rp0_delay;
-	u8 hw_max;
-	u8 min_freq;
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
+	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
+	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
+	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq;		/* "less than" RP0 power/frequency */
+	u8 rp0_freq;		/* Non-overclocked max frequency. */
 
 	bool rp_up_masked;
 	bool rp_down_masked;
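Read together, the new comments pin down an ordering between hard and soft limits. A minimal sketch of that invariant, assuming the fields behave exactly as documented (this helper is illustrative and not part of the patch):

	/* Hypothetical sanity check, not in the patch: the hard limits bound
	 * the soft limits used for dynamic reclocking. All values are in the
	 * platform-dependent multiples described above. */
	static inline void rps_assert_limits(const struct intel_gen6_power_mgmt *rps)
	{
		WARN_ON(rps->min_freq > rps->min_freq_softlimit);	/* RPn floor */
		WARN_ON(rps->min_freq_softlimit > rps->max_freq_softlimit);
		WARN_ON(rps->max_freq_softlimit > rps->max_freq);	/* RP0/OC ceiling */
	}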
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c00751eca69..acf1ab3ff0d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
 			u32 pm_iir, int new_delay)
 {
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (new_delay >= dev_priv->rps.max_delay) {
+		if (new_delay >= dev_priv->rps.max_freq_softlimit) {
 			/* Mask UP THRESHOLD Interrupts */
 			I915_WRITE(GEN6_PMINTRMSK,
 				   I915_READ(GEN6_PMINTRMSK) |
@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
 			dev_priv->rps.rp_down_masked = false;
 		}
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-		if (new_delay <= dev_priv->rps.min_delay) {
+		if (new_delay <= dev_priv->rps.min_freq_softlimit) {
 			/* Mask DOWN THRESHOLD Interrupts */
 			I915_WRITE(GEN6_PMINTRMSK,
 				   I915_READ(GEN6_PMINTRMSK) |
@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work)
 			adj *= 2;
 		else
 			adj = 1;
-		new_delay = dev_priv->rps.cur_delay + adj;
+		new_delay = dev_priv->rps.cur_freq + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (new_delay < dev_priv->rps.rpe_delay)
-			new_delay = dev_priv->rps.rpe_delay;
+		if (new_delay < dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-			new_delay = dev_priv->rps.rpe_delay;
+		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
 		else
-			new_delay = dev_priv->rps.min_delay;
+			new_delay = dev_priv->rps.min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 		if (adj < 0)
 			adj *= 2;
 		else
 			adj = -1;
-		new_delay = dev_priv->rps.cur_delay + adj;
+		new_delay = dev_priv->rps.cur_freq + adj;
 	} else { /* unknown event */
-		new_delay = dev_priv->rps.cur_delay;
+		new_delay = dev_priv->rps.cur_freq;
 	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
 	new_delay = clamp_t(int, new_delay,
-			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+			    dev_priv->rps.min_freq_softlimit,
+			    dev_priv->rps.max_freq_softlimit);
 
 	gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
-	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		valleyview_set_rps(dev_priv->dev, new_delay);
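The ramp logic above doubles the step on consecutive up or down events and relies on clamp_t() to keep the request inside the sysfs soft limits. A small standalone walk-through with illustrative numbers (ignoring the jump-to-RPe special case):

	/* Not driver code: simulate consecutive GEN6_PM_RP_UP_THRESHOLD events. */
	int cur = 10, last_adj = 0, max_soft = 20;
	for (int i = 0; i < 5; i++) {
		int adj = last_adj > 0 ? last_adj * 2 : 1;	/* 1, 2, 4, 8, ... */
		int new_delay = cur + adj;
		if (new_delay > max_soft)	/* what clamp_t() does here */
			new_delay = max_soft;
		last_adj = new_delay - cur;	/* as stored in rps.last_adj */
		cur = new_delay;		/* requests: 11, 13, 17, 20, 20 */
	}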
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e9ffefb720de..e3fa8cd419da 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
-		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
 	else
-		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -334,13 +334,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		non_oc_max = (rp_state_cap & 0xff);
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
 	if (val < hw_min || val > hw_max ||
-	    val < dev_priv->rps.min_delay) {
+	    val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -349,9 +349,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 
-	dev_priv->rps.max_delay = val;
+	dev_priv->rps.max_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay > val) {
+	if (dev_priv->rps.cur_freq > val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -360,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		/* We still need gen6_set_rps to process the new max_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -379,9 +379,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
 	else
-		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -414,18 +414,18 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_delay = val;
+	dev_priv->rps.min_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay < val) {
+	if (dev_priv->rps.cur_freq < val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -434,7 +434,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		/* We still need gen6_set_rps to process the new min_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd631d19b2bd..3db7c40cc9ae 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2907,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	limits = dev_priv->rps.max_delay << 24;
-	if (val <= dev_priv->rps.min_delay)
-		limits |= dev_priv->rps.min_delay << 16;
+	limits = dev_priv->rps.max_freq_softlimit << 24;
+	if (val <= dev_priv->rps.min_freq_softlimit)
+		limits |= dev_priv->rps.min_freq_softlimit << 16;
 
 	return limits;
 }
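For concreteness, a worked example of the packing above, with hypothetical soft limits of 22 and 7 (1100 MHz and 350 MHz in the 50 MHz units these fields use):

	u8 val = 7;			/* requested frequency, at the floor */
	u8 max_soft = 22, min_soft = 7;
	u32 limits = max_soft << 24;	/* 0x16000000: up-interrupt ceiling */
	if (val <= min_soft)
		limits |= min_soft << 16;	/* -> 0x16070000: down-interrupt floor */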
@@ -2921,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
 	/* Max/min bins are special */
-	if (val == dev_priv->rps.min_delay)
+	if (val == dev_priv->rps.min_freq_softlimit)
 		new_power = LOW_POWER;
-	if (val == dev_priv->rps.max_delay)
+	if (val == dev_priv->rps.max_freq_softlimit)
 		new_power = HIGH_POWER;
 	if (new_power == dev_priv->rps.power)
 		return;
@@ -3014,10 +3014,10 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	if (val == dev_priv->rps.cur_delay) {
+	if (val == dev_priv->rps.cur_freq) {
 		/* min/max delay may still have been modified so be sure to
 		 * write the limits value */
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -3045,7 +3045,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(val * 50);
 }
@@ -3065,7 +3065,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	 * When we are idle. Drop to min voltage state.
 	 */
 
-	if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
 		return;
 
 	/* Mask turbo interrupt so that they will not come in between */
@@ -3082,10 +3082,10 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 		return;
 	}
 
-	dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-			dev_priv->rps.min_delay);
+			dev_priv->rps.min_freq_softlimit);
 
 	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
 		      & GENFREQSTATUS) == 0, 5))
@@ -3099,7 +3099,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	/* Unmask Up interrupts */
 	dev_priv->rps.rp_up_masked = true;
 	gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
-			 dev_priv->rps.min_delay);
+			 dev_priv->rps.min_freq_softlimit);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3105 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 3105 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
@@ -3111,7 +3111,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
3111 | if (IS_VALLEYVIEW(dev)) | 3111 | if (IS_VALLEYVIEW(dev)) |
3112 | vlv_set_rps_idle(dev_priv); | 3112 | vlv_set_rps_idle(dev_priv); |
3113 | else | 3113 | else |
3114 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3114 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); |
3115 | dev_priv->rps.last_adj = 0; | 3115 | dev_priv->rps.last_adj = 0; |
3116 | } | 3116 | } |
3117 | mutex_unlock(&dev_priv->rps.hw_lock); | 3117 | mutex_unlock(&dev_priv->rps.hw_lock); |
@@ -3124,9 +3124,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3137,20 +3137,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay,
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq,
 			 vlv_gpu_freq(dev_priv, val), val);
 
-	if (val == dev_priv->rps.cur_delay)
+	if (val == dev_priv->rps.cur_freq)
 		return;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
@@ -3292,8 +3292,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 	/* Docs recommend 900MHz, and 300 MHz respectively */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3352,20 +3352,22 @@ static void gen6_enable_rps(struct drm_device *dev)
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* In units of 50MHz */
-	dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = hw_max = rp_state_cap & 0xff;
+	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	dev_priv->rps.min_freq = hw_min = (rp_state_cap >> 16) & 0xff;
-	dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
-	dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
-	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
-	dev_priv->rps.cur_delay = 0;
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
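The init block above doubles as documentation of the GEN6_RP_STATE_CAP layout: RP0 in bits 7:0, RP1 in bits 15:8, RPn in bits 23:16. A standalone decode with a made-up register value:

	/* Illustrative only, mirroring the shifts in the init block. */
	u32 rp_state_cap = 0x0e1016;		/* hypothetical readback */
	u8 rp0 = (rp_state_cap >>  0) & 0xff;	/* 22 -> 1100 MHz, non-OC max */
	u8 rp1 = (rp_state_cap >>  8) & 0xff;	/* 16 ->  800 MHz */
	u8 rpn = (rp_state_cap >> 16) & 0xff;	/* 14 ->  700 MHz, min_freq */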
@@ -3420,13 +3422,13 @@ static void gen6_enable_rps(struct drm_device *dev)
 	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
 	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
 		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-				 (dev_priv->rps.max_delay & 0xff) * 50,
+				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
 				 (pcu_mbox & 0xff) * 50);
-		dev_priv->rps.hw_max = pcu_mbox & 0xff;
+		dev_priv->rps.max_freq = pcu_mbox & 0xff;
 	}
 
 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3482,9 +3484,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
 	     gpu_freq--) {
-		int diff = dev_priv->rps.max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (INTEL_INFO(dev)->gen >= 8) {
@@ -3650,20 +3652,20 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
 
-	dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.max_freq = hw_max = valleyview_rps_max_freq(dev_priv);
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, hw_max),
 			 hw_max);
 
-	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
 	hw_min = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
@@ -3671,17 +3673,17 @@ static void valleyview_enable_rps(struct drm_device *dev)
 			 hw_min);
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
-	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
 	dev_priv->rps.rp_up_masked = false;
 	dev_priv->rps.rp_down_masked = false;
@@ -4122,7 +4124,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 