author    Chris Wilson <chris@chris-wilson.co.uk>    2013-11-06 10:56:26 -0500
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2013-11-07 13:13:07 -0500
commit    6917c7b9d9083272ddf7e64f5482e8820a31fb3c (patch)
tree      c083fc280b8265d9d213511206b047c1bf93862a /drivers
parent    c5bd2bf61d487cd0125433aeaadd8bb87a11ccff (diff)
drm/i915: Initialise min/max frequencies before updating RPS registers
The RPS register writing routines use the current value of min/max to
set certain limits and interrupt gating. If we set those afterwards, we
risk setting up the hw incorrectly and losing power management events,
and worse, trigger some internal assertions.

Reorder the calling sequences to be correct, and remove the then
unrequired clamping from inside set_rps(). And for a bonus, fix the bug
of calling gen6_set_rps() from Valleyview.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@gmail.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
CC: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
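To make the required ordering concrete, here is a minimal sketch modelled on the sysfs store path changed below. The wrapper name example_store_max_freq is invented for illustration only; dev_priv->rps, gen6_set_rps(), valleyview_set_rps() and gen6_rps_limits() are the names the patch actually touches. The point is that the new limit must already be stored in dev_priv->rps before the set_rps routine runs, because gen6_rps_limits() reads rps.min_delay/max_delay to program GEN6_RP_INTERRUPT_LIMITS.

/* Sketch only: mirrors the corrected gt_max_freq_mhz_store() ordering. */
static void example_store_max_freq(struct drm_device *dev, u8 val)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->rps.hw_lock);

        /* Publish the new limit first, so gen6_rps_limits() sees it when
         * the set_rps routine programs the interrupt-limits register. */
        dev_priv->rps.max_delay = val;

        /* Only then reprogram the hardware, via the platform-correct path. */
        if (dev_priv->rps.cur_delay > val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);   /* not gen6_set_rps() on VLV */
                else
                        gen6_set_rps(dev, val);
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}

With the limit published up front, the clamping that set_rps() previously performed against stale min/max values becomes unnecessary, which is why the patch also drops it from gen6_rps_limits().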
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/i915/i915_debugfs.c    2
-rw-r--r--   drivers/gpu/drm/i915/i915_sysfs.c     16
-rw-r--r--   drivers/gpu/drm/i915/intel_pm.c       19
3 files changed, 14 insertions, 23 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6889d81dc559..b5df88fa890a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2756,7 +2756,7 @@ i915_max_freq_set(void *data, u64 val)
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 		dev_priv->rps.max_delay = val;
-		gen6_set_rps(dev, val);
+		valleyview_set_rps(dev, val);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 		dev_priv->rps.max_delay = val;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 85b98111d995..fdce8824723c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -339,15 +339,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	DRM_DEBUG("User requested overclocking to %d\n",
 		  val * GT_FREQUENCY_MULTIPLIER);
 
+	dev_priv->rps.max_delay = val;
+
 	if (dev_priv->rps.cur_delay > val) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, val);
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev, val);
 		else
-			gen6_set_rps(dev_priv->dev, val);
+			gen6_set_rps(dev, val);
 	}
 
-	dev_priv->rps.max_delay = val;
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
@@ -408,15 +408,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 	}
 
+	dev_priv->rps.min_delay = val;
+
 	if (dev_priv->rps.cur_delay < val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
-			gen6_set_rps(dev_priv->dev, val);
+			gen6_set_rps(dev, val);
 	}
 
-	dev_priv->rps.min_delay = val;
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 38943f8ef260..e37860377285 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3414,26 +3414,19 @@ static void ironlake_disable_drps(struct drm_device *dev)
  * ourselves, instead of doing a rmw cycle (which might result in us clearing
  * all limits and the gpu stuck at whatever frequency it is at atm).
  */
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
 	u32 limits;
 
-	limits = 0;
-
-	if (*val >= dev_priv->rps.max_delay)
-		*val = dev_priv->rps.max_delay;
-	limits |= dev_priv->rps.max_delay << 24;
-
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
 	 * race in the hw when coming out of rc6: There's a tiny window where
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->rps.min_delay) {
-		*val = dev_priv->rps.min_delay;
+	limits = dev_priv->rps.max_delay << 24;
+	if (val <= dev_priv->rps.min_delay)
 		limits |= dev_priv->rps.min_delay << 16;
-	}
 
 	return limits;
 }
@@ -3533,7 +3526,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits = gen6_rps_limits(dev_priv, &val);
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
@@ -3556,7 +3548,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	/* Make sure we continue to get interrupts
 	 * until we hit the minimum or maximum frequencies.
 	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   gen6_rps_limits(dev_priv, val));
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
@@ -3620,8 +3613,6 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	gen6_rps_limits(dev_priv, &val);
-
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);