Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 37
1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6553dcc2ca79..0e876646d769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                      rps_work);
-        u8 new_delay = dev_priv->cur_delay;
         u32 pm_iir, pm_imr;
+        u8 new_delay;
 
         spin_lock_irq(&dev_priv->rps_lock);
         pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
         I915_WRITE(GEN6_PMIMR, 0);
         spin_unlock_irq(&dev_priv->rps_lock);
 
-        if (!pm_iir)
+        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                 return;
 
         mutex_lock(&dev_priv->dev->struct_mutex);
-        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-                if (dev_priv->cur_delay != dev_priv->max_delay)
-                        new_delay = dev_priv->cur_delay + 1;
-                if (new_delay > dev_priv->max_delay)
-                        new_delay = dev_priv->max_delay;
-        } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-                gen6_gt_force_wake_get(dev_priv);
-                if (dev_priv->cur_delay != dev_priv->min_delay)
-                        new_delay = dev_priv->cur_delay - 1;
-                if (new_delay < dev_priv->min_delay) {
-                        new_delay = dev_priv->min_delay;
-                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                   ((new_delay << 16) & 0x3f0000));
-                } else {
-                        /* Make sure we continue to get down interrupts
-                         * until we hit the minimum frequency */
-                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-                }
-                gen6_gt_force_wake_put(dev_priv);
-        }
+
+        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+                new_delay = dev_priv->cur_delay + 1;
+        else
+                new_delay = dev_priv->cur_delay - 1;
 
         gen6_set_rps(dev_priv->dev, new_delay);
-        dev_priv->cur_delay = new_delay;
 
-        /*
-         * rps_lock not held here because clearing is non-destructive. There is
-         * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-         * by holding struct_mutex for the duration of the write.
-         */
         mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
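
The removed branches are where the work handler used to clamp new_delay to the [min_delay, max_delay] range, reprogram GEN6_RP_INTERRUPT_LIMITS, and update dev_priv->cur_delay itself. After this change the handler only picks cur_delay + 1 or cur_delay - 1 and hands the value to gen6_set_rps(), which presumably takes over that clamping and bookkeeping. The standalone C sketch below models just the removed clamping and the 0x3f0000 limits-field computation; the struct and helper names are invented for illustration and are not part of the i915 driver.

/* Userspace sketch only -- not kernel code.  The names rps_state, rps_step
 * and rps_limits_field are hypothetical and exist only to illustrate the
 * logic that disappears from gen6_pm_rps_work() in this diff. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct rps_state {
        uint8_t cur_delay;
        uint8_t min_delay;
        uint8_t max_delay;
};

/* Step the delay by one and clamp it to [min_delay, max_delay], i.e. the
 * bounds checking the old handler performed inline. */
static uint8_t rps_step(struct rps_state *rps, int up)
{
        uint8_t new_delay = up ? rps->cur_delay + 1 : rps->cur_delay - 1;

        if (new_delay > rps->max_delay)
                new_delay = rps->max_delay;
        if (new_delay < rps->min_delay)
                new_delay = rps->min_delay;

        rps->cur_delay = new_delay;
        return new_delay;
}

/* The field the removed branch OR'd into GEN6_RP_INTERRUPT_LIMITS once the
 * minimum was reached: the delay shifted into the bits selected by the
 * 0x3f0000 mask. */
static uint32_t rps_limits_field(uint8_t delay)
{
        return ((uint32_t)delay << 16) & 0x3f0000;
}

int main(void)
{
        struct rps_state rps = { .cur_delay = 7, .min_delay = 5, .max_delay = 12 };

        /* Three "down" events: the third request is clamped at min_delay. */
        for (int i = 0; i < 3; i++) {
                uint8_t delay = rps_step(&rps, 0);
                printf("cur_delay=%u limits field=0x%06" PRIx32 "\n",
                       (unsigned)delay, rps_limits_field(delay));
        }
        return 0;
}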