diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-12-12 08:06:44 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-12-12 16:59:24 -0500 |
commit | 09153000b8ca32a539a1207edebabd0d40b6c61b (patch) | |
tree | afb797432fbb708cc2d7bdc7e18196c078ffb9e6 /drivers/gpu/drm/i915/intel_display.c | |
parent | 20afbda209d708be66944907966486d0c1331cb8 (diff) |
drm/i915: rework locking for intel_dpio|sbi_read|write
Spinning for up to 200 us with interrupts locked out is not good. So
let's just spin (and even that seems to be excessive).
And we don't call these functions from interrupt context, so this is
not required. Besides that doing anything in interrupt contexts which
might take a few hundred us is a no-go. So just convert the entire
thing to a mutex. Also move the mutex-grabbing out of the read/write
functions (add a WARN_ON(!is_locked) instead) since all callers are
nicely grouped together.
Finally the real motivation for this change: Don't grab the modeset
mutex in the dpio debugfs file, we don't need that consistency. And
correctness of the dpio interface is ensured with the dpio_lock.
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 53 |
1 files changed, 21 insertions, 32 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 115bf626037c..2a01b09221fb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = { | |||
416 | 416 | ||
417 | u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) | 417 | u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) |
418 | { | 418 | { |
419 | unsigned long flags; | 419 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
420 | u32 val = 0; | ||
421 | 420 | ||
422 | spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||
423 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { | 421 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
424 | DRM_ERROR("DPIO idle wait timed out\n"); | 422 | DRM_ERROR("DPIO idle wait timed out\n"); |
425 | goto out_unlock; | 423 | return 0; |
426 | } | 424 | } |
427 | 425 | ||
428 | I915_WRITE(DPIO_REG, reg); | 426 | I915_WRITE(DPIO_REG, reg); |
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) | |||
430 | DPIO_BYTE); | 428 | DPIO_BYTE); |
431 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { | 429 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
432 | DRM_ERROR("DPIO read wait timed out\n"); | 430 | DRM_ERROR("DPIO read wait timed out\n"); |
433 | goto out_unlock; | 431 | return 0; |
434 | } | 432 | } |
435 | val = I915_READ(DPIO_DATA); | ||
436 | 433 | ||
437 | out_unlock: | 434 | return I915_READ(DPIO_DATA); |
438 | spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); | ||
439 | return val; | ||
440 | } | 435 | } |
441 | 436 | ||
442 | static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, | 437 | static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, |
443 | u32 val) | 438 | u32 val) |
444 | { | 439 | { |
445 | unsigned long flags; | 440 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
446 | 441 | ||
447 | spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||
448 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { | 442 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
449 | DRM_ERROR("DPIO idle wait timed out\n"); | 443 | DRM_ERROR("DPIO idle wait timed out\n"); |
450 | goto out_unlock; | 444 | return; |
451 | } | 445 | } |
452 | 446 | ||
453 | I915_WRITE(DPIO_DATA, val); | 447 | I915_WRITE(DPIO_DATA, val); |
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, | |||
456 | DPIO_BYTE); | 450 | DPIO_BYTE); |
457 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) | 451 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) |
458 | DRM_ERROR("DPIO write wait timed out\n"); | 452 | DRM_ERROR("DPIO write wait timed out\n"); |
459 | |||
460 | out_unlock: | ||
461 | spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); | ||
462 | } | 453 | } |
463 | 454 | ||
464 | static void vlv_init_dpio(struct drm_device *dev) | 455 | static void vlv_init_dpio(struct drm_device *dev) |
@@ -1455,13 +1446,12 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
1455 | static void | 1446 | static void |
1456 | intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) | 1447 | intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) |
1457 | { | 1448 | { |
1458 | unsigned long flags; | 1449 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1459 | 1450 | ||
1460 | spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||
1461 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, | 1451 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1462 | 100)) { | 1452 | 100)) { |
1463 | DRM_ERROR("timeout waiting for SBI to become ready\n"); | 1453 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1464 | goto out_unlock; | 1454 | return; |
1465 | } | 1455 | } |
1466 | 1456 | ||
1467 | I915_WRITE(SBI_ADDR, | 1457 | I915_WRITE(SBI_ADDR, |
@@ -1475,24 +1465,19 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) | |||
1475 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, | 1465 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1476 | 100)) { | 1466 | 100)) { |
1477 | DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); | 1467 | DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); |
1478 | goto out_unlock; | 1468 | return; |
1479 | } | 1469 | } |
1480 | |||
1481 | out_unlock: | ||
1482 | spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); | ||
1483 | } | 1470 | } |
1484 | 1471 | ||
1485 | static u32 | 1472 | static u32 |
1486 | intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) | 1473 | intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) |
1487 | { | 1474 | { |
1488 | unsigned long flags; | 1475 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1489 | u32 value = 0; | ||
1490 | 1476 | ||
1491 | spin_lock_irqsave(&dev_priv->dpio_lock, flags); | ||
1492 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, | 1477 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1493 | 100)) { | 1478 | 100)) { |
1494 | DRM_ERROR("timeout waiting for SBI to become ready\n"); | 1479 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1495 | goto out_unlock; | 1480 | return 0; |
1496 | } | 1481 | } |
1497 | 1482 | ||
1498 | I915_WRITE(SBI_ADDR, | 1483 | I915_WRITE(SBI_ADDR, |
@@ -1504,14 +1489,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) | |||
1504 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, | 1489 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1505 | 100)) { | 1490 | 100)) { |
1506 | DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); | 1491 | DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); |
1507 | goto out_unlock; | 1492 | return 0; |
1508 | } | 1493 | } |
1509 | 1494 | ||
1510 | value = I915_READ(SBI_DATA); | 1495 | return I915_READ(SBI_DATA); |
1511 | |||
1512 | out_unlock: | ||
1513 | spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); | ||
1514 | return value; | ||
1515 | } | 1496 | } |
1516 | 1497 | ||
1517 | /** | 1498 | /** |
@@ -2924,6 +2905,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
2924 | u32 divsel, phaseinc, auxdiv, phasedir = 0; | 2905 | u32 divsel, phaseinc, auxdiv, phasedir = 0; |
2925 | u32 temp; | 2906 | u32 temp; |
2926 | 2907 | ||
2908 | mutex_lock(&dev_priv->dpio_lock); | ||
2909 | |||
2927 | /* It is necessary to ungate the pixclk gate prior to programming | 2910 | /* It is necessary to ungate the pixclk gate prior to programming |
2928 | * the divisors, and gate it back when it is done. | 2911 | * the divisors, and gate it back when it is done. |
2929 | */ | 2912 | */ |
@@ -3005,6 +2988,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
3005 | udelay(24); | 2988 | udelay(24); |
3006 | 2989 | ||
3007 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); | 2990 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); |
2991 | |||
2992 | mutex_unlock(&dev_priv->dpio_lock); | ||
3008 | } | 2993 | } |
3009 | 2994 | ||
3010 | /* | 2995 | /* |
@@ -4222,6 +4207,8 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4222 | bool is_sdvo; | 4207 | bool is_sdvo; |
4223 | u32 temp; | 4208 | u32 temp; |
4224 | 4209 | ||
4210 | mutex_lock(&dev_priv->dpio_lock); | ||
4211 | |||
4225 | is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || | 4212 | is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
4226 | intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); | 4213 | intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
4227 | 4214 | ||
@@ -4305,6 +4292,8 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4305 | temp |= (1 << 21); | 4292 | temp |= (1 << 21); |
4306 | intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); | 4293 | intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); |
4307 | } | 4294 | } |
4295 | |||
4296 | mutex_unlock(&dev_priv->dpio_lock); | ||
4308 | } | 4297 | } |
4309 | 4298 | ||
4310 | static void i9xx_update_pll(struct drm_crtc *crtc, | 4299 | static void i9xx_update_pll(struct drm_crtc *crtc, |