path: root/drivers/gpu/drm/radeon/si.c
author	Christian Koenig <christian.koenig@amd.com>	2012-05-16 15:45:24 -0400
committer	Christian König <deathsimple@vodafone.de>	2012-06-21 03:38:48 -0400
commit	c20dc3698dc7ecf053e2bf77299ae5982c0c2c45 (patch)
tree	e150268483bee4137d9c22e3121171b23a53be76 /drivers/gpu/drm/radeon/si.c
parent	6823d74003abedd688a3f535aefe6ce0e06444fd (diff)
drm/radeon: fix & improve ih ring handling v3
The spinlock was actually there to protect the rptr, but rptr was read outside of the locked area. Also we don't really need a spinlock here; an atomic should do quite fine since we only need to prevent it from being reentrant.

v2: Keep the spinlock...
v3: Back to an atomic again after finding & fixing the real bug.

Signed-off-by: Christian Koenig <christian.koenig@amd.com>
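As a quick illustration of the idea in the message, here is a minimal sketch of an atomic_xchg()-based reentrancy guard; the names (ih_guard, sketch_irq_guard) are hypothetical placeholders, not the actual radeon fields or functions. atomic_xchg() returns the previous value, so only the caller that flips the guard from 0 to 1 enters the processing path.

#include <linux/atomic.h>
#include <linux/interrupt.h>

/* Hypothetical sketch of the guard described above, not radeon code. */
static atomic_t ih_guard = ATOMIC_INIT(0);

static irqreturn_t sketch_irq_guard(void)
{
	/* first caller swaps in a 1 and proceeds; later callers see 1 and back off */
	if (atomic_xchg(&ih_guard, 1))
		return IRQ_NONE;	/* somebody else is already processing */

	/* ... walk the IH ring entries here ... */

	atomic_set(&ih_guard, 0);	/* open the guard again */
	return IRQ_HANDLED;
}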
Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
-rw-r--r--	drivers/gpu/drm/radeon/si.c	27
1 files changed, 13 insertions, 14 deletions
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8868a1fa20e5..ecef972050d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2942,7 +2942,6 @@ static void si_disable_interrupts(struct radeon_device *rdev)
 	WREG32(IH_RB_RPTR, 0);
 	WREG32(IH_RB_WPTR, 0);
 	rdev->ih.enabled = false;
-	rdev->ih.wptr = 0;
 	rdev->ih.rptr = 0;
 }
 
@@ -3359,29 +3358,27 @@ int si_irq_process(struct radeon_device *rdev)
 	u32 rptr;
 	u32 src_id, src_data, ring_id;
 	u32 ring_index;
-	unsigned long flags;
 	bool queue_hotplug = false;
 
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
 
 	wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
 	rptr = rdev->ih.rptr;
 	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
-	spin_lock_irqsave(&rdev->ih.lock, flags);
-	if (rptr == wptr) {
-		spin_unlock_irqrestore(&rdev->ih.lock, flags);
-		return IRQ_NONE;
-	}
-restart_ih:
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
 
 	/* display interrupts */
 	si_irq_ack(rdev);
 
-	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
@@ -3632,15 +3629,17 @@ restart_ih:
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
 	}
-	/* make sure wptr hasn't changed while processing */
-	wptr = si_get_ih_wptr(rdev);
-	if (wptr != rdev->ih.wptr)
-		goto restart_ih;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
-	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = si_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
 	return IRQ_HANDLED;
 }
 
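One subtlety the final hunk relies on: while the guard is held, any further interrupt bounces off atomic_xchg() and returns IRQ_NONE, so its ring entries are only serviced if the current owner drops the guard, re-reads the write pointer, and jumps back to restart_ih when it has moved. A hedged, stand-alone sketch of that shape; fetch_wptr(), consume_entries() and the other names are hypothetical stand-ins, not radeon code.

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the hardware write-pointer read and the
 * per-entry ring walk; these are not radeon functions.
 */
static u32 fetch_wptr(void)
{
	return 0;	/* stand-in: the real code reads the IH_RB_WPTR register */
}

static u32 consume_entries(u32 rptr, u32 wptr)
{
	/* stand-in: the real code decodes 16-byte IH entries from rptr to wptr */
	return wptr;
}

static atomic_t ih_guard = ATOMIC_INIT(0);
static u32 saved_rptr;	/* persists across calls, like rdev->ih.rptr */

static irqreturn_t sketch_irq_process(void)
{
	u32 wptr = fetch_wptr();
	u32 rptr;

restart:
	/* is somebody else already processing? */
	if (atomic_xchg(&ih_guard, 1))
		return IRQ_NONE;

	rptr = consume_entries(saved_rptr, wptr);	/* drain up to the wptr we saw */
	saved_rptr = rptr;

	atomic_set(&ih_guard, 0);	/* drop the guard first ... */

	wptr = fetch_wptr();		/* ... then check whether more work arrived */
	if (wptr != rptr)
		goto restart;		/* re-take the guard and drain again */

	return IRQ_HANDLED;
}

Re-reading the write pointer only after atomic_set() closes the window in which fresh entries could arrive unseen between the last ring walk and the guard release.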