about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorChristian König <deathsimple@vodafone.de>2012-06-29 05:33:12 -0400
committerChristian König <deathsimple@vodafone.de>2012-07-17 04:31:39 -0400
commit7ecc45e3ef8468abb062be2a8bb427029342f42d (patch)
treeb161f84f121bc15ed6d1a511da1be5984721069f /drivers/gpu/drm
parent49099c4991da3c94773f888aea2e9d27b8a7c6d1 (diff)
drm/radeon: add error handling to fence_wait_empty_locked
Instead of returning the error handle it directly and while at it fix the comments about the ring lock. Signed-off-by: Christian König <deathsimple@vodafone.de> Reviewed-by: Michel Dänzer <michel.daenzer@amd.com> Reviewed-by: Jerome Glisse <jglisse@redhat.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c33
2 files changed, 22 insertions, 13 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 77b4519b19b8..5861ec86725f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
			   struct radeon_fence **fences,
			   bool intr);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7b55625a5e18..be4e4f390e89 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/* caller must hold ring lock */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)