Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  49
1 file changed, 33 insertions(+), 16 deletions(-)
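
The change below makes radeon_fence_wait_empty_locked() return an error code instead of looping internally: on a GPU lockup it now returns -EDEADLK rather than dropping the ring lock and calling radeon_gpu_reset() itself, so the reset policy moves to the callers. A caller that wants the old retry behavior would do roughly the following (a hypothetical sketch reassembled from the deleted lines, not part of the patch):

	int r;

	r = radeon_fence_wait_empty_locked(rdev, ring);
	if (r == -EDEADLK) {
		/* lockup: drop the ring lock, attempt a reset, then retry once */
		mutex_unlock(&rdev->ring_lock);
		r = radeon_gpu_reset(rdev);
		mutex_lock(&rdev->ring_lock);
		if (!r)
			r = radeon_fence_wait_empty_locked(rdev, ring);
	}

During driver teardown, radeon_fence_driver_fini() instead takes the new path: a failed wait triggers radeon_fence_driver_force_completion(), which retires all outstanding fences without touching the hardware.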
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 410a975a8eec..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	int r;
 
-	while(1) {
-		int r;
-		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	if (r) {
 		if (r == -EDEADLK) {
-			mutex_unlock(&rdev->ring_lock);
-			r = radeon_gpu_reset(rdev);
-			mutex_lock(&rdev->ring_lock);
-			if (!r)
-				continue;
-		}
-		if (r) {
-			dev_err(rdev->dev, "error waiting for ring to become"
-				" idle (%d)\n", r);
+			return -EDEADLK;
 		}
-		return;
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+			ring, r);
 	}
+	return 0;
 }
 
 /**
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
  */
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
-	int ring;
+	int ring, r;
 
 	mutex_lock(&rdev->ring_lock);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty_locked(rdev, ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			radeon_fence_driver_force_completion(rdev);
+		}
 		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 	mutex_unlock(&rdev->ring_lock);
 }
 
+/**
+ * radeon_fence_driver_force_completion - force all fence waiter to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keep waiting on fence
+ * that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+	}
+}
+
 
 /*
  * Fence debugfs
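
A note on the mechanism for readers skimming the hunks: radeon_fence_driver_force_completion() works because waiters compare their target sequence number against a monotonically increasing completed counter, so writing each ring's last emitted seqno (sync_seq[ring]) via radeon_fence_write() retires every outstanding fence at once, even though the GPU never executed the work. Below is a minimal user-space model of that "publish the last emitted seqno" idea; all names and the struct layout are illustrative, not the driver's:

/* Toy model of fence force-completion, compilable stand-alone. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#define NUM_RINGS 3

struct fence_model {
	uint64_t emitted[NUM_RINGS];   /* last seqno handed out per ring */
	uint64_t signaled[NUM_RINGS];  /* last seqno the "GPU" completed */
	bool hung;                     /* models a GPU that stopped responding */
};

/* A wait succeeds once the signaled counter reaches the target seqno. */
static int wait_seq(struct fence_model *f, int ring, uint64_t seq)
{
	if (f->signaled[ring] >= seq)
		return 0;
	/* the real code blocks with a timeout; a hung GPU yields -EDEADLK */
	return f->hung ? -EDEADLK : -EBUSY;
}

/* Counterpart of radeon_fence_driver_force_completion(): mark everything
 * emitted as signaled so no waiter stays stuck on work that will never run. */
static void force_completion(struct fence_model *f)
{
	for (int ring = 0; ring < NUM_RINGS; ring++)
		f->signaled[ring] = f->emitted[ring];
}

int main(void)
{
	struct fence_model f = { .emitted = { 5, 2, 9 }, .hung = true };

	int r = wait_seq(&f, 0, 5);
	if (r == -EDEADLK) {
		/* unloading (or a failed reset): unblock waiters instead */
		force_completion(&f);
		r = wait_seq(&f, 0, 5);
	}
	printf("wait after force_completion: %d\n", r); /* prints 0 */
	return 0;
}

This is only safe at points where the hardware is known to be dead or going away (here, driver teardown), since it declares work complete that was never actually executed.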