Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')

 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 50 ++++----------------------
 1 file changed, 8 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index fcad7e060938..003a219943f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -260,27 +260,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 				lockup_work.work);
 	ring = fence_drv->ring;
 
-	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
-		/* just reschedule the check if a reset is going on */
-		amdgpu_fence_schedule_check(ring);
-		return;
-	}
-
-	if (amdgpu_fence_activity(ring)) {
-		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-	else if (amdgpu_ring_is_lockup(ring)) {
-		/* good news we believe it's a lockup */
-		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
-			"0x%016llx last fence id 0x%016llx on ring %d)\n",
-			(uint64_t)atomic64_read(&fence_drv->last_seq),
-			fence_drv->sync_seq[ring->idx], ring->idx);
-
-		/* remember that we need an reset */
-		ring->adev->needs_reset = true;
-		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-	up_read(&ring->adev->exclusive_lock);
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
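With the exclusive_lock and needs_reset plumbing removed, the lockup-check worker reduces to a single activity poll. For readability, here is the whole function as it reads after this hunk, reconstructed from the new side of the diff; the local declarations and the container_of() call above the shown context are assumed unchanged:

	static void amdgpu_fence_check_lockup(struct work_struct *work)
	{
		struct amdgpu_fence_driver *fence_drv;
		struct amdgpu_ring *ring;

		fence_drv = container_of(work, struct amdgpu_fence_driver,
					 lockup_work.work);
		ring = fence_drv->ring;

		/* Only job left: if new fences signaled, wake any waiters. */
		if (amdgpu_fence_activity(ring))
			wake_up_all(&ring->fence_drv.fence_queue);
	}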
@@ -328,18 +309,15 @@ static bool amdgpu_fence_is_signaled(struct fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return true;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_fence_process(ring);
-		up_read(&adev->exclusive_lock);
+	amdgpu_fence_process(ring);
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
 
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-			return true;
-	}
 	return false;
 }
 
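The signaled check now follows a plain fast-path/slow-path pattern: read the cached last_seq first, and only on a miss process the ring once and re-read, with no trylock that could spuriously report "not signaled" while a reset held the lock. The resulting function, assembled from the new side of this hunk:

	static bool amdgpu_fence_is_signaled(struct fence *f)
	{
		struct amdgpu_fence *fence = to_amdgpu_fence(f);
		struct amdgpu_ring *ring = fence->ring;

		/* Fast path: the sequence number was already observed. */
		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;

		/* Slow path: poll fence activity once, then re-check. */
		amdgpu_fence_process(ring);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;

		return false;
	}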
@@ -380,7 +358,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-	struct amdgpu_device *adev = ring->adev;
 	bool signaled = false;
 
 	BUG_ON(!ring);
@@ -391,8 +368,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 		return 0;
 
 	wait_event(ring->fence_drv.fence_queue, (
-		(signaled = amdgpu_fence_seq_signaled(ring, seq))
-		|| adev->needs_reset));
+		(signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
 	if (signaled)
 		return 0;
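With the `|| adev->needs_reset` arm gone, the doubled parentheses around the wait condition are leftovers. A possible follow-up cleanup, not part of this patch, would flatten the condition while keeping the parentheses that the assignment-as-expression still needs:

	wait_event(ring->fence_drv.fence_queue,
		   (signaled = amdgpu_fence_seq_signaled(ring, seq)));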
@@ -881,16 +857,12 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 					     signed long t)
 {
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_device *adev = fence->ring->adev;
-
-	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+	return amdgpu_fence_wait_any(&f, 1, intr, t);
 }
 
 /**
  * Wait the fence array with timeout
  *
- * @adev: amdgpu device
  * @array: the fence array with amdgpu fence pointer
  * @count: the number of the fence array
  * @intr: when sleep, set the current task interruptable or not
@@ -898,8 +870,7 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
  *
  * It will return when any fence is signaled or timeout.
  */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct fence **array, uint32_t count,
+signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count,
 				  bool intr, signed long t)
 {
 	struct amdgpu_wait_cb *cb;
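Every caller of amdgpu_fence_wait_any() now drops the adev argument, exactly as amdgpu_fence_default_wait() does above. A sketch of an updated call site; fence_a, fence_b and timeout are placeholder names, not from this patch, and the return convention (remaining jiffies on success, 0 on timeout, negative on error) is assumed from the schedule_timeout()-style loop shown in the next hunk:

	struct fence *fences[2] = { fence_a, fence_b };	/* hypothetical */
	signed long r;

	/* Interruptible wait until either fence signals or timeout. */
	r = amdgpu_fence_wait_any(fences, 2, true, timeout);
	if (r < 0)
		return r;	/* e.g. interrupted by a signal */
	if (r == 0)
		return -ETIME;	/* nothing signaled in time */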
@@ -939,11 +910,6 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 		if (amdgpu_test_signaled_any(array, count))
 			break;
 
-		if (adev->needs_reset) {
-			t = -EDEADLK;
-			break;
-		}
-
 		t = schedule_timeout(t);
 
 		if (t > 0 && intr && signal_pending(current))
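After this deletion the wait loop has no reset escape hatch; waiters that used to bail out with -EDEADLK during a lockup now simply run their timeout down. A sketch of the loop as it reads post-patch; the set_current_state() and -ERESTARTSYS lines are assumptions about the surrounding context, which this hunk does not show:

	while (t > 0) {
		set_current_state(intr ? TASK_INTERRUPTIBLE :
					 TASK_UNINTERRUPTIBLE);	/* assumed */

		/* Done as soon as any fence in the array has signaled. */
		if (amdgpu_test_signaled_any(array, count))
			break;

		/* No reset short-circuit: sleep until woken or timed out. */
		t = schedule_timeout(t);

		/* Interruptible waits bail out on pending signals. */
		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;	/* assumed */
	}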