diff options
author:    Christian König <christian.koenig@amd.com>    2015-10-20 11:38:07 -0400
committer: Alex Deucher <alexander.deucher@amd.com>      2015-10-30 01:51:11 -0400
commit:    ee327caf1a2cdba9313167c36db2d7ff02d534bc (patch)
tree:      c09c7b851e4a819944375b0c420cc2e689550c4e
parent:    318cd340c5573a1f021f5b7711893133fe5e8480 (diff)
drm/amdgpu: switch to common fence_wait_any_timeout v2
No need to duplicate the functionality any more.
v2: fix handling if no fence is available.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 98 +-----------------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c    | 20 ++++--
 3 files changed, 13 insertions(+), 109 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a9c0def6ce31..dd7d2ce2355c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -447,10 +447,6 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring); | |||
447 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | 447 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); |
448 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | 448 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); |
449 | 449 | ||
450 | signed long amdgpu_fence_wait_any(struct fence **array, | ||
451 | uint32_t count, | ||
452 | bool intr, | ||
453 | signed long t); | ||
454 | struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); | 450 | struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); |
455 | void amdgpu_fence_unref(struct amdgpu_fence **fence); | 451 | void amdgpu_fence_unref(struct amdgpu_fence **fence); |
456 | 452 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 663caa9d1175..c4bb28292b9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -822,104 +822,6 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f) | |||
822 | return (const char *)fence->ring->name; | 822 | return (const char *)fence->ring->name; |
823 | } | 823 | } |
824 | 824 | ||
825 | static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count) | ||
826 | { | ||
827 | int idx; | ||
828 | struct fence *fence; | ||
829 | |||
830 | for (idx = 0; idx < count; ++idx) { | ||
831 | fence = fences[idx]; | ||
832 | if (fence) { | ||
833 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
834 | return true; | ||
835 | } | ||
836 | } | ||
837 | return false; | ||
838 | } | ||
839 | |||
840 | struct amdgpu_wait_cb { | ||
841 | struct fence_cb base; | ||
842 | struct task_struct *task; | ||
843 | }; | ||
844 | |||
845 | static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
846 | { | ||
847 | struct amdgpu_wait_cb *wait = | ||
848 | container_of(cb, struct amdgpu_wait_cb, base); | ||
849 | wake_up_process(wait->task); | ||
850 | } | ||
851 | |||
852 | /** | ||
853 | * Wait the fence array with timeout | ||
854 | * | ||
855 | * @array: the fence array with amdgpu fence pointer | ||
856 | * @count: the number of the fence array | ||
857 | * @intr: when sleep, set the current task interruptable or not | ||
858 | * @t: timeout to wait | ||
859 | * | ||
860 | * It will return when any fence is signaled or timeout. | ||
861 | */ | ||
862 | signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count, | ||
863 | bool intr, signed long t) | ||
864 | { | ||
865 | struct amdgpu_wait_cb *cb; | ||
866 | struct fence *fence; | ||
867 | unsigned idx; | ||
868 | |||
869 | BUG_ON(!array); | ||
870 | |||
871 | cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL); | ||
872 | if (cb == NULL) { | ||
873 | t = -ENOMEM; | ||
874 | goto err_free_cb; | ||
875 | } | ||
876 | |||
877 | for (idx = 0; idx < count; ++idx) { | ||
878 | fence = array[idx]; | ||
879 | if (fence) { | ||
880 | cb[idx].task = current; | ||
881 | if (fence_add_callback(fence, | ||
882 | &cb[idx].base, amdgpu_fence_wait_cb)) { | ||
883 | /* The fence is already signaled */ | ||
884 | goto fence_rm_cb; | ||
885 | } | ||
886 | } | ||
887 | } | ||
888 | |||
889 | while (t > 0) { | ||
890 | if (intr) | ||
891 | set_current_state(TASK_INTERRUPTIBLE); | ||
892 | else | ||
893 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
894 | |||
895 | /* | ||
896 | * amdgpu_test_signaled_any must be called after | ||
897 | * set_current_state to prevent a race with wake_up_process | ||
898 | */ | ||
899 | if (amdgpu_test_signaled_any(array, count)) | ||
900 | break; | ||
901 | |||
902 | t = schedule_timeout(t); | ||
903 | |||
904 | if (t > 0 && intr && signal_pending(current)) | ||
905 | t = -ERESTARTSYS; | ||
906 | } | ||
907 | |||
908 | __set_current_state(TASK_RUNNING); | ||
909 | |||
910 | fence_rm_cb: | ||
911 | for (idx = 0; idx < count; ++idx) { | ||
912 | fence = array[idx]; | ||
913 | if (fence && cb[idx].base.func) | ||
914 | fence_remove_callback(fence, &cb[idx].base); | ||
915 | } | ||
916 | |||
917 | err_free_cb: | ||
918 | kfree(cb); | ||
919 | |||
920 | return t; | ||
921 | } | ||
922 | |||
923 | const struct fence_ops amdgpu_fence_ops = { | 825 | const struct fence_ops amdgpu_fence_ops = { |
924 | .get_driver_name = amdgpu_fence_get_driver_name, | 826 | .get_driver_name = amdgpu_fence_get_driver_name, |
925 | .get_timeline_name = amdgpu_fence_get_timeline_name, | 827 | .get_timeline_name = amdgpu_fence_get_timeline_name, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 5cb27d525e43..3f48759793de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
@@ -337,6 +337,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, | |||
337 | { | 337 | { |
338 | struct fence *fences[AMDGPU_MAX_RINGS]; | 338 | struct fence *fences[AMDGPU_MAX_RINGS]; |
339 | unsigned tries[AMDGPU_MAX_RINGS]; | 339 | unsigned tries[AMDGPU_MAX_RINGS]; |
340 | unsigned count; | ||
340 | int i, r; | 341 | int i, r; |
341 | signed long t; | 342 | signed long t; |
342 | 343 | ||
@@ -371,13 +372,18 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, | |||
371 | /* see if we can skip over some allocations */ | 372 | /* see if we can skip over some allocations */ |
372 | } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); | 373 | } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); |
373 | 374 | ||
374 | spin_unlock(&sa_manager->wq.lock); | 375 | for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) |
375 | t = amdgpu_fence_wait_any(fences, AMDGPU_MAX_RINGS, | 376 | if (fences[i]) |
376 | false, MAX_SCHEDULE_TIMEOUT); | 377 | fences[count++] = fences[i]; |
377 | r = (t > 0) ? 0 : t; | 378 | |
378 | spin_lock(&sa_manager->wq.lock); | 379 | if (count) { |
379 | /* if we have nothing to wait for block */ | 380 | spin_unlock(&sa_manager->wq.lock); |
380 | if (r == -ENOENT) { | 381 | t = fence_wait_any_timeout(fences, count, false, |
382 | MAX_SCHEDULE_TIMEOUT); | ||
383 | r = (t > 0) ? 0 : t; | ||
384 | spin_lock(&sa_manager->wq.lock); | ||
385 | } else { | ||
386 | /* if we have nothing to wait for block */ | ||
381 | r = wait_event_interruptible_locked( | 387 | r = wait_event_interruptible_locked( |
382 | sa_manager->wq, | 388 | sa_manager->wq, |
383 | amdgpu_sa_event(sa_manager, size, align) | 389 | amdgpu_sa_event(sa_manager, size, align) |