path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
author    Junwei Zhang <Jerry.Zhang@amd.com>    2015-08-19 04:24:19 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2015-08-25 10:38:28 -0400
commit    1aa4051b7f5474cca6009c13868c59d78d06f983 (patch)
tree      bad07ba0fe771f75d08d6785a3eb333acd3d2694 /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent    52293c67f1e7542a6dc61037d83c266e216bef27 (diff)
drm/amdgpu: modify amdgpu_fence_wait_any() to amdgpu_fence_wait_multiple()
Rename the function and update its callers accordingly. Add a new bool parameter, wait_all: if wait_all is true, the function returns when all fences have signaled or the timeout expires; if wait_all is false, it returns as soon as any fence signals or the timeout expires.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
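For illustration only (not part of the patch): a minimal sketch of how a caller might use the renamed helper. The wrapping function, the fence objects, the 1-second timeout and the error handling are hypothetical assumptions; only the amdgpu_fence_wait_multiple() signature and the -ENOMEM case come from the diff below.

/*
 * Hypothetical caller sketch: wait on two fences with the new helper.
 * "adev", "fence_a" and "fence_b" are assumed to be valid objects owned
 * by the surrounding driver code.
 */
static int example_wait_both(struct amdgpu_device *adev,
                             struct amdgpu_fence *fence_a,
                             struct amdgpu_fence *fence_b)
{
        struct amdgpu_fence *fences[2] = { fence_a, fence_b };
        signed long r;

        /* wait_all = true: return only once both fences have signaled or
         * the timeout expires; intr = true makes the sleep interruptible.
         */
        r = amdgpu_fence_wait_multiple(adev, fences, 2, true, true,
                                       msecs_to_jiffies(1000));
        if (r < 0)
                return r;               /* error, e.g. -ENOMEM */
        if (r == 0)
                return -ETIMEDOUT;      /* timed out before both signaled */

        /* wait_all = false would instead return as soon as either fence
         * signals, matching the old amdgpu_fence_wait_any() behaviour.
         */
        return 0;
}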
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  |  79
1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 98500f1756f7..ae014fcf524e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
-static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
 {
 	int idx;
 	struct amdgpu_fence *fence;
 
-	idx = 0;
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+	for (idx = 0; idx < count; ++idx) {
 		fence = fences[idx];
 		if (fence) {
 			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
 	return false;
 }
 
+static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
+{
+	int idx;
+	struct amdgpu_fence *fence;
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+				return false;
+		}
+	}
+
+	return true;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 					     signed long t)
 {
-	struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_device *adev = fence->ring->adev;
 
-	memset(&array[0], 0, sizeof(array));
-	array[0] = fence;
-
-	return amdgpu_fence_wait_any(adev, array, intr, t);
+	return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
 }
 
-/* wait until any fence in array signaled */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			struct amdgpu_fence **array, bool intr, signed long t)
+/**
+ * Wait the fence array with timeout
+ *
+ * @adev: amdgpu device
+ * @array: the fence array with amdgpu fence pointer
+ * @count: the number of the fence array
+ * @wait_all: the flag of wait all(true) or wait any(false)
+ * @intr: when sleep, set the current task interruptable or not
+ * @t: timeout to wait
+ *
+ * If wait_all is true, it will return when all fences are signaled or timeout.
+ * If wait_all is false, it will return when any fence is signaled or timeout.
+ */
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+				       struct amdgpu_fence **array,
+				       uint32_t count,
+				       bool wait_all,
+				       bool intr,
+				       signed long t)
 {
 	long idx = 0;
-	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+	struct amdgpu_wait_cb *cb;
 	struct amdgpu_fence *fence;
 
 	BUG_ON(!array);
 
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		t = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (idx = 0; idx < count; ++idx) {
 		fence = array[idx];
 		if (fence) {
 			cb[idx].task = current;
 			if (fence_add_callback(&fence->base,
-					&cb[idx].base, amdgpu_fence_wait_cb))
-				return t; /* return if fence is already signaled */
+					&cb[idx].base, amdgpu_fence_wait_cb)) {
+				/* The fence is already signaled */
+				if (wait_all)
+					continue;
+				else
+					goto fence_rm_cb;
+			}
 		}
 	}
 
@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 		 * amdgpu_test_signaled_any must be called after
 		 * set_current_state to prevent a race with wake_up_process
 		 */
-		if (amdgpu_test_signaled_any(array))
+		if (!wait_all && amdgpu_test_signaled_any(array, count))
+			break;
+		if (wait_all && amdgpu_test_signaled_all(array, count))
 			break;
 
 		if (adev->needs_reset) {
@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
 
 	__set_current_state(TASK_RUNNING);
 
-	idx = 0;
-	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+fence_rm_cb:
+	for (idx = 0; idx < count; ++idx) {
 		fence = array[idx];
 		if (fence)
 			fence_remove_callback(&fence->base, &cb[idx].base);
 	}
 
+err_free_cb:
+	kfree(cb);
+
 	return t;
 }
 