Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c	58
1 file changed, 36 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1a..7cb711fc1ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 }
 
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f)
 {
+	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
 
-	if (!fence)
-		return;
+	if (!f)
+		return 0;
+
+	fence = to_amdgpu_fence(f);
+	if (!fence || fence->ring->adev != adev)
+		return fence_wait(f, true);
 
 	other = sync->sync_to[fence->ring->idx];
 	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync,
 				amdgpu_fence_later(fence, other));
 		amdgpu_fence_unref(&other);
 	}
+
+	return 0;
 }
 
 /**
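
Taken together, the two hunks above turn amdgpu_sync_fence() from a void helper that only accepted amdgpu fences into a function that accepts any struct fence and reports errors: foreign fences (not amdgpu fences, or belonging to another device) are waited on inline, while local ones are still recorded per ring. As a sketch, the resulting function reassembles to the following; the lines between the two hunks are not shown in this diff and are elided here:

int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_fence *fence;
	struct amdgpu_fence *other;

	if (!f)
		return 0;

	/* Not an amdgpu fence, or from another device: cannot be
	 * tracked per ring, so wait for it inline instead.
	 */
	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev)
		return fence_wait(f, true);

	/* Local fence: remember the latest fence per ring. */
	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
			amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	/* ... bookkeeping between the two hunks elided ... */

	return 0;
}
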
@@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	fence = f ? to_amdgpu_fence(f) : NULL;
-	if (fence && fence->ring->adev == adev)
-		amdgpu_sync_fence(sync, fence);
-	else if (f)
-		r = fence_wait(f, true);
+	r = amdgpu_sync_fence(adev, sync, f);
 
 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -121,14 +123,26 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 					      reservation_object_held(resv));
 		fence = f ? to_amdgpu_fence(f) : NULL;
 		if (fence && fence->ring->adev == adev) {
-			if (fence->owner != owner ||
-			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-				amdgpu_sync_fence(sync, fence);
-		} else if (f) {
-			r = fence_wait(f, true);
-			if (r)
-				break;
+			/* VM updates are only interesting
+			 * for other VM updates and moves.
+			 */
+			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
+			     (fence->owner == AMDGPU_FENCE_OWNER_VM)))
+				continue;
+
+			/* Ignore fence from the same owner as
+			 * long as it isn't undefined.
+			 */
+			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+			    fence->owner == owner)
+				continue;
 		}
+
+		r = amdgpu_sync_fence(adev, sync, f);
+		if (r)
+			break;
 	}
 	return r;
 }
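
The rewritten loop in amdgpu_sync_resv() now filters by fence owner before delegating to amdgpu_sync_fence(). The two skip rules can be read as a standalone predicate; the helper below is hypothetical (not part of the patch) and assumes owner and fence->owner are the usual void * owner tags:

/* Hypothetical predicate: should a shared fence from the same device
 * be skipped rather than synced to?
 */
static bool amdgpu_sync_skip_fence(void *owner, void *fence_owner)
{
	/* VM updates are only interesting for other VM updates and
	 * moves: if neither side is a move and exactly one side is a
	 * VM update, skip it.
	 */
	if (owner != AMDGPU_FENCE_OWNER_MOVE &&
	    fence_owner != AMDGPU_FENCE_OWNER_MOVE &&
	    (owner == AMDGPU_FENCE_OWNER_VM) !=
	    (fence_owner == AMDGPU_FENCE_OWNER_VM))
		return true;

	/* Fences from the same defined owner are already ordered by
	 * that owner's own submissions, so they need no extra sync.
	 */
	if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && fence_owner == owner)
		return true;

	return false;
}

The same-owner rule is equivalent to the old inline check; what is new is the VM/move filtering, and that every fence which is not skipped now goes through amdgpu_sync_fence(), so foreign fences are waited on with proper error propagation instead of being handled in an open-coded else branch.
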
@@ -164,9 +178,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 			return -EINVAL;
 		}
 
-		if (count >= AMDGPU_NUM_SYNCS) {
+		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
 			/* not enough room, wait manually */
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -186,7 +200,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
 			/* signaling wasn't successful wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -196,7 +210,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
 			/* waiting wasn't successful wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
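
The last three hunks make the same substitution at each manual-wait fallback in amdgpu_sync_rings(): the amdgpu-specific amdgpu_fence_wait(fence, false) becomes the generic fence_wait(&fence->base, false) on the fence's embedded struct fence, and with amdgpu_enable_scheduler set the semaphore path is bypassed entirely, so every sync falls back to a CPU wait. From a caller's point of view, the new amdgpu_sync_fence() contract looks like this sketch (hypothetical caller, surrounding error handling assumed):

	/* Any struct fence may be passed in; waits on foreign fences
	 * happen inline and their errors now propagate to the caller.
	 */
	r = amdgpu_sync_fence(adev, sync, f);
	if (r)
		return r;
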