aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 69
1 files changed, 15 insertions, 54 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 44eac91163eb..d5bdd9633c85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -228,57 +228,6 @@ static void amdgpu_fence_fallback(unsigned long arg)
228} 228}
229 229
230/** 230/**
231 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
232 *
233 * @ring: ring the fence is associated with
234 * @seq: sequence number
235 *
236 * Check if the last signaled fence sequence number is >= the requested
237 * sequence number (all asics).
238 * Returns true if the fence has signaled (current fence value
239 * is >= requested value) or false if it has not (current fence
240 * value is < the requested value. Helper function for
241 * amdgpu_fence_signaled().
242 */
243static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
244{
245 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
246 return true;
247
248 /* poll new last sequence at least once */
249 amdgpu_fence_process(ring);
250 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
251 return true;
252
253 return false;
254}
255
256/*
257 * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal
258 * @ring: ring to wait on for the seq number
259 * @seq: seq number wait for
260 *
261 * return value:
262 * 0: seq signaled, and gpu not hang
263 * -EINVAL: some parameter is not valid
264 */
265static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
266{
267 BUG_ON(!ring);
268 if (seq > ring->fence_drv.sync_seq)
269 return -EINVAL;
270
271 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
272 return 0;
273
274 amdgpu_fence_schedule_fallback(ring);
275 wait_event(ring->fence_drv.fence_queue,
276 amdgpu_fence_seq_signaled(ring, seq));
277
278 return 0;
279}
280
281/**
282 * amdgpu_fence_wait_empty - wait for all fences to signal 231 * amdgpu_fence_wait_empty - wait for all fences to signal
283 * 232 *
284 * @adev: amdgpu device pointer 233 * @adev: amdgpu device pointer
@@ -286,16 +235,28 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
286 * 235 *
287 * Wait for all fences on the requested ring to signal (all asics). 236 * Wait for all fences on the requested ring to signal (all asics).
288 * Returns 0 if the fences have passed, error for all other cases. 237 * Returns 0 if the fences have passed, error for all other cases.
289 * Caller must hold ring lock.
290 */ 238 */
291int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) 239int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
292{ 240{
293 uint64_t seq = ring->fence_drv.sync_seq; 241 uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
242 struct fence *fence, **ptr;
243 int r;
294 244
295 if (!seq) 245 if (!seq)
296 return 0; 246 return 0;
297 247
298 return amdgpu_fence_ring_wait_seq(ring, seq); 248 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
249 rcu_read_lock();
250 fence = rcu_dereference(*ptr);
251 if (!fence || !fence_get_rcu(fence)) {
252 rcu_read_unlock();
253 return 0;
254 }
255 rcu_read_unlock();
256
257 r = fence_wait(fence, false);
258 fence_put(fence);
259 return r;
299} 260}
300 261
301/** 262/**