aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c66
1 files changed, 64 insertions, 2 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 176f28777f5e..5448cf27654e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -196,6 +196,19 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
196} 196}
197 197
198/** 198/**
199 * amdgpu_fence_schedule_fallback - schedule fallback check
200 *
201 * @ring: pointer to struct amdgpu_ring
202 *
203 * Start a timer as fallback to our interrupts.
204 */
205static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
206{
207 mod_timer(&ring->fence_drv.fallback_timer,
208 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
209}
210
211/**
199 * amdgpu_fence_process - check for fence activity 212 * amdgpu_fence_process - check for fence activity
200 * 213 *
201 * @ring: pointer to struct amdgpu_ring 214 * @ring: pointer to struct amdgpu_ring
@@ -203,8 +216,10 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
203 * Checks the current fence value and calculates the last 216 * Checks the current fence value and calculates the last
204 * signalled fence value. Wakes the fence queue if the 217 * signalled fence value. Wakes the fence queue if the
205 * sequence number has increased. 218 * sequence number has increased.
219 *
220 * Returns true if fence was processed
206 */ 221 */
207void amdgpu_fence_process(struct amdgpu_ring *ring) 222bool amdgpu_fence_process(struct amdgpu_ring *ring)
208{ 223{
209 struct amdgpu_fence_driver *drv = &ring->fence_drv; 224 struct amdgpu_fence_driver *drv = &ring->fence_drv;
210 uint32_t seq, last_seq; 225 uint32_t seq, last_seq;
@@ -216,8 +231,12 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
216 231
217 } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq); 232 } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
218 233
234 if (del_timer(&ring->fence_drv.fallback_timer) &&
235 seq != ring->fence_drv.sync_seq)
236 amdgpu_fence_schedule_fallback(ring);
237
219 if (unlikely(seq == last_seq)) 238 if (unlikely(seq == last_seq))
220 return; 239 return false;
221 240
222 last_seq &= drv->num_fences_mask; 241 last_seq &= drv->num_fences_mask;
223 seq &= drv->num_fences_mask; 242 seq &= drv->num_fences_mask;
@@ -244,6 +263,24 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
244 263
245 dma_fence_put(fence); 264 dma_fence_put(fence);
246 } while (last_seq != seq); 265 } while (last_seq != seq);
266
267 return true;
268}
269
270/**
271 * amdgpu_fence_fallback - fallback for hardware interrupts
272 *
274 * @t: pointer to fence_drv.fallback_timer, used with from_timer() to recover the ring
274 *
275 * Checks for fence activity.
276 */
277static void amdgpu_fence_fallback(struct timer_list *t)
278{
279 struct amdgpu_ring *ring = from_timer(ring, t,
280 fence_drv.fallback_timer);
281
282 if (amdgpu_fence_process(ring))
283 DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
247} 284}
248 285
249/** 286/**
@@ -393,6 +430,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
393 atomic_set(&ring->fence_drv.last_seq, 0); 430 atomic_set(&ring->fence_drv.last_seq, 0);
394 ring->fence_drv.initialized = false; 431 ring->fence_drv.initialized = false;
395 432
433 timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
434
396 ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1; 435 ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
397 spin_lock_init(&ring->fence_drv.lock); 436 spin_lock_init(&ring->fence_drv.lock);
398 ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), 437 ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
@@ -468,6 +507,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
468 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 507 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
469 ring->fence_drv.irq_type); 508 ring->fence_drv.irq_type);
470 drm_sched_fini(&ring->sched); 509 drm_sched_fini(&ring->sched);
510 del_timer_sync(&ring->fence_drv.fallback_timer);
471 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) 511 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
472 dma_fence_put(ring->fence_drv.fences[j]); 512 dma_fence_put(ring->fence_drv.fences[j]);
473 kfree(ring->fence_drv.fences); 513 kfree(ring->fence_drv.fences);
@@ -561,6 +601,27 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
561} 601}
562 602
563/** 603/**
604 * amdgpu_fence_enable_signaling - enable signaling on a fence
605 * @f: fence to enable signaling on
606 *
607 * Called with the fence lock held. Arms the fallback timer (if it is not
608 * already pending) so the fence still gets processed if the hardware
609 * interrupt is missed, then reports that signaling has been enabled.
610 */
611static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
612{
613 struct amdgpu_fence *fence = to_amdgpu_fence(f);
614 struct amdgpu_ring *ring = fence->ring;
615
616 if (!timer_pending(&ring->fence_drv.fallback_timer))
617 amdgpu_fence_schedule_fallback(ring);
618
619 DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
620
621 return true;
622}
623
624/**
564 * amdgpu_fence_free - free up the fence memory 625 * amdgpu_fence_free - free up the fence memory
565 * 626 *
566 * @rcu: RCU callback head 627 * @rcu: RCU callback head
@@ -590,6 +651,7 @@ static void amdgpu_fence_release(struct dma_fence *f)
590static const struct dma_fence_ops amdgpu_fence_ops = { 651static const struct dma_fence_ops amdgpu_fence_ops = {
591 .get_driver_name = amdgpu_fence_get_driver_name, 652 .get_driver_name = amdgpu_fence_get_driver_name,
592 .get_timeline_name = amdgpu_fence_get_timeline_name, 653 .get_timeline_name = amdgpu_fence_get_timeline_name,
654 .enable_signaling = amdgpu_fence_enable_signaling,
593 .release = amdgpu_fence_release, 655 .release = amdgpu_fence_release,
594}; 656};
595 657