author		monk.liu <monk.liu@amd.com>	2015-07-30 06:28:12 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 16:50:48 -0400
commit		7f06c236b964db3d8d086c5a0087b2eb4de3bf7a (patch)
tree		37893ae39ea28e71e1066ce10dca5b9318784e96 /drivers/gpu/drm/amd/amdgpu
parent		e29551556e055f463fb80cfb07e1cb15641e60a3 (diff)
drm/amdgpu: move wait_queue_head from adev to ring (v2)
Thus unnecessary wake-ups between rings can be avoided.

v2: move the wait_queue_head into fence_drv instead of the ring

Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
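To illustrate the change, a simplified sketch (abbreviated from the patch below, not buildable on its own): before this patch all rings' fence waiters slept on one device-wide wait queue, so fence activity on any ring woke the waiters of every ring; after it, each ring's fence_drv owns its own queue and wake-ups stay local to that ring.

	/* Sketch only: struct layout abbreviated, just the fields this
	 * patch touches are shown. */
	struct amdgpu_fence_driver {
		/* ... irq_src, irq_type, lockup_work ... */
		wait_queue_head_t fence_queue;	/* v2: one queue per ring, in fence_drv */
	};

	static void fence_activity_wakeup(struct amdgpu_ring *ring)
	{
		/* before: wake_up_all(&ring->adev->fence_queue);
		 *         one shared queue, waiters on every ring woken */
		wake_up_all(&ring->fence_drv.fence_queue);	/* after: this ring only */
	}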
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c	225
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c	2
3 files changed, 77 insertions(+), 152 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5f32f859230b..98b47601b30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -391,6 +391,7 @@ struct amdgpu_fence_driver {
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work		lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -2036,7 +2037,6 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0f9800b7e5a8..60e6d668f6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	(*fence)->ring = ring;
 	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		   &adev->fence_queue.lock, adev->fence_context + ring->idx,
+		   &ring->fence_drv.fence_queue.lock,
+		   adev->fence_context + ring->idx,
 		   (*fence)->seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
@@ -164,7 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 		else
 			FENCE_TRACE(&fence->base, "was already signaled\n");
 
-		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
 		fence_put(&fence->base);
 	} else
 		FENCE_TRACE(&fence->base, "pending\n");
@@ -265,8 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 		return;
 	}
 
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->adev->fence_queue);
+	if (amdgpu_fence_activity(ring)) {
+		wake_up_all(&ring->fence_drv.fence_queue);
+	}
 	else if (amdgpu_ring_is_lockup(ring)) {
 		/* good news we believe it's a lockup */
 		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -276,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 
 		/* remember that we need an reset */
 		ring->adev->needs_reset = true;
-		wake_up_all(&ring->adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
 	up_read(&ring->adev->exclusive_lock);
 }
@@ -364,7 +366,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
 	}
 
-	wake_up_all(&ring->adev->fence_queue);
+	wake_up_all(&ring->fence_drv.fence_queue);
 	}
 exit:
 	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
@@ -427,7 +429,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
@@ -435,7 +436,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
@@ -463,152 +464,79 @@ bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
 	return false;
 }
 
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!adev->rings[i] || !seq[i])
-			continue;
-
-		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
-			return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+/*
+ * amdgpu_fence_ring_wait_seq_timeout - wait for seq of the specific ring to signal
+ * @ring: ring to wait on for the seq number
+ * @seq: seq number to wait for
+ * @intr: if interruptible
+ * @timeout: jiffies before time out
  *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics). Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number. Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
+ * return value:
+ * 0: timed out but seq not signaled, and gpu not hung
+ * X (X > 0): seq signaled and X means how many jiffies remain before time out
+ * -EDEADLK: GPU hang before time out
+ * -ERESTARTSYS: interrupted before seq signaled
+ * -EINVAL: some parameter is not valid
  */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-					  u64 *target_seq, bool intr,
-					  long timeout)
+static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
+					       bool intr, long timeout)
 {
-	uint64_t last_seq[AMDGPU_MAX_RINGS];
-	bool signaled;
-	int i;
-	long r;
+	struct amdgpu_device *adev = ring->adev;
+	long r = 0;
+	bool signaled = false;
 
-	if (timeout == 0) {
-		return amdgpu_fence_any_seq_signaled(adev, target_seq);
-	}
-
-	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
+	BUG_ON(!ring);
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
 
-			if (!ring || !target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
-			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-		}
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return timeout;
 
+	while (1) {
 		if (intr) {
-			r = wait_event_interruptible_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
+			r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
+				(signaled = amdgpu_fence_seq_signaled(ring, seq))
 				|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+
+			if (r == -ERESTARTSYS) /* interrupted */
+				return r;
 		} else {
-			r = wait_event_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
+			r = wait_event_timeout(ring->fence_drv.fence_queue, (
+				(signaled = amdgpu_fence_seq_signaled(ring, seq))
 				|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
 		}
 
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
+		if (signaled) {
+			/* seq signaled */
+			if (timeout == MAX_SCHEDULE_TIMEOUT)
+				return timeout;
+			return (timeout - AMDGPU_FENCE_JIFFIES_TIMEOUT - r);
+		}
+		else if (adev->needs_reset) {
+			return -EDEADLK;
 		}
 
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-
-			if (adev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				struct amdgpu_ring *ring = adev->rings[i];
-
-				if (!ring || !target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
-					break;
-			}
-
-			if (i != AMDGPU_MAX_RINGS)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				if (!adev->rings[i] || !target_seq[i])
-					continue;
-
-				if (amdgpu_ring_is_lockup(adev->rings[i]))
-					break;
-			}
-
-			if (i < AMDGPU_MAX_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(adev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				adev->needs_reset = true;
-				wake_up_all(&adev->fence_queue);
-				return -EDEADLK;
-			}
+		/* check if it's a lockup */
+		if (amdgpu_ring_is_lockup(ring)) {
+			uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
+			/* ring lockup */
+			dev_warn(adev->dev, "GPU lockup (waiting for "
+				 "0x%016llx last fence id 0x%016llx on"
+				 " ring %d)\n",
+				 seq, last_seq, ring->idx);
+			wake_up_all(&ring->fence_drv.fence_queue);
+			return -EDEADLK;
+		}
 
-			if (timeout < MAX_SCHEDULE_TIMEOUT) {
-				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
-				if (timeout <= 0) {
-					return 0;
-				}
-			}
+		if (timeout < MAX_SCHEDULE_TIMEOUT) {
+			timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
+			if (timeout < 1)
+				return 0;
 		}
 	}
-	return timeout;
 }
 
+
 /**
  * amdgpu_fence_wait - wait for a fence to signal
  *
@@ -642,18 +570,15 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
  */
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
 	long r;
 
-	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
+	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
+	if (seq >= ring->fence_drv.sync_seq[ring->idx])
 		return -ENOENT;
-	}
-	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
+
 	return 0;
 }
 
@@ -669,21 +594,20 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
 	long r;
 
-	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
-	if (!seq[ring->idx])
+	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
+	if (!seq)
 		return 0;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
+
 	if (r < 0) {
 		if (r == -EDEADLK)
 			return -EDEADLK;
 
-		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
+		dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 			ring->idx, r);
 	}
 	return 0;
 }
@@ -898,7 +822,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	init_waitqueue_head(&adev->fence_queue);
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -927,7 +850,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		/* no need to trigger GPU reset as we are unloading */
 		amdgpu_fence_driver_force_completion(adev);
 	}
-	wake_up_all(&adev->fence_queue);
+	wake_up_all(&ring->fence_drv.fence_queue);
 	amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 		       ring->fence_drv.irq_type);
 	if (ring->scheduler)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 1e68a561bbfe..7d442c51063e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -342,6 +342,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		amdgpu_fence_driver_init_ring(ring);
 	}
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
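
The new per-ring helper has a richer return contract than the old multi-ring wait, as documented in the comment added above. As an illustration only, a hypothetical in-file caller (example_wait_user is not part of the patch) might interpret the return values like this:

	/* Hypothetical caller, for illustration only; not part of the patch. */
	static int example_wait_user(struct amdgpu_ring *ring, uint64_t seq, long timeout)
	{
		long r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, true, timeout);

		if (r == -EINVAL)	/* seq was never emitted on this ring */
			return -EINVAL;
		if (r == -ERESTARTSYS)	/* interrupted by a signal before seq signaled */
			return -ERESTARTSYS;
		if (r == -EDEADLK)	/* GPU lockup detected; a reset is needed */
			return -EDEADLK;
		if (r == 0)		/* timed out; seq not signaled, GPU not hung */
			return -ETIMEDOUT;
		return 0;		/* signaled; r jiffies of the timeout remained */
	}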