author		Monk Liu <Monk.Liu@amd.com>			2017-10-16 02:38:10 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-12-04 16:33:08 -0500
commit		2f9d4084cac96a0281522b548ca0526c1e241b75 (patch)
tree		3abcb0009595224c1be2da0382fff50ffe812cc0 /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent		d1f6dc1a9a106a73510181cfad9b4a7a0b140990 (diff)
drm/amdgpu:cleanup force_completion
cleanups, now only operate on the given ring
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c	29
1 file changed, 7 insertions, 22 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2fa95aef74d5..219c15f79a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -499,7 +499,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
@@ -534,7 +534,7 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* delay GPU reset to resume */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 
 		/* disable the interrupt */
@@ -571,30 +571,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
- * @adev: amdgpu device pointer
+ * @ring: fence of the ring to signal
  *
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
  */
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
 {
-	int i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring || !ring->fence_drv.initialized)
-			continue;
-
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
-	}
-}
-
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
-{
-	if (ring)
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
-}
+	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+	amdgpu_fence_process(ring);
+}
 
 /*
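
For context, the cleanup is small: the helper no longer iterates every ring on the device; it writes the given ring's latest emitted sequence number (sync_seq) as completed and then runs fence processing, while the fini/suspend paths that already loop over rings just pass the current ring. Below is a minimal, self-contained C sketch of that caller-iterates / helper-acts-on-one-ring shape; struct ring, struct device, MAX_RINGS and force_completion() are simplified stand-ins for illustration only, not the real amdgpu structures or kernel API.

/*
 * Stand-in sketch only: these types are placeholders, not the real
 * amdgpu structures.
 */
#include <stdio.h>

#define MAX_RINGS 4

struct ring {
	int initialized;
	unsigned int sync_seq;	/* last emitted fence sequence */
	unsigned int signaled;	/* last signaled fence sequence */
};

struct device {
	struct ring *rings[MAX_RINGS];
};

/* After the cleanup the helper acts on exactly one ring: mark the latest
 * emitted sequence as completed, then "process" fences to wake waiters
 * (standing in for amdgpu_fence_write() + amdgpu_fence_process()). */
static void force_completion(struct ring *ring)
{
	ring->signaled = ring->sync_seq;
	printf("ring signaled up to %u\n", ring->signaled);
}

int main(void)
{
	struct ring gfx = { .initialized = 1, .sync_seq = 7 };
	struct ring dma = { .initialized = 1, .sync_seq = 3 };
	struct device dev = { .rings = { &gfx, &dma } };
	int i;

	/* The teardown/suspend paths already loop over the rings and skip
	 * uninitialized ones, so they simply pass the current ring. */
	for (i = 0; i < MAX_RINGS; i++) {
		struct ring *ring = dev.rings[i];

		if (!ring || !ring->initialized)
			continue;
		force_completion(ring);
	}
	return 0;
}

Note that the upstream change also adds an amdgpu_fence_process() call inside the helper, so waiters on the forced fences are actually woken rather than only having the sequence value written.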