author | Monk Liu <Monk.Liu@amd.com> | 2017-06-06 05:25:13 -0400
---|---|---
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-07-14 11:06:10 -0400
commit | 8fdf074f1840eae838bbccbec37d0a1504ee432b (patch) |
tree | 7bfca536bc87c4ae0e8ad6129f95bc72dd6bef9a /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
parent | bdb8cd10b99d55d35b869fbf5cd6df9420b9bd85 (diff) |
drm/amdgpu: fix world switch hang

For SR-IOV, we must keep the pipeline-sync under the protection
of COND_EXEC, otherwise the commands consumed by the CPG are not
consistent when a world switch is triggered, e.g.:
the world switch hits and the IB frame is skipped, so the fence
won't signal; the CP then jumps to the next DMA frame's pipeline-sync
command, which makes the CP hang forever.
With the pipeline-sync moved inside COND_EXEC, consistency is
guaranteed.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 |
1 file changed, 5 insertions(+), 2 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index cda9e5d8b831..30c4322ddce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -743,7 +743,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
  *
  * Emit a VM flush when it is necessary.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -765,12 +765,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		vm_flush_needed = true;
 	}
 
-	if (!vm_flush_needed && !gds_switch_needed)
+	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 		return 0;
 
 	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
+	if (need_pipe_sync)
+		amdgpu_ring_emit_pipeline_sync(ring);
+
 	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
 		struct dma_fence *fence;
 
```
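Read together, the hunks above give roughly the following shape for amdgpu_vm_flush() after the patch. This is only a condensed sketch: the code between the hunks is unchanged by this commit and is elided here, and the comments are editorial rather than from the source.

```c
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	/* ... unchanged: decide whether a VM flush / GDS switch is needed ... */

	/* A frame preamble is still emitted when only a pipeline-sync is requested. */
	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	/* Open the COND_EXEC-protected region of the frame preamble. */
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	/* The pipeline-sync now sits inside COND_EXEC: if a world switch causes
	 * the frame to be skipped, the sync is skipped along with it, so the CP
	 * never waits on a fence that will not signal. */
	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
		struct dma_fence *fence;
		/* ... unchanged: emit the VM flush and its fence ... */
	}
	/* ... */
}
```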