| author | Chunming Zhou <david1.zhou@amd.com> | 2015-07-28 05:31:04 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2015-08-17 16:50:42 -0400 |
| commit | 51b9db27d07869cf565ba135e97e2ed5f858612e (patch) | |
| tree | 2120e2873ceef6ab8d0ef0e79dffcc9b4337277b /drivers/gpu/drm/amd/amdgpu | |
| parent | 4afcb30383bef8bf972c6aae47995ef314e5f8a1 (diff) | |
drm/amdgpu: wait forever for wait emit
The job must be emitted by the scheduler; if it never is, the scheduler is in an abnormal state, so wait for emission without a timeout.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
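For context, here is a minimal, hypothetical sketch of the wait semantics the new call sites request (intr = false, timeout = -1): a negative timeout means block with no signal check and no deadline until the job has been emitted. The signature is inferred from the call sites in the diff below; `example_entity`, `example_wait_emit`, `wait_emit`, `last_emitted_seq`, and the assumption that the timeout is in milliseconds are all illustrative, not the in-tree amd_sched_wait_emit() implementation.

```c
/*
 * Illustrative sketch only -- not the in-tree amd_sched_wait_emit().
 * It mirrors the semantics the call sites below now request:
 * intr = false and timeout = -1, i.e. block uninterruptibly and without
 * a deadline until the job with sequence number @seq has been emitted.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_entity {
	wait_queue_head_t wait_emit;	/* hypothetical: woken on job emission  */
	atomic64_t last_emitted_seq;	/* hypothetical: last emitted sequence  */
};

static int example_wait_emit(struct example_entity *entity, u64 seq,
			     bool intr, long timeout_ms)
{
	long r;

	if (timeout_ms < 0) {
		/* Wait forever: the scheduler must eventually emit the job. */
		wait_event(entity->wait_emit,
			   (u64)atomic64_read(&entity->last_emitted_seq) >= seq);
		return 0;
	}

	if (intr) {
		r = wait_event_interruptible_timeout(entity->wait_emit,
				(u64)atomic64_read(&entity->last_emitted_seq) >= seq,
				msecs_to_jiffies(timeout_ms));
		if (r < 0)
			return r;	/* interrupted by a signal */
	} else {
		r = wait_event_timeout(entity->wait_emit,
				(u64)atomic64_read(&entity->last_emitted_seq) >= seq,
				msecs_to_jiffies(timeout_ms));
	}

	return r ? 0 : -ETIMEDOUT;	/* zero jiffies remaining means timeout */
}
```

With intr = false and timeout = -1, the "emit timeout" error paths in the hunks below should effectively become unreachable when the scheduler is working correctly.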
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 6
2 files changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 58ce2655a8fd..95807b678b6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -302,8 +302,8 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	if (amdgpu_enable_scheduler) {
 		r = amd_sched_wait_emit(&cring->c_entity,
 					seq,
-					true,
-					AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
+					false,
+					-1);
 		if (r)
 			return NULL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 34938d2417a1..26c55a7a1a88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -386,7 +386,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 				sched_job);
 	r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
 				v_seq,
-				true,
+				false,
 				-1);
 	if (r)
 		DRM_ERROR("emit timeout\n");
@@ -537,7 +537,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				sched_job);
 	r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
 				v_seq,
-				true,
+				false,
 				-1);
 	if (r)
 		DRM_ERROR("emit timeout\n");
@@ -890,7 +890,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				sched_job);
 	r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
 				v_seq,
-				true,
+				false,
 				-1);
 	if (r)
 		DRM_ERROR("emit timeout\n");