author		Andrey Grodzovsky <andrey.grodzovsky@amd.com>	2018-05-30 15:28:52 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-06-15 13:20:33 -0400
commit		48ad368a8a3ab2fd3c2bc2ccccc6e29b1acda1bb (patch)
tree		ff89a29b752a9a06c7003f8ccd93cc82debf8e8f /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent		741f01e636b72ff3f81204fd595ac1078907671b (diff)
drm/amdgpu: move amdgpu_ctx_mgr_entity_fini to f_ops flush hook (V4)
With this we can now terminate jobs enqueued into the SW queue the moment the task is being killed, instead of waiting for the last user of the drm file to release it.

Also stop checking for kref_read(&ctx->refcount) == 1 when calling drm_sched_entity_do_release, since another task might still hold a reference to this entity; we don't care, because KILL means terminate job submission regardless of what other tasks are doing.

v2: Use the returned remaining timeout as the parameter for the next call. Rebase.
v3: Switch to working with jiffies. Streamline remaining-timeout usage. Rebase.
v4: Rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
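The core mechanism introduced in v2/v3 is the remaining-timeout pattern: drm_sched_entity_do_release() now takes a wait budget in jiffies and returns whatever is left of it, so one overall budget (max_wait, initialised from MAX_WAIT_SCHED_ENTITY_Q_EMPTY) is spent across all of a context's entities rather than waiting up to a full timeout per entity. The following is a minimal, self-contained sketch of that pattern in plain C; the names and numbers are invented for illustration and are not the kernel code.

/*
 * Illustration of the remaining-timeout pattern: one overall budget is
 * threaded through a series of sequential waits, so the total time spent
 * draining N queues is bounded by the budget rather than by N * timeout.
 */
#include <stdio.h>

/* Pretend each queue takes 'cost' time units to drain; return the budget left. */
static long wait_queue_empty(long cost, long budget)
{
	long spent = cost < budget ? cost : budget;
	return budget - spent;
}

int main(void)
{
	long budget = 100;		/* overall budget, like max_wait in jiffies */
	long costs[] = { 30, 50, 40 };	/* per-queue drain times (made up) */

	for (int i = 0; i < 3; i++) {
		budget = wait_queue_empty(costs[i], budget);
		printf("after queue %d: %ld budget left\n", i, budget);
	}
	return 0;
}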
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c5bb36275e93..64b3a1ed04dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -449,26 +449,28 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id, i;
+	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
 	idp = &mgr->ctx_handles;
 
+	mutex_lock(&mgr->lock);
 	idr_for_each_entry(idp, ctx, id) {
 
-		if (!ctx->adev)
+		if (!ctx->adev) {
+			mutex_unlock(&mgr->lock);
 			return;
+		}
 
 		for (i = 0; i < ctx->adev->num_rings; i++) {
 
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-						  &ctx->rings[i].entity);
-			else
-				DRM_ERROR("ctx %p is still alive\n", ctx);
+			max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+					&ctx->rings[i].entity, max_wait);
 		}
 	}
+	mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
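Note that the diffstat above is limited to amdgpu_ctx.c, so the other half of the change named in the subject line, wiring amdgpu_ctx_mgr_entity_fini() into the drm file's f_ops .flush hook, is not shown here. As a rough, assumption-laden sketch (not a verbatim quote of the patch), the amdgpu_drv.c side looks something like this:

/*
 * Sketch only: a .flush file_operations callback runs when a task closes
 * (or is killed while holding) the drm file, which happens earlier than
 * the final release of the file, so job submission can be cut off sooner.
 */
static int amdgpu_flush(struct file *f, fl_owner_t id)
{
	struct drm_file *file_priv = f->private_data;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;

	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
	return 0;
}

/* ...with .flush = amdgpu_flush added to the driver's file_operations. */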