Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 81
1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3fabf9f97022..c5bb36275e93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;
 
 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
@@ -111,8 +111,9 @@ failed:
 	return r;
 }
 
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
 {
+	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
@@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
-	for (i = 0; i < adev->num_rings; i++)
-		drm_sched_entity_fini(&adev->rings[i]->sched,
-				      &ctx->rings[i].entity);
-
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 
 	mutex_destroy(&ctx->lock);
+
+	kfree(ctx);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -170,12 +169,20 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	u32 i;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	amdgpu_ctx_fini(ctx);
+	for (i = 0; i < ctx->adev->num_rings; i++) {
 
-	kfree(ctx);
+		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+			continue;
+
+		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+				      &ctx->rings[i].entity);
+	}
+
+	amdgpu_ctx_fini(ref);
 }
 
 static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -437,16 +444,72 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 	idr_init(&mgr->ctx_handles);
 }
 
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++) {
+
+			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+				continue;
+
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+						  &ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+		}
+	}
+}
+
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++) {
+
+			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+				continue;
+
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+					&ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+		}
+	}
+}
+
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id;
 
+	amdgpu_ctx_mgr_entity_cleanup(mgr);
+
 	idp = &mgr->ctx_handles;
 
 	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
 
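
The heart of this patch is the kernel's kref release pattern: amdgpu_ctx_fini() is reworked to match the kref release callback signature, recovering the containing context with container_of() and freeing it, so kref_put() tears the context down exactly when the last reference drops. Below is a minimal, self-contained userspace sketch of that pattern; my_ctx, my_ctx_fini, and the simplified non-atomic kref are illustrative stand-ins, not the actual amdgpu or <linux/kref.h> code.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct kref (assumption: the real
 * one in <linux/kref.h> wraps a refcount_t and uses atomic operations). */
struct kref { int count; };

static void kref_init(struct kref *k) { k->count = 1; }
static void kref_get(struct kref *k)  { k->count++; }

/* Like the kernel API: invoke @release when the count reaches zero, and
 * return 1 if the object was released, 0 otherwise. */
static int kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->count == 0) {
		release(k);
		return 1;
	}
	return 0;
}

/* Recover a pointer to the outer structure from a pointer to one member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical object embedding its refcount, as struct amdgpu_ctx does. */
struct my_ctx {
	struct kref refcount;
	int id;
};

/* Release callback: the container_of() step here mirrors what the patch
 * adds to amdgpu_ctx_fini() when its parameter becomes struct kref *. */
static void my_ctx_fini(struct kref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, refcount);

	printf("releasing ctx %d\n", ctx->id);
	free(ctx);
}

int main(void)
{
	struct my_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	kref_init(&ctx->refcount);		/* refcount = 1 */
	ctx->id = 42;

	kref_get(&ctx->refcount);		/* a second user takes a reference */
	kref_put(&ctx->refcount, my_ctx_fini);	/* back to 1, nothing freed */
	kref_put(&ctx->refcount, my_ctx_fini);	/* hits 0, my_ctx_fini() runs */
	return 0;
}

Note how the "!= 1" check in amdgpu_ctx_mgr_fini() follows from this return convention: kref_put() returning anything other than 1 means the context was not actually released, i.e. someone still holds a reference. The patch also deliberately moves the scheduler-entity teardown (drm_sched_entity_do_release()/drm_sched_entity_cleanup()) out of amdgpu_ctx_fini(), so the entities are drained before the final kref_put() frees the context memory.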