aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAndrey Grodzovsky <andrey.grodzovsky@amd.com>2018-06-05 12:43:23 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-07-05 17:38:45 -0400
commit180fc134d712a93a2bbc3d11ed657b5208e6f90f (patch)
treedc13aba9cf9d8baa5ed252a26d4a8cde6c0abc98 /drivers
parentf3efec54ed6ac05ba63be1bd93bd741d41b6eb37 (diff)
drm/scheduler: Rename cleanup functions v2.
Everything in the flush code path (i.e. waiting for the SW queue to become empty) is named with *_flush(), and everything in the release code path is named *_fini(). This patch also affects the amdgpu and etnaviv drivers, which use those functions. v2: Also apply the change to v3d. Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> Suggested-by: Christian König <christian.koenig@amd.com> Acked-by: Lucas Stach <l.stach@pengutronix.de> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c4
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c2
10 files changed, 23 insertions, 23 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64b3a1ed04dc..c0f06c02f2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
104 104
105failed: 105failed:
106 for (j = 0; j < i; j++) 106 for (j = 0; j < i; j++)
107 drm_sched_entity_fini(&adev->rings[j]->sched, 107 drm_sched_entity_destroy(&adev->rings[j]->sched,
108 &ctx->rings[j].entity); 108 &ctx->rings[j].entity);
109 kfree(ctx->fences); 109 kfree(ctx->fences);
110 ctx->fences = NULL; 110 ctx->fences = NULL;
@@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
178 if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) 178 if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
179 continue; 179 continue;
180 180
181 drm_sched_entity_fini(&ctx->adev->rings[i]->sched, 181 drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
182 &ctx->rings[i].entity); 182 &ctx->rings[i].entity);
183 } 183 }
184 184
@@ -466,7 +466,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
466 if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) 466 if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
467 continue; 467 continue;
468 468
469 max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched, 469 max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
470 &ctx->rings[i].entity, max_wait); 470 &ctx->rings[i].entity, max_wait);
471 } 471 }
472 } 472 }
@@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
492 continue; 492 continue;
493 493
494 if (kref_read(&ctx->refcount) == 1) 494 if (kref_read(&ctx->refcount) == 1)
495 drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched, 495 drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
496 &ctx->rings[i].entity); 496 &ctx->rings[i].entity);
497 else 497 else
498 DRM_ERROR("ctx %p is still alive\n", ctx); 498 DRM_ERROR("ctx %p is still alive\n", ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0c084d3d0865..0246cb87d9e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -162,7 +162,7 @@ error_mem:
162static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) 162static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
163{ 163{
164 if (adev->mman.mem_global_referenced) { 164 if (adev->mman.mem_global_referenced) {
165 drm_sched_entity_fini(adev->mman.entity.sched, 165 drm_sched_entity_destroy(adev->mman.entity.sched,
166 &adev->mman.entity); 166 &adev->mman.entity);
167 mutex_destroy(&adev->mman.gtt_window_lock); 167 mutex_destroy(&adev->mman.gtt_window_lock);
168 drm_global_item_unref(&adev->mman.bo_global_ref.ref); 168 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index cc15d3230402..0b46ea1c6290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
309 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 309 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
310 kfree(adev->uvd.inst[j].saved_bo); 310 kfree(adev->uvd.inst[j].saved_bo);
311 311
312 drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); 312 drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
313 313
314 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, 314 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
315 &adev->uvd.inst[j].gpu_addr, 315 &adev->uvd.inst[j].gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf2..b0dcdfd85f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
222 if (adev->vce.vcpu_bo == NULL) 222 if (adev->vce.vcpu_bo == NULL)
223 return 0; 223 return 0;
224 224
225 drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); 225 drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
226 226
227 amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, 227 amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
228 (void **)&adev->vce.cpu_addr); 228 (void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 590db78b8c72..837066076ccf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2643,7 +2643,7 @@ error_free_root:
2643 vm->root.base.bo = NULL; 2643 vm->root.base.bo = NULL;
2644 2644
2645error_free_sched_entity: 2645error_free_sched_entity:
2646 drm_sched_entity_fini(&ring->sched, &vm->entity); 2646 drm_sched_entity_destroy(&ring->sched, &vm->entity);
2647 2647
2648 return r; 2648 return r;
2649} 2649}
@@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2780 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 2780 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2781 } 2781 }
2782 2782
2783 drm_sched_entity_fini(vm->entity.sched, &vm->entity); 2783 drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
2784 2784
2785 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 2785 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2786 dev_err(adev->dev, "still active bo inside vm\n"); 2786 dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e..1df1c6115341 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -470,7 +470,7 @@ static int uvd_v6_0_sw_fini(void *handle)
470 return r; 470 return r;
471 471
472 if (uvd_v6_0_enc_support(adev)) { 472 if (uvd_v6_0_enc_support(adev)) {
473 drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); 473 drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
474 474
475 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 475 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
476 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); 476 amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a6..ba244d3b74db 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -491,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
491 return r; 491 return r;
492 492
493 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 493 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
494 drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); 494 drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
495 495
496 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 496 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
497 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); 497 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a999147..45bfdf4cc107 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
78 gpu->lastctx = NULL; 78 gpu->lastctx = NULL;
79 mutex_unlock(&gpu->lock); 79 mutex_unlock(&gpu->lock);
80 80
81 drm_sched_entity_fini(&gpu->sched, 81 drm_sched_entity_destroy(&gpu->sched,
82 &ctx->sched_entity[i]); 82 &ctx->sched_entity[i]);
83 } 83 }
84 } 84 }
85 85
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 6a316701da73..7d2560699b84 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -256,7 +256,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
256 256
257 257
258/** 258/**
259 * drm_sched_entity_do_release - Destroy a context entity 259 * drm_sched_entity_flush - Flush a context entity
260 * 260 *
261 * @sched: scheduler instance 261 * @sched: scheduler instance
262 * @entity: scheduler entity 262 * @entity: scheduler entity
@@ -267,7 +267,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
267 * 267 *
268 * Returns the remaining time in jiffies left from the input timeout 268 * Returns the remaining time in jiffies left from the input timeout
269 */ 269 */
270long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, 270long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
271 struct drm_sched_entity *entity, long timeout) 271 struct drm_sched_entity *entity, long timeout)
272{ 272{
273 long ret = timeout; 273 long ret = timeout;
@@ -294,7 +294,7 @@ long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
294 294
295 return ret; 295 return ret;
296} 296}
297EXPORT_SYMBOL(drm_sched_entity_do_release); 297EXPORT_SYMBOL(drm_sched_entity_flush);
298 298
299/** 299/**
300 * drm_sched_entity_cleanup - Destroy a context entity 300 * drm_sched_entity_cleanup - Destroy a context entity
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(drm_sched_entity_do_release);
306 * entity and signals all jobs with an error code if the process was killed. 306 * entity and signals all jobs with an error code if the process was killed.
307 * 307 *
308 */ 308 */
309void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, 309void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
310 struct drm_sched_entity *entity) 310 struct drm_sched_entity *entity)
311{ 311{
312 312
@@ -357,7 +357,7 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
357 dma_fence_put(entity->last_scheduled); 357 dma_fence_put(entity->last_scheduled);
358 entity->last_scheduled = NULL; 358 entity->last_scheduled = NULL;
359} 359}
360EXPORT_SYMBOL(drm_sched_entity_cleanup); 360EXPORT_SYMBOL(drm_sched_entity_fini);
361 361
362/** 362/**
363 * drm_sched_entity_fini - Destroy a context entity 363 * drm_sched_entity_fini - Destroy a context entity
@@ -367,13 +367,13 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup);
367 * 367 *
368 * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() 368 * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
369 */ 369 */
370void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, 370void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
371 struct drm_sched_entity *entity) 371 struct drm_sched_entity *entity)
372{ 372{
373 drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 373 drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
374 drm_sched_entity_cleanup(sched, entity); 374 drm_sched_entity_fini(sched, entity);
375} 375}
376EXPORT_SYMBOL(drm_sched_entity_fini); 376EXPORT_SYMBOL(drm_sched_entity_destroy);
377 377
378static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) 378static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
379{ 379{
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index cdb582043b4f..567f7d46d912 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -151,7 +151,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
151 enum v3d_queue q; 151 enum v3d_queue q;
152 152
153 for (q = 0; q < V3D_MAX_QUEUES; q++) { 153 for (q = 0; q < V3D_MAX_QUEUES; q++) {
154 drm_sched_entity_fini(&v3d->queue[q].sched, 154 drm_sched_entity_destroy(&v3d->queue[q].sched,
155 &v3d_priv->sched_entity[q]); 155 &v3d_priv->sched_entity[q]);
156 } 156 }
157 157