author     Nayan Deshmukh <nayan26deshmukh@gmail.com>    2018-07-20 08:21:05 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2018-07-25 16:06:19 -0400
commit     cdc50176597cb44ce25eb7331c450058775b8d2a (patch)
tree       3a225ac827c904a893f45677e08c51d75170d179 /drivers
parent     bf314ca3f10d4ba4335808dc678631908881db8b (diff)
drm/scheduler: modify API to avoid redundancy
entity has a scheduler field and we don't need the sched argument
in any of the functions where entity is provided.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
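In practice the change reduces every call site to the pattern below (a minimal before/after sketch, not taken verbatim from any single driver; `ring`, `entity`, `job` and `owner` stand in for whatever the caller already holds — the new prototypes themselves appear in the gpu_scheduler.c hunk):

	/* Before: the scheduler had to be passed alongside the entity. */
	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
	drm_sched_entity_destroy(&ring->sched, entity);

	/* After: the helpers read the scheduler from entity->sched themselves. */
	r = drm_sched_job_init(&job->base, entity, owner);
	drm_sched_entity_destroy(entity);

Dropping the argument also removes the possibility of passing a scheduler that does not match the entity's own.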
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c      13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c       2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c       2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c         3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c       4
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c    20
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c                 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c                 2
12 files changed, 27 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index c5d81d6a90e0..4d4575b3bba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-					&ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				continue;
 
 			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-						      &ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..631481a730e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6039f8e21358..8c4358e36c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e1e4810b9d9e..fca86d71fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..74b4a28a41d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2642,7 +2642,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 36414ba56b22..207532c05eb8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
 
-			drm_sched_entity_destroy(&gpu->sched,
-				&ctx->sched_entity[i]);
+			drm_sched_entity_destroy(&ctx->sched_entity[i]);
 		}
 	}
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..590e44b0d963 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dac71e3b4514..a3b55c542025 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1dceba2b42fd..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-					 &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index e1fcbb4cd0ae..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)