Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..ea30d6ad4c13 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@ static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
-	struct amd_sched_fence *fence = amd_sched_fence_create(
-		entity, sched_job->owner);
-
-	if (!fence)
-		return -ENOMEM;
-
-	fence_get(&fence->base);
-	sched_job->s_fence = fence;
 
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 	trace_amd_sched_job(sched_job);
-	return 0;
 }
 
 /**
@@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 		list_del_init(&s_fence->list);
 		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
 	}
+	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab)
+			return -ENOMEM;
+	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
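
For context on the lifecycle this patch introduces: the fence slab becomes a single global cache shared by all scheduler instances, created by the first amd_sched_init() call and destroyed by the last amd_sched_fini() call, guarded by the atomic reference count. Below is a minimal sketch of that same reference-counted slab pattern in isolation; the example_* names and the example_obj struct are illustrative stand-ins, not identifiers from the patch.

#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical object type; stands in for struct amd_sched_fence. */
struct example_obj {
	u64 payload;
};

static struct kmem_cache *example_slab;
static atomic_t example_slab_ref = ATOMIC_INIT(0);

/* First user creates the cache; later users only take a reference. */
static int example_slab_get(void)
{
	if (atomic_inc_return(&example_slab_ref) == 1) {
		example_slab = kmem_cache_create("example_slab",
						 sizeof(struct example_obj), 0,
						 SLAB_HWCACHE_ALIGN, NULL);
		if (!example_slab)
			return -ENOMEM;
	}
	return 0;
}

/* Last user to drop its reference destroys the cache. */
static void example_slab_put(void)
{
	if (atomic_dec_and_test(&example_slab_ref))
		kmem_cache_destroy(example_slab);
}

In the patch, example_slab_get() corresponds to the block added to amd_sched_init() and example_slab_put() to the block added to amd_sched_fini(), so the cache stays alive as long as at least one scheduler instance exists.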