 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 44 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b7cd108212c2..651129f2ec1d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq		The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
@@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 }
 
 /**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
  * Destroy a context entity
  *
  * @sched	Pointer to scheduler instance
@@ -252,9 +269,6 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
@@ -328,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -405,13 +419,16 @@ static int amd_sched_main(void *param)
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
 
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
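
Taken together, the patch splits scheduling into two stages: amd_sched_rq_select_entity() only finds a run-queue entity that could provide a job (non-empty job_queue and no pending dependency, per the new amd_sched_entity_is_ready() helper), and the worker thread in amd_sched_main() then pops the job from the selected entity. Below is a minimal, self-contained userspace sketch of that flow; the types and helpers (job, entity, rq, rq_select_entity, entity_pop_job) are simplified stand-ins invented for illustration, with none of the kernel's spinlock, kfifo, or fence machinery.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct job {
	int id;
};

struct entity {
	struct job *queue[4];	/* pending jobs, consumed front-to-back */
	int head, count;	/* stand-in for the kfifo job_queue */
	bool blocked;		/* stand-in for entity->dependency */
};

struct rq {
	struct entity *entities[3];
	int num_entities;
	int current;		/* round-robin cursor, like rq->current_entity */
};

/* Like amd_sched_entity_is_ready(): an entity can provide a job iff its
 * queue is non-empty and it is not waiting on a dependency. */
static bool entity_is_ready(const struct entity *e)
{
	return e->count > 0 && !e->blocked;
}

/* Like amd_sched_rq_select_entity(): round-robin scan starting after the
 * previously chosen entity; return the first ready one, or NULL. */
static struct entity *rq_select_entity(struct rq *rq)
{
	int i;

	if (!rq->num_entities)
		return NULL;
	for (i = 1; i <= rq->num_entities; i++) {
		int idx = (rq->current + i) % rq->num_entities;

		if (entity_is_ready(rq->entities[idx])) {
			rq->current = idx;
			return rq->entities[idx];
		}
	}
	return NULL;
}

/* Like amd_sched_entity_pop_job() in the new flow: dequeue only after the
 * entity has been selected, so selection itself never consumes the queue. */
static struct job *entity_pop_job(struct entity *e)
{
	struct job *job = e->queue[e->head];

	e->head = (e->head + 1) % 4;
	e->count--;
	return job;
}

int main(void)
{
	struct job a = { 1 }, b = { 2 };
	struct entity e0 = { .queue = { &a }, .count = 1, .blocked = true };
	struct entity e1 = { .queue = { &b }, .count = 1 };
	struct rq rq = { .entities = { &e0, &e1 }, .num_entities = 2 };
	struct entity *picked = rq_select_entity(&rq);

	/* e0 is blocked on a dependency, so e1 is selected instead. */
	if (picked)
		printf("ran job %d\n", entity_pop_job(picked)->id);
	return 0;
}

The sketch mirrors the design point of the patch: the scan under rq->lock only answers "could this entity run?", while the actual dequeue happens once, after selection, which is what allows the dependency check to move out of amd_sched_entity_pop_job() and into amd_sched_entity_is_ready().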