about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  90
1 file changed, 62 insertions, 28 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index d99fe90991dc..ab8577f8ed4a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,8 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30static struct amd_sched_job *
31amd_sched_entity_pop_job(struct amd_sched_entity *entity);
30static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 32static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
31 33
32/* Initialize a given run queue struct */ 34/* Initialize a given run queue struct */
@@ -56,34 +58,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
56} 58}
57 59
58/** 60/**
59 * Select next entity from a specified run queue with round robin policy. 61 * Select next job from a specified run queue with round robin policy.
60 * It could return the same entity as current one if current is the only 62 * Return NULL if nothing available.
61 * available one in the queue. Return NULL if nothing available.
62 */ 63 */
63static struct amd_sched_entity * 64static struct amd_sched_job *
64amd_sched_rq_select_entity(struct amd_sched_rq *rq) 65amd_sched_rq_select_job(struct amd_sched_rq *rq)
65{ 66{
66 struct amd_sched_entity *entity; 67 struct amd_sched_entity *entity;
68 struct amd_sched_job *job;
67 69
68 spin_lock(&rq->lock); 70 spin_lock(&rq->lock);
69 71
70 entity = rq->current_entity; 72 entity = rq->current_entity;
71 if (entity) { 73 if (entity) {
72 list_for_each_entry_continue(entity, &rq->entities, list) { 74 list_for_each_entry_continue(entity, &rq->entities, list) {
73 if (!kfifo_is_empty(&entity->job_queue)) { 75 job = amd_sched_entity_pop_job(entity);
76 if (job) {
74 rq->current_entity = entity; 77 rq->current_entity = entity;
75 spin_unlock(&rq->lock); 78 spin_unlock(&rq->lock);
76 return rq->current_entity; 79 return job;
77 } 80 }
78 } 81 }
79 } 82 }
80 83
81 list_for_each_entry(entity, &rq->entities, list) { 84 list_for_each_entry(entity, &rq->entities, list) {
82 85
83 if (!kfifo_is_empty(&entity->job_queue)) { 86 job = amd_sched_entity_pop_job(entity);
87 if (job) {
84 rq->current_entity = entity; 88 rq->current_entity = entity;
85 spin_unlock(&rq->lock); 89 spin_unlock(&rq->lock);
86 return rq->current_entity; 90 return job;
87 } 91 }
88 92
89 if (entity == rq->current_entity) 93 if (entity == rq->current_entity)
@@ -188,6 +192,39 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
188 kfifo_free(&entity->job_queue); 192 kfifo_free(&entity->job_queue);
189} 193}
190 194
/*
 * Fence callback run when an entity's dependency signals: clear the
 * pending dependency, drop the fence reference, and wake the scheduler
 * worker so it re-evaluates this entity's job queue.
 */
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	/* Clear before waking up so amd_sched_entity_pop_job() no longer
	 * sees a pending dependency when the worker re-runs.
	 * NOTE(review): plain store with no barrier pairs with the
	 * ACCESS_ONCE read in pop_job — presumably adequate here, confirm. */
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->scheduler);
}
203
/*
 * Peek at the next runnable job of an entity without dequeuing it.
 *
 * Returns NULL when the entity's queue is empty or when the head job
 * still has an unsatisfied dependency; in the latter case a callback
 * (amd_sched_entity_wakeup) is armed on the dependency fence so the
 * scheduler is woken once it signals.  The job is only peeked, not
 * removed — the worker thread pops it from the kfifo after submission.
 */
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->scheduler;
	struct amd_sched_job *job;

	/* A pending dependency blocks the whole entity, not just one job. */
	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
		return NULL;

	/* Walk the job's dependencies one at a time; presumably
	 * ops->dependency() hands out each unsignaled fence in turn and
	 * NULL when none remain — TODO confirm against the backend. */
	while ((entity->dependency = sched->ops->dependency(job))) {

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			/* Fence already signaled: drop its reference and
			 * try the next dependency. */
			fence_put(entity->dependency);
		else
			/* Callback armed; the reference is released by the
			 * wakeup callback when the fence signals. */
			return NULL;
	}

	return job;
}
227
191/** 228/**
192 * Helper to submit a job to the job queue 229 * Helper to submit a job to the job queue
193 * 230 *
@@ -227,7 +264,6 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
227 struct amd_sched_entity *entity = sched_job->s_entity; 264 struct amd_sched_entity *entity = sched_job->s_entity;
228 struct amd_sched_fence *fence = amd_sched_fence_create( 265 struct amd_sched_fence *fence = amd_sched_fence_create(
229 entity, sched_job->owner); 266 entity, sched_job->owner);
230 int r;
231 267
232 if (!fence) 268 if (!fence)
233 return -ENOMEM; 269 return -ENOMEM;
@@ -235,10 +271,10 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
235 fence_get(&fence->base); 271 fence_get(&fence->base);
236 sched_job->s_fence = fence; 272 sched_job->s_fence = fence;
237 273
238 r = wait_event_interruptible(entity->scheduler->job_scheduled, 274 wait_event(entity->scheduler->job_scheduled,
239 amd_sched_entity_in(sched_job)); 275 amd_sched_entity_in(sched_job));
240 276
241 return r; 277 return 0;
242} 278}
243 279
244/** 280/**
@@ -260,22 +296,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
260} 296}
261 297
262/** 298/**
263 * Select next entity containing real IB submissions 299 * Select next to run
264*/ 300*/
265static struct amd_sched_entity * 301static struct amd_sched_job *
266amd_sched_select_context(struct amd_gpu_scheduler *sched) 302amd_sched_select_job(struct amd_gpu_scheduler *sched)
267{ 303{
268 struct amd_sched_entity *tmp; 304 struct amd_sched_job *job;
269 305
270 if (!amd_sched_ready(sched)) 306 if (!amd_sched_ready(sched))
271 return NULL; 307 return NULL;
272 308
273 /* Kernel run queue has higher priority than normal run queue*/ 309 /* Kernel run queue has higher priority than normal run queue*/
274 tmp = amd_sched_rq_select_entity(&sched->kernel_rq); 310 job = amd_sched_rq_select_job(&sched->kernel_rq);
275 if (tmp == NULL) 311 if (job == NULL)
276 tmp = amd_sched_rq_select_entity(&sched->sched_rq); 312 job = amd_sched_rq_select_job(&sched->sched_rq);
277 313
278 return tmp; 314 return job;
279} 315}
280 316
281static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) 317static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -301,22 +337,19 @@ static int amd_sched_main(void *param)
301 sched_setscheduler(current, SCHED_FIFO, &sparam); 337 sched_setscheduler(current, SCHED_FIFO, &sparam);
302 338
303 while (!kthread_should_stop()) { 339 while (!kthread_should_stop()) {
304 struct amd_sched_entity *c_entity = NULL; 340 struct amd_sched_entity *entity;
305 struct amd_sched_job *job; 341 struct amd_sched_job *job;
306 struct fence *fence; 342 struct fence *fence;
307 343
308 wait_event_interruptible(sched->wake_up_worker, 344 wait_event_interruptible(sched->wake_up_worker,
309 kthread_should_stop() || 345 kthread_should_stop() ||
310 (c_entity = amd_sched_select_context(sched))); 346 (job = amd_sched_select_job(sched)));
311 347
312 if (!c_entity) 348 if (!job)
313 continue; 349 continue;
314 350
315 r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *)); 351 entity = job->s_entity;
316 if (r != sizeof(void *))
317 continue;
318 atomic_inc(&sched->hw_rq_count); 352 atomic_inc(&sched->hw_rq_count);
319
320 fence = sched->ops->run_job(job); 353 fence = sched->ops->run_job(job);
321 if (fence) { 354 if (fence) {
322 r = fence_add_callback(fence, &job->cb, 355 r = fence_add_callback(fence, &job->cb,
@@ -328,6 +361,7 @@ static int amd_sched_main(void *param)
328 fence_put(fence); 361 fence_put(fence);
329 } 362 }
330 363
364 kfifo_out(&entity->job_queue, &job, sizeof(job));
331 wake_up(&sched->job_scheduled); 365 wake_up(&sched->job_scheduled);
332 } 366 }
333 return 0; 367 return 0;