Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  127
 1 file changed, 85 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ea30d6ad4c13..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
@@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 }
 
 /**
+ * Check if entity is ready
+ *
+ * @entity The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
  * Destroy a context entity
  *
  * @sched Pointer to scheduler instance
@@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 	amd_sched_wakeup(entity->sched);
 }
 
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct fence * fence = entity->dependency;
+	struct amd_sched_fence *s_fence;
+
+	if (fence->context == entity->fence_context) {
+		/* We can ignore fences from ourself */
+		fence_put(entity->dependency);
+		return false;
+	}
+
+	s_fence = to_amd_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched) {
+		/* Fence is from the same scheduler */
+		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+			/* Ignore it when it is already scheduled */
+			fence_put(entity->dependency);
+			return false;
+		}
+
+		/* Wait for fence to be scheduled */
+		entity->cb.func = amd_sched_entity_wakeup;
+		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+		return true;
+	}
+
+	if (!fence_add_callback(entity->dependency, &entity->cb,
+				amd_sched_entity_wakeup))
+		return true;
+
+	fence_put(entity->dependency);
+	return false;
+}
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job))) {
-
-		if (entity->dependency->context == entity->fence_context) {
-			/* We can ignore fences from ourself */
-			fence_put(entity->dependency);
-			continue;
-		}
-
-		if (fence_add_callback(entity->dependency, &entity->cb,
-				       amd_sched_entity_wakeup))
-			fence_put(entity->dependency);
-		else
-			return NULL;
-	}
+	while ((entity->dependency = sched->ops->dependency(sched_job)))
+		if (amd_sched_entity_add_dependency_cb(entity))
+			return NULL;
 
 	return sched_job;
 }
@@ -250,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+	struct amd_gpu_scheduler *sched = sched_job->sched;
 	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
@@ -264,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(sched_job->sched);
+		amd_sched_wakeup(sched);
 
 	return added;
 }
@@ -280,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
+	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-	trace_amd_sched_job(sched_job);
 }
 
 /**
@@ -304,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -381,13 +420,16 @@ static int amd_sched_main(void *param)
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
 
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -400,6 +442,7 @@ static int amd_sched_main(void *param)
 
 		atomic_inc(&sched->hw_rq_count);
 		fence = sched->ops->run_job(sched_job);
+		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
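
For reference, the hunks above split the main scheduler loop into two steps: pick a ready entity first, then pop a job from it. The following is a simplified sketch of the resulting loop body in amd_sched_main(), condensed from the added lines above; the surrounding while (!kthread_should_stop()) loop, the variable declarations, the timeout handling and the hardware-fence callback path are assumed from the hunk context and are not shown verbatim in this diff:

	while (!kthread_should_stop()) {
		/* Sleep until some run queue has a ready entity (or we should stop). */
		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());
		if (!entity)
			continue;

		/* Only now is the next job fetched from that entity's queue. */
		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		/* New in this patch: mark the scheduler fence as scheduled right after
		 * submission; amd_sched_entity_add_dependency_cb() waits for exactly this. */
		amd_sched_fence_scheduled(sched_job->s_fence);
		/* ... hardware fence callback setup continues as before ... */
	}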