path: root/drivers/gpu/drm/scheduler/sched_main.c
author		Christian König <christian.koenig@amd.com>	2018-08-06 09:01:45 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-08-27 12:10:44 -0400
commit		23f67981fd92859a156fc7d2e41f98d826f68a6c (patch)
tree		556401e0e927c5a08579f93e2ded426c0723faaa /drivers/gpu/drm/scheduler/sched_main.c
parent		7b10574eac0b44f99e8e1d3ea9345a78d1fcaf07 (diff)

drm/scheduler: rename gpu_scheduler.c to sched_main.c

Better match the naming of the other components.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--	drivers/gpu/drm/scheduler/sched_main.c	604
1 file changed, 604 insertions, 0 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
new file mode 100644
index 000000000000..9ca741f3a0bc
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -0,0 +1,604 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */

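/*
 * Illustrative sketch, not part of the original file: the backend operations
 * a driver is expected to provide, per the overview above. All my_* names
 * are hypothetical driver-side symbols; only struct drm_sched_backend_ops
 * and its hooks (as used via sched->ops below) come from the scheduler API.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// Hand the job to the hardware ring, return its hw fence.
 *		return my_ring_submit(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_job_timedout,
 *		.free_job	= my_job_free,
 *	};
 */
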
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Round robin: continue the scan after the entity picked last time */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Otherwise scan from the start, stopping once we wrap back around */
	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

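/*
 * Hedged usage sketch (not from this file): a driver can consult the export
 * above from its submission path to skip an explicit synchronization step
 * when the dependency fence already comes from the same scheduler.
 * my_emit_pipeline_sync() is a hypothetical driver helper.
 *
 *	if (!drm_sched_dependency_optimized(fence, sched_job->entity))
 *		my_emit_pipeline_sync(ring, fence);
 */
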
/* drm_sched_job_finish() is called after the hw fence has signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/*
	 * Canceling the timeout without removing our job from the ring mirror
	 * list is safe, as we will only end up in this worker if our job's
	 * finished fence has been signaled. So even if some other worker
	 * manages to find this job as the next job in the list, the fence
	 * signaled check below will prevent the timeout from being restarted.
	 */
	cancel_delayed_work_sync(&s_job->work_tdr);

	spin_lock(&sched->job_list_lock);
	/* queue TDR for next job */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
		struct drm_sched_job *next = list_next_entry(s_job, node);

		if (!dma_fence_is_signaled(&next->s_fence->finished))
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	/* remove job from ring_mirror_list */
	list_del(&s_job->node);
	spin_unlock(&sched->job_list_lock);

	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	/* arm the TDR timer if this job is at the head of the mirror list */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

/**
 * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
 *
 * @sched: scheduler instance
 * @bad: bad scheduler job
 *
 */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* don't increase @bad's karma if it's from the KERNEL RQ,
		 * because a GPU hang can corrupt kernel jobs (like VM
		 * updating jobs), but kernel jobs are always considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

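/*
 * Sketch of how the export above and drm_sched_job_recovery() below are
 * meant to pair up during GPU recovery (driver-side pseudocode; my_hw_reset()
 * is a hypothetical driver helper):
 *
 *	drm_sched_hw_job_reset(&ring->sched, bad_job);	// detach hw fences
 *	my_hw_reset(dev);				// reset the hardware
 *	drm_sched_job_recovery(&ring->sched);		// resubmit mirror list
 */
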
/**
 * drm_sched_job_recovery - recover jobs after a reset
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

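/*
 * Minimal submission sketch (illustrative, error handling trimmed; a struct
 * my_job embedding a drm_sched_job as its "base" member is hypothetical):
 *
 *	int r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *	drm_sched_entity_push_job(&job->base, entity);
 *
 * drm_sched_entity_push_job() makes the job visible to the scheduler, which
 * eventually picks it up in drm_sched_main() below.
 */
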
/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_fence *s_fence =
		container_of(cb, struct drm_sched_fence, cb);
	struct drm_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);
	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		/* sleep until a ready entity shows up or we should stop */
		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			/* -ENOENT means the hw fence already signaled */
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

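/*
 * Illustrative init/teardown, one scheduler per hw ring (values are
 * placeholders, not recommendations; my_sched_ops is the hypothetical
 * callback table sketched near the top of this file):
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *			   hw_submission, hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	...
 *	drm_sched_fini(&ring->sched);
 */
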
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);