Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--	drivers/gpu/drm/scheduler/sched_main.c	33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587aaef9..18ebbb05762e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -196,6 +196,19 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 	schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
 
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+	mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
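
Note: drm_sched_fault() simply re-arms the scheduler's TDR delayed work with a zero delay on the system workqueue, so the normal timeout handler runs right away instead of waiting for sched->timeout to expire. A minimal sketch of how a driver could use it from a fault interrupt follows; the foo_* names are hypothetical and not part of this patch.

/* Illustrative only: kick timeout handling as soon as a hardware fault
 * interrupt fires, rather than waiting for the TDR period to elapse.
 */
static irqreturn_t foo_fault_irq_handler(int irq, void *data)
{
	struct foo_ring *ring = data;

	drm_sched_fault(&ring->sched);	/* runs drm_sched_job_timedout() now */

	return IRQ_HANDLED;
}
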
@@ -220,7 +233,6 @@ static void drm_sched_job_finish(struct work_struct *work)
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
-	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
 
@@ -283,6 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 already_signaled:
 		;
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
@@ -406,6 +419,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
 	struct drm_gpu_scheduler *sched;
 
 	drm_sched_entity_select_rq(entity);
+	if (!entity->rq)
+		return -ENOENT;
+
 	sched = entity->rq->sched;
 
 	job->sched = sched;
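
Note: with the new check, drm_sched_job_init() fails with -ENOENT when the entity ends up without a runqueue to run on, instead of dereferencing a NULL entity->rq. Callers should propagate the error; below is a hedged sketch of a driver submission path, with hypothetical foo_* names.

static int foo_submit(struct foo_job *job, struct drm_sched_entity *entity,
		      void *owner)
{
	int r;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)	/* -ENOENT: entity has no runqueue */
		return r;

	/* ... attach fences and push the job to the entity ... */
	return 0;
}
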
@@ -424,6 +440,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+	job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
+/**
  * drm_sched_ready - is the scheduler ready
  *
  * @sched: scheduler instance
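
Note: drm_sched_job_cleanup() takes over the dma_fence_put() on the finished fence that was removed from drm_sched_job_finish() above, so dropping that reference becomes the driver's responsibility, typically from its free_job callback (and on error paths after a successful drm_sched_job_init()). A hedged sketch, assuming a driver job struct that embeds drm_sched_job as "base":

static void foo_free_job(struct drm_sched_job *s_job)
{
	struct foo_job *job = container_of(s_job, struct foo_job, base);

	drm_sched_job_cleanup(s_job);	/* drops the finished fence reference */

	kfree(job);
}
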
@@ -619,6 +647,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		return PTR_ERR(sched->thread);
 	}
 
+	sched->ready = true;
 	return 0;
 }
 EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +663,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+
+	sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
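
Note: drm_sched_init() now marks the scheduler as ready once its kthread is up, and drm_sched_fini() clears the flag on teardown. One way a driver might use this flag, as a sketch under the assumption that it wants to reject submissions to a scheduler that never started or has already been torn down (foo_* names are illustrative):

static int foo_ring_submit(struct foo_ring *ring)
{
	if (!ring->sched.ready)	/* scheduler not running */
		return -ENODEV;

	/* ... normal submission path ... */
	return 0;
}
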