aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorAndrey Grodzovsky <andrey.grodzovsky@amd.com>2018-10-18 12:32:46 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-11-05 14:21:22 -0500
commitfaf6e1a87e07423a729e04fb2e8188742e89ea4c (patch)
tree51c205851d3e99371bbf271b8f7e3e59b32ceb04 /drivers/gpu
parent2bb42410b1bd324912389c6ac748df1c1befd69f (diff)
drm/sched: Add boolean to mark if sched is ready to work v5
Problem: A particular scheduler may become unusable (underlying HW) after some event (e.g. GPU reset). If it's later chosen by the get free sched. policy a command will fail to be submitted. Fix: Add a driver specific callback to report the sched status so rq with bad sched can be avoided in favor of working one or none in which case job init will fail. v2: Switch from driver callback to flag in scheduler. v3: rebase v4: Remove ready parameter from drm_sched_init, set unconditionally to true once init done. v5: fix missed change in v3d in v4 (Alex) Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c9
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
2 files changed, 14 insertions, 1 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54a99c2..ba54c30a466e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
130 int i; 130 int i;
131 131
132 for (i = 0; i < entity->num_rq_list; ++i) { 132 for (i = 0; i < entity->num_rq_list; ++i) {
133 num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); 133 struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
134
135 if (!entity->rq_list[i]->sched->ready) {
136 DRM_WARN("sched%s is not ready, skipping", sched->name);
137 continue;
138 }
139
140 num_jobs = atomic_read(&sched->num_jobs);
134 if (num_jobs < min_jobs) { 141 if (num_jobs < min_jobs) {
135 min_jobs = num_jobs; 142 min_jobs = num_jobs;
136 rq = entity->rq_list[i]; 143 rq = entity->rq_list[i];
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 63b997d9c562..6b2fd49334f7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -420,6 +420,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
420 struct drm_gpu_scheduler *sched; 420 struct drm_gpu_scheduler *sched;
421 421
422 drm_sched_entity_select_rq(entity); 422 drm_sched_entity_select_rq(entity);
423 if (!entity->rq)
424 return -ENOENT;
425
423 sched = entity->rq->sched; 426 sched = entity->rq->sched;
424 427
425 job->sched = sched; 428 job->sched = sched;
@@ -633,6 +636,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
633 return PTR_ERR(sched->thread); 636 return PTR_ERR(sched->thread);
634 } 637 }
635 638
639 sched->ready = true;
636 return 0; 640 return 0;
637} 641}
638EXPORT_SYMBOL(drm_sched_init); 642EXPORT_SYMBOL(drm_sched_init);
@@ -648,5 +652,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
648{ 652{
649 if (sched->thread) 653 if (sched->thread)
650 kthread_stop(sched->thread); 654 kthread_stop(sched->thread);
655
656 sched->ready = false;
651} 657}
652EXPORT_SYMBOL(drm_sched_fini); 658EXPORT_SYMBOL(drm_sched_fini);