author		Monk Liu <Monk.Liu@amd.com>	2016-03-04 01:33:44 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-05-02 15:17:53 -0400
commit		4835096b07420c1d74cc5711c461830016e6cb03 (patch)
tree		f52f4f47daa5a5319ad9823a0cdfe75394314236 /drivers/gpu/drm/amd
parent		e472d2588eef38c2f16f71d6160e58fb5948e84f (diff)
drm/amdgpu: put job to list before done
The mirror_list will be used by the upcoming timeout-detection feature; putting jobs on it before they complete is needed to properly detect a GPU timeout from the scheduler.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	| 12
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.h	|  6
-rw-r--r--	drivers/gpu/drm/amd/scheduler/sched_fence.c	|  9
3 files changed, 27 insertions, 0 deletions
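
For context on the change below: ring_mirror_list tracks every job that has been handed to the hardware but has not yet signaled its fence, so a later timeout feature can walk the list to find hung jobs. A minimal sketch of such a walker, assuming a hypothetical amd_sched_job_timedout() helper that is not part of this patch:

	static void amd_sched_job_timedout(struct amd_gpu_scheduler *sched)
	{
		struct amd_sched_job *s_job;
		unsigned long flags;

		spin_lock_irqsave(&sched->job_list_lock, flags);
		/* jobs are appended in submission order, so the first
		 * entry is the oldest job still in flight on the ring */
		s_job = list_first_entry_or_null(&sched->ring_mirror_list,
						 struct amd_sched_job, node);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);

		if (s_job)
			; /* e.g. dump fence state, then trigger recovery */
	}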
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 8d49ea2e4134..af846f208c67 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -349,12 +349,15 @@ int amd_sched_job_init(struct amd_sched_job *job,
 			struct amd_sched_entity *entity,
 			void *owner, struct fence **fence)
 {
+	INIT_LIST_HEAD(&job->node);
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
 
+	job->s_fence->s_job = job;
+
 	if (fence)
 		*fence = &job->s_fence->base;
 	return 0;
@@ -408,6 +411,12 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	unsigned long flags;
 
 	atomic_dec(&sched->hw_rq_count);
+
+	/* remove job from ring_mirror_list */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_del_init(&s_fence->s_job->node);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 	amd_sched_fence_signal(s_fence);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
 		cancel_delayed_work(&s_fence->dwork);
@@ -480,6 +489,7 @@ static int amd_sched_main(void *param)
 		}
 
 		atomic_inc(&sched->hw_rq_count);
+		amd_sched_job_pre_schedule(sched, sched_job);
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
@@ -527,6 +537,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
+	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
 		sched_fence_slab = kmem_cache_create(
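
A note on the removal path above: list_del_init() (rather than plain list_del()) re-initializes the node after unlinking it, so later code can safely ask whether a job is still in flight. A hedged example of the idiom; the check itself is not in this patch:

	/* true only while the job sits on ring_mirror_list,
	 * i.e. between pre_schedule and hw fence completion */
	bool in_flight = !list_empty(&s_job->node);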
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index ee1e8127f863..2e3b8308186c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -76,6 +76,7 @@ struct amd_sched_fence {
 	void				*owner;
 	struct delayed_work		dwork;
 	struct list_head		list;
+	struct amd_sched_job		*s_job;
 };
 
 struct amd_sched_job {
@@ -85,6 +86,7 @@ struct amd_sched_job {
 	bool			use_sched;	/* true if the job goes to scheduler */
 	struct fence_cb		cb_free_job;
 	struct work_struct	work_free_job;
+	struct list_head	node;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,6 +130,8 @@ struct amd_gpu_scheduler {
 	struct list_head	fence_list;
 	spinlock_t		fence_list_lock;
 	struct task_struct	*thread;
+	struct list_head	ring_mirror_list;
+	spinlock_t		job_list_lock;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
@@ -151,4 +155,6 @@ int amd_sched_job_init(struct amd_sched_job *job,
 		       struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *entity,
 		       void *owner, struct fence **fence);
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+				struct amd_sched_job *s_job);
 #endif
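
The new s_job back-pointer is what lets the fence-completion path reach the list node: amd_sched_process_job() starts from an amd_sched_fence and gets to the job through it. An illustration of the pointer chain, using only names from the structs above:

	/* inside the hw-fence callback; s_fence obtained via container_of() */
	struct amd_sched_job *s_job = s_fence->s_job;	/* set in amd_sched_job_init() */
	list_del_init(&s_job->node);			/* under sched->job_list_lock */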
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index dc115aea352b..33ddd38185d5 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -57,6 +57,15 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+				struct amd_sched_job *s_job)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
 void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
 {
 	struct fence_cb *cur, *tmp;
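
Taken together, the hunks give each job the following lifecycle on the mirror list; this is an illustrative summary of the patch, not code from it:

	amd_sched_job_init(job, sched, entity, owner, &fence);
		/* INIT_LIST_HEAD(&job->node); job->s_fence->s_job = job; */
	/* ...scheduler thread picks the job... */
	amd_sched_job_pre_schedule(sched, job);	/* list_add_tail onto ring_mirror_list */
	fence = sched->ops->run_job(job);
	/* ...hardware fence signals... */
	/* amd_sched_process_job(): list_del_init(&job->node) */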