about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2015-08-25 15:39:31 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-26 17:55:07 -0400
commitc2b6bd7e91aad8440a2f55bdbde6f5a8ae19fac5 (patch)
treea37deed40cd232edcdcb942bb3ffbe76048a1857
parentbd755d08709f05a81104e8f81d721b5cc353a2b3 (diff)
drm/amdgpu: fix wait queue handling in the scheduler
Freeing up a queue after signalling it isn't race free.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c16
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h4
2 files changed, 10 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 6dfbdea85e87..d99fe90991dc 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -117,7 +117,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
117 memset(entity, 0, sizeof(struct amd_sched_entity)); 117 memset(entity, 0, sizeof(struct amd_sched_entity));
118 entity->belongto_rq = rq; 118 entity->belongto_rq = rq;
119 entity->scheduler = sched; 119 entity->scheduler = sched;
120 init_waitqueue_head(&entity->wait_queue);
121 entity->fence_context = fence_context_alloc(1); 120 entity->fence_context = fence_context_alloc(1);
122 if(kfifo_alloc(&entity->job_queue, 121 if(kfifo_alloc(&entity->job_queue,
123 jobs * sizeof(void *), 122 jobs * sizeof(void *),
@@ -183,7 +182,7 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
183 * The client will not queue more IBs during this fini, consume existing 182 * The client will not queue more IBs during this fini, consume existing
184 * queued IBs 183 * queued IBs
185 */ 184 */
186 wait_event(entity->wait_queue, amd_sched_entity_is_idle(entity)); 185 wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
187 186
188 amd_sched_rq_remove_entity(rq, entity); 187 amd_sched_rq_remove_entity(rq, entity);
189 kfifo_free(&entity->job_queue); 188 kfifo_free(&entity->job_queue);
@@ -236,7 +235,7 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
236 fence_get(&fence->base); 235 fence_get(&fence->base);
237 sched_job->s_fence = fence; 236 sched_job->s_fence = fence;
238 237
239 r = wait_event_interruptible(entity->wait_queue, 238 r = wait_event_interruptible(entity->scheduler->job_scheduled,
240 amd_sched_entity_in(sched_job)); 239 amd_sched_entity_in(sched_job));
241 240
242 return r; 241 return r;
@@ -257,7 +256,7 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
257static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) 256static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
258{ 257{
259 if (amd_sched_ready(sched)) 258 if (amd_sched_ready(sched))
260 wake_up_interruptible(&sched->wait_queue); 259 wake_up_interruptible(&sched->wake_up_worker);
261} 260}
262 261
263/** 262/**
@@ -290,7 +289,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
290 atomic_dec(&sched->hw_rq_count); 289 atomic_dec(&sched->hw_rq_count);
291 fence_put(&sched_job->s_fence->base); 290 fence_put(&sched_job->s_fence->base);
292 sched->ops->process_job(sched_job); 291 sched->ops->process_job(sched_job);
293 wake_up_interruptible(&sched->wait_queue); 292 wake_up_interruptible(&sched->wake_up_worker);
294} 293}
295 294
296static int amd_sched_main(void *param) 295static int amd_sched_main(void *param)
@@ -306,7 +305,7 @@ static int amd_sched_main(void *param)
306 struct amd_sched_job *job; 305 struct amd_sched_job *job;
307 struct fence *fence; 306 struct fence *fence;
308 307
309 wait_event_interruptible(sched->wait_queue, 308 wait_event_interruptible(sched->wake_up_worker,
310 kthread_should_stop() || 309 kthread_should_stop() ||
311 (c_entity = amd_sched_select_context(sched))); 310 (c_entity = amd_sched_select_context(sched)));
312 311
@@ -329,7 +328,7 @@ static int amd_sched_main(void *param)
329 fence_put(fence); 328 fence_put(fence);
330 } 329 }
331 330
332 wake_up(&c_entity->wait_queue); 331 wake_up(&sched->job_scheduled);
333 } 332 }
334 return 0; 333 return 0;
335} 334}
@@ -361,7 +360,8 @@ struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
361 amd_sched_rq_init(&sched->sched_rq); 360 amd_sched_rq_init(&sched->sched_rq);
362 amd_sched_rq_init(&sched->kernel_rq); 361 amd_sched_rq_init(&sched->kernel_rq);
363 362
364 init_waitqueue_head(&sched->wait_queue); 363 init_waitqueue_head(&sched->wake_up_worker);
364 init_waitqueue_head(&sched->job_scheduled);
365 atomic_set(&sched->hw_rq_count, 0); 365 atomic_set(&sched->hw_rq_count, 0);
366 /* Each scheduler will run on a seperate kernel thread */ 366 /* Each scheduler will run on a seperate kernel thread */
367 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 367 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 13349a6a00af..e797796dcad7 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -44,7 +44,6 @@ struct amd_sched_entity {
44 struct kfifo job_queue; 44 struct kfifo job_queue;
45 spinlock_t queue_lock; 45 spinlock_t queue_lock;
46 struct amd_gpu_scheduler *scheduler; 46 struct amd_gpu_scheduler *scheduler;
47 wait_queue_head_t wait_queue;
48 uint64_t fence_context; 47 uint64_t fence_context;
49}; 48};
50 49
@@ -104,7 +103,8 @@ struct amd_gpu_scheduler {
104 atomic_t hw_rq_count; 103 atomic_t hw_rq_count;
105 struct amd_sched_backend_ops *ops; 104 struct amd_sched_backend_ops *ops;
106 uint32_t ring_id; 105 uint32_t ring_id;
107 wait_queue_head_t wait_queue; 106 wait_queue_head_t wake_up_worker;
107 wait_queue_head_t job_scheduled;
108 uint32_t hw_submission_limit; 108 uint32_t hw_submission_limit;
109 char name[20]; 109 char name[20];
110 void *priv; 110 void *priv;