author		Christian König <christian.koenig@amd.com>	2015-08-05 15:22:10 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 16:51:05 -0400
commit		6f0e54a964932d3d5252ac1ff7ab153c984a5d51 (patch)
tree		b76540873ccdb18a92e9369943466ba9d8ca67ec
parent		91404fb20825418fd9ab8e6533bc336e1ffc748e (diff)

drm/amdgpu: cleanup and fix scheduler fence handling v2
v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>

-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c	26
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	61
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.h	7
3 files changed, 44 insertions(+), 50 deletions(-)
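
In short, this patch moves fence-callback handling out of the amdgpu backend and into the scheduler core: run_job now returns the hardware fence of the submitted job, and amd_sched_main attaches the completion handler itself. Condensed from the hunks below (a simplified excerpt for orientation, not literal kernel sources), the resulting main-loop flow is:

	fence = sched->ops->run_job(sched, c_entity, sched_job);
	if (fence) {
		/* notify the core once the hardware fence signals */
		r = fence_add_callback(fence, &sched_job->cb,
				       amd_sched_process_job);
		if (r == -ENOENT)
			/* fence already signaled: run the handler inline */
			amd_sched_process_job(fence, &sched_job->cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
		fence_put(fence); /* drop the backend's extra reference */
	}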
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 787b93db6796..039bd1f748f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -43,16 +43,9 @@ static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
 		return r;
 }
 
-static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
-{
-	struct amd_sched_job *sched_job =
-		container_of(cb, struct amd_sched_job, cb);
-	amd_sched_process_job(sched_job);
-}
-
-static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
-				 struct amd_sched_entity *entity,
-				 struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
+					  struct amd_sched_entity *entity,
+					  struct amd_sched_job *job)
 {
 	int r = 0;
 	struct amdgpu_cs_parser *sched_job;
@@ -60,7 +53,7 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 
 	if (!job || !job->job) {
 		DRM_ERROR("job is null\n");
-		return;
+		return NULL;
 	}
 	sched_job = (struct amdgpu_cs_parser *)job->job;
 	mutex_lock(&sched_job->job_lock);
@@ -70,12 +63,7 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 			       sched_job->filp);
 	if (r)
 		goto err;
-	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-	if (fence_add_callback(&fence->base,
-			       &job->cb, amdgpu_fence_sched_cb)) {
-		DRM_ERROR("fence add callback failed\n");
-		goto err;
-	}
+	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
 	if (sched_job->run_job) {
 		r = sched_job->run_job(sched_job);
@@ -86,11 +74,13 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 	amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
 
 	mutex_unlock(&sched_job->job_lock);
-	return;
+	return &fence->base;
+
 err:
 	DRM_ERROR("Run job error\n");
 	mutex_unlock(&sched_job->job_lock);
 	schedule_work(&sched_job->job_work);
+	return NULL;
 }
 
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
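
Note the fence lifetime handling above: amdgpu_sched_run_job() takes an extra reference with amdgpu_fence_ref() before handing &fence->base back to the core, and the core drops that reference with fence_put() after installing its callback (see the gpu_scheduler.c hunk below). The extra reference keeps the fence alive across the return even if the hardware completes the job immediately.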
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index eb3b0993a8cd..438dc23f4bb3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -175,9 +175,9 @@ exit:
  * return 0 if succeed. negative error code on failure
 */
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
-			  struct amd_sched_entity *entity,
-			  struct amd_run_queue *rq,
-			  uint32_t jobs)
+			  struct amd_sched_entity *entity,
+			  struct amd_run_queue *rq,
+			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
 
@@ -353,6 +353,24 @@ int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 	return 0;
 }
 
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *sched_job =
+		container_of(cb, struct amd_sched_job, cb);
+	struct amd_gpu_scheduler *sched;
+	unsigned long flags;
+
+	sched = sched_job->sched;
+	spin_lock_irqsave(&sched->queue_lock, flags);
+	list_del(&sched_job->list);
+	atomic64_dec(&sched->hw_rq_count);
+	spin_unlock_irqrestore(&sched->queue_lock, flags);
+
+	sched->ops->process_job(sched, sched_job->job);
+	kfree(sched_job);
+	wake_up_interruptible(&sched->wait_queue);
+}
+
 static int amd_sched_main(void *param)
 {
 	int r;
@@ -365,6 +383,8 @@ static int amd_sched_main(void *param)
 
 	while (!kthread_should_stop()) {
 		struct amd_sched_job *sched_job = NULL;
+		struct fence *fence;
+
 		wait_event_interruptible(sched->wait_queue,
 					 is_scheduler_ready(sched) &&
 					 (c_entity = select_context(sched)));
@@ -388,37 +408,22 @@ static int amd_sched_main(void *param)
 			spin_unlock_irqrestore(&sched->queue_lock, flags);
 		}
 		mutex_lock(&sched->sched_lock);
-		sched->ops->run_job(sched, c_entity, sched_job);
+		fence = sched->ops->run_job(sched, c_entity, sched_job);
+		if (fence) {
+			r = fence_add_callback(fence, &sched_job->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &sched_job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		}
 		mutex_unlock(&sched->sched_lock);
 	}
 	return 0;
 }
 
 /**
- * ISR to handle EOP inetrrupts
- *
- * @sched: gpu scheduler
- *
- */
-void amd_sched_process_job(struct amd_sched_job *sched_job)
-{
-	unsigned long flags;
-	struct amd_gpu_scheduler *sched;
-
-	if (!sched_job)
-		return;
-	sched = sched_job->sched;
-	spin_lock_irqsave(&sched->queue_lock, flags);
-	list_del(&sched_job->list);
-	atomic64_dec(&sched->hw_rq_count);
-	spin_unlock_irqrestore(&sched->queue_lock, flags);
-
-	sched->ops->process_job(sched, sched_job->job);
-	kfree(sched_job);
-	wake_up_interruptible(&sched->wait_queue);
-}
-
-/**
  * Create a gpu scheduler
  *
  * @device	The device context for this scheduler
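
Two details in the hunk above are easy to miss: fence_add_callback() returns -ENOENT when the fence has already signaled, in which case no callback is installed and the scheduler calls amd_sched_process_job() directly; and the handler is now a static fence callback owned by the scheduler core rather than an exported entry point the backend had to invoke, which is why its declaration also disappears from gpu_scheduler.h in the final hunk.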
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index a3e29df957fc..e7cc40a6993b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -87,9 +87,9 @@ struct amd_sched_backend_ops {
 	int (*prepare_job)(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *c_entity,
 			   void *job);
-	void (*run_job)(struct amd_gpu_scheduler *sched,
-			struct amd_sched_entity *c_entity,
-			struct amd_sched_job *job);
+	struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
+				 struct amd_sched_entity *c_entity,
+				 struct amd_sched_job *job);
 	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
 };
 
@@ -132,7 +132,6 @@ int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 			bool intr,
 			long timeout);
 
-void amd_sched_process_job(struct amd_sched_job *sched_job);
 uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,