aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/scheduler
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2016-05-18 03:43:07 -0400
committerAlex Deucher <alexander.deucher@amd.com>2016-07-07 14:50:50 -0400
commit16a7133f35b310838ba934b6a29f8af73a88f093 (patch)
treebe870c21471f278dc0409b0260fc6304bbc962d2 /drivers/gpu/drm/amd/scheduler
parent3cc259112d3cd2da9c1f7418582ebd60f2407d13 (diff)
drm/amdgpu: fix coding style in the scheduler v2
v2: fix even more
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c25
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h21
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c9
3 files changed, 32 insertions, 23 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee779..f5ac01db287b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -320,7 +320,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
320} 320}
321 321
322static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) { 322static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
323 struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job); 323 struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
324 cb_free_job);
325
324 schedule_work(&job->work_free_job); 326 schedule_work(&job->work_free_job);
325} 327}
326 328
@@ -341,7 +343,8 @@ void amd_sched_job_finish(struct amd_sched_job *s_job)
341 struct amd_sched_job, node); 343 struct amd_sched_job, node);
342 344
343 if (next) { 345 if (next) {
344 INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback); 346 INIT_DELAYED_WORK(&next->work_tdr,
347 s_job->timeout_callback);
345 amd_sched_job_get(next); 348 amd_sched_job_get(next);
346 schedule_delayed_work(&next->work_tdr, sched->timeout); 349 schedule_delayed_work(&next->work_tdr, sched->timeout);
347 } 350 }
@@ -353,7 +356,8 @@ void amd_sched_job_begin(struct amd_sched_job *s_job)
353 struct amd_gpu_scheduler *sched = s_job->sched; 356 struct amd_gpu_scheduler *sched = s_job->sched;
354 357
355 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && 358 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
356 list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) 359 list_first_entry_or_null(&sched->ring_mirror_list,
360 struct amd_sched_job, node) == s_job)
357 { 361 {
358 INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); 362 INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
359 amd_sched_job_get(s_job); 363 amd_sched_job_get(s_job);
@@ -374,7 +378,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
374 378
375 sched_job->use_sched = 1; 379 sched_job->use_sched = 1;
376 fence_add_callback(&sched_job->s_fence->base, 380 fence_add_callback(&sched_job->s_fence->base,
377 &sched_job->cb_free_job, amd_sched_free_job); 381 &sched_job->cb_free_job, amd_sched_free_job);
378 trace_amd_sched_job(sched_job); 382 trace_amd_sched_job(sched_job);
379 wait_event(entity->sched->job_scheduled, 383 wait_event(entity->sched->job_scheduled,
380 amd_sched_entity_in(sched_job)); 384 amd_sched_entity_in(sched_job));
@@ -382,11 +386,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
382 386
383/* init a sched_job with basic field */ 387/* init a sched_job with basic field */
384int amd_sched_job_init(struct amd_sched_job *job, 388int amd_sched_job_init(struct amd_sched_job *job,
385 struct amd_gpu_scheduler *sched, 389 struct amd_gpu_scheduler *sched,
386 struct amd_sched_entity *entity, 390 struct amd_sched_entity *entity,
387 void (*timeout_cb)(struct work_struct *work), 391 void (*timeout_cb)(struct work_struct *work),
388 void (*free_cb)(struct kref *refcount), 392 void (*free_cb)(struct kref *refcount),
389 void *owner, struct fence **fence) 393 void *owner, struct fence **fence)
390{ 394{
391 INIT_LIST_HEAD(&job->node); 395 INIT_LIST_HEAD(&job->node);
392 kref_init(&job->refcount); 396 kref_init(&job->refcount);
@@ -504,7 +508,8 @@ static int amd_sched_main(void *param)
504 if (r == -ENOENT) 508 if (r == -ENOENT)
505 amd_sched_process_job(fence, &s_fence->cb); 509 amd_sched_process_job(fence, &s_fence->cb);
506 else if (r) 510 else if (r)
507 DRM_ERROR("fence add callback failed (%d)\n", r); 511 DRM_ERROR("fence add callback failed (%d)\n",
512 r);
508 fence_put(fence); 513 fence_put(fence);
509 } else { 514 } else {
510 DRM_ERROR("Failed to run job!\n"); 515 DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a9433c..690ae4b0c673 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -94,7 +94,8 @@ struct amd_sched_job {
94extern const struct fence_ops amd_sched_fence_ops; 94extern const struct fence_ops amd_sched_fence_ops;
95static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) 95static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
96{ 96{
97 struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base); 97 struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
98 base);
98 99
99 if (__f->base.ops == &amd_sched_fence_ops) 100 if (__f->base.ops == &amd_sched_fence_ops)
100 return __f; 101 return __f;
@@ -154,21 +155,23 @@ struct amd_sched_fence *amd_sched_fence_create(
154void amd_sched_fence_scheduled(struct amd_sched_fence *fence); 155void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
155void amd_sched_fence_signal(struct amd_sched_fence *fence); 156void amd_sched_fence_signal(struct amd_sched_fence *fence);
156int amd_sched_job_init(struct amd_sched_job *job, 157int amd_sched_job_init(struct amd_sched_job *job,
157 struct amd_gpu_scheduler *sched, 158 struct amd_gpu_scheduler *sched,
158 struct amd_sched_entity *entity, 159 struct amd_sched_entity *entity,
159 void (*timeout_cb)(struct work_struct *work), 160 void (*timeout_cb)(struct work_struct *work),
160 void (*free_cb)(struct kref* refcount), 161 void (*free_cb)(struct kref* refcount),
161 void *owner, struct fence **fence); 162 void *owner, struct fence **fence);
162void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , 163void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
163 struct amd_sched_job *s_job); 164 struct amd_sched_job *s_job);
164void amd_sched_job_finish(struct amd_sched_job *s_job); 165void amd_sched_job_finish(struct amd_sched_job *s_job);
165void amd_sched_job_begin(struct amd_sched_job *s_job); 166void amd_sched_job_begin(struct amd_sched_job *s_job);
166static inline void amd_sched_job_get(struct amd_sched_job *job) { 167static inline void amd_sched_job_get(struct amd_sched_job *job)
168{
167 if (job) 169 if (job)
168 kref_get(&job->refcount); 170 kref_get(&job->refcount);
169} 171}
170 172
171static inline void amd_sched_job_put(struct amd_sched_job *job) { 173static inline void amd_sched_job_put(struct amd_sched_job *job)
174{
172 if (job) 175 if (job)
173 kref_put(&job->refcount, job->free_callback); 176 kref_put(&job->refcount, job->free_callback);
174} 177}
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c490375..6bdc9b7169d2 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner) 30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
31 void *owner)
31{ 32{
32 struct amd_sched_fence *fence = NULL; 33 struct amd_sched_fence *fence = NULL;
33 unsigned seq; 34 unsigned seq;
@@ -38,12 +39,12 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
38 39
39 INIT_LIST_HEAD(&fence->scheduled_cb); 40 INIT_LIST_HEAD(&fence->scheduled_cb);
40 fence->owner = owner; 41 fence->owner = owner;
41 fence->sched = s_entity->sched; 42 fence->sched = entity->sched;
42 spin_lock_init(&fence->lock); 43 spin_lock_init(&fence->lock);
43 44
44 seq = atomic_inc_return(&s_entity->fence_seq); 45 seq = atomic_inc_return(&entity->fence_seq);
45 fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock, 46 fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
46 s_entity->fence_context, seq); 47 entity->fence_context, seq);
47 48
48 return fence; 49 return fence;
49} 50}