about summary refs log tree commit diff stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2015-08-05 12:18:52 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-17 16:51:04 -0400
commit4cd7f42cf8f57512b13a13bb7dcbeabb644f5264 (patch)
tree20bf8d5014f21c30a304c4ebf6437df4df3423b6 /drivers/gpu/drm
parentddf94d33d6434199be08f8965f63d408e2787539 (diff)
drm/amdgpu: fix coding style in a couple of places
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c17
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f72a8583b1a9..d26688ddaa20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -528,15 +528,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
528 amdgpu_cs_parser_fini_late(parser); 528 amdgpu_cs_parser_fini_late(parser);
529} 529}
530 530
531static int amdgpu_cs_parser_run_job( 531static int amdgpu_cs_parser_run_job(struct amdgpu_cs_parser *sched_job)
532 struct amdgpu_cs_parser *sched_job)
533{ 532{
534 amdgpu_cs_parser_fini_early(sched_job, 0, true); 533 amdgpu_cs_parser_fini_early(sched_job, 0, true);
535 return 0; 534 return 0;
536} 535}
537 536
538static int amdgpu_cs_parser_free_job( 537static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
539 struct amdgpu_cs_parser *sched_job)
540{ 538{
541 amdgpu_cs_parser_fini_late(sched_job); 539 amdgpu_cs_parser_fini_late(sched_job);
542 return 0; 540 return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 6f0d40b13a23..1f78ad60224a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -64,6 +64,7 @@ static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
64{ 64{
65 struct amd_sched_entity *p = rq->current_entity; 65 struct amd_sched_entity *p = rq->current_entity;
66 int i = atomic_read(&rq->nr_entity) + 1; /*real count + dummy head*/ 66 int i = atomic_read(&rq->nr_entity) + 1; /*real count + dummy head*/
67
67 while (i) { 68 while (i) {
68 p = list_entry(p->list.next, typeof(*p), list); 69 p = list_entry(p->list.next, typeof(*p), list);
69 if (!rq->check_entity_status(p)) { 70 if (!rq->check_entity_status(p)) {
@@ -83,7 +84,7 @@ static bool context_entity_is_waiting(struct amd_context_entity *entity)
83 84
84static int gpu_entity_check_status(struct amd_sched_entity *entity) 85static int gpu_entity_check_status(struct amd_sched_entity *entity)
85{ 86{
86 struct amd_context_entity *tmp = NULL; 87 struct amd_context_entity *tmp;
87 88
88 if (entity == &entity->belongto_rq->head) 89 if (entity == &entity->belongto_rq->head)
89 return -1; 90 return -1;
@@ -109,6 +110,7 @@ static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
109{ 110{
110 unsigned long flags; 111 unsigned long flags;
111 bool full; 112 bool full;
113
112 spin_lock_irqsave(&sched->queue_lock, flags); 114 spin_lock_irqsave(&sched->queue_lock, flags);
113 full = atomic64_read(&sched->hw_rq_count) < 115 full = atomic64_read(&sched->hw_rq_count) <
114 sched->hw_submission_limit ? true : false; 116 sched->hw_submission_limit ? true : false;
@@ -121,10 +123,10 @@ static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
121 * Select next entity from the kernel run queue, if not available, 123 * Select next entity from the kernel run queue, if not available,
122 * return null. 124 * return null.
123*/ 125*/
124static struct amd_context_entity *kernel_rq_select_context( 126static struct amd_context_entity *
125 struct amd_gpu_scheduler *sched) 127kernel_rq_select_context(struct amd_gpu_scheduler *sched)
126{ 128{
127 struct amd_sched_entity *sched_entity = NULL; 129 struct amd_sched_entity *sched_entity;
128 struct amd_context_entity *tmp = NULL; 130 struct amd_context_entity *tmp = NULL;
129 struct amd_run_queue *rq = &sched->kernel_rq; 131 struct amd_run_queue *rq = &sched->kernel_rq;
130 132
@@ -141,8 +143,8 @@ static struct amd_context_entity *kernel_rq_select_context(
141/** 143/**
142 * Select next entity containing real IB submissions 144 * Select next entity containing real IB submissions
143*/ 145*/
144static struct amd_context_entity *select_context( 146static struct amd_context_entity *
145 struct amd_gpu_scheduler *sched) 147select_context(struct amd_gpu_scheduler *sched)
146{ 148{
147 struct amd_context_entity *wake_entity = NULL; 149 struct amd_context_entity *wake_entity = NULL;
148 struct amd_context_entity *tmp; 150 struct amd_context_entity *tmp;
@@ -413,6 +415,7 @@ void amd_sched_process_job(struct amd_sched_job *sched_job)
413{ 415{
414 unsigned long flags; 416 unsigned long flags;
415 struct amd_gpu_scheduler *sched; 417 struct amd_gpu_scheduler *sched;
418
416 if (!sched_job) 419 if (!sched_job)
417 return; 420 return;
418 sched = sched_job->sched; 421 sched = sched_job->sched;
@@ -445,7 +448,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
445 unsigned hw_submission) 448 unsigned hw_submission)
446{ 449{
447 struct amd_gpu_scheduler *sched; 450 struct amd_gpu_scheduler *sched;
448 char name[20] = "gpu_sched[0]"; 451 char name[20];
449 452
450 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL); 453 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
451 if (!sched) 454 if (!sched)