aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2015-09-07 12:07:14 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-09-23 17:23:37 -0400
commit0f75aee75112934bcaf42410df5c51d7194b5896 (patch)
tree5c0cf18ae6527c7c55960bc18ce1e2d74567e375 /drivers
parenta6db8a33e164ae72fb5429ab637e8cfee057a722 (diff)
drm/amdgpu: cleanup entity init
Reorder the fields and properly return the kfifo_alloc error code. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c30
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h12
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c2
3 files changed, 25 insertions, 19 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 5c8dcf89297a..a9d582634d8e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -118,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
118 struct amd_sched_rq *rq, 118 struct amd_sched_rq *rq,
119 uint32_t jobs) 119 uint32_t jobs)
120{ 120{
121 int r;
122
121 if (!(sched && entity && rq)) 123 if (!(sched && entity && rq))
122 return -EINVAL; 124 return -EINVAL;
123 125
124 memset(entity, 0, sizeof(struct amd_sched_entity)); 126 memset(entity, 0, sizeof(struct amd_sched_entity));
125 entity->belongto_rq = rq; 127 INIT_LIST_HEAD(&entity->list);
126 entity->scheduler = sched; 128 entity->rq = rq;
127 entity->fence_context = fence_context_alloc(1); 129 entity->sched = sched;
128 if(kfifo_alloc(&entity->job_queue,
129 jobs * sizeof(void *),
130 GFP_KERNEL))
131 return -EINVAL;
132 130
133 spin_lock_init(&entity->queue_lock); 131 spin_lock_init(&entity->queue_lock);
132 r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
133 if (r)
134 return r;
135
134 atomic_set(&entity->fence_seq, 0); 136 atomic_set(&entity->fence_seq, 0);
137 entity->fence_context = fence_context_alloc(1);
135 138
136 /* Add the entity to the run queue */ 139 /* Add the entity to the run queue */
137 amd_sched_rq_add_entity(rq, entity); 140 amd_sched_rq_add_entity(rq, entity);
141
138 return 0; 142 return 0;
139} 143}
140 144
@@ -149,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
149static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, 153static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
150 struct amd_sched_entity *entity) 154 struct amd_sched_entity *entity)
151{ 155{
152 return entity->scheduler == sched && 156 return entity->sched == sched &&
153 entity->belongto_rq != NULL; 157 entity->rq != NULL;
154} 158}
155 159
156/** 160/**
@@ -180,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
180void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 184void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
181 struct amd_sched_entity *entity) 185 struct amd_sched_entity *entity)
182{ 186{
183 struct amd_sched_rq *rq = entity->belongto_rq; 187 struct amd_sched_rq *rq = entity->rq;
184 188
185 if (!amd_sched_entity_is_initialized(sched, entity)) 189 if (!amd_sched_entity_is_initialized(sched, entity))
186 return; 190 return;
@@ -201,13 +205,13 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
201 container_of(cb, struct amd_sched_entity, cb); 205 container_of(cb, struct amd_sched_entity, cb);
202 entity->dependency = NULL; 206 entity->dependency = NULL;
203 fence_put(f); 207 fence_put(f);
204 amd_sched_wakeup(entity->scheduler); 208 amd_sched_wakeup(entity->sched);
205} 209}
206 210
207static struct amd_sched_job * 211static struct amd_sched_job *
208amd_sched_entity_pop_job(struct amd_sched_entity *entity) 212amd_sched_entity_pop_job(struct amd_sched_entity *entity)
209{ 213{
210 struct amd_gpu_scheduler *sched = entity->scheduler; 214 struct amd_gpu_scheduler *sched = entity->sched;
211 struct amd_sched_job *sched_job; 215 struct amd_sched_job *sched_job;
212 216
213 if (ACCESS_ONCE(entity->dependency)) 217 if (ACCESS_ONCE(entity->dependency))
@@ -275,7 +279,7 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
275 fence_get(&fence->base); 279 fence_get(&fence->base);
276 sched_job->s_fence = fence; 280 sched_job->s_fence = fence;
277 281
278 wait_event(entity->scheduler->job_scheduled, 282 wait_event(entity->sched->job_scheduled,
279 amd_sched_entity_in(sched_job)); 283 amd_sched_entity_in(sched_job));
280 trace_amd_sched_job(sched_job); 284 trace_amd_sched_job(sched_job);
281 return 0; 285 return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f33df6c70ca0..c4fe24e2cb21 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
38*/ 38*/
39struct amd_sched_entity { 39struct amd_sched_entity {
40 struct list_head list; 40 struct list_head list;
41 struct amd_sched_rq *belongto_rq; 41 struct amd_sched_rq *rq;
42 atomic_t fence_seq; 42 struct amd_gpu_scheduler *sched;
43 /* the job_queue maintains the jobs submitted by clients */ 43
44 struct kfifo job_queue;
45 spinlock_t queue_lock; 44 spinlock_t queue_lock;
46 struct amd_gpu_scheduler *scheduler; 45 struct kfifo job_queue;
46
47 atomic_t fence_seq;
47 uint64_t fence_context; 48 uint64_t fence_context;
49
48 struct fence *dependency; 50 struct fence *dependency;
49 struct fence_cb cb; 51 struct fence_cb cb;
50}; 52};
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..733522f7a0ea 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38 fence->owner = owner; 38 fence->owner = owner;
39 fence->scheduler = s_entity->scheduler; 39 fence->scheduler = s_entity->sched;
40 spin_lock_init(&fence->lock); 40 spin_lock_init(&fence->lock);
41 41
42 seq = atomic_inc_return(&s_entity->fence_seq); 42 seq = atomic_inc_return(&s_entity->fence_seq);