author    Nayan Deshmukh <nayan26deshmukh@gmail.com>  2018-07-20 08:21:06 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2018-07-25 16:06:26 -0400
commit    068c330419ffb3422a43cb7d34351f1ef033950f
tree      06fa3e3343bb92371a910769074f137576a7ca67
parent    cdc50176597cb44ce25eb7331c450058775b8d2a
drm/scheduler: remove sched field from the entity
The scheduler of an entity is decided by the run queue on which it is
queued. Dropping the cached sched field saves us the effort of keeping
rq and sched in sync when we start shifting entities among different
rqs.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
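The shape of the change is visible in include/drm/gpu_scheduler.h below: the
entity keeps only its rq pointer, and every user derives the scheduler via
entity->rq->sched. A minimal sketch of the resulting access pattern, with
simplified struct layouts and a hypothetical entity_sched() helper for
illustration only (the patch itself open-codes entity->rq->sched at each
call site):

/* Simplified sketch; the real definitions live in include/drm/gpu_scheduler.h. */
struct drm_gpu_scheduler;

struct drm_sched_rq {
        struct drm_gpu_scheduler *sched;  /* scheduler that owns this run queue */
        /* ... entity list, lock, ... */
};

struct drm_sched_entity {
        struct drm_sched_rq *rq;          /* run queue the entity is queued on */
        /* struct drm_gpu_scheduler *sched;   removed by this patch */
        /* ... job queue, fences, ... */
};

/*
 * Before: two fields had to be kept in sync whenever an entity moved:
 *         entity->rq    = rq;
 *         entity->sched = rq->sched;
 * After: the scheduler is reached through the run queue alone, so moving
 * an entity to another rq updates a single pointer.
 *
 * Hypothetical helper, not part of the patch:
 */
static inline struct drm_gpu_scheduler *
entity_sched(struct drm_sched_entity *entity)
{
        return entity->rq->sched;
}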
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  6 +++---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 19 +++++++++----------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 include/drm/gpu_scheduler.h               |  2 --
 6 files changed, 16 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4d4575b3bba7..178d9ce4eba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 631481a730e0..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 74b4a28a41d6..5d7d7900ccab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		ats_entries = 0;
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
@@ -1113,7 +1113,7 @@ restart:
 		struct amdgpu_ring *ring;
 		struct dma_fence *fence;
 
-		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
 				    sched);
 
 		amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 					   addr, flags);
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	nptes = last - start + 1;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3b55c542025..3f2fc5e8242a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq_list[0];
-	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
 
@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
 static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 					    struct drm_sched_entity *entity)
 {
-	return entity->sched == sched &&
-	       entity->rq != NULL;
+	return entity->rq != NULL &&
+	       entity->rq->sched == sched;
 }
 
 /**
@@ -278,7 +277,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
-	sched = entity->sched;
+	sched = entity->rq->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -317,7 +316,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
 	struct drm_gpu_scheduler *sched;
 
-	sched = entity->sched;
+	sched = entity->rq->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -388,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
 		container_of(cb, struct drm_sched_entity, cb);
 	entity->dependency = NULL;
 	dma_fence_put(f);
-	drm_sched_wakeup(entity->sched);
+	drm_sched_wakeup(entity->rq->sched);
 }
 
 static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_fence *s_fence;
 
 	if (!fence || dma_fence_is_signaled(fence))
@@ -455,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct dma_fence * fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
@@ -500,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 static struct drm_sched_job *
 drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_job *sched_job = to_drm_sched_job(
 				spsc_queue_peek(&entity->job_queue));
 
@@ -744,7 +743,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 
 	job->sched = sched;
 	job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 45d9c3affbea..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = entity->sched;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 728346abcc81..091b9afcd184 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,7 +52,6 @@ enum drm_sched_priority {
  *        runqueue.
  * @rq: runqueue to which this entity belongs.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @sched: the scheduler instance to which this entity is enqueued.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
  *             new &drm_sched_fence which is part of the entity.
@@ -76,7 +75,6 @@ struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
 	spinlock_t			rq_lock;
-	struct drm_gpu_scheduler	*sched;
 
 	struct spsc_queue		job_queue;
 