author		Andrey Grodzovsky <andrey.grodzovsky@amd.com>	2017-10-24 13:30:16 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-12-04 16:33:11 -0500
commit		a4176cb484ac457a08b44c93da06fce09c6e281c
tree		65a7ee568e4c895812bdee38d1675280c838d7f2
parent		83f4b1180155f2d65472ce943a1f051215030560
drm/amdgpu: Remove job->s_entity to avoid keeping reference to stale pointer.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
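As an aside, a minimal sketch of the calling convention this change introduces (illustrative only, not part of the patch: job, ring, ctx and owner stand in for the usual driver-side submission state) shows the entity being handed to the scheduler explicitly instead of being read back from the removed job->s_entity field:

	struct amd_sched_entity *entity = &ctx->rings[ring->idx].entity;
	int r;

	/* the entity is chosen by the caller ... */
	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	/* ... and travels as an explicit argument from here on */
	amd_sched_entity_push_job(&job->base, entity);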
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c           2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c          7
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h  9
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   19
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h    7
5 files changed, 22 insertions, 22 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 16947bad5b49..bf1aad00bb8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1203,7 +1203,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 
 	trace_amdgpu_cs_ioctl(job);
-	amd_sched_entity_push_job(&job->base);
+	amd_sched_entity_push_job(&job->base, entity);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a58e3c5dd84b..f60662e03761 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -142,12 +142,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
-	amd_sched_entity_push_job(&job->base);
+	amd_sched_entity_push_job(&job->base, entity);
 
 	return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+					       struct amd_sched_entity *s_entity)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
@@ -155,7 +156,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
 	int r;
 
-	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+	if (amd_sched_dependency_optimized(fence, s_entity)) {
 		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
 		if (r)
 			DRM_ERROR("Error adding fence to sync (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 705380eb693c..eebe323c7159 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -13,8 +13,8 @@
 #define TRACE_INCLUDE_FILE gpu_sched_trace
 
 TRACE_EVENT(amd_sched_job,
-	    TP_PROTO(struct amd_sched_job *sched_job),
-	    TP_ARGS(sched_job),
+	    TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
+	    TP_ARGS(sched_job, entity),
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
 			     __field(struct dma_fence *, fence)
@@ -25,12 +25,11 @@ TRACE_EVENT(amd_sched_job,
 			   ),
 
 	    TP_fast_assign(
-			   __entry->entity = sched_job->s_entity;
+			   __entry->entity = entity;
 			   __entry->id = sched_job->id;
 			   __entry->fence = &sched_job->s_fence->finished;
 			   __entry->name = sched_job->sched->name;
-			   __entry->job_count = spsc_queue_count(
-				   &sched_job->s_entity->job_queue);
+			   __entry->job_count = spsc_queue_count(&entity->job_queue);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 1a2267ce62a8..f116de798204 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -341,11 +341,10 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 	if (!sched_job)
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job)))
+	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
 		if (amd_sched_entity_add_dependency_cb(entity))
 			return NULL;
 
-	sched_job->s_entity = NULL;
 	spsc_queue_pop(&entity->job_queue);
 	return sched_job;
 }
@@ -357,13 +356,13 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+			       struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = sched_job->sched;
-	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool first = false;
 
-	trace_amd_sched_job(sched_job);
+	trace_amd_sched_job(sched_job, entity);
 
 	spin_lock(&entity->queue_lock);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -442,11 +441,12 @@ static void amd_sched_job_timedout(struct work_struct *work)
 	job->sched->ops->timedout_job(job);
 }
 
-static void amd_sched_set_guilty(struct amd_sched_job *s_job)
+static void amd_sched_set_guilty(struct amd_sched_job *s_job,
+				 struct amd_sched_entity *s_entity)
 {
 	if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit)
-		if (s_job->s_entity->guilty)
-			atomic_set(s_job->s_entity->guilty, 1);
+		if (s_entity->guilty)
+			atomic_set(s_entity->guilty, 1);
 }
 
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
@@ -477,7 +477,7 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
 				if (bad->s_fence->scheduled.context == entity->fence_context) {
 					found = true;
-					amd_sched_set_guilty(bad);
+					amd_sched_set_guilty(bad, entity);
 					break;
 				}
 			}
@@ -541,7 +541,6 @@ int amd_sched_job_init(struct amd_sched_job *job,
 		       void *owner)
 {
 	job->sched = sched;
-	job->s_entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f9e3a83cddc6..b590fcc2786a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -91,7 +91,6 @@ struct amd_sched_fence {
 struct amd_sched_job {
 	struct spsc_node		queue_node;
 	struct amd_gpu_scheduler	*sched;
-	struct amd_sched_entity		*s_entity;
 	struct amd_sched_fence		*s_fence;
 	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
@@ -125,7 +124,8 @@ static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int thr
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
-	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
+					struct amd_sched_entity *s_entity);
 	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
 	void (*free_job)(struct amd_sched_job *sched_job);
@@ -161,7 +161,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  uint32_t jobs, atomic_t* guilty);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+			       struct amd_sched_entity *entity);
 void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
 			     struct amd_sched_rq *rq);
 