-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c |  2 +-
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  | 31 ++++++++++++++++---------------
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c         |  2 +-
-rw-r--r--  include/drm/gpu_scheduler.h             |  6 +++---
4 files changed, 21 insertions(+), 20 deletions(-)
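Taken together, the patch moves the timeout (TDR) delayed work from each drm_sched_job into its drm_gpu_scheduler. Jobs on ring_mirror_list run and complete in submission order, so one timer armed for the oldest in-flight job suffices; it is re-armed for the new head of the list whenever that job finishes. A reduced sketch of the ownership change, keeping only the fields this patch touches (the real structs carry many more members):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Reduced sketch -- only the fields this patch touches. */
struct drm_sched_job {
	struct list_head	node;	/* entry in ring_mirror_list */
	/* struct delayed_work	work_tdr;   removed by this patch */
};

struct drm_gpu_scheduler {
	struct list_head	ring_mirror_list; /* in-flight jobs, oldest first */
	spinlock_t		job_list_lock;
	struct delayed_work	work_tdr;	  /* one timer, armed for the oldest job */
	long			timeout;
};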
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 69e9b431bf1f..e7c3ed6c9a2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -105,7 +105,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	change = dma_addr - gpu->hangcheck_dma_addr;
 	if (change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
-		schedule_delayed_work(&sched_job->work_tdr,
+		schedule_delayed_work(&sched_job->sched->work_tdr,
 				      sched_job->sched->timeout);
 		return;
 	}
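etnaviv's timedout_job hook re-arms the timeout when the front end is still advancing, meaning the job is slow rather than hung; the only change here is that the timer being re-armed now belongs to the scheduler. v3d receives the same one-line conversion further down. A hedged sketch of the pattern, where gpu_still_making_progress() is a hypothetical placeholder for a driver's own hang check (etnaviv compares FE DMA addresses, as in the hunk above):

/* Hedged sketch of the driver-side pattern after this patch;
 * gpu_still_making_progress() is a hypothetical placeholder. */
static void example_timedout_job(struct drm_sched_job *sched_job)
{
	if (gpu_still_making_progress()) {
		/* Not actually hung: push the scheduler's single TDR
		 * timer out by another timeout interval and bail. */
		schedule_delayed_work(&sched_job->sched->work_tdr,
				      sched_job->sched->timeout);
		return;
	}
	/* ...otherwise perform a real GPU reset/recovery here... */
}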
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9ca741f3a0bc..4e8505d51795 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -197,19 +197,15 @@ static void drm_sched_job_finish(struct work_struct *work)
 	 * manages to find this job as the next job in the list, the fence
 	 * signaled check below will prevent the timeout to be restarted.
 	 */
-	cancel_delayed_work_sync(&s_job->work_tdr);
+	cancel_delayed_work_sync(&sched->work_tdr);
 
 	spin_lock(&sched->job_list_lock);
-	/* queue TDR for next job */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
-		struct drm_sched_job *next = list_next_entry(s_job, node);
-
-		if (!dma_fence_is_signaled(&next->s_fence->finished))
-			schedule_delayed_work(&next->work_tdr, sched->timeout);
-	}
 	/* remove job from ring_mirror_list */
 	list_del(&s_job->node);
+	/* queue TDR for next job */
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 	spin_unlock(&sched->job_list_lock);
 
 	dma_fence_put(&s_job->s_fence->finished);
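The reordering above is the core of the patch: drm_sched_job_finish() now cancels the scheduler's single timer, unlinks the finished job, and re-arms the timer if anything is still in flight, rather than hand-carrying the timeout to the next job's private timer (the old per-job path also had to check whether the next job's finished fence had already signaled). A comment-only sketch of the resulting lifecycle, with j0 and j1 as invented job names:

/*
 * Illustrative timer lifecycle with two jobs j0, j1 (names invented
 * for this sketch):
 *
 *   drm_sched_job_begin(j0)   - j0 becomes head of ring_mirror_list,
 *                               timer armed for sched->timeout
 *   drm_sched_job_begin(j1)   - j1 is not the head, timer untouched
 *   drm_sched_job_finish(j0)  - timer cancelled, j0 unlinked; list is
 *                               not empty, so the timer is re-armed
 *                               for the new head j1
 *   drm_sched_job_finish(j1)  - timer cancelled, list now empty,
 *                               no re-arm
 *   (timer fires at any point) - drm_sched_job_timedout() blames
 *                               whichever job is at the head
 */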
@@ -236,16 +232,21 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    list_first_entry_or_null(&sched->ring_mirror_list,
 				     struct drm_sched_job, node) == s_job)
-		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 	spin_unlock(&sched->job_list_lock);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
 {
-	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
-						 work_tdr.work);
+	struct drm_gpu_scheduler *sched;
+	struct drm_sched_job *job;
+
+	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+	job = list_first_entry_or_null(&sched->ring_mirror_list,
+				       struct drm_sched_job, node);
 
-	job->sched->ops->timedout_job(job);
+	if (job)
+		job->sched->ops->timedout_job(job);
 }
 
 /**
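One subtlety in the new drm_sched_job_timedout(): the job that gets blamed is simply the current head of ring_mirror_list, and the NULL check covers the window where the last in-flight job finished between the timer firing and the handler running. The single container_of() also reaches through two embeddings at once; an equivalent two-step spelling, using the kernel's to_delayed_work() helper, may make that clearer:

/* Equivalent two-step spelling of the container_of() above: the
 * workqueue core hands the handler the inner work_struct, which is
 * embedded in a delayed_work, which is in turn embedded in the
 * scheduler. */
struct delayed_work *dwork = to_delayed_work(work);
struct drm_gpu_scheduler *sched =
	container_of(dwork, struct drm_gpu_scheduler, work_tdr);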
@@ -315,7 +316,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct drm_sched_job, node);
 	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -384,7 +385,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
 
 	INIT_WORK(&job->finish_work, drm_sched_job_finish);
 	INIT_LIST_HEAD(&job->node);
-	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
 
 	return 0;
 }
@@ -575,6 +575,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
 	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index a5501581d96b..9243dea6e6ad 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -168,7 +168,7 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 		job->timedout_ctca = ctca;
 		job->timedout_ctra = ctra;
 
-		schedule_delayed_work(&job->base.work_tdr,
+		schedule_delayed_work(&job->base.sched->work_tdr,
 				      job->base.sched->timeout);
 		return;
 	}
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index daec50f887b3..d87b268f1781 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -175,8 +175,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  *               finished to remove the job from the
  *               @drm_gpu_scheduler.ring_mirror_list.
  * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
- * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
- *            interval is over.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
@@ -195,7 +193,6 @@ struct drm_sched_job {
 	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
-	struct delayed_work		work_tdr;
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
@@ -259,6 +256,8 @@ struct drm_sched_backend_ops {
  *                 finished.
  * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign unique id to the each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ *            timeout interval is over.
  * @thread: the kthread on which the scheduler which run.
  * @ring_mirror_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the ring_mirror_list.
@@ -278,6 +277,7 @@ struct drm_gpu_scheduler {
 	wait_queue_head_t		job_scheduled;
 	atomic_t			hw_rq_count;
 	atomic64_t			job_id_count;
+	struct delayed_work		work_tdr;
 	struct task_struct		*thread;
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;