path: root/drivers/gpu/drm/scheduler/sched_main.c
author	Christian König <christian.koenig@amd.com>	2018-11-22 05:57:15 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2018-11-28 15:49:58 -0500
commit	68c12d24ce26ae7cabc671230a4e390e902005c1 (patch)
tree	af5f8451ea282a9ed0e7daae38c6f6282f902e9c /drivers/gpu/drm/scheduler/sched_main.c
parent	3198ec5dfc7207b7977c8258f7d07fe423d7dfcd (diff)
drm/sched: revert "fix timeout handling v2" v2
This reverts commit 0efd2d2f68cd5dbddf4ecd974c33133257d16a8e.

It's still causing problems for V3D.

v2: keep rearming the timeout.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
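The "keep rearming the timeout" note in v2 refers to the tail of drm_sched_job_timedout(): as the hunk below shows, the revert drops the callback remove/re-add dance but still re-arms the TDR timer under job_list_lock after the driver's timedout_job callback has run. A minimal sketch of that kept path, taken from the unchanged context lines of the diff below:

	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);	/* re-arm work_tdr for the next pending job */
	spin_unlock(&sched->job_list_lock);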
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--	drivers/gpu/drm/scheduler/sched_main.c	27
1 file changed, 1 insertion(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 6fedf9544bbf..9d4cd196037a 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -263,40 +263,15 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
-	int r;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
-	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
-			goto already_signaled;
-	}
-
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
-	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		sched->ops->timedout_job(job);
+		job->sched->ops->timedout_job(job);
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(job, &sched->ring_mirror_list, node) {
-		struct drm_sched_fence *fence = job->s_fence;
-
-		if (!fence->parent || !list_empty(&fence->cb.node))
-			continue;
-
-		r = dma_fence_add_callback(fence->parent, &fence->cb,
-					   drm_sched_process_job);
-		if (r)
-			drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
-		;
-	}
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
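For reference, this is how the timeout handler reads once the revert is applied, reconstructed from the right-hand (new) side of the hunk above; the signature comes from the @@ context line and the body follows directly from the kept and added lines:

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Pick the first job on the mirror list and let the driver handle the hang. */
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job)
		job->sched->ops->timedout_job(job);

	/* Re-arm the timeout for the remaining jobs. */
	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}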