Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/scheduler/gpu_scheduler.c	30
1 file changed, 17 insertions, 13 deletions
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1b733229201e..a70c7f7fd6fe 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -552,24 +552,28 @@ static void drm_sched_job_finish(struct work_struct *work)
 						   finish_work);
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
-	/* remove job from ring_mirror_list */
-	spin_lock(&sched->job_list_lock);
-	list_del_init(&s_job->node);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		struct drm_sched_job *next;
-
-		spin_unlock(&sched->job_list_lock);
-		cancel_delayed_work_sync(&s_job->work_tdr);
-		spin_lock(&sched->job_list_lock);
+	/*
+	 * Canceling the timeout without removing our job from the ring mirror
+	 * list is safe, as we will only end up in this worker if our jobs
+	 * finished fence has been signaled. So even if some another worker
+	 * manages to find this job as the next job in the list, the fence
+	 * signaled check below will prevent the timeout to be restarted.
+	 */
+	cancel_delayed_work_sync(&s_job->work_tdr);
 
-		/* queue TDR for next job */
-		next = list_first_entry_or_null(&sched->ring_mirror_list,
-						struct drm_sched_job, node);
+	spin_lock(&sched->job_list_lock);
+	/* queue TDR for next job */
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+		struct drm_sched_job *next = list_next_entry(s_job, node);
 
-		if (next)
+		if (!dma_fence_is_signaled(&next->s_fence->finished))
 			schedule_delayed_work(&next->work_tdr, sched->timeout);
 	}
+	/* remove job from ring_mirror_list */
+	list_del(&s_job->node);
 	spin_unlock(&sched->job_list_lock);
+
 	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
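
The hunk above reorders the finish path: the job's own timeout (TDR) work is cancelled first, which is safe without job_list_lock because this worker only runs after the job's finished fence has signalled; then, under the lock, the timeout is handed over to the immediately following job only if that job has not already signalled; and only then is the job unlinked from ring_mirror_list. The fragment below is a minimal, self-contained sketch of that hand-off pattern. The names (toy_sched, toy_job, toy_job_finish) and the flattened fence field are hypothetical stand-ins, not the scheduler's real types, so treat it as an illustration of the ordering rather than a copy of drm_sched_job_finish().

/*
 * Sketch only: hypothetical miniature of the per-job watchdog hand-off.
 * Each queued item owns a delayed work "watchdog"; when an item finishes,
 * its own watchdog is cancelled first, and the watchdog is re-armed for
 * the next item in the mirror list only if that item is still running.
 */
#include <linux/list.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/dma-fence.h>

struct toy_sched {
	spinlock_t		job_list_lock;
	struct list_head	ring_mirror_list;
	long			timeout;	/* jiffies, or MAX_SCHEDULE_TIMEOUT */
};

struct toy_job {
	struct list_head	node;		/* link in ring_mirror_list */
	struct delayed_work	work_tdr;	/* per-job timeout watchdog */
	struct dma_fence	*finished;	/* signalled when the job completes */
	struct toy_sched	*sched;
};

/* Called from the finish worker, i.e. only after job->finished has signalled. */
static void toy_job_finish(struct toy_job *job)
{
	struct toy_sched *sched = job->sched;

	/*
	 * Cancel our own watchdog before taking the lock.  Our fence has
	 * already signalled, so a concurrent worker that still finds us in
	 * the list is stopped by the signalled check below, not by list
	 * membership.
	 */
	cancel_delayed_work_sync(&job->work_tdr);

	spin_lock(&sched->job_list_lock);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_is_last(&job->node, &sched->ring_mirror_list)) {
		struct toy_job *next = list_next_entry(job, node);

		/* Hand the watchdog over only to a job that is still running. */
		if (!dma_fence_is_signaled(next->finished))
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	list_del(&job->node);
	spin_unlock(&sched->job_list_lock);

	/* fence put and job freeing omitted in this sketch */
}

The ordering choice mirrored here is the same one the patch's comment relies on: list_del() happens only after the hand-off decision, so another finish worker that still sees this job as "next" in the list is prevented from restarting the timeout by the dma_fence_is_signaled() check rather than by the job having been unlinked.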
