diff options
author | Andrey Grodzovsky <andrey.grodzovsky@amd.com> | 2019-10-24 15:39:06 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2019-10-30 11:56:15 -0400 |
commit | 167bf96014a095753053595f3224fcdeb49ac3c8 (patch) | |
tree | 9bf1d783f3fb5bb24d0599a02685a1d46bcd3f38 | |
parent | 47661f6dad42e1241cdef82c5e06cfb7027a3f59 (diff) |
drm/sched: Set error to s_fence if HW job submission failed.
Problem:
When run_job fails and the HW fence returned is NULL, we still signal
the s_fence to avoid hangs, but the user has no way of knowing whether
the actual HW job was run and finished.
Fix:
Allow .run_job implementations to return an ERR_PTR in the fence pointer
returned, and then set this error on the s_fence->finished fence so whoever
waits on this fence can inspect the signaled fence for an error.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/scheduler/sched_main.c | 19 |
1 files changed, 16 insertions, 3 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 9a0ee74d82dc..f39b97ed4ade 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c | |||
@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) | |||
479 | struct drm_sched_job *s_job, *tmp; | 479 | struct drm_sched_job *s_job, *tmp; |
480 | uint64_t guilty_context; | 480 | uint64_t guilty_context; |
481 | bool found_guilty = false; | 481 | bool found_guilty = false; |
482 | struct dma_fence *fence; | ||
482 | 483 | ||
483 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { | 484 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { |
484 | struct drm_sched_fence *s_fence = s_job->s_fence; | 485 | struct drm_sched_fence *s_fence = s_job->s_fence; |
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) | |||
492 | dma_fence_set_error(&s_fence->finished, -ECANCELED); | 493 | dma_fence_set_error(&s_fence->finished, -ECANCELED); |
493 | 494 | ||
494 | dma_fence_put(s_job->s_fence->parent); | 495 | dma_fence_put(s_job->s_fence->parent); |
495 | s_job->s_fence->parent = sched->ops->run_job(s_job); | 496 | fence = sched->ops->run_job(s_job); |
497 | |||
498 | if (IS_ERR_OR_NULL(fence)) { | ||
499 | s_job->s_fence->parent = NULL; | ||
500 | dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); | ||
501 | } else { | ||
502 | s_job->s_fence->parent = fence; | ||
503 | } | ||
504 | |||
505 | |||
496 | } | 506 | } |
497 | } | 507 | } |
498 | EXPORT_SYMBOL(drm_sched_resubmit_jobs); | 508 | EXPORT_SYMBOL(drm_sched_resubmit_jobs); |
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param) | |||
720 | fence = sched->ops->run_job(sched_job); | 730 | fence = sched->ops->run_job(sched_job); |
721 | drm_sched_fence_scheduled(s_fence); | 731 | drm_sched_fence_scheduled(s_fence); |
722 | 732 | ||
723 | if (fence) { | 733 | if (!IS_ERR_OR_NULL(fence)) { |
724 | s_fence->parent = dma_fence_get(fence); | 734 | s_fence->parent = dma_fence_get(fence); |
725 | r = dma_fence_add_callback(fence, &sched_job->cb, | 735 | r = dma_fence_add_callback(fence, &sched_job->cb, |
726 | drm_sched_process_job); | 736 | drm_sched_process_job); |
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param) | |||
730 | DRM_ERROR("fence add callback failed (%d)\n", | 740 | DRM_ERROR("fence add callback failed (%d)\n", |
731 | r); | 741 | r); |
732 | dma_fence_put(fence); | 742 | dma_fence_put(fence); |
733 | } else | 743 | } else { |
744 | |||
745 | dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); | ||
734 | drm_sched_process_job(NULL, &sched_job->cb); | 746 | drm_sched_process_job(NULL, &sched_job->cb); |
747 | } | ||
735 | 748 | ||
736 | wake_up(&sched->job_scheduled); | 749 | wake_up(&sched->job_scheduled); |
737 | } | 750 | } |