author     Dave Airlie <airlied@redhat.com>    2018-10-17 22:05:08 -0400
committer  Dave Airlie <airlied@redhat.com>    2018-10-17 22:05:11 -0400
commit     c13bbf4a78aafed144de0250a3c71265672c9bda (patch)
tree       54edb601a81ce803d42e1fe55c93b6a7f7f42741 /drivers/gpu/drm/scheduler/sched_main.c
parent     28b32b9f61fe73e7625ed30c35afd6d8a0ed2b6e (diff)
parent     8e16695b4eb819881774b8c06eb164dc1fb74275 (diff)
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
Fixes for 4.20. Highlights:
- VCN DPG fixes for Picasso
- Add support for the latest vega20 vbios
- Scheduler timeout fix
- License fixes for radeon and amdgpu
- Misc other fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181017215427.2804-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')

-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 59
1 file changed, 46 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 4e8505d51795..44fe587aaef9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 }
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
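
The helper added above centralizes the check that previously guarded each schedule_delayed_work() call: the timeout worker (work_tdr) is armed only when the scheduler has a finite timeout configured and the mirror list still holds unfinished jobs. The hunks below switch both existing call sites over to it. As a rough standalone illustration of that guard, here is a plain userspace C model; the names (model_sched, model_arm_tdr, MODEL_MAX_SCHEDULE_TIMEOUT) are invented for this sketch and are not kernel API:

#include <limits.h>
#include <stdio.h>

#define MODEL_MAX_SCHEDULE_TIMEOUT LONG_MAX   /* stand-in for MAX_SCHEDULE_TIMEOUT */

struct model_sched {
        long timeout;          /* timeout in jiffies; LONG_MAX means "never time out" */
        int  jobs_in_flight;   /* stands in for !list_empty(&ring_mirror_list) */
};

static void model_arm_tdr(struct model_sched *s)
{
        /* Same two-part guard as drm_sched_start_timeout(). */
        if (s->timeout != MODEL_MAX_SCHEDULE_TIMEOUT && s->jobs_in_flight > 0)
                printf("arming timeout worker for %ld jiffies\n", s->timeout);
        else
                printf("timeout worker left idle\n");
}

int main(void)
{
        struct model_sched idle = { .timeout = 500, .jobs_in_flight = 0 };
        struct model_sched busy = { .timeout = 500, .jobs_in_flight = 2 };

        model_arm_tdr(&idle);  /* nothing queued: timer stays off */
        model_arm_tdr(&busy);  /* jobs pending: timer armed */
        return 0;
}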
@@ -203,9 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
 	/* remove job from ring_mirror_list */
 	list_del(&s_job->node);
 	/* queue TDR for next job */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
 	dma_fence_put(&s_job->s_fence->finished);
@@ -229,10 +241,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    list_first_entry_or_null(&sched->ring_mirror_list,
-				     struct drm_sched_job, node) == s_job)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
@@ -240,13 +249,41 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
+	int r;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
+			goto already_signaled;
+	}
+
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
+	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		job->sched->ops->timedout_job(job);
+		sched->ops->timedout_job(job);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!fence->parent || !list_empty(&fence->cb.node))
+			continue;
+
+		r = dma_fence_add_callback(fence->parent, &fence->cb,
+					   drm_sched_process_job);
+		if (r)
+			drm_sched_process_job(fence->parent, &fence->cb);
+
+already_signaled:
+		;
+	}
+	spin_unlock(&sched->job_list_lock);
 }
 
 /**
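
A note on the reworked drm_sched_job_timedout() above: before calling the driver's timedout_job() hook it walks the mirror list in reverse and detaches each job's hardware-fence callback, and afterwards it re-attaches a callback for every job whose fence has not signaled; jobs that completed in the meantime are skipped via the already_signaled label. Below is a simplified, self-contained C model of that detach / handle-timeout / re-attach flow. All types and functions (model_fence, model_remove_callback, and so on) are invented for illustration, and locking is omitted:

#include <stdbool.h>
#include <stdio.h>

struct model_fence {
        bool signaled;      /* hw fence already completed */
        bool cb_installed;  /* completion callback currently attached */
};

/* Rough analogue of dma_fence_remove_callback(): returns false when the
 * fence has already signaled and the callback has already run. */
static bool model_remove_callback(struct model_fence *f)
{
        if (f->signaled)
                return false;
        f->cb_installed = false;
        return true;
}

static void model_timedout_handler(struct model_fence *ring, int n)
{
        int i;

        /* Detach callbacks, newest job first, stopping at the first fence
         * that already signaled (the kernel code jumps past it instead). */
        for (i = n - 1; i >= 0; i--)
                if (!model_remove_callback(&ring[i]))
                        break;

        printf("driver timedout_job() / reset would run here\n");

        /* Re-attach a callback for every job that is still pending. */
        for (i = 0; i < n; i++) {
                if (ring[i].signaled || ring[i].cb_installed)
                        continue;
                ring[i].cb_installed = true;
                printf("re-armed callback for job %d\n", i);
        }
}

int main(void)
{
        struct model_fence ring[3] = {
                { .signaled = true  },                        /* finished in time */
                { .signaled = false, .cb_installed = true },
                { .signaled = false, .cb_installed = true },  /* hung job */
        };

        model_timedout_handler(ring, 3);
        return 0;
}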
@@ -313,11 +350,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 	int r;
 
 	spin_lock(&sched->job_list_lock);
-	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-					 struct drm_sched_job, node);
-	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
-
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
@@ -350,6 +382,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		}
 		spin_lock(&sched->job_list_lock);
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
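
The two drm_sched_job_recovery() hunks move the timer re-arm from before the resubmission loop to a single drm_sched_start_timeout() call after it, so the timeout is started only once all pending jobs have been pushed back to the hardware. A minimal sketch of that ordering, with printf stand-ins rather than kernel calls:

#include <stdio.h>

static void resubmit_job(int idx)
{
        printf("resubmitting job %d\n", idx);   /* hardware submit stand-in */
}

static void arm_timeout_if_needed(int pending)
{
        if (pending > 0)
                printf("timeout worker armed for %d pending job(s)\n", pending);
}

int main(void)
{
        int pending = 2;

        for (int i = 0; i < pending; i++)
                resubmit_job(i);                /* requeue every job first */

        arm_timeout_if_needed(pending);         /* previously done before the loop */
        return 0;
}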