Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c          54
-rw-r--r--   drivers/gpu/drm/i915/intel_lrc.c         11
-rw-r--r--   drivers/gpu/drm/i915/intel_ringbuffer.c  13
3 files changed, 30 insertions, 48 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f75c980..834240a9b262 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3309,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
-	dma_fence_set_error(&request->fence, -EIO);
-
-	i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
 	unsigned long flags;
 
 	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3354,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		i915_gem_reset_prepare_engine(engine);
 
-		engine->submit_request = nop_submit_request;
-		engine->schedule = NULL;
-	}
-	i915->caps.scheduler = 0;
-
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (INTEL_GEN(i915) >= 5)
 		intel_gpu_reset(i915, ALL_ENGINES);
 
-	/*
-	 * Make sure no one is running the old callback before we proceed with
-	 * cancelling requests and resetting the completion tracking. Otherwise
-	 * we might submit a request to the hardware which never completes.
-	 */
-	synchronize_rcu();
-
 	for_each_engine(engine, i915, id) {
-		/* Mark all executing requests as skipped */
-		engine->cancel_requests(engine);
-
-		/*
-		 * Only once we've force-cancelled all in-flight requests can we
-		 * start to complete all requests.
-		 */
-		engine->submit_request = nop_complete_submit_request;
+		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
-	 * in nop_complete_submit_request.
+	 * in nop_submit_request.
 	 */
 	synchronize_rcu();
 
-	for_each_engine(engine, i915, id) {
-		unsigned long flags;
-
-		/*
-		 * Mark all pending requests as complete so that any concurrent
-		 * (lockless) lookup doesn't try and wait upon the request as we
-		 * reset it.
-		 */
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		intel_engine_init_global_seqno(engine,
-					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	/* Mark all executing requests as skipped */
+	for_each_engine(engine, i915, id)
+		engine->cancel_requests(engine);
 
+	for_each_engine(engine, i915, id) {
 		i915_gem_reset_finish_engine(engine);
+		intel_engine_wakeup(engine);
 	}
 
 out:
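Net effect in i915_gem_set_wedged(): the two-stage handover (nop_submit_request, then nop_complete_submit_request after an RCU barrier) collapses into a single nop handler installed once the engines are stopped, so one synchronize_rcu() suffices before the in-flight requests are cancelled. A sketch of the surviving handler follows; the tail after the hunk's visible context is an assumption, carried over from the removed nop_complete_submit_request rather than shown in the diff:

static void nop_submit_request(struct i915_request *request)
{
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	/*
	 * Inferred tail (not shown in the hunk): complete the request
	 * immediately under the timeline lock so that concurrent
	 * (lockless) waiters observe the advanced global seqno.
	 */
	spin_lock_irqsave(&request->engine->timeline.lock, flags);
	__i915_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}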
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1f004683b777..87d42a2b9400 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -820,8 +820,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
-		if (!i915_request_completed(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+			continue;
+
+		dma_fence_set_error(&rq->fence, -EIO);
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -841,6 +844,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		kmem_cache_free(engine->i915->priorities, p);
 	}
 
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority = INT_MIN;
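The loop guard changes from i915_request_completed() to testing DMA_FENCE_FLAG_SIGNALED_BIT directly: dma_fence_set_error() BUG_ONs when called on a fence that has already signalled, and that flag, not the hardware seqno, is the condition that matters here. The status page is then pushed forward to the last submitted seqno so every cancelled request reads as complete. A minimal sketch of the guard pattern (the helper name is illustrative, not from the patch):

#include <linux/dma-fence.h>

/*
 * Illustrative helper: dma_fence_set_error() must not be called on a
 * fence that has already signalled (it BUG_ONs on the signalled bit),
 * so test the bit first -- the same guard the new loop bodies use.
 */
static void mark_fence_eio(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return;

	dma_fence_set_error(fence, -EIO);
}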
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d81eaf5f6b3e..81b10d85b738 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -755,9 +755,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
-		if (!i915_request_completed(request))
-			dma_fence_set_error(&request->fence, -EIO);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			     &request->fence.flags))
+			continue;
+
+		dma_fence_set_error(&request->fence, -EIO);
 	}
+
+	intel_write_status_page(engine,
+				I915_GEM_HWS_INDEX,
+				intel_engine_last_submit(engine));
+
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
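Both backends now advance the hardware status page themselves instead of relying on the for_each_engine() loop that this patch removes from i915_gem_set_wedged(). Once I915_GEM_HWS_INDEX holds intel_engine_last_submit(engine), the ordinary breadcrumb comparison reports every cancelled request as complete. A sketch of that check, assuming the helpers of this kernel vintage (intel_engine_get_seqno() reading the status page, i915_seqno_passed() doing the wrap-safe u32 comparison); the function itself is illustrative, not part of the patch:

/* Assumes the i915_request.h / intel_ringbuffer.h of this kernel era. */
static bool wedged_request_reads_complete(struct intel_engine_cs *engine,
					  const struct i915_request *rq)
{
	/*
	 * intel_engine_get_seqno() reads I915_GEM_HWS_INDEX from the
	 * status page; after cancel_requests() it holds the last
	 * submitted seqno, so this passes for every cancelled request.
	 */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 rq->global_seqno);
}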
