Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd2263d..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);

+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}

-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}

 	dma_fence_put(excl);

-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
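
The new comment in this hunk encodes an ordering invariant: shared fences are added after the exclusive fence, so a successful wait on every shared fence implies the exclusive fence has signaled, and the whole array can be pruned. The following is a minimal user-space sketch of that flow; model_fence and wait_all are hypothetical stand-ins, not the kernel's dma-fence API:

#include <stdbool.h>
#include <stdio.h>

struct model_fence {
	bool signaled;	/* stand-in for dma_fence_is_signaled() state */
};

/*
 * Stand-in for the shared-fence wait loop: returns the remaining budget
 * (>= 0) if every shared fence has signaled, or a negative value to
 * model a timeout.
 */
static long wait_all(struct model_fence **shared, unsigned int count,
		     long timeout)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (!shared[i]->signaled)
			return -1;
	return timeout;
}

int main(void)
{
	struct model_fence excl = { .signaled = true };
	struct model_fence s0 = { .signaled = true };
	struct model_fence s1 = { .signaled = true };
	struct model_fence *shared[] = { &s0, &s1 };
	unsigned int count = 2;
	long timeout = 10;
	bool prune_fences;

	timeout = wait_all(shared, count, timeout);

	/*
	 * Same condition as the patch: only prune when shared fences
	 * existed and the wait completed. By the ordering invariant, the
	 * exclusive fence must then be signaled as well.
	 */
	prune_fences = count && timeout >= 0;

	printf("exclusive signaled: %d, prune: %d\n", excl.signaled,
	       prune_fences);
	return 0;
}

As in the patch, count guards the no-shared-fence case and timeout >= 0 distinguishes a completed wait from a timeout, which is why the separate prune_fences assignment after the exclusive-fence wait could be dropped.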
@@ -3205,8 +3213,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		i915_gem_reset_prepare_engine(engine);
 		engine->submit_request = nop_submit_request;
+	}

 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3254,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
 		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+		i915_gem_reset_finish_engine(engine);
 	}

 	set_bit(I915_WEDGED, &i915->gpu_error.flags);
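
Taken together, the two i915_gem_set_wedged() hunks bracket the callback swap and the final seqno roll-forward with i915_gem_reset_prepare_engine()/i915_gem_reset_finish_engine(), so each engine's submission path is quiesced while engine->submit_request is replaced with nop_submit_request. Below is a rough user-space model of that bracketing; model_engine and its helpers are hypothetical stand-ins for the driver internals, which suspend and resume the engine's submission tasklet:

#include <stdio.h>

struct model_engine {
	const char *name;
	void (*submit_request)(struct model_engine *engine);
	int submission_disabled;	/* models a suspended tasklet */
};

static void hw_submit(struct model_engine *engine)
{
	printf("%s: submitting to hardware\n", engine->name);
}

/*
 * Mirrors the role of nop_submit_request: complete requests with -EIO
 * instead of touching the wedged hardware.
 */
static void nop_submit(struct model_engine *engine)
{
	printf("%s: completing request with -EIO\n", engine->name);
}

static void reset_prepare_engine(struct model_engine *engine)
{
	engine->submission_disabled++;	/* old callback can no longer run */
}

static void reset_finish_engine(struct model_engine *engine)
{
	engine->submission_disabled--;	/* submission may resume */
}

int main(void)
{
	struct model_engine engines[] = {
		{ "rcs0", hw_submit, 0 },
		{ "bcs0", hw_submit, 0 },
	};
	int i;

	/* First hunk: quiesce each engine before swapping in the nop. */
	for (i = 0; i < 2; i++) {
		reset_prepare_engine(&engines[i]);
		engines[i].submit_request = nop_submit;
	}

	/* ... mark in-flight requests with -EIO, roll the seqno forward ... */

	/* Second hunk: resume submission only once the engine is wedged. */
	for (i = 0; i < 2; i++) {
		engines[i].submit_request(&engines[i]);
		reset_finish_engine(&engines[i]);
	}

	return 0;
}

The design point the patch makes is ordering: the nop callback must be installed while no submission can run, and submission is only re-enabled after the global seqno has been advanced, so no request can slip through the old callback during the transition.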