Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 58
 1 file changed, 55 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 69035e4f9b3b..91bc4abf5d3e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -599,10 +599,62 @@ out:
 static void reset_ring_common(struct intel_engine_cs *engine,
                               struct drm_i915_gem_request *request)
 {
-        struct intel_ring *ring = request->ring;
+        /* Try to restore the logical GPU state to match the continuation
+         * of the request queue. If we skip the context/PD restore, then
+         * the next request may try to execute assuming that its context
+         * is valid and loaded on the GPU and so may try to access invalid
+         * memory, prompting repeated GPU hangs.
+         *
+         * If the request was guilty, we still restore the logical state
+         * in case the next request requires it (e.g. the aliasing ppgtt),
+         * but skip over the hung batch.
+         *
+         * If the request was innocent, we try to replay the request with
+         * the restored context.
+         */
+        if (request) {
+                struct drm_i915_private *dev_priv = request->i915;
+                struct intel_context *ce = &request->ctx->engine[engine->id];
+                struct i915_hw_ppgtt *ppgtt;
+
+                /* FIXME consider gen8 reset */
+
+                if (ce->state) {
+                        I915_WRITE(CCID,
+                                   i915_ggtt_offset(ce->state) |
+                                   BIT(8) /* must be set! */ |
+                                   CCID_EXTENDED_STATE_SAVE |
+                                   CCID_EXTENDED_STATE_RESTORE |
+                                   CCID_EN);
+                }
 
-        ring->head = request->postfix;
-        ring->last_retired_head = -1;
+                ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+                if (ppgtt) {
+                        u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
+
+                        I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+                        I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
+
+                        /* Wait for the PD reload to complete */
+                        if (intel_wait_for_register(dev_priv,
+                                                    RING_PP_DIR_BASE(engine),
+                                                    BIT(0), 0,
+                                                    10))
+                                DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
+
+                        ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+                }
+
+                /* If the rq hung, jump to its breadcrumb and skip the batch */
+                if (request->fence.error == -EIO) {
+                        struct intel_ring *ring = request->ring;
+
+                        ring->head = request->postfix;
+                        ring->last_retired_head = -1;
+                }
+        } else {
+                engine->legacy_active_context = NULL;
+        }
 }
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
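The PD reload wait in the hunk above relies on intel_wait_for_register(), which polls a register until a masked field reaches an expected value or a millisecond timeout expires, returning nonzero on timeout. Below is a minimal, self-contained userspace sketch of that polling pattern, written under stated assumptions: mmio_read(), the register offset, and the timing helpers are hypothetical stand-ins, not the driver's implementation.

/*
 * Sketch of the poll-with-timeout pattern behind intel_wait_for_register():
 * repeatedly read a register, mask the bits of interest, and compare
 * against the expected value until a deadline passes.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical MMIO read; in the driver this would be an I915_READ(). */
static uint32_t mmio_read(uint32_t reg)
{
        (void)reg;
        return 0;               /* pretend the busy bit has already cleared */
}

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Return 0 once (reg & mask) == value within timeout_ms, -1 on timeout. */
static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
                             unsigned int timeout_ms)
{
        const uint64_t deadline = now_ms() + timeout_ms;

        do {
                if ((mmio_read(reg) & mask) == value)
                        return 0;
        } while (now_ms() < deadline);

        return -1;
}

int main(void)
{
        /* Mirrors the diff: wait up to 10ms for bit 0 of the PD base
         * register to clear; 0x228 is a made-up offset for illustration. */
        if (wait_for_register(0x228, 1u << 0, 0, 10))
                fprintf(stderr, "PD reload timed out\n");
        else
                printf("page-directory reload complete\n");
        return 0;
}

The busy-wait is acceptable here because the reset path already holds the GPU stopped and the bounded 10ms timeout keeps a wedged register from stalling recovery indefinitely, which is the same trade-off the DRM_ERROR fallback in the diff reflects.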