author    | Chris Wilson <chris@chris-wilson.co.uk> | 2017-09-15 13:31:00 -0400
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2017-09-18 05:59:55 -0400
commit    | 27a5f61b377bb62e4813af57fd91636f91ea5755 (patch)
tree      | 6e17b8a3540ad9a9b2342753d59dd67b5a9c35c0 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    | 309bd8ed464fc08f79152e4a18b1da2b11410842 (diff)
drm/i915: Cancel all ready but queued requests when wedging
When wedging the hw, we want to mark all in-flight requests as -EIO.
This is made slightly more complex by execlists, which stores the ready
but not-yet-submitted-to-hw requests on a private queue (an rbtree of
priolists). Call into execlists to cancel not only the ELSP tracking for
the submitted requests, but also the queue of unsubmitted requests.
v2: Move the majority of engine_set_wedged to the backends (both legacy
ringbuffer and execlists handling their own lists).
Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Testcase: igt/gem_eio/in-flight-contexts
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170915173100.26470-1-chris@chris-wilson.co.uk
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
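For orientation, here is a minimal caller-side sketch of the shape the v2 note above describes: engine_set_wedged() delegating the per-backend work to the new engine->cancel_requests hook. The body below, including the nop_submit_request assignment, is an assumption for illustration only; the i915_gem.c side of the series is not part of the diffstat that follows.

/*
 * Illustrative sketch only, not part of the patch shown below: a plausible
 * engine_set_wedged() once v2 moves the backend-specific work behind
 * engine->cancel_requests. The nop_submit_request assignment is assumed
 * here purely for illustration.
 */
static void engine_set_wedged(struct intel_engine_cs *engine)
{
	/* Any further submission on this engine becomes a no-op. */
	engine->submit_request = nop_submit_request;

	/*
	 * Only the backend (legacy ringbuffer or execlists) knows where its
	 * ready-but-unsubmitted requests live, so let it mark everything it
	 * is tracking with -EIO.
	 */
	engine->cancel_requests(engine);
}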
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 22e5ea8516b6..85e64a45d0bf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -782,6 +782,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 	return cs;
 }
 
+static void cancel_requests(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *request;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+
+	/* Mark all submitted requests as skipped. */
+	list_for_each_entry(request, &engine->timeline->requests, link) {
+		GEM_BUG_ON(!request->global_seqno);
+		if (!i915_gem_request_completed(request))
+			dma_fence_set_error(&request->fence, -EIO);
+	}
+	/* Remaining _unready_ requests will be nop'ed when submitted */
+
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
 {
 	struct drm_i915_private *dev_priv = request->i915;
@@ -1996,11 +2014,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = i9xx_submit_request;
+	engine->cancel_requests = cancel_requests;
 }
 
 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = gen6_bsd_submit_request;
+	engine->cancel_requests = cancel_requests;
 }
 
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
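The diffstat is limited to intel_ringbuffer.c, so only the legacy ringbuffer backend is shown; the execlists counterpart referred to in the commit message lives in intel_lrc.c. As a rough sketch of what that side additionally has to do, the fragment below also walks the private queue of ready-but-unsubmitted requests. The field and member names used here (execlist_queue, i915_priolist, priotree.link) are assumptions for the sketch, not taken from this patch.

/*
 * Illustrative sketch only: a rough shape for the execlists-side
 * cancellation described in the commit message. Field and member names
 * (execlist_queue, i915_priolist, priotree.link) are assumed for
 * illustration.
 */
static void execlists_cancel_requests_sketch(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq;
	struct rb_node *rb;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Mark the requests already handed to the ELSP as skipped. */
	list_for_each_entry(rq, &engine->timeline->requests, link) {
		if (!i915_gem_request_completed(rq))
			dma_fence_set_error(&rq->fence, -EIO);
	}

	/* Also fail the ready-but-unsubmitted requests on the priolist queue. */
	for (rb = rb_first(&engine->execlist_queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, struct i915_priolist, node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			dma_fence_set_error(&rq->fence, -EIO);
	}

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}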