aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2012-07-04 16:52:50 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-07-05 04:03:45 -0400
commitde2b998552c1534e87bfbc51ec5734b02bc89020 (patch)
tree06385a5a5f55fd7dc072073fddbd22e678d5e5ba
parenta9340ccab547f24e3b398b7e3ebd792827ff1be1 (diff)
drm/i915: don't return a spurious -EIO from intel_ring_begin
The issue with this check is that it results in userspace receiving an -EIO while the gpu reset hasn't completed, resulting in fallback to sw rendering or worse. Now there's also a stern comment in intel_ring_wait_seqno saying that intel_ring_begin should not return -EAGAIN, ever, because some callers can't handle that. But after an audit of the callsites I don't see any issues. I guess the last problematic spot disappeared with the removal of the pipelined fencing code. So do the right thing and call check_wedge, which should properly decide whether an -EAGAIN or -EIO is appropriate if wedged is set. Note that the early check for a wedged gpu before touching the ring is rather important (and it took me quite some time of acting like the densest doofus to figure that out): If we don't do that and the gpu died for good, not having been resurrected by the reset code, userspace can merrily fill up the entire ring until it notices that something is amiss. Allowing userspace to emit more render, despite that we know that it will fail can't lead to anything good (and by experience can lead to all sorts of havoc, including angering the OOM gods and hard-hanging the hw for good). v2: Fix EAGAIN misspelling, noticed by Chris Wilson. Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Tested-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c18
1 files changed, 4 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cd35ad4922de..d42d821c64d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1117,20 +1117,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	bool was_interruptible;
 	int ret;
 
-	/* XXX As we have not yet audited all the paths to check that
-	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
-	 * allow us to be interruptible by a signal.
-	 */
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
-
 	ret = i915_wait_seqno(ring, seqno);
-
-	dev_priv->mm.interruptible = was_interruptible;
 	if (!ret)
 		i915_gem_retire_requests_ring(ring);
 
@@ -1240,12 +1229,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	int n = 4*num_dwords;
 	int ret;
 
-	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
-		return -EIO;
+	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
 
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);