author    Chris Wilson <chris@chris-wilson.co.uk>  2011-02-07 08:09:31 -0500
committer Chris Wilson <chris@chris-wilson.co.uk>  2011-02-07 09:33:55 -0500
commit    d9bc7e9f32716901c617e1f0fb6ce0f74f172686 (patch)
tree      70cda44be76ffa39b0efc03137732b842e5d8b6e /drivers/gpu/drm/i915/i915_gem.c
parent    dcbe6f2b3d136995915e2f9ecc7d4f3b28f47b6c (diff)
drm/i915: Fix infinite loop regression from 21dd3734
By returning EAGAIN upon a wedged GPU before attempting to wait, we would hit an infinite loop of repeating operation without ever progressing. Instead this needs to be EIO so that userspace knows that the GPU is truly wedged and not in the process of error recovery.

Similarly, we need to handle the error recovery during i915_gem_fault.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
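The distinction matters because userspace treats EAGAIN as transient: libdrm's drmIoctl() retries an ioctl for as long as the kernel keeps returning EINTR or EAGAIN. A minimal sketch of that idiom (the wrapper name is illustrative, but the retry condition mirrors drmIoctl()):

	#include <errno.h>
	#include <sys/ioctl.h>

	/* EINTR and EAGAIN mean "try again", so a kernel that reports
	 * EAGAIN forever for a permanently wedged GPU traps the caller
	 * in this loop; EIO escapes it and reports a truly dead GPU. */
	static int retry_ioctl(int fd, unsigned long request, void *arg)
	{
		int ret;

		do {
			ret = ioctl(fd, request, arg);
		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

		return ret; /* on failure, errno == EIO: stop retrying */
	}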
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52dd77b1bb7c..a41c0e716805 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1171,9 +1171,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
 
-	/* Now bind it into the GTT if needed */
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
 
+	/* Now bind it into the GTT if needed */
 	if (!obj->map_and_fenceable) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
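The switch from mutex_lock() to i915_mutex_lock_interruptible() is what makes the new out: path reachable: unlike a plain mutex_lock(), the helper can fail before the lock is taken. Its shape in the driver of this era was roughly the following (a sketch from context, not part of this diff):

	int i915_mutex_lock_interruptible(struct drm_device *dev)
	{
		int ret;

		/* Wait for any in-flight GPU error handling to settle,
		 * or report the wedged state to the caller. */
		ret = i915_gem_wait_for_error(dev);
		if (ret)
			return ret;

		/* Interruptible, so a pending signal yields an error
		 * instead of an uninterruptible sleep in the fault path. */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		return 0;
	}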
@@ -1208,9 +1210,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-
+out:
 	switch (ret) {
+	case -EIO:
 	case -EAGAIN:
+		/* Give the error handler a chance to run and move the
+		 * objects off the GPU active list. Next time we service the
+		 * fault, we should be able to transition the page into the
+		 * GTT without touching the GPU (and so avoid further
+		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+		 * with coherency, just lost writes.
+		 */
 		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
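-EIO now shares -EAGAIN's fall-through into set_need_resched(), and further down the switch (beyond this hunk) both land on VM_FAULT_NOPAGE, so the hardware simply retries the faulting access once the error handler has had its chance to run. The tail of the switch read approximately as follows at the time (paraphrased for context, not part of this diff):

	case -EIO:
	case -EAGAIN:
		set_need_resched();
		/* fall through: retry the fault */
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;	/* re-run the faulting access */
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;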
@@ -1981,8 +1991,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
 	BUG_ON(seqno == 0);
 
-	if (atomic_read(&dev_priv->mm.wedged))
-		return -EAGAIN;
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		struct completion *x = &dev_priv->error_completion;
+		bool recovery_complete;
+		unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		recovery_complete = x->done > 0;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+
+		return recovery_complete ? -EIO : -EAGAIN;
+	}
 
 	if (seqno == ring->outstanding_lazy_request) {
 		struct drm_i915_gem_request *request;
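Peeking at x->done under x->wait.lock tests the completion without consuming it: the reset worker calls complete_all() on error_completion once recovery has run its course, so done > 0 while mm.wedged is still set means recovery already happened and failed, and EIO is the honest answer; done == 0 means recovery is still pending, so EAGAIN remains correct. A minimal sketch of the producer side of that handshake (everything except complete_all(), mm.wedged and error_completion is illustrative, not the driver's actual reset path):

	/* Illustrative reset worker: whatever the outcome, recovery has
	 * finished, so flag anyone polling error_completion. */
	static void example_reset_work(struct drm_i915_private *dev_priv)
	{
		if (example_gpu_reset(dev_priv) == 0)	/* hypothetical helper */
			atomic_set(&dev_priv->mm.wedged, 0);

		/* done becomes > 0: i915_do_wait_request() now reports -EIO
		 * rather than -EAGAIN if the GPU is still wedged. */
		complete_all(&dev_priv->error_completion);
	}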