author     Chris Wilson <chris@chris-wilson.co.uk>    2011-01-26 10:55:56 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>    2011-01-27 06:06:07 -0500
commit     21dd373486956d7789ffd878347c36efad16923d (patch)
tree       4b0420fdb193276f4e53df78afc50642e8984a5e /drivers/gpu
parent     e110e8d672c9e6e395a5c8bfa3444899b85181ed (diff)
drm/i915: Defer reporting EIO until we try to use the GPU
Instead of reporting EIO upfront in the entrance of an ioctl that may or
may not attempt to use the GPU, defer the actual detection of an invalid
ioctl to when we issue a GPU instruction. This allows us to continue to
use bo in video memory (via pread/pwrite and mmap) after the GPU has hung.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
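As a minimal, standalone C sketch of the pattern the message describes (illustration only: struct fake_gpu and the fake_* helpers below are made-up stand-ins, not the driver's API), the CPU-only path no longer consults the wedged flag at all, while the path that would emit GPU commands is the one that returns -EIO, mirroring the check this patch moves into intel_ring_begin():

/*
 * Standalone illustration only -- not kernel code.  Models the idea:
 * do not fail ioctls up front when the GPU is wedged; fail only when a
 * GPU instruction would actually be issued.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_gpu {
        atomic_int wedged;              /* set when a hang has been detected */
};

/* CPU-only path (think pread/pwrite/mmap): usable even after a hang. */
static int fake_cpu_access(struct fake_gpu *gpu)
{
        (void)gpu;                      /* no wedged check here any more */
        return 0;
}

/* GPU-emission path (think intel_ring_begin): the deferred check lives here. */
static int fake_ring_begin(struct fake_gpu *gpu)
{
        if (atomic_load(&gpu->wedged))
                return -EIO;
        return 0;
}

int main(void)
{
        struct fake_gpu gpu = { .wedged = 1 };  /* simulate a hung GPU */

        printf("cpu access -> %d\n", fake_cpu_access(&gpu));   /* prints 0 */
        printf("ring begin -> %d\n", fake_ring_begin(&gpu));   /* prints -EIO */
        return 0;
}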
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  4
4 files changed, 18 insertions, 28 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a39f8d254502..ff498b98c9bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1052,7 +1052,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
                              struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9d4de368de3..52dd77b1bb7c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
         dev_priv->mm.object_memory -= size;
 }
 
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
         if (ret)
                 return ret;
 
-        /* Success, we reset the GPU! */
-        if (!atomic_read(&dev_priv->mm.wedged))
-                return 0;
-
-        /* GPU is hung, bump the completion count to account for
-         * the token we just consumed so that we never hit zero and
-         * end up waiting upon a subsequent completion event that
-         * will never happen.
-         */
-        spin_lock_irqsave(&x->wait.lock, flags);
-        x->done++;
-        spin_unlock_irqrestore(&x->wait.lock, flags);
-        return -EIO;
+        if (atomic_read(&dev_priv->mm.wedged)) {
+                /* GPU is hung, bump the completion count to account for
+                 * the token we just consumed so that we never hit zero and
+                 * end up waiting upon a subsequent completion event that
+                 * will never happen.
+                 */
+                spin_lock_irqsave(&x->wait.lock, flags);
+                x->done++;
+                spin_unlock_irqrestore(&x->wait.lock, flags);
+        }
+        return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        ret = i915_gem_check_is_wedged(dev);
+        ret = i915_gem_wait_for_error(dev);
         if (ret)
                 return ret;
 
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
         if (ret)
                 return ret;
 
-        if (atomic_read(&dev_priv->mm.wedged)) {
-                mutex_unlock(&dev->struct_mutex);
-                return -EAGAIN;
-        }
-
         WARN_ON(i915_verify_lists(dev));
         return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..a093d67b94e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,11 +284,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
         struct drm_i915_gem_set_tiling *args = data;
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj;
-        int ret;
-
-        ret = i915_gem_check_is_wedged(dev);
-        if (ret)
-                return ret;
 
         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
         if (obj == NULL)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5f7bbaa6a608..235d9c4b40ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -980,9 +980,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 int intel_ring_begin(struct intel_ring_buffer *ring,
                      int num_dwords)
 {
+        struct drm_i915_private *dev_priv = ring->dev->dev_private;
         int n = 4*num_dwords;
         int ret;
 
+        if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+                return -EIO;
+
         if (unlikely(ring->tail + n > ring->effective_size)) {
                 ret = intel_wrap_ring_buffer(ring);
                 if (unlikely(ret))