path: root/drivers/gpu
author     Daniel Vetter <daniel.vetter@ffwll.ch>    2012-11-14 11:14:05 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2013-01-20 07:11:15 -0500
commit     33196deddacc7790defb9a7e84659e0362d4da7a (patch)
tree       dc00ac3abcd36acad31371710ecedaffdd3411e7 /drivers/gpu
parent     99584db33ba4f864777e2cfef5329ed1bf13f714 (diff)
drm/i915: move wedged to the other gpu error handling stuff

And to make Ben Widawsky happier, use the gpu_error instead of the entire
device as the argument in some functions.

Drop the outdated comment on ->wedged for now, a follow-up patch will change
the semantics and add a proper comment again.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
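For context, this is the call pattern after the patch, as a minimal sketch only: i915_gem_check_wedge(), struct i915_gpu_error and gpu_error.wedged are taken from the diff below, while the wrapper function itself is a hypothetical example and not part of the patch.

/*
 * Illustrative sketch of the new wedged handling: the flag now lives in
 * dev_priv->gpu_error, and helpers take only the error state they need.
 * example_submit_precheck() is a made-up name, not driver code.
 */
static int example_submit_precheck(struct drm_i915_private *dev_priv)
{
        /* wedged now sits in the dedicated gpu_error struct */
        if (atomic_read(&dev_priv->gpu_error.wedged))
                return -EIO;

        /* helpers are passed &dev_priv->gpu_error instead of dev_priv */
        return i915_gem_check_wedge(&dev_priv->gpu_error,
                                    dev_priv->mm.interruptible);
}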
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c      |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  6
6 files changed, 30 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3b1bf4e70d94..e1b7eaf60d24 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1672,7 +1672,7 @@ i915_wedged_read(struct file *filp,
 
         len = snprintf(buf, sizeof(buf),
                        "wedged : %d\n",
-                       atomic_read(&dev_priv->mm.wedged));
+                       atomic_read(&dev_priv->gpu_error.wedged));
 
         if (len > sizeof(buf))
                 len = sizeof(buf);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dfe0e747c4f7..62da6c7a4884 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -744,15 +744,6 @@ struct i915_gem_mm {
          */
         int suspended;
 
-        /**
-         * Flag if the hardware appears to be wedged.
-         *
-         * This is set when attempts to idle the device timeout.
-         * It prevents command submission from occurring and makes
-         * every pending request fail
-         */
-        atomic_t wedged;
-
         /** Bit 6 swizzling required for X tiling */
         uint32_t bit_6_swizzle_x;
         /** Bit 6 swizzling required for Y tiling */
@@ -784,6 +775,8 @@ struct i915_gpu_error {
 
         unsigned long last_reset;
 
+        atomic_t wedged;
+
         /* For gpu hang simulation. */
         unsigned int stop_rings;
 };
@@ -1548,7 +1541,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                       bool interruptible);
 
 void i915_gem_reset(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95e022e7a26e..04b2f92eb456 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,14 +87,13 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 }
 
 static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct completion *x = &dev_priv->gpu_error.completion;
+        struct completion *x = &error->completion;
         unsigned long flags;
         int ret;
 
-        if (!atomic_read(&dev_priv->mm.wedged))
+        if (!atomic_read(&error->wedged))
                 return 0;
 
         /*
@@ -110,7 +109,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
                 return ret;
         }
 
-        if (atomic_read(&dev_priv->mm.wedged)) {
+        if (atomic_read(&error->wedged)) {
                 /* GPU is hung, bump the completion count to account for
                  * the token we just consumed so that we never hit zero and
                  * end up waiting upon a subsequent completion event that
@@ -125,9 +124,10 @@ i915_gem_wait_for_error(struct drm_device *dev)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        ret = i915_gem_wait_for_error(dev);
+        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
         if (ret)
                 return ret;
 
@@ -939,11 +939,11 @@ unlock:
 }
 
 int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
                      bool interruptible)
 {
-        if (atomic_read(&dev_priv->mm.wedged)) {
-                struct completion *x = &dev_priv->gpu_error.completion;
+        if (atomic_read(&error->wedged)) {
+                struct completion *x = &error->completion;
                 bool recovery_complete;
                 unsigned long flags;
 
@@ -1025,7 +1025,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 
 #define EXIT_COND \
         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-        atomic_read(&dev_priv->mm.wedged))
+        atomic_read(&dev_priv->gpu_error.wedged))
         do {
                 if (interruptible)
                         end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1035,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
                                                  timeout_jiffies);
 
-                ret = i915_gem_check_wedge(dev_priv, interruptible);
+                ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
                 if (ret)
                         end = ret;
         } while (end == 0 && wait_forever);
@@ -1081,7 +1081,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
         BUG_ON(seqno == 0);
 
-        ret = i915_gem_check_wedge(dev_priv, interruptible);
+        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
         if (ret)
                 return ret;
 
@@ -1146,7 +1146,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
         if (seqno == 0)
                 return 0;
 
-        ret = i915_gem_check_wedge(dev_priv, true);
+        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
         if (ret)
                 return ret;
 
@@ -1379,7 +1379,7 @@ out:
                 /* If this -EIO is due to a gpu hang, give the reset code a
                  * chance to clean up the mess. Otherwise return the proper
                  * SIGBUS. */
-                if (!atomic_read(&dev_priv->mm.wedged))
+                if (!atomic_read(&dev_priv->gpu_error.wedged))
                         return VM_FAULT_SIGBUS;
         case -EAGAIN:
                 /* Give the error handler a chance to run and move the
@@ -3390,7 +3390,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
         u32 seqno = 0;
         int ret;
 
-        if (atomic_read(&dev_priv->mm.wedged))
+        if (atomic_read(&dev_priv->gpu_error.wedged))
                 return -EIO;
 
         spin_lock(&file_priv->mm.lock);
@@ -3978,9 +3978,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
         if (drm_core_check_feature(dev, DRIVER_MODESET))
                 return 0;
 
-        if (atomic_read(&dev_priv->mm.wedged)) {
+        if (atomic_read(&dev_priv->gpu_error.wedged)) {
                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
-                atomic_set(&dev_priv->mm.wedged, 0);
+                atomic_set(&dev_priv->gpu_error.wedged, 0);
         }
 
         mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c768ebdf8a27..f2c0016fa533 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -871,11 +871,11 @@ static void i915_error_work_func(struct work_struct *work)
 
         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
-        if (atomic_read(&dev_priv->mm.wedged)) {
+        if (atomic_read(&dev_priv->gpu_error.wedged)) {
                 DRM_DEBUG_DRIVER("resetting chip\n");
                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
                 if (!i915_reset(dev)) {
-                        atomic_set(&dev_priv->mm.wedged, 0);
+                        atomic_set(&dev_priv->gpu_error.wedged, 0);
                         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
                 }
                 complete_all(&dev_priv->gpu_error.completion);
@@ -1483,7 +1483,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 
         if (wedged) {
                 INIT_COMPLETION(dev_priv->gpu_error.completion);
-                atomic_set(&dev_priv->mm.wedged, 1);
+                atomic_set(&dev_priv->gpu_error.wedged, 1);
 
                 /*
                  * Wakeup waiting processes so they don't hang
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b35902e5d925..160aa5f76118 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2223,7 +2223,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
 
         wait_event(dev_priv->pending_flip_queue,
-                   atomic_read(&dev_priv->mm.wedged) ||
+                   atomic_read(&dev_priv->gpu_error.wedged) ||
                    atomic_read(&obj->pending_flip) == 0);
 
         /* Big Hammer, we also need to ensure that any pending
@@ -2871,7 +2871,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
         unsigned long flags;
         bool pending;
 
-        if (atomic_read(&dev_priv->mm.wedged))
+        if (atomic_read(&dev_priv->gpu_error.wedged))
                 return false;
 
         spin_lock_irqsave(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d6b06aa4c05c..9438bcd50678 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1371,7 +1371,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 
                 msleep(1);
 
-                ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                           dev_priv->mm.interruptible);
                 if (ret)
                         return ret;
         } while (!time_after(jiffies, end));
@@ -1460,7 +1461,8 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
         int ret;
 
-        ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                   dev_priv->mm.interruptible);
         if (ret)
                 return ret;
 