about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorBen Gamari <bgamari.foss@gmail.com>2009-09-14 17:48:47 -0400
committerJesse Barnes <jbarnes@virtuousgeek.org>2009-09-17 17:36:46 -0400
commitba1234d17b3b1fe7087defb191a3c705f208aca6 (patch)
tree1d26be1196eeef15dda3e5386cd021d94249f0be
parentf316a42cc49eca73b33d85feb6177e32431747ff (diff)
drm/i915: Make dev_priv->mm.wedged an atomic_t
There is a very real possibility that multiple CPUs will notice that the GPU is wedged. This introduces all sorts of potential race conditions. Make the wedged flag atomic to mitigate this risk.

Signed-off-by: Ben Gamari <bgamari.foss@gmail.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c18
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c15
3 files changed, 18 insertions, 17 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 42142f269765..bcc1be281de6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -450,7 +450,7 @@ typedef struct drm_i915_private {
450 * It prevents command submission from occuring and makes 450 * It prevents command submission from occuring and makes
451 * every pending request fail 451 * every pending request fail
452 */ 452 */
453 int wedged; 453 atomic_t wedged;
454 454
455 /** Bit 6 swizzling required for X tiling */ 455 /** Bit 6 swizzling required for X tiling */
456 uint32_t bit_6_swizzle_x; 456 uint32_t bit_6_swizzle_x;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 579b3b04ff12..f0f6f668a61e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1712,7 +1712,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1712 retiring_seqno = request->seqno; 1712 retiring_seqno = request->seqno;
1713 1713
1714 if (i915_seqno_passed(seqno, retiring_seqno) || 1714 if (i915_seqno_passed(seqno, retiring_seqno) ||
1715 dev_priv->mm.wedged) { 1715 atomic_read(&dev_priv->mm.wedged)) {
1716 i915_gem_retire_request(dev, request); 1716 i915_gem_retire_request(dev, request);
1717 1717
1718 list_del(&request->list); 1718 list_del(&request->list);
@@ -1754,7 +1754,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1754 1754
1755 BUG_ON(seqno == 0); 1755 BUG_ON(seqno == 0);
1756 1756
1757 if (dev_priv->mm.wedged) 1757 if (atomic_read(&dev_priv->mm.wedged))
1758 return -EIO; 1758 return -EIO;
1759 1759
1760 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1760 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
@@ -1774,11 +1774,11 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1774 ret = wait_event_interruptible(dev_priv->irq_queue, 1774 ret = wait_event_interruptible(dev_priv->irq_queue,
1775 i915_seqno_passed(i915_get_gem_seqno(dev), 1775 i915_seqno_passed(i915_get_gem_seqno(dev),
1776 seqno) || 1776 seqno) ||
1777 dev_priv->mm.wedged); 1777 atomic_read(&dev_priv->mm.wedged));
1778 i915_user_irq_put(dev); 1778 i915_user_irq_put(dev);
1779 dev_priv->mm.waiting_gem_seqno = 0; 1779 dev_priv->mm.waiting_gem_seqno = 0;
1780 } 1780 }
1781 if (dev_priv->mm.wedged) 1781 if (atomic_read(&dev_priv->mm.wedged))
1782 ret = -EIO; 1782 ret = -EIO;
1783 1783
1784 if (ret && ret != -ERESTARTSYS) 1784 if (ret && ret != -ERESTARTSYS)
@@ -3359,7 +3359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3359 3359
3360 i915_verify_inactive(dev, __FILE__, __LINE__); 3360 i915_verify_inactive(dev, __FILE__, __LINE__);
3361 3361
3362 if (dev_priv->mm.wedged) { 3362 if (atomic_read(&dev_priv->mm.wedged)) {
3363 DRM_ERROR("Execbuf while wedged\n"); 3363 DRM_ERROR("Execbuf while wedged\n");
3364 mutex_unlock(&dev->struct_mutex); 3364 mutex_unlock(&dev->struct_mutex);
3365 ret = -EIO; 3365 ret = -EIO;
@@ -3929,7 +3929,7 @@ i915_gem_idle(struct drm_device *dev)
3929 if (last_seqno == cur_seqno) { 3929 if (last_seqno == cur_seqno) {
3930 if (stuck++ > 100) { 3930 if (stuck++ > 100) {
3931 DRM_ERROR("hardware wedged\n"); 3931 DRM_ERROR("hardware wedged\n");
3932 dev_priv->mm.wedged = 1; 3932 atomic_set(&dev_priv->mm.wedged, 1);
3933 DRM_WAKEUP(&dev_priv->irq_queue); 3933 DRM_WAKEUP(&dev_priv->irq_queue);
3934 break; 3934 break;
3935 } 3935 }
@@ -3942,7 +3942,7 @@ i915_gem_idle(struct drm_device *dev)
3942 i915_gem_retire_requests(dev); 3942 i915_gem_retire_requests(dev);
3943 3943
3944 spin_lock(&dev_priv->mm.active_list_lock); 3944 spin_lock(&dev_priv->mm.active_list_lock);
3945 if (!dev_priv->mm.wedged) { 3945 if (!atomic_read(&dev_priv->mm.wedged)) {
3946 /* Active and flushing should now be empty as we've 3946 /* Active and flushing should now be empty as we've
3947 * waited for a sequence higher than any pending execbuffer 3947 * waited for a sequence higher than any pending execbuffer
3948 */ 3948 */
@@ -4204,9 +4204,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4204 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4204 if (drm_core_check_feature(dev, DRIVER_MODESET))
4205 return 0; 4205 return 0;
4206 4206
4207 if (dev_priv->mm.wedged) { 4207 if (atomic_read(&dev_priv->mm.wedged)) {
4208 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4208 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4209 dev_priv->mm.wedged = 0; 4209 atomic_set(&dev_priv->mm.wedged, 0);
4210 } 4210 }
4211 4211
4212 mutex_lock(&dev->struct_mutex); 4212 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8f5276614ce2..13e664ddb611 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,12 +309,12 @@ static void i915_error_work_func(struct work_struct *work)
309 DRM_DEBUG("generating error event\n"); 309 DRM_DEBUG("generating error event\n");
310 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 310 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
311 311
312 if (dev_priv->mm.wedged) { 312 if (atomic_read(&dev_priv->mm.wedged)) {
313 if (IS_I965G(dev)) { 313 if (IS_I965G(dev)) {
314 DRM_DEBUG("resetting chip\n"); 314 DRM_DEBUG("resetting chip\n");
315 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 315 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
316 if (!i965_reset(dev, GDRST_RENDER)) { 316 if (!i965_reset(dev, GDRST_RENDER)) {
317 dev_priv->mm.wedged = 0; 317 atomic_set(&dev_priv->mm.wedged, 0);
318 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 318 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
319 } 319 }
320 } else { 320 } else {
@@ -385,7 +385,7 @@ out:
385 * so userspace knows something bad happened (should trigger collection 385 * so userspace knows something bad happened (should trigger collection
386 * of a ring dump etc.). 386 * of a ring dump etc.).
387 */ 387 */
388static void i915_handle_error(struct drm_device *dev) 388static void i915_handle_error(struct drm_device *dev, bool wedged)
389{ 389{
390 struct drm_i915_private *dev_priv = dev->dev_private; 390 struct drm_i915_private *dev_priv = dev->dev_private;
391 u32 eir = I915_READ(EIR); 391 u32 eir = I915_READ(EIR);
@@ -495,7 +495,9 @@ static void i915_handle_error(struct drm_device *dev)
495 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 495 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
496 } 496 }
497 497
498 if (dev_priv->mm.wedged) { 498 if (wedged) {
499 atomic_set(&dev_priv->mm.wedged, 1);
500
499 /* 501 /*
500 * Wakeup waiting processes so they don't hang 502 * Wakeup waiting processes so they don't hang
501 */ 503 */
@@ -548,7 +550,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
548 pipeb_stats = I915_READ(PIPEBSTAT); 550 pipeb_stats = I915_READ(PIPEBSTAT);
549 551
550 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 552 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
551 i915_handle_error(dev); 553 i915_handle_error(dev, false);
552 554
553 /* 555 /*
554 * Clear the PIPE(A|B)STAT regs before the IIR 556 * Clear the PIPE(A|B)STAT regs before the IIR
@@ -934,8 +936,7 @@ void i915_hangcheck_elapsed(unsigned long data)
934 936
935 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { 937 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
936 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 938 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
937 dev_priv->mm.wedged = true; /* Hopefully this is atomic */ 939 i915_handle_error(dev, true);
938 i915_handle_error(dev);
939 return; 940 return;
940 } 941 }
941 942