Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6a98c0659324..af6a510367af 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1863,11 +1863,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+		     bool interruptible)
 {
-	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		struct completion *x = &dev_priv->error_completion;
 		bool recovery_complete;
@@ -1878,7 +1877,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 		recovery_complete = x->done > 0;
 		spin_unlock_irqrestore(&x->wait.lock, flags);
 
-		return recovery_complete ? -EIO : -EAGAIN;
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but still wedged means reset failure. */
+		if (recovery_complete)
+			return -EIO;
+
+		return -EAGAIN;
 	}
 
 	return 0;
@@ -1932,6 +1940,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	unsigned long timeout_jiffies;
 	long end;
 	bool wait_forever = true;
+	int ret;
 
 	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
 		return 0;
@@ -1963,8 +1972,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 		end = wait_event_timeout(ring->irq_queue, EXIT_COND,
 					 timeout_jiffies);
 
-		if (atomic_read(&dev_priv->mm.wedged))
-			end = -EAGAIN;
+		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		if (ret)
+			end = ret;
 	} while (end == 0 && wait_forever);
 
 	getrawmonotonic(&now);
@@ -2004,7 +2014,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
 	BUG_ON(seqno == 0);
 
-	ret = i915_gem_check_wedge(dev_priv);
+	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
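
For context, a minimal standalone sketch of the error-code policy this patch introduces in i915_gem_check_wedge(): non-interruptible callers always get -EIO (they cannot retry), while interruptible callers get -EIO only when a completed reset still left the GPU wedged, and -EAGAIN while recovery is in progress. The struct wedge_state type and check_wedge_sketch() helper below are illustrative stand-ins, not the kernel's own definitions, and the code compiles as a plain userspace program.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver state consulted by i915_gem_check_wedge(). */
struct wedge_state {
	bool wedged;             /* mirrors atomic_read(&dev_priv->mm.wedged) */
	bool recovery_complete;  /* mirrors error_completion->done > 0 */
};

/* Mirrors the decision order of the patched function: non-interruptible
 * callers cannot handle -EAGAIN, so they see -EIO unconditionally;
 * interruptible callers see -EIO only for a failed reset, otherwise
 * -EAGAIN until recovery finishes. */
static int check_wedge_sketch(const struct wedge_state *s, bool interruptible)
{
	if (!s->wedged)
		return 0;

	if (!interruptible)
		return -EIO;

	if (s->recovery_complete)
		return -EIO;

	return -EAGAIN;
}

int main(void)
{
	struct wedge_state mid_reset = { .wedged = true, .recovery_complete = false };
	struct wedge_state failed_reset = { .wedged = true, .recovery_complete = true };

	printf("interruptible, reset in progress:     %d\n",
	       check_wedge_sketch(&mid_reset, true));    /* -EAGAIN */
	printf("non-interruptible, reset in progress: %d\n",
	       check_wedge_sketch(&mid_reset, false));   /* -EIO */
	printf("interruptible, reset failed:          %d\n",
	       check_wedge_sketch(&failed_reset, true)); /* -EIO */
	return 0;
}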