author		Ben Widawsky <ben@bwidawsk.net>	2012-04-25 23:50:12 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-05-03 05:18:32 -0400
commit		b4aca0106c466b5a0329318203f65bac2d91b682 (patch)
tree		c2babdee7cce61eefe1909650f188c0ab7f99db2 /drivers/gpu/drm/i915/i915_gem.c
parent		c921aba84ae5bf74b5386b4d2e01a5706ae4b878 (diff)
drm/i915: extract some common olr+wedge code
The new wait_rendering ioctl also needs to check for an outstanding
lazy request, and we already duplicate that logic in three places. So
extract it.

While at it, also extract the code to check the gpu wedging state to
improve code flow.

v2: Don't use seqno as an outparam (Chris)

v3 by danvet: Kill stale comment and pimp commit message

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
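For context, a minimal sketch of the call pattern the two extracted
helpers enable. example_wait() below is hypothetical and not part of the
patch; i915_gem_check_wedge(), i915_gem_check_olr(), __wait_seqno() and
dev_priv->mm.interruptible are the names the patch introduces or uses.

/* Hypothetical caller, for illustration only -- not part of the patch. */
static int example_wait(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	/* Report -EIO or -EAGAIN if the GPU is wedged. */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ret;

	/* Emit the outstanding lazy request if seqno matches it. */
	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
}

Of the three former copies, only i915_wait_request() needs both checks;
i915_gem_object_sync() and i915_gem_busy_ioctl() reuse just
i915_gem_check_olr().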
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	120
1 file changed, 63 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 131eadb84adb..4ab57fd752dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1819,6 +1819,57 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+{
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		struct completion *x = &dev_priv->error_completion;
+		bool recovery_complete;
+		unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		recovery_complete = x->done > 0;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+
+		return recovery_complete ? -EIO : -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret = 0;
+
+	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	if (seqno == ring->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(ring, NULL, request);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		BUG_ON(seqno != request->seqno);
+	}
+
+	return ret;
+}
+
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			bool interruptible)
 {
@@ -1862,34 +1913,13 @@ i915_wait_request(struct intel_ring_buffer *ring,
 
 	BUG_ON(seqno == 0);
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->error_completion;
-		bool recovery_complete;
-		unsigned long flags;
-
-		/* Give the error handler a chance to run. */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		recovery_complete = x->done > 0;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-
-		return recovery_complete ? -EIO : -EAGAIN;
-	}
-
-	if (seqno == ring->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(ring, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
+	ret = i915_gem_check_wedge(dev_priv);
+	if (ret)
+		return ret;
 
-		seqno = request->seqno;
-	}
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
 
 	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
 	if (atomic_read(&dev_priv->mm.wedged))
@@ -1957,22 +1987,9 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
-	if (seqno == from->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(from, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		seqno = request->seqno;
-	}
-
+	ret = i915_gem_check_olr(obj->ring, seqno);
+	if (ret)
+		return ret;
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
@@ -3160,20 +3177,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
 			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
-		} else if (obj->ring->outstanding_lazy_request ==
-			   obj->last_rendering_seqno) {
-			struct drm_i915_gem_request *request;
-
-			/* This ring is not being cleared by active usage,
-			 * so emit a request to do so.
-			 */
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request) {
-				ret = i915_add_request(obj->ring, NULL, request);
-				if (ret)
-					kfree(request);
-			} else
-				ret = -ENOMEM;
+		} else {
+			ret = i915_gem_check_olr(obj->ring,
+						 obj->last_rendering_seqno);
 		}
 
 		/* Update the active list for the hardware's current position.