author		Chris Wilson <chris@chris-wilson.co.uk>	2010-09-28 05:07:56 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-09-28 06:30:52 -0400
commit		a56ba56c275b1c2b982c8901ab92bf5a0fd0b757 (patch)
tree		6f4e9ea2fe775a1f05e16888dc8b5c5c0edce11a /drivers/gpu/drm/i915/i915_gem.c
parent		ced270fa893735363f74bf96e0a8a05ec330d04d (diff)
Revert "drm/i915: Drop ring->lazy_request"
With multiple rings generating requests independently, the outstanding
requests must also be tracked independently.

Reported-by: Wang Jinjin <jinjin.wang@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30380
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
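For context, a minimal sketch of the per-ring lazy-request idea that this revert restores follows. The names used here (example_dev, example_ring, example_*) are simplified stand-ins invented for illustration, not the real i915 structures; the point is only that the "a seqno was handed out but the request has not been emitted yet" state must live in the ring, because each ring emits and retires requests against the shared seqno counter on its own schedule.

/*
 * Hedged sketch, not kernel code: hypothetical types standing in for the
 * real drm_i915_private / intel_ring_buffer structures.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_ring {
	bool outstanding_lazy_request;	/* this ring owes an emitted request */
};

struct example_dev {
	uint32_t next_seqno;		/* global counter shared by all rings */
};

/* Hand out the seqno the next request on this ring will carry and note
 * that the ring now has a lazy (not yet emitted) request outstanding. */
static uint32_t example_next_request_seqno(struct example_dev *dev,
					   struct example_ring *ring)
{
	ring->outstanding_lazy_request = true;
	return dev->next_seqno;
}

/* Emitting the real request clears only this ring's flag; other rings may
 * still hold lazy requests against the same counter, which is why a single
 * global (seqno == next_seqno) test is not enough. */
static uint32_t example_add_request(struct example_dev *dev,
				    struct example_ring *ring)
{
	uint32_t seqno = dev->next_seqno++;

	ring->outstanding_lazy_request = false;
	return seqno;
}

/* A wait must first turn any lazy request into a real one, or the awaited
 * seqno would never be signalled by the hardware. */
static void example_wait_request(struct example_dev *dev,
				 struct example_ring *ring, uint32_t seqno)
{
	if (ring->outstanding_lazy_request)
		seqno = example_add_request(dev, ring);
	/* ... poll or sleep until the ring reports seqno has passed ... */
	(void)seqno;
}

In the patch itself this corresponds to i915_gem_next_request_seqno(), the outstanding_lazy_request handling in i915_add_request(), and the check in i915_do_wait_request().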
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	| 43
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1025508e5916..63b38608c800 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1545,12 +1545,23 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	obj_priv->pages = NULL;
 }
 
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	ring->outstanding_lazy_request = true;
+	return dev_priv->next_seqno;
+}
+
 static void
 i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = obj->dev->dev_private;
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
 	BUG_ON(ring == NULL);
 	obj_priv->ring = ring;
@@ -1563,7 +1574,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list, &ring->active_list);
-	obj_priv->last_rendering_seqno = dev_priv->next_seqno;
+	obj_priv->last_rendering_seqno = seqno;
 }
 
 static void
@@ -1686,6 +1697,7 @@ i915_add_request(struct drm_device *dev,
 	}
 
 	seqno = ring->add_request(dev, ring, 0);
+	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1930,11 +1942,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EAGAIN;
 
-	if (seqno == dev_priv->next_seqno) {
+	if (ring->outstanding_lazy_request) {
 		seqno = i915_add_request(dev, NULL, NULL, ring);
 		if (seqno == 0)
 			return -ENOMEM;
 	}
+	BUG_ON(seqno == dev_priv->next_seqno);
 
 	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
@@ -1993,7 +2006,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
  */
 static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno,
-		struct intel_ring_buffer *ring)
+		  struct intel_ring_buffer *ring)
 {
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
@@ -2139,12 +2152,21 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return ret;
 }
 
+static int i915_ring_idle(struct drm_device *dev,
+			  struct intel_ring_buffer *ring)
+{
+	i915_gem_flush_ring(dev, NULL, ring,
+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	return i915_wait_request(dev,
+				 i915_gem_next_request_seqno(dev, ring),
+				 ring);
+}
+
 int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	u32 seqno;
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
@@ -2155,18 +2177,12 @@ i915_gpu_idle(struct drm_device *dev)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	seqno = dev_priv->next_seqno;
-	i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
-			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
 	if (ret)
 		return ret;
 
 	if (HAS_BSD(dev)) {
-		seqno = dev_priv->next_seqno;
-		i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
+		ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
 		if (ret)
 			return ret;
 	}
@@ -3938,6 +3954,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
 	}
+
 	i915_add_request(dev, file_priv, request, ring);
 	request = NULL;
 