author		Ben Widawsky <ben@bwidawsk.net>	2012-04-26 19:02:58 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-05-03 05:18:20 -0400
commit		b2da9fe5d5994a104bbae154590070d698279919 (patch)
tree		06ebd3a0f0c0be5ba35265922ca4b67b7488dbf0 /drivers/gpu/drm/i915/i915_gem.c
parent		507432986c15f18c5102b18027e4716fc9e9009e (diff)
drm/i915: remove do_retire from i915_wait_request
This originates from a hack by me to quickly fix a bug in an earlier patch where we needed control over whether or not waiting on a seqno actually did any retire list processing. Since the two operations aren't clearly related, we should pull the parameter out of the wait function and make the caller responsible for retiring if that action is desired.

The only function call site which did not get an explicit retire_request call (on purpose) is i915_gem_inactive_shrink(). That code was already calling retire_request a second time.

v2: don't modify any behavior except in i915_gem_inactive_shrink() (Daniel)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
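In short, callers that previously passed do_retire=true are converted to the pattern sketched below. This is a minimal sketch with simplified stand-in types and names (wait_request(), retire_requests_ring()) rather than the real i915 API; see the hunks below for the actual conversions:

	struct ring;

	/* Stand-ins for i915_wait_request() (post-patch signature, without
	 * the do_retire flag) and i915_gem_retire_requests_ring(). */
	int wait_request(struct ring *ring, unsigned int seqno);
	void retire_requests_ring(struct ring *ring);

	static int wait_and_retire(struct ring *ring, unsigned int seqno)
	{
		int ret = wait_request(ring, seqno);
		if (ret)
			return ret;
		/* Retiring is now an explicit caller decision,
		 * not a flag passed to the wait. */
		retire_requests_ring(ring);
		return 0;
	}

Callers that never wanted retiring (e.g. i915_gem_object_flush_fence(), which passed false) simply drop the extra argument and stop after the wait.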
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b46a3fd17746..e378204970fd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1825,8 +1825,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno,
-		  bool do_retire)
+		  uint32_t seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -1902,14 +1901,6 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
 
-	/* Directly dispatch request retiring. While we have the work queue
-	 * to handle this, the waiter on a request often wants an associated
-	 * buffer to have made it to the inactive list, and we would need
-	 * a separate wait queue to handle that.
-	 */
-	if (ret == 0 && do_retire)
-		i915_gem_retire_requests_ring(ring);
-
 	return ret;
 }
 
@@ -1931,10 +1922,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
-					true);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
+		i915_gem_retire_requests_ring(obj->ring);
 	}
 
 	return 0;
@@ -2117,7 +2108,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 
@@ -2131,18 +2122,17 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 			return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
-				 do_retire);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+		ret = i915_ring_idle(&dev_priv->ring[i]);
 		if (ret)
 			return ret;
 	}
@@ -2331,9 +2321,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 	}
 
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_request(obj->ring,
-					obj->last_fenced_seqno,
-					false);
+		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 
@@ -3394,11 +3382,12 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev, true);
+	ret = i915_gpu_idle(dev);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
+	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4025,7 +4014,7 @@ rescan:
 	 * This has a dramatic impact to reduce the number of
 	 * OOM-killer events whilst running the GPU aggressively.
 	 */
-	if (i915_gpu_idle(dev, true) == 0)
+	if (i915_gpu_idle(dev) == 0)
 		goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);