aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-09-14 08:03:28 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2010-09-14 16:08:36 -0400
commit2cf34d7b7ee99c27c1a6bdd2f91344cbfa5fef5c (patch)
tree86e52ca489247cddc28950e86cc8a01e5433ae4b
parent48b956c5a89c7b100ef3b818b6ccf759ab695383 (diff)
drm/i915: Allow get_fence_reg() to be uninterruptible
As we currently may need to acquire a fence register during a modeset, we need to be able to do so in an uninterruptible manner. So expose that parameter to the callers of the fence management code. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
4 files changed, 31 insertions, 22 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b97d62d81905..b0692c40b0c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -986,8 +986,10 @@ void i915_gem_lastclose(struct drm_device *dev);
986uint32_t i915_get_gem_seqno(struct drm_device *dev, 986uint32_t i915_get_gem_seqno(struct drm_device *dev,
987 struct intel_ring_buffer *ring); 987 struct intel_ring_buffer *ring);
988bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 988bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
989int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 989int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
990int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 990 bool interruptible);
991int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
992 bool interruptible);
991void i915_gem_retire_requests(struct drm_device *dev); 993void i915_gem_retire_requests(struct drm_device *dev);
992void i915_gem_clflush_object(struct drm_gem_object *obj); 994void i915_gem_clflush_object(struct drm_gem_object *obj);
993int i915_gem_object_set_domain(struct drm_gem_object *obj, 995int i915_gem_object_set_domain(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 85a3cf4ab481..02719df418e3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -48,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 uint64_t offset, 48 uint64_t offset,
49 uint64_t size); 49 uint64_t size);
50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
52 bool interruptible);
52static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 53static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
53 unsigned alignment); 54 unsigned alignment);
54static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 55static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -1181,7 +1182,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1181 1182
1182 /* Need a new fence register? */ 1183 /* Need a new fence register? */
1183 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1184 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1184 ret = i915_gem_object_get_fence_reg(obj); 1185 ret = i915_gem_object_get_fence_reg(obj, true);
1185 if (ret) 1186 if (ret)
1186 goto unlock; 1187 goto unlock;
1187 } 1188 }
@@ -1919,7 +1920,8 @@ i915_gem_flush(struct drm_device *dev,
1919 * safe to unbind from the GTT or access from the CPU. 1920 * safe to unbind from the GTT or access from the CPU.
1920 */ 1921 */
1921static int 1922static int
1922i915_gem_object_wait_rendering(struct drm_gem_object *obj) 1923i915_gem_object_wait_rendering(struct drm_gem_object *obj,
1924 bool interruptible)
1923{ 1925{
1924 struct drm_device *dev = obj->dev; 1926 struct drm_device *dev = obj->dev;
1925 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1927 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1938,10 +1940,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1938 DRM_INFO("%s: object %p wait for seqno %08x\n", 1940 DRM_INFO("%s: object %p wait for seqno %08x\n",
1939 __func__, obj, obj_priv->last_rendering_seqno); 1941 __func__, obj, obj_priv->last_rendering_seqno);
1940#endif 1942#endif
1941 ret = i915_wait_request(dev, 1943 ret = i915_do_wait_request(dev,
1942 obj_priv->last_rendering_seqno, 1944 obj_priv->last_rendering_seqno,
1943 obj_priv->ring); 1945 interruptible,
1944 if (ret != 0) 1946 obj_priv->ring);
1947 if (ret)
1945 return ret; 1948 return ret;
1946 } 1949 }
1947 1950
@@ -2234,7 +2237,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2234 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2237 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2235} 2238}
2236 2239
2237static int i915_find_fence_reg(struct drm_device *dev) 2240static int i915_find_fence_reg(struct drm_device *dev,
2241 bool interruptible)
2238{ 2242{
2239 struct drm_i915_fence_reg *reg = NULL; 2243 struct drm_i915_fence_reg *reg = NULL;
2240 struct drm_i915_gem_object *obj_priv = NULL; 2244 struct drm_i915_gem_object *obj_priv = NULL;
@@ -2279,7 +2283,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
2279 * private reference to obj like the other callers of put_fence_reg 2283 * private reference to obj like the other callers of put_fence_reg
2280 * (set_tiling ioctl) do. */ 2284 * (set_tiling ioctl) do. */
2281 drm_gem_object_reference(obj); 2285 drm_gem_object_reference(obj);
2282 ret = i915_gem_object_put_fence_reg(obj); 2286 ret = i915_gem_object_put_fence_reg(obj, interruptible);
2283 drm_gem_object_unreference(obj); 2287 drm_gem_object_unreference(obj);
2284 if (ret != 0) 2288 if (ret != 0)
2285 return ret; 2289 return ret;
@@ -2301,7 +2305,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
2301 * and tiling format. 2305 * and tiling format.
2302 */ 2306 */
2303int 2307int
2304i915_gem_object_get_fence_reg(struct drm_gem_object *obj) 2308i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2309 bool interruptible)
2305{ 2310{
2306 struct drm_device *dev = obj->dev; 2311 struct drm_device *dev = obj->dev;
2307 struct drm_i915_private *dev_priv = dev->dev_private; 2312 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2336,7 +2341,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2336 break; 2341 break;
2337 } 2342 }
2338 2343
2339 ret = i915_find_fence_reg(dev); 2344 ret = i915_find_fence_reg(dev, interruptible);
2340 if (ret < 0) 2345 if (ret < 0)
2341 return ret; 2346 return ret;
2342 2347
@@ -2403,12 +2408,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2403 * i915_gem_object_put_fence_reg - waits on outstanding fenced access 2408 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2404 * to the buffer to finish, and then resets the fence register. 2409 * to the buffer to finish, and then resets the fence register.
2405 * @obj: tiled object holding a fence register. 2410 * @obj: tiled object holding a fence register.
2411 * @interruptible: whether the wait upon the fence is interruptible
2406 * 2412 *
2407 * Zeroes out the fence register itself and clears out the associated 2413 * Zeroes out the fence register itself and clears out the associated
2408 * data structures in dev_priv and obj_priv. 2414 * data structures in dev_priv and obj_priv.
2409 */ 2415 */
2410int 2416int
2411i915_gem_object_put_fence_reg(struct drm_gem_object *obj) 2417i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2418 bool interruptible)
2412{ 2419{
2413 struct drm_device *dev = obj->dev; 2420 struct drm_device *dev = obj->dev;
2414 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2421 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -2429,11 +2436,11 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2429 if (!IS_I965G(dev)) { 2436 if (!IS_I965G(dev)) {
2430 int ret; 2437 int ret;
2431 2438
2432 ret = i915_gem_object_flush_gpu_write_domain(obj, false); 2439 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2433 if (ret) 2440 if (ret)
2434 return ret; 2441 return ret;
2435 2442
2436 ret = i915_gem_object_wait_rendering(obj); 2443 ret = i915_gem_object_wait_rendering(obj, interruptible);
2437 if (ret) 2444 if (ret)
2438 return ret; 2445 return ret;
2439 } 2446 }
@@ -2606,7 +2613,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2606 if (pipelined) 2613 if (pipelined)
2607 return 0; 2614 return 0;
2608 2615
2609 return i915_gem_object_wait_rendering(obj); 2616 return i915_gem_object_wait_rendering(obj, true);
2610} 2617}
2611 2618
2612/** Flushes the GTT write domain for the object if it's dirty. */ 2619/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2674,7 +2681,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2674 i915_gem_object_flush_cpu_write_domain(obj); 2681 i915_gem_object_flush_cpu_write_domain(obj);
2675 2682
2676 if (write) { 2683 if (write) {
2677 ret = i915_gem_object_wait_rendering(obj); 2684 ret = i915_gem_object_wait_rendering(obj, true);
2678 if (ret) 2685 if (ret)
2679 return ret; 2686 return ret;
2680 } 2687 }
@@ -2756,7 +2763,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2756 i915_gem_object_set_to_full_cpu_read_domain(obj); 2763 i915_gem_object_set_to_full_cpu_read_domain(obj);
2757 2764
2758 if (write) { 2765 if (write) {
2759 ret = i915_gem_object_wait_rendering(obj); 2766 ret = i915_gem_object_wait_rendering(obj, true);
2760 if (ret) 2767 if (ret)
2761 return ret; 2768 return ret;
2762 } 2769 }
@@ -3125,7 +3132,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3125 * properly handle blits to/from tiled surfaces. 3132 * properly handle blits to/from tiled surfaces.
3126 */ 3133 */
3127 if (need_fence) { 3134 if (need_fence) {
3128 ret = i915_gem_object_get_fence_reg(obj); 3135 ret = i915_gem_object_get_fence_reg(obj, false);
3129 if (ret != 0) { 3136 if (ret != 0) {
3130 i915_gem_object_unpin(obj); 3137 i915_gem_object_unpin(obj);
3131 return ret; 3138 return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 3c0859edfdf7..caef7ff2aa39 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -328,7 +328,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
328 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 328 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
329 ret = i915_gem_object_unbind(obj); 329 ret = i915_gem_object_unbind(obj);
330 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 330 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
331 ret = i915_gem_object_put_fence_reg(obj); 331 ret = i915_gem_object_put_fence_reg(obj, true);
332 else 332 else
333 i915_gem_release_mmap(obj); 333 i915_gem_release_mmap(obj);
334 334
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a7628fdd0c4c..11d643acf2fa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1461,7 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1461 */ 1461 */
1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1463 obj_priv->tiling_mode != I915_TILING_NONE) { 1463 obj_priv->tiling_mode != I915_TILING_NONE) {
1464 ret = i915_gem_object_get_fence_reg(obj); 1464 ret = i915_gem_object_get_fence_reg(obj, false);
1465 if (ret) 1465 if (ret)
1466 goto err_unpin; 1466 goto err_unpin;
1467 } 1467 }