about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/intel_display.c
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2010-09-14 07:50:34 -0400
committer: Chris Wilson <chris@chris-wilson.co.uk> 2010-09-14 16:08:35 -0400
commit: 48b956c5a89c7b100ef3b818b6ccf759ab695383 (patch)
tree: f614911615f62e32b57e41aff01450f991bc0c56 /drivers/gpu/drm/i915/intel_display.c
parent: 9e76e7b8bd716413cfd722a807aa22723f3a895f (diff)
drm/i915: Push pipelining of display plane flushes to the caller
This ensures that we do wait upon the flushes to complete if necessary and avoid the visual tears, whilst enabling pipelined page-flips.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 56
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 810ed2dca4c7..a7628fdd0c4c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1417,7 +1417,9 @@ out_disable:
1417} 1417}
1418 1418
1419int 1419int
1420intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1420intel_pin_and_fence_fb_obj(struct drm_device *dev,
1421 struct drm_gem_object *obj,
1422 bool pipelined)
1421{ 1423{
1422 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1424 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1423 u32 alignment; 1425 u32 alignment;
@@ -1445,14 +1447,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1445 } 1447 }
1446 1448
1447 ret = i915_gem_object_pin(obj, alignment); 1449 ret = i915_gem_object_pin(obj, alignment);
1448 if (ret != 0) 1450 if (ret)
1449 return ret; 1451 return ret;
1450 1452
1451 ret = i915_gem_object_set_to_display_plane(obj); 1453 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1452 if (ret != 0) { 1454 if (ret)
1453 i915_gem_object_unpin(obj); 1455 goto err_unpin;
1454 return ret;
1455 }
1456 1456
1457 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1457 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1458 * fence, whereas 965+ only requires a fence if using 1458 * fence, whereas 965+ only requires a fence if using
@@ -1462,13 +1462,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1463 obj_priv->tiling_mode != I915_TILING_NONE) { 1463 obj_priv->tiling_mode != I915_TILING_NONE) {
1464 ret = i915_gem_object_get_fence_reg(obj); 1464 ret = i915_gem_object_get_fence_reg(obj);
1465 if (ret != 0) { 1465 if (ret)
1466 i915_gem_object_unpin(obj); 1466 goto err_unpin;
1467 return ret;
1468 }
1469 } 1467 }
1470 1468
1471 return 0; 1469 return 0;
1470
1471err_unpin:
1472 i915_gem_object_unpin(obj);
1473 return ret;
1472} 1474}
1473 1475
1474/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1476/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -1589,7 +1591,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1589 obj_priv = to_intel_bo(obj); 1591 obj_priv = to_intel_bo(obj);
1590 1592
1591 mutex_lock(&dev->struct_mutex); 1593 mutex_lock(&dev->struct_mutex);
1592 ret = intel_pin_and_fence_fb_obj(dev, obj); 1594 ret = intel_pin_and_fence_fb_obj(dev, obj, false);
1593 if (ret != 0) { 1595 if (ret != 0) {
1594 mutex_unlock(&dev->struct_mutex); 1596 mutex_unlock(&dev->struct_mutex);
1595 return ret; 1597 return ret;
@@ -5004,7 +5006,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5004 struct intel_unpin_work *work; 5006 struct intel_unpin_work *work;
5005 unsigned long flags, offset; 5007 unsigned long flags, offset;
5006 int pipe = intel_crtc->pipe; 5008 int pipe = intel_crtc->pipe;
5007 u32 pf, pipesrc; 5009 u32 was_dirty, pf, pipesrc;
5008 int ret; 5010 int ret;
5009 5011
5010 work = kzalloc(sizeof *work, GFP_KERNEL); 5012 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -5033,7 +5035,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5033 obj = intel_fb->obj; 5035 obj = intel_fb->obj;
5034 5036
5035 mutex_lock(&dev->struct_mutex); 5037 mutex_lock(&dev->struct_mutex);
5036 ret = intel_pin_and_fence_fb_obj(dev, obj); 5038 was_dirty = obj->write_domain & I915_GEM_GPU_DOMAINS;
5039 ret = intel_pin_and_fence_fb_obj(dev, obj, true);
5037 if (ret) 5040 if (ret)
5038 goto cleanup_work; 5041 goto cleanup_work;
5039 5042
@@ -5051,17 +5054,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5051 atomic_inc(&obj_priv->pending_flip); 5054 atomic_inc(&obj_priv->pending_flip);
5052 work->pending_flip_obj = obj; 5055 work->pending_flip_obj = obj;
5053 5056
5054 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5057 if (was_dirty || IS_GEN3(dev) || IS_GEN2(dev)) {
5055 u32 flip_mask; 5058 BEGIN_LP_RING(2);
5059 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5060 u32 flip_mask;
5056 5061
5057 if (intel_crtc->plane) 5062 /* Can't queue multiple flips, so wait for the previous
5058 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5063 * one to finish before executing the next.
5059 else 5064 */
5060 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5061 5065
5062 BEGIN_LP_RING(2); 5066 if (intel_crtc->plane)
5063 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5067 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5064 OUT_RING(0); 5068 else
5069 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5070
5071 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5072 } else
5073 OUT_RING(MI_NOOP);
5074 OUT_RING(MI_FLUSH);
5065 ADVANCE_LP_RING(); 5075 ADVANCE_LP_RING();
5066 } 5076 }
5067 5077