about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2010-09-14 07:50:34 -0400
committer: Chris Wilson <chris@chris-wilson.co.uk> 2010-09-14 16:08:35 -0400
commit: 48b956c5a89c7b100ef3b818b6ccf759ab695383 (patch)
tree: f614911615f62e32b57e41aff01450f991bc0c56 /drivers/gpu/drm
parent: 9e76e7b8bd716413cfd722a807aa22723f3a895f (diff)
drm/i915: Push pipelining of display plane flushes to the caller
This ensures that we do wait upon the flushes to complete if necessary and avoid the visual tears, whilst enabling pipelined page-flips.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/intel_display.c56
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
5 files changed, 43 insertions, 29 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24b7796c33af..b97d62d81905 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1013,7 +1013,8 @@ void i915_gem_process_flushing_list(struct drm_device *dev,
1013 struct intel_ring_buffer *ring); 1013 struct intel_ring_buffer *ring);
1014int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 1014int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
1015 int write); 1015 int write);
1016int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); 1016int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
1017 bool pipelined);
1017int i915_gem_attach_phys_object(struct drm_device *dev, 1018int i915_gem_attach_phys_object(struct drm_device *dev,
1018 struct drm_gem_object *obj, 1019 struct drm_gem_object *obj,
1019 int id, 1020 int id,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a0d85c78d47..85a3cf4ab481 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2597,6 +2597,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2597 /* Queue the GPU write cache flushing we need. */ 2597 /* Queue the GPU write cache flushing we need. */
2598 old_write_domain = obj->write_domain; 2598 old_write_domain = obj->write_domain;
2599 i915_gem_flush(dev, 0, obj->write_domain); 2599 i915_gem_flush(dev, 0, obj->write_domain);
2600 BUG_ON(obj->write_domain);
2600 2601
2601 trace_i915_gem_object_change_domain(obj, 2602 trace_i915_gem_object_change_domain(obj,
2602 obj->read_domains, 2603 obj->read_domains,
@@ -2704,7 +2705,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2704 * wait, as in modesetting process we're not supposed to be interrupted. 2705 * wait, as in modesetting process we're not supposed to be interrupted.
2705 */ 2706 */
2706int 2707int
2707i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) 2708i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2709 bool pipelined)
2708{ 2710{
2709 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2711 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2710 uint32_t old_read_domains; 2712 uint32_t old_read_domains;
@@ -2714,8 +2716,8 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2714 if (obj_priv->gtt_space == NULL) 2716 if (obj_priv->gtt_space == NULL)
2715 return -EINVAL; 2717 return -EINVAL;
2716 2718
2717 ret = i915_gem_object_flush_gpu_write_domain(obj, true); 2719 ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
2718 if (ret != 0) 2720 if (ret)
2719 return ret; 2721 return ret;
2720 2722
2721 i915_gem_object_flush_cpu_write_domain(obj); 2723 i915_gem_object_flush_cpu_write_domain(obj);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 810ed2dca4c7..a7628fdd0c4c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1417,7 +1417,9 @@ out_disable:
1417} 1417}
1418 1418
1419int 1419int
1420intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1420intel_pin_and_fence_fb_obj(struct drm_device *dev,
1421 struct drm_gem_object *obj,
1422 bool pipelined)
1421{ 1423{
1422 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1424 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1423 u32 alignment; 1425 u32 alignment;
@@ -1445,14 +1447,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1445 } 1447 }
1446 1448
1447 ret = i915_gem_object_pin(obj, alignment); 1449 ret = i915_gem_object_pin(obj, alignment);
1448 if (ret != 0) 1450 if (ret)
1449 return ret; 1451 return ret;
1450 1452
1451 ret = i915_gem_object_set_to_display_plane(obj); 1453 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1452 if (ret != 0) { 1454 if (ret)
1453 i915_gem_object_unpin(obj); 1455 goto err_unpin;
1454 return ret;
1455 }
1456 1456
1457 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1457 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1458 * fence, whereas 965+ only requires a fence if using 1458 * fence, whereas 965+ only requires a fence if using
@@ -1462,13 +1462,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1462 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1463 obj_priv->tiling_mode != I915_TILING_NONE) { 1463 obj_priv->tiling_mode != I915_TILING_NONE) {
1464 ret = i915_gem_object_get_fence_reg(obj); 1464 ret = i915_gem_object_get_fence_reg(obj);
1465 if (ret != 0) { 1465 if (ret)
1466 i915_gem_object_unpin(obj); 1466 goto err_unpin;
1467 return ret;
1468 }
1469 } 1467 }
1470 1468
1471 return 0; 1469 return 0;
1470
1471err_unpin:
1472 i915_gem_object_unpin(obj);
1473 return ret;
1472} 1474}
1473 1475
1474/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1476/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -1589,7 +1591,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1589 obj_priv = to_intel_bo(obj); 1591 obj_priv = to_intel_bo(obj);
1590 1592
1591 mutex_lock(&dev->struct_mutex); 1593 mutex_lock(&dev->struct_mutex);
1592 ret = intel_pin_and_fence_fb_obj(dev, obj); 1594 ret = intel_pin_and_fence_fb_obj(dev, obj, false);
1593 if (ret != 0) { 1595 if (ret != 0) {
1594 mutex_unlock(&dev->struct_mutex); 1596 mutex_unlock(&dev->struct_mutex);
1595 return ret; 1597 return ret;
@@ -5004,7 +5006,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5004 struct intel_unpin_work *work; 5006 struct intel_unpin_work *work;
5005 unsigned long flags, offset; 5007 unsigned long flags, offset;
5006 int pipe = intel_crtc->pipe; 5008 int pipe = intel_crtc->pipe;
5007 u32 pf, pipesrc; 5009 u32 was_dirty, pf, pipesrc;
5008 int ret; 5010 int ret;
5009 5011
5010 work = kzalloc(sizeof *work, GFP_KERNEL); 5012 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -5033,7 +5035,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5033 obj = intel_fb->obj; 5035 obj = intel_fb->obj;
5034 5036
5035 mutex_lock(&dev->struct_mutex); 5037 mutex_lock(&dev->struct_mutex);
5036 ret = intel_pin_and_fence_fb_obj(dev, obj); 5038 was_dirty = obj->write_domain & I915_GEM_GPU_DOMAINS;
5039 ret = intel_pin_and_fence_fb_obj(dev, obj, true);
5037 if (ret) 5040 if (ret)
5038 goto cleanup_work; 5041 goto cleanup_work;
5039 5042
@@ -5051,17 +5054,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5051 atomic_inc(&obj_priv->pending_flip); 5054 atomic_inc(&obj_priv->pending_flip);
5052 work->pending_flip_obj = obj; 5055 work->pending_flip_obj = obj;
5053 5056
5054 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5057 if (was_dirty || IS_GEN3(dev) || IS_GEN2(dev)) {
5055 u32 flip_mask; 5058 BEGIN_LP_RING(2);
5059 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5060 u32 flip_mask;
5056 5061
5057 if (intel_crtc->plane) 5062 /* Can't queue multiple flips, so wait for the previous
5058 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5063 * one to finish before executing the next.
5059 else 5064 */
5060 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5061 5065
5062 BEGIN_LP_RING(2); 5066 if (intel_crtc->plane)
5063 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5067 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5064 OUT_RING(0); 5068 else
5069 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5070
5071 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5072 } else
5073 OUT_RING(MI_NOOP);
5074 OUT_RING(MI_FLUSH);
5065 ADVANCE_LP_RING(); 5075 ADVANCE_LP_RING();
5066 } 5076 }
5067 5077
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5171b0523178..31f072d31e37 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -281,7 +281,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
281extern void ironlake_disable_drps(struct drm_device *dev); 281extern void ironlake_disable_drps(struct drm_device *dev);
282 282
283extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 283extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
284 struct drm_gem_object *obj); 284 struct drm_gem_object *obj,
285 bool pipelined);
285 286
286extern int intel_framebuffer_init(struct drm_device *dev, 287extern int intel_framebuffer_init(struct drm_device *dev,
287 struct intel_framebuffer *ifb, 288 struct intel_framebuffer *ifb,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e2d13e394a0d..8a23bf772c95 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
94 mutex_lock(&dev->struct_mutex); 94 mutex_lock(&dev->struct_mutex);
95 95
96 /* Flush everything out, we'll be doing GTT only from now on */ 96 /* Flush everything out, we'll be doing GTT only from now on */
97 ret = intel_pin_and_fence_fb_obj(dev, fbo); 97 ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
98 if (ret) { 98 if (ret) {
99 DRM_ERROR("failed to pin fb: %d\n", ret); 99 DRM_ERROR("failed to pin fb: %d\n", ret);
100 goto out_unref; 100 goto out_unref;