Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 191
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |  29
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c         |   4
7 files changed, 242 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17e90b371dbe..aa849f2a0d9f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1330,6 +1330,17 @@ struct intel_pipe_crc {
 	wait_queue_head_t wq;
 };
 
+struct i915_frontbuffer_tracking {
+	struct mutex lock;
+
+	/*
+	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+	 * scheduled flips.
+	 */
+	unsigned busy_bits;
+	unsigned flip_bits;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1476,6 +1487,9 @@ struct drm_i915_private {
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
+
+	struct i915_frontbuffer_tracking fb_tracking;
+
 	u16 orig_clock;
 
 	bool mchbar_need_disable;
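Note: the busy_bits/flip_bits fields introduced above accumulate per-plane frontbuffer bits, grouped per pipe, matching the INTEL_FRONTBUFFER_*() masks used throughout this patch. As a reading aid, here is a standalone C sketch of such a bit layout; the 4-bits-per-pipe packing and the FB_* names are illustrative assumptions, not the driver's actual macro definitions (those live elsewhere in i915_drv.h).

/* Illustrative model of a per-pipe frontbuffer bit layout; not a copy
 * of the real INTEL_FRONTBUFFER_* macros. */
#include <assert.h>

#define FB_BITS_PER_PIPE 4
#define FB_PRIMARY(pipe)  (1u << (FB_BITS_PER_PIPE * (pipe)))
#define FB_CURSOR(pipe)   (1u << (FB_BITS_PER_PIPE * (pipe) + 1))
#define FB_SPRITE(pipe)   (1u << (FB_BITS_PER_PIPE * (pipe) + 2))
#define FB_OVERLAY(pipe)  (1u << (FB_BITS_PER_PIPE * (pipe) + 3))
#define FB_ALL_MASK(pipe) (0xfu << (FB_BITS_PER_PIPE * (pipe)))

int main(void)
{
	/* Each plane bit of a pipe is covered by that pipe's mask... */
	assert(FB_PRIMARY(1) & FB_ALL_MASK(1));
	assert(FB_OVERLAY(1) & FB_ALL_MASK(1));
	/* ...and different pipes never alias each other's bits. */
	assert(!(FB_ALL_MASK(0) & FB_ALL_MASK(1)));
	return 0;
}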
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index caed6621d71a..f6d123828926 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1395,8 +1395,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
-	intel_edp_psr_exit(dev);
-
 	/* Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
@@ -1442,8 +1440,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	intel_edp_psr_exit(dev);
-
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
@@ -2223,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 		list_move_tail(&vma->mm_list, &vm->inactive_list);
 	}
 
+	intel_fb_obj_flush(obj, true);
+
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
@@ -3552,6 +3550,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
+	intel_fb_obj_flush(obj, false);
+
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
@@ -3573,6 +3573,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
+	intel_fb_obj_flush(obj, false);
+
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
@@ -3626,6 +3628,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->dirty = 1;
 	}
 
+	if (write)
+		intel_fb_obj_invalidate(obj, NULL);
+
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -3962,6 +3967,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	if (write)
+		intel_fb_obj_invalidate(obj, NULL);
+
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4236,8 +4244,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	intel_edp_psr_exit(dev);
-
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
@@ -4937,6 +4943,8 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+	mutex_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
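The i915_gem.c hooks above all follow one contract: every path that starts rendering to a potential scanout object calls intel_fb_obj_invalidate(), and every path that makes the results visible again calls intel_fb_obj_flush(). A standalone toy model of the synchronous (ring == NULL) half of that contract — the names mirror the patch, but this is a simplified sketch, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* One tracked object, CPU rendering only, so invalidate and flush are
 * both immediate (no busy_bits bookkeeping needed). */
struct obj { unsigned frontbuffer_bits; bool cpu_write_domain; };

static void fb_obj_invalidate(struct obj *o)
{
	if (o->frontbuffer_bits)
		printf("invalidate 0x%x: stop frontbuffer caching (fbc/psr/drrs)\n",
		       o->frontbuffer_bits);
}

static void fb_obj_flush(struct obj *o)
{
	if (o->frontbuffer_bits)
		printf("flush 0x%x: contents coherent, caching may resume\n",
		       o->frontbuffer_bits);
}

/* Mirrors the i915_gem_object_set_to_cpu_domain(obj, write) hook: */
static void set_to_cpu_domain(struct obj *o, bool write)
{
	if (write) {
		o->cpu_write_domain = true;
		fb_obj_invalidate(o);	/* rendering is about to start */
	}
}

/* Mirrors the i915_gem_object_flush_cpu_write_domain() hook: */
static void flush_cpu_write_domain(struct obj *o)
{
	if (!o->cpu_write_domain)
		return;
	o->cpu_write_domain = false;
	fb_obj_flush(o);		/* results are now visible */
}

int main(void)
{
	struct obj scanout = { .frontbuffer_bits = 0x1 };

	set_to_cpu_domain(&scanout, true);
	flush_cpu_write_domain(&scanout);
	return 0;
}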
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 93d7f7246588..d815ef51a5ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
-			/* check for potential scanout */
-			if (i915_gem_obj_ggtt_bound(obj) &&
-			    i915_gem_obj_to_ggtt(obj)->pin_count)
-				intel_mark_fb_busy(obj, ring);
+
+			intel_fb_obj_invalidate(obj, ring);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
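When a batch writes the object the invalidate carries a ring, so the flush is deferred: the plane bits are latched into busy_bits and only released by the retire-time intel_fb_obj_flush(obj, true) added to i915_gem_object_move_to_inactive() above. A compact standalone model of that handshake, with the same illustrative caveats as the previous sketch:

#include <assert.h>

static unsigned busy_bits;	/* models dev_priv->fb_tracking.busy_bits */

static void invalidate_async(unsigned bits)	/* execbuffer: ring != NULL */
{
	busy_bits |= bits;
}

static unsigned flush_on_retire(unsigned obj_bits)
{
	/* Mirrors "Filter out new bits since rendering started": only
	 * bits actually marked busy are flushed and released. */
	unsigned flush = obj_bits & busy_bits;

	busy_bits &= ~flush;
	return flush;
}

int main(void)
{
	invalidate_async(0x3);			/* batch writes two planes */
	assert(flush_on_retire(0x3) == 0x3);	/* retire flushes both */
	assert(flush_on_retire(0x3) == 0);	/* nothing left to flush */
	return 0;
}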
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9a3fa90660f4..ff6336793826 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2756,6 +2756,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
+	if (intel_crtc->active)
+		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	crtc->primary->fb = fb;
 	crtc->x = x;
 	crtc->y = y;
@@ -3950,6 +3953,13 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip from a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3972,6 +3982,13 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_disable_planes(crtc);
 	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
 
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip to a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
 	drm_vblank_off(dev, pipe);
 }
 
@@ -8212,6 +8229,8 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 	}
 
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
 	return 0;
 fail_unpin:
 	i915_gem_object_unpin_from_display_plane(obj);
@@ -8827,20 +8846,26 @@ out:
 }
 
 
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			struct intel_engine_cs *ring)
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+			       unsigned frontbuffer_bits,
+			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
 	enum pipe pipe;
 
-	intel_edp_psr_exit(dev);
-
 	if (!i915.powersave)
 		return;
 
 	for_each_pipe(pipe) {
-		if (!(obj->frontbuffer_bits &
-		      INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
 			continue;
 
 		intel_increase_pllclock(dev, pipe);
@@ -8849,6 +8874,150 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
 	}
 }
 
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be
+ * delayed until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	if (ring) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits
+			|= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits
+			&= ~obj->frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+	intel_edp_psr_exit(dev);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+			     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Delay flushing when rings are still busy. */
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+	intel_edp_psr_exit(dev);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			bool retire)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned frontbuffer_bits;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	frontbuffer_bits = obj->frontbuffer_bits;
+
+	if (retire) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		/* Filter out new bits since rendering started. */
+		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	dev_priv->fb_tracking.flip_bits
+		|= frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Mask any cancelled flips. */
+	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8876,6 +9045,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
 	struct drm_device *dev = work->crtc->dev;
+	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
@@ -8885,6 +9055,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
 	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
 
@@ -9441,9 +9613,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	/* Exit PSR early in page flip */
-	intel_edp_psr_exit(dev);
-
 	work->event = event;
 	work->crtc = crtc;
 	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
@@ -9519,7 +9688,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 
 	intel_disable_fbc(dev);
-	intel_mark_fb_busy(obj, NULL);
+	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
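Taken together, the display-side changes arm flip_bits when a flip is queued (intel_frontbuffer_flip_prepare), flush on the following vblank unless an invalidate cancelled the bit in between (intel_frontbuffer_flip_complete), and gate every flush on busy_bits. A standalone model of that state machine, under the same illustrative assumptions as the earlier sketches:

#include <assert.h>

static unsigned busy_bits, flip_bits, flushed_bits;

static void frontbuffer_flush(unsigned bits)
{
	bits &= ~busy_bits;	/* delay while async rendering is outstanding */
	flushed_bits |= bits;	/* stands in for the real flush side effects */
}

static void flip_prepare(unsigned bits)
{
	flip_bits |= bits;	/* arm the deferred flush */
}

static void obj_invalidate(unsigned bits)	/* new rendering queued */
{
	busy_bits |= bits;
	flip_bits &= ~bits;	/* cancels any pending flip flush */
}

static void flip_complete(unsigned bits)	/* vblank after the flip */
{
	bits &= flip_bits;	/* mask out cancelled flips */
	flip_bits &= ~bits;
	frontbuffer_flush(bits);
}

int main(void)
{
	flip_prepare(0x1);		/* flip queued on PRIMARY(0) */
	flip_complete(0x1);		/* vblank: flush goes through */
	assert(flushed_bits == 0x1);

	flushed_bits = 0;
	flip_prepare(0x1);
	obj_invalidate(0x1);		/* fresh rendering cancels the flip's flush */
	flip_complete(0x1);
	assert(flushed_bits == 0);	/* nothing flushed until retire */
	return 0;
}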
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5d20f719309a..bd0d10eeaf44 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -724,8 +724,33 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+			     unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. This
+ * is for synchronous plane updates which will happen on the next vblank and
+ * which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 99b6c142a095..307c2f1842b7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -787,6 +787,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
+	intel_frontbuffer_flip(dev,
+			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+
 	return 0;
 
 out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 140bd8359f0e..0e3fd5c59e28 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1034,6 +1034,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	else
 		intel_plane->disable_plane(plane, crtc);
 
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
 	if (!primary_was_enabled && primary_enabled)
 		intel_post_enable_primary(crtc);
 }
@@ -1054,8 +1056,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		mutex_unlock(&dev->struct_mutex);
 	}
 
-	intel_edp_psr_exit(dev);
-
 	return 0;
 }
 