author    Daniel Vetter <daniel.vetter@ffwll.ch>    2016-05-24 11:13:53 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2016-05-25 03:33:04 -0400
commit    5a21b6650a239ebc020912968a44047701104159 (patch)
tree      6628986a1cf41af70578724db5fba7baad3f81d9
parent    15da95656dc44538fa71e3c00fc65db05b4a4788 (diff)
drm/i915: Revert async unpin and nonblocking atomic commit
This reverts the following patches:

d55dbd06bb5e1399aba9ab5227465339d1bbefff drm/i915: Allow nonblocking update of pageflips.
15c86bdb760185e871c7a0f559978328aa500971 drm/i915: Check for unpin correctness.
95c2ccdc82d520f59ae3b6fdc097b63c9b7082bb Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
a6747b7304a9d66758a196d885dab8bbfa5e7d1f drm/i915: Make unpin async.
03f476e1fcb42fca88fc50b94b0d3adbdbe887f0 drm/i915: Prepare connectors for nonblocking checks.
2099deffef4404f949ba1b68d2b17e0608190bc2 drm/i915: Pass atomic states to fbc update functions.
ee7171af72c39c18b7d7571419a4ac6ca30aea66 drm/i915: Remove reset_counter from intel_crtc.
2ee004f7c59b2e642f0bb2834f847d756f2dd7b7 drm/i915: Remove queue_flip pointer.
b8d2afae557dbb9b9c7bc6f6ec4f5278f3c4c34e drm/i915: Remove use_mmio_flip kernel parameter.
8dd634d922615ec3a9af7976029110ec037f8b50 drm/i915: Remove cs based page flip support.
143f73b3bf48c089b40f58462dd7f7c199fd4f0f drm/i915: Rework intel_crtc_page_flip to be almost atomic, v3.
84fc494b64e8c591be446a966b7447a9db519c88 drm/i915: Add the exclusive fence to plane_state.
6885843ae164e11f6c802209d06921e678a3f3f3 drm/i915: Convert flip_work to a list.
aa420ddd8eeaa5df579894a412289e4d07c2fee9 drm/i915: Allow mmio updates on all platforms, v2.
afee4d8707ab1f21b7668de995be3a5961e83582 Revert "drm/i915: Avoid stalling on pending flips for legacy cursor updates"

"drm/i915: Allow nonblocking update of pageflips" should have been
split up, misses a proper commit message and seems to cause issues in
the legacy page_flip path as demonstrated by kms_flip.

"drm/i915: Make unpin async" doesn't handle the unthrottled cursor
updates correctly, leading to an apparent pin count leak. This is
caught by the WARN_ON in i915_gem_object_do_pin which screams if we
have more than DRM_I915_GEM_OBJECT_MAX_PIN_COUNT pins.

Unfortunately we can't just revert these two, because this patch
series came with a built-in bisect breakage in the form of temporarily
removing the unthrottled cursor update hack for the legacy cursor
ioctl. Therefore there's no other option than to revert the entire
pile :(

There's one tiny conflict in intel_drv.h due to other patches, nothing
serious.

Normally I'd wait a bit longer with doing a maintainer revert, but
since the minimal set of patches we need to revert (due to the bisect
breakage) is so big, time is running out fast. And very soon
(especially after a few attempts at fixing issues) it'll be really
hard to revert things cleanly.

Lessons learned:
- Not a good idea to rush the review (done by someone fairly new to
  the area) and not make sure domain experts had a chance to read it.
- Patches should be properly split up. I only looked at the two
  patches that should be reverted in detail, but both look like they
  mix up different things in one patch.
- Patches really should have proper commit messages. Especially when
  doing more than one thing, and especially when touching critical and
  tricky core code.
- Building a patch series and r-b stamping it when it has a built-in
  bisect breakage is not a good idea.
- I also think we need to stop building up technical debt by
  postponing atomic igt testcases even longer. I think it's clear that
  there's enough corner cases in this beast that we really need to
  have the testcases _before_ the next step lands.
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Jani Nikula <jani.nikula@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
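[Editor's note: for context on the pin leak named above, a minimal
sketch of the style of saturation check the commit message refers to
in i915_gem_object_do_pin; the field and constant names follow the
commit message, the exact code in i915_gem.c of that era may differ:]

        /* Hypothetical sketch: refuse further pins once the small
         * per-VMA counter would saturate, and WARN so a leaked unpin
         * (as with the reverted async-unpin patches) is caught.
         */
        if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;

        vma->pin_count++;

[Every pin must be balanced by an unpin of the same counter; the
reverted rework missed that for unthrottled cursor updates, so
repeated cursor ioctls ratcheted the count up to the limit.]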
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |   89
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  120
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c         |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h         |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c        |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c  |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 1590
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |   43
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c           |   39
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c           |    4
11 files changed, 1283 insertions(+), 625 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b29ba16c90b3..ac7e5692496d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -621,52 +621,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
         return 0;
 }
 
-static void i915_dump_pageflip(struct seq_file *m,
-                               struct drm_i915_private *dev_priv,
-                               struct intel_crtc *crtc,
-                               struct intel_flip_work *work)
-{
-        const char pipe = pipe_name(crtc->pipe);
-        u32 pending;
-        int i;
-
-        pending = atomic_read(&work->pending);
-        if (pending) {
-                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
-                           pipe, plane_name(crtc->plane));
-        } else {
-                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
-                           pipe, plane_name(crtc->plane));
-        }
-
-        for (i = 0; i < work->num_planes; i++) {
-                struct intel_plane_state *old_plane_state = work->old_plane_state[i];
-                struct drm_plane *plane = old_plane_state->base.plane;
-                struct drm_i915_gem_request *req = old_plane_state->wait_req;
-                struct intel_engine_cs *engine;
-
-                seq_printf(m, "[PLANE:%i] part of flip.\n", plane->base.id);
-
-                if (!req) {
-                        seq_printf(m, "Plane not associated with any engine\n");
-                        continue;
-                }
-
-                engine = i915_gem_request_get_engine(req);
-
-                seq_printf(m, "Plane blocked on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-                           engine->name,
-                           i915_gem_request_get_seqno(req),
-                           dev_priv->next_seqno,
-                           engine->get_seqno(engine),
-                           i915_gem_request_completed(req, true));
-        }
-
-        seq_printf(m, "Flip queued on frame %d, now %d\n",
-                   pending ? work->flip_queued_vblank : -1,
-                   intel_crtc_get_vblank_counter(crtc));
-}
-
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
         struct drm_info_node *node = m->private;
@@ -685,13 +639,48 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                 struct intel_flip_work *work;
 
                 spin_lock_irq(&dev->event_lock);
-                if (list_empty(&crtc->flip_work)) {
+                work = crtc->flip_work;
+                if (work == NULL) {
                         seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                    pipe, plane);
                 } else {
-                        list_for_each_entry(work, &crtc->flip_work, head) {
-                                i915_dump_pageflip(m, dev_priv, crtc, work);
-                                seq_puts(m, "\n");
+                        u32 pending;
+                        u32 addr;
+
+                        pending = atomic_read(&work->pending);
+                        if (pending) {
+                                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
+                                           pipe, plane);
+                        } else {
+                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+                                           pipe, plane);
+                        }
+                        if (work->flip_queued_req) {
+                                struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+
+                                seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+                                           engine->name,
+                                           i915_gem_request_get_seqno(work->flip_queued_req),
+                                           dev_priv->next_seqno,
+                                           engine->get_seqno(engine),
+                                           i915_gem_request_completed(work->flip_queued_req, true));
+                        } else
+                                seq_printf(m, "Flip not associated with any ring\n");
+                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
+                                   work->flip_queued_vblank,
+                                   work->flip_ready_vblank,
+                                   intel_crtc_get_vblank_counter(crtc));
+                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+                        if (INTEL_INFO(dev)->gen >= 4)
+                                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
+                        else
+                                addr = I915_READ(DSPADDR(crtc->plane));
+                        seq_printf(m, "Current scanout address 0x%08x\n", addr);
+
+                        if (work->pending_flip_obj) {
+                                seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
+                                seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
                         }
                 }
                 spin_unlock_irq(&dev->event_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 85a7c44ed55c..e4c8e341655c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -618,6 +618,11 @@ struct drm_i915_display_funcs {
         void (*audio_codec_disable)(struct intel_encoder *encoder);
         void (*fdi_link_train)(struct drm_crtc *crtc);
         void (*init_clock_gating)(struct drm_device *dev);
+        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+                          struct drm_framebuffer *fb,
+                          struct drm_i915_gem_object *obj,
+                          struct drm_i915_gem_request *req,
+                          uint32_t flags);
         void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
         /* clock updates for mode set */
         /* cursor updates */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fc2b2a7e2c55..caaf1e2a7bc1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,12 +136,6 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
         POSTING_READ(type##IIR); \
 } while (0)
 
-static void
-intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, unsigned pipe)
-{
-        DRM_DEBUG_KMS("Finished page flip\n");
-}
-
 /*
  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  */
@@ -1637,11 +1631,16 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
         }
 }
 
-static void intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
 {
-        if (drm_handle_vblank(dev_priv->dev, pipe))
+        bool ret;
+
+        ret = drm_handle_vblank(dev_priv->dev, pipe);
+        if (ret)
                 intel_finish_page_flip_mmio(dev_priv, pipe);
+
+        return ret;
 }
 
 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
@@ -1708,8 +1707,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
         enum pipe pipe;
 
         for_each_pipe(dev_priv, pipe) {
-                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-                        intel_pipe_handle_vblank(dev_priv, pipe);
+                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+                    intel_pipe_handle_vblank(dev_priv, pipe))
+                        intel_check_page_flip(dev_priv, pipe);
 
                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
                         intel_finish_page_flip_cs(dev_priv, pipe);
@@ -2155,8 +2155,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
                 DRM_ERROR("Poison interrupt\n");
 
         for_each_pipe(dev_priv, pipe) {
-                if (de_iir & DE_PIPE_VBLANK(pipe))
-                        intel_pipe_handle_vblank(dev_priv, pipe);
+                if (de_iir & DE_PIPE_VBLANK(pipe) &&
+                    intel_pipe_handle_vblank(dev_priv, pipe))
+                        intel_check_page_flip(dev_priv, pipe);
 
                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2205,8 +2206,9 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
                 intel_opregion_asle_intr(dev_priv);
 
         for_each_pipe(dev_priv, pipe) {
-                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
-                        intel_pipe_handle_vblank(dev_priv, pipe);
+                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
+                    intel_pipe_handle_vblank(dev_priv, pipe))
+                        intel_check_page_flip(dev_priv, pipe);
 
                 /* plane/pipes map 1:1 on ilk+ */
                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
@@ -2405,8 +2407,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                         ret = IRQ_HANDLED;
                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
-                        if (iir & GEN8_PIPE_VBLANK)
-                                intel_pipe_handle_vblank(dev_priv, pipe);
+                        if (iir & GEN8_PIPE_VBLANK &&
+                            intel_pipe_handle_vblank(dev_priv, pipe))
+                                intel_check_page_flip(dev_priv, pipe);
 
                         flip_done = iir;
                         if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -3972,6 +3975,37 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
         return 0;
 }
 
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
+                               int plane, int pipe, u32 iir)
+{
+        u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+        if (!intel_pipe_handle_vblank(dev_priv, pipe))
+                return false;
+
+        if ((iir & flip_pending) == 0)
+                goto check_page_flip;
+
+        /* We detect FlipDone by looking for the change in PendingFlip from '1'
+         * to '0' on the following vblank, i.e. IIR has the Pendingflip
+         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+         * the flip is completed (no longer pending). Since this doesn't raise
+         * an interrupt per se, we watch for the change at vblank.
+         */
+        if (I915_READ16(ISR) & flip_pending)
+                goto check_page_flip;
+
+        intel_finish_page_flip_cs(dev_priv, pipe);
+        return true;
+
+check_page_flip:
+        intel_check_page_flip(dev_priv, pipe);
+        return false;
+}
+
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
         struct drm_device *dev = arg;
@@ -4024,8 +4058,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 notify_ring(&dev_priv->engine[RCS]);
 
                 for_each_pipe(dev_priv, pipe) {
-                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-                                intel_pipe_handle_vblank(dev_priv, pipe);
+                        int plane = pipe;
+                        if (HAS_FBC(dev_priv))
+                                plane = !plane;
+
+                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+                            i8xx_handle_vblank(dev_priv, plane, pipe, iir))
+                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -4125,6 +4164,37 @@ static int i915_irq_postinstall(struct drm_device *dev)
         return 0;
 }
 
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
+                               int plane, int pipe, u32 iir)
+{
+        u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+        if (!intel_pipe_handle_vblank(dev_priv, pipe))
+                return false;
+
+        if ((iir & flip_pending) == 0)
+                goto check_page_flip;
+
+        /* We detect FlipDone by looking for the change in PendingFlip from '1'
+         * to '0' on the following vblank, i.e. IIR has the Pendingflip
+         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+         * the flip is completed (no longer pending). Since this doesn't raise
+         * an interrupt per se, we watch for the change at vblank.
+         */
+        if (I915_READ(ISR) & flip_pending)
+                goto check_page_flip;
+
+        intel_finish_page_flip_cs(dev_priv, pipe);
+        return true;
+
+check_page_flip:
+        intel_check_page_flip(dev_priv, pipe);
+        return false;
+}
+
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
         struct drm_device *dev = arg;
@@ -4185,8 +4255,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 notify_ring(&dev_priv->engine[RCS]);
 
                 for_each_pipe(dev_priv, pipe) {
-                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-                                intel_pipe_handle_vblank(dev_priv, pipe);
+                        int plane = pipe;
+                        if (HAS_FBC(dev_priv))
+                                plane = !plane;
+
+                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+                            i915_handle_vblank(dev_priv, plane, pipe, iir))
+                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                 blc_event = true;
@@ -4414,8 +4489,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                         notify_ring(&dev_priv->engine[VCS]);
 
                 for_each_pipe(dev_priv, pipe) {
-                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-                                intel_pipe_handle_vblank(dev_priv, pipe);
+                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+                            i915_handle_vblank(dev_priv, pipe, pipe, iir))
+                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                 blc_event = true;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 9a5d58b251f5..5e18cf9f754d 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -49,6 +49,7 @@ struct i915_params i915 __read_mostly = {
         .invert_brightness = 0,
         .disable_display = 0,
         .enable_cmd_parser = 1,
+        .use_mmio_flip = 0,
         .mmio_debug = 0,
         .verbose_state_checks = 1,
         .nuclear_pageflip = 0,
@@ -174,6 +175,10 @@ module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
                  "Enable command parsing (1=enabled [default], 0=disabled)");
 
+module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+                 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
 module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
 MODULE_PARM_DESC(mmio_debug,
         "Enable the MMIO debug code for the first N failures (default: off). "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 658ce7379671..1323261a0cdd 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -48,6 +48,7 @@ struct i915_params {
         int enable_guc_loading;
         int enable_guc_submission;
         int guc_log_level;
+        int use_mmio_flip;
         int mmio_debug;
         int edp_vswing;
         unsigned int inject_load_failure;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b4927f6bbeac..50ff90aea721 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -311,17 +311,6 @@ intel_atomic_state_alloc(struct drm_device *dev)
 void intel_atomic_state_clear(struct drm_atomic_state *s)
 {
         struct intel_atomic_state *state = to_intel_atomic_state(s);
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(state->work); i++) {
-                struct intel_flip_work *work = state->work[i];
-
-                if (work)
-                        intel_free_flip_work(work);
-
-                state->work[i] = NULL;
-        }
-
         drm_atomic_state_default_clear(&state->base);
         state->dpll_set = state->modeset = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 2ab45f16fa65..7de7721f65bc 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -102,7 +102,6 @@ intel_plane_destroy_state(struct drm_plane *plane,
                            struct drm_plane_state *state)
 {
         WARN_ON(state && to_intel_plane_state(state)->wait_req);
-        WARN_ON(state && state->fence);
         drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fd171fd2b255..46429e73e058 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -48,6 +48,11 @@
 #include <linux/reservation.h>
 #include <linux/dma-buf.h>
 
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+        return work->mmio_work.func;
+}
+
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
         DRM_FORMAT_C8,
@@ -108,6 +113,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
                             const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
                             const struct intel_crtc_state *pipe_config);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
                              struct intel_crtc_state *crtc_state);
 static void skylake_pfit_enable(struct intel_crtc *crtc);
@@ -116,9 +123,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 static int ilk_max_pixel_rate(struct drm_atomic_state *state);
-static void intel_modeset_verify_crtc(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state,
-                                      struct drm_crtc_state *new_state);
 static int broxton_calc_cdclk(int max_pixclk);
 
 struct intel_limit {
@@ -2524,6 +2528,20 @@ out_unref_obj:
         return false;
 }
 
+/* Update plane->state->fb to match plane->fb after driver-internal updates */
+static void
+update_state_fb(struct drm_plane *plane)
+{
+        if (plane->fb == plane->state->fb)
+                return;
+
+        if (plane->state->fb)
+                drm_framebuffer_unreference(plane->state->fb);
+        plane->state->fb = plane->fb;
+        if (plane->state->fb)
+                drm_framebuffer_reference(plane->state->fb);
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                              struct intel_initial_plane_config *plane_config)
@@ -3096,6 +3114,14 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
         return -ENODEV;
 }
 
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
+{
+        struct intel_crtc *crtc;
+
+        for_each_intel_crtc(dev_priv->dev, crtc)
+                intel_finish_page_flip_cs(dev_priv, crtc->pipe);
+}
+
 static void intel_update_primary_planes(struct drm_device *dev)
 {
         struct drm_crtc *crtc;
@@ -3136,6 +3162,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
+        /*
+         * Flips in the rings will be nuked by the reset,
+         * so complete all pending flips so that user space
+         * will get its events and not get stuck.
+         */
+        intel_complete_page_flips(dev_priv);
+
         /* no reset support for gen2 */
         if (IS_GEN2(dev_priv))
                 return;
@@ -3178,7 +3211,20 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
-        return !list_empty_careful(&to_intel_crtc(crtc)->flip_work);
+        struct drm_device *dev = crtc->dev;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        unsigned reset_counter;
+        bool pending;
+
+        reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
+        if (intel_crtc->reset_counter != reset_counter)
+                return false;
+
+        spin_lock_irq(&dev->event_lock);
+        pending = to_intel_crtc(crtc)->flip_work != NULL;
+        spin_unlock_irq(&dev->event_lock);
+
+        return pending;
 }
 
 static void intel_update_pipe_config(struct intel_crtc *crtc,
@@ -3754,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
                 if (atomic_read(&crtc->unpin_work_count) == 0)
                         continue;
 
-                if (!list_empty_careful(&crtc->flip_work))
+                if (crtc->flip_work)
                         intel_wait_for_vblank(dev, crtc->pipe);
 
                 return true;
@@ -3763,30 +3809,23 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
         return false;
 }
 
-static void page_flip_completed(struct intel_crtc *intel_crtc, struct intel_flip_work *work)
+static void page_flip_completed(struct intel_crtc *intel_crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-        struct drm_plane_state *new_plane_state;
-        struct drm_plane *primary = intel_crtc->base.primary;
+        struct intel_flip_work *work = intel_crtc->flip_work;
+
+        intel_crtc->flip_work = NULL;
 
         if (work->event)
                 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
 
         drm_crtc_vblank_put(&intel_crtc->base);
 
-        new_plane_state = &work->old_plane_state[0]->base;
-        if (work->num_planes >= 1 &&
-            new_plane_state->plane == primary &&
-            new_plane_state->fb)
-                trace_i915_flip_complete(intel_crtc->plane,
-                                         intel_fb_obj(new_plane_state->fb));
-
-        if (work->can_async_unpin) {
-                list_del_init(&work->head);
-                wake_up_all(&dev_priv->pending_flip_queue);
-        }
-
+        wake_up_all(&dev_priv->pending_flip_queue);
         queue_work(dev_priv->wq, &work->unpin_work);
+
+        trace_i915_flip_complete(intel_crtc->plane,
+                                 work->pending_flip_obj);
 }
 
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -3805,7 +3844,18 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
         if (ret < 0)
                 return ret;
 
-        WARN(ret == 0, "Stuck page flip\n");
+        if (ret == 0) {
+                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+                struct intel_flip_work *work;
+
+                spin_lock_irq(&dev->event_lock);
+                work = intel_crtc->flip_work;
+                if (work && !is_mmio_work(work)) {
+                        WARN_ONCE(1, "Removing stuck page flip\n");
+                        page_flip_completed(intel_crtc);
+                }
+                spin_unlock_irq(&dev->event_lock);
+        }
 
         return 0;
 }
@@ -4536,6 +4586,39 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
         }
 }
 
+static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+{
+        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+        struct drm_atomic_state *old_state = old_crtc_state->base.state;
+        struct intel_crtc_state *pipe_config =
+                to_intel_crtc_state(crtc->base.state);
+        struct drm_device *dev = crtc->base.dev;
+        struct drm_plane *primary = crtc->base.primary;
+        struct drm_plane_state *old_pri_state =
+                drm_atomic_get_existing_plane_state(old_state, primary);
+
+        intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+
+        crtc->wm.cxsr_allowed = true;
+
+        if (pipe_config->update_wm_post && pipe_config->base.active)
+                intel_update_watermarks(&crtc->base);
+
+        if (old_pri_state) {
+                struct intel_plane_state *primary_state =
+                        to_intel_plane_state(primary->state);
+                struct intel_plane_state *old_primary_state =
+                        to_intel_plane_state(old_pri_state);
+
+                intel_fbc_post_update(crtc);
+
+                if (primary_state->visible &&
+                    (needs_modeset(&pipe_config->base) ||
+                     !old_primary_state->visible))
+                        intel_post_enable_primary(&crtc->base);
+        }
+}
+
 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 {
         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -4555,7 +4638,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
                 struct intel_plane_state *old_primary_state =
                         to_intel_plane_state(old_pri_state);
 
-                intel_fbc_pre_update(crtc, pipe_config, primary_state);
+                intel_fbc_pre_update(crtc);
 
                 if (old_primary_state->visible &&
                     (modeset || !primary_state->visible))
@@ -5145,21 +5228,18 @@ modeset_get_crtc_power_domains(struct drm_crtc *crtc,
         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         enum intel_display_power_domain domain;
-        unsigned long domains, new_domains, old_domains, ms_domain = 0;
+        unsigned long domains, new_domains, old_domains;
 
         old_domains = intel_crtc->enabled_power_domains;
         intel_crtc->enabled_power_domains = new_domains =
                 get_crtc_power_domains(crtc, crtc_state);
 
-        if (needs_modeset(&crtc_state->base))
-                ms_domain = BIT(POWER_DOMAIN_MODESET);
-
-        domains = (new_domains & ~old_domains) | ms_domain;
+        domains = new_domains & ~old_domains;
 
         for_each_power_domain(domain, domains)
                 intel_display_power_get(dev_priv, domain);
 
-        return (old_domains & ~new_domains) | ms_domain;
+        return old_domains & ~new_domains;
 }
 
 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
@@ -6193,7 +6273,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                 return;
 
         if (to_intel_plane_state(crtc->primary->state)->visible) {
-                WARN_ON(list_empty(&intel_crtc->flip_work));
+                WARN_ON(intel_crtc->flip_work);
 
                 intel_pre_disable_primary_noatomic(crtc);
 
@@ -6245,12 +6325,6 @@ int intel_display_suspend(struct drm_device *dev)
                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
         else
                 dev_priv->modeset_restore_state = state;
-
-        /*
-         * Make sure all unpin_work completes before returning.
-         */
-        flush_workqueue(dev_priv->wq);
-
         return ret;
 }
 
@@ -6264,10 +6338,9 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
 
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
-static void intel_connector_verify_state(struct intel_connector *connector,
-                                         struct drm_connector_state *conn_state)
+static void intel_connector_verify_state(struct intel_connector *connector)
 {
-        struct drm_crtc *crtc = conn_state->crtc;
+        struct drm_crtc *crtc = connector->base.state->crtc;
 
         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                       connector->base.base.id,
@@ -6275,6 +6348,7 @@ static void intel_connector_verify_state(struct intel_connector *connector,
 
         if (connector->get_hw_state(connector)) {
                 struct intel_encoder *encoder = connector->encoder;
+                struct drm_connector_state *conn_state = connector->base.state;
 
                 I915_STATE_WARN(!crtc,
                                 "connector enabled without attached crtc\n");
@@ -6296,7 +6370,7 @@ static void intel_connector_verify_state(struct intel_connector *connector,
         } else {
                 I915_STATE_WARN(crtc && crtc->state->active,
                                 "attached crtc is active, but connector isn't\n");
-                I915_STATE_WARN(!crtc && conn_state->best_encoder,
+                I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
                                 "best encoder set without crtc!\n");
         }
 }
@@ -10854,13 +10928,6 @@ void intel_mark_idle(struct drm_i915_private *dev_priv)
         intel_runtime_pm_put(dev_priv);
 }
 
-void intel_free_flip_work(struct intel_flip_work *work)
-{
-        kfree(work->old_connector_state);
-        kfree(work->new_connector_state);
-        kfree(work);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -10868,278 +10935,885 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
         struct intel_flip_work *work;
 
         spin_lock_irq(&dev->event_lock);
-        while (!list_empty(&intel_crtc->flip_work)) {
-                work = list_first_entry(&intel_crtc->flip_work,
-                                        struct intel_flip_work, head);
-                list_del_init(&work->head);
-                spin_unlock_irq(&dev->event_lock);
+        work = intel_crtc->flip_work;
+        intel_crtc->flip_work = NULL;
+        spin_unlock_irq(&dev->event_lock);
 
+        if (work) {
                 cancel_work_sync(&work->mmio_work);
                 cancel_work_sync(&work->unpin_work);
-                intel_free_flip_work(work);
-
-                spin_lock_irq(&dev->event_lock);
+                kfree(work);
         }
-        spin_unlock_irq(&dev->event_lock);
 
         drm_crtc_cleanup(crtc);
 
         kfree(intel_crtc);
 }
 
-static void intel_crtc_post_flip_update(struct intel_flip_work *work,
-                                        struct drm_crtc *crtc)
+static void intel_unpin_work_fn(struct work_struct *__work)
 {
-        struct intel_crtc_state *crtc_state = work->new_crtc_state;
+        struct intel_flip_work *work =
+                container_of(__work, struct intel_flip_work, unpin_work);
+        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+        struct drm_device *dev = crtc->base.dev;
+        struct drm_plane *primary = crtc->base.primary;
+
+        if (is_mmio_work(work))
+                flush_work(&work->mmio_work);
+
+        mutex_lock(&dev->struct_mutex);
+        intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+        drm_gem_object_unreference(&work->pending_flip_obj->base);
+
+        if (work->flip_queued_req)
+                i915_gem_request_assign(&work->flip_queued_req, NULL);
+        mutex_unlock(&dev->struct_mutex);
+
+        intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+        intel_fbc_post_update(crtc);
+        drm_framebuffer_unreference(work->old_fb);
+
+        BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+        atomic_dec(&crtc->unpin_work_count);
+
+        kfree(work);
+}
+
+/* Is 'a' after or equal to 'b'? */
+static bool g4x_flip_count_after_eq(u32 a, u32 b)
+{
+        return !((a - b) & 0x80000000);
+}
+
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+                                   struct intel_flip_work *work)
+{
+        struct drm_device *dev = crtc->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        unsigned reset_counter;
+
+        reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+        if (crtc->reset_counter != reset_counter)
+                return true;
+
+        /*
+         * The relevant registers doen't exist on pre-ctg.
+         * As the flip done interrupt doesn't trigger for mmio
+         * flips on gmch platforms, a flip count check isn't
+         * really needed there. But since ctg has the registers,
+         * include it in the check anyway.
+         */
+        if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+                return true;
+
+        /*
+         * BDW signals flip done immediately if the plane
+         * is disabled, even if the plane enable is already
+         * armed to occur at the next vblank :(
+         */
+
+        /*
+         * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
+         * used the same base address. In that case the mmio flip might
+         * have completed, but the CS hasn't even executed the flip yet.
+         *
+         * A flip count check isn't enough as the CS might have updated
+         * the base address just after start of vblank, but before we
+         * managed to process the interrupt. This means we'd complete the
+         * CS flip too soon.
+         *
+         * Combining both checks should get us a good enough result. It may
+         * still happen that the CS flip has been executed, but has not
+         * yet actually completed. But in case the base address is the same
+         * anyway, we don't really care.
+         */
+        return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
+                crtc->flip_work->gtt_offset &&
+                g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
+                                        crtc->flip_work->flip_count);
+}
+
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+                         struct intel_flip_work *work)
+{
+        /*
+         * MMIO work completes when vblank is different from
+         * flip_queued_vblank.
+         *
+         * Reset counter value doesn't matter, this is handled by
+         * i915_wait_request finishing early, so no need to handle
+         * reset here.
+         */
+        return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+                              struct intel_flip_work *work)
+{
+        if (!atomic_read(&work->pending))
+                return false;
+
+        smp_rmb();
+
+        if (is_mmio_work(work))
+                return __pageflip_finished_mmio(crtc, work);
+        else
+                return __pageflip_finished_cs(crtc, work);
+}
+
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+        struct drm_device *dev = dev_priv->dev;
+        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        struct intel_flip_work *work;
+        unsigned long flags;
 
-        if (crtc_state->disable_cxsr)
-                intel_crtc->wm.cxsr_allowed = true;
+        /* Ignore early vblank irqs */
+        if (!crtc)
+                return;
 
-        if (crtc_state->update_wm_post && crtc_state->base.active)
-                intel_update_watermarks(crtc);
+        /*
+         * This is called both by irq handlers and the reset code (to complete
+         * lost pageflips) so needs the full irqsave spinlocks.
+         */
+        spin_lock_irqsave(&dev->event_lock, flags);
+        work = intel_crtc->flip_work;
 
-        if (work->num_planes > 0 &&
-            work->old_plane_state[0]->base.plane == crtc->primary) {
-                struct intel_plane_state *plane_state =
-                        work->new_plane_state[0];
+        if (work != NULL &&
+            !is_mmio_work(work) &&
+            pageflip_finished(intel_crtc, work))
+                page_flip_completed(intel_crtc);
 
-                if (plane_state->visible &&
-                    (needs_modeset(&crtc_state->base) ||
-                     !work->old_plane_state[0]->visible))
-                        intel_post_enable_primary(crtc);
-        }
+        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void intel_unpin_work_fn(struct work_struct *__work)
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 {
-        struct intel_flip_work *work =
-                container_of(__work, struct intel_flip_work, unpin_work);
-        struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
+        struct drm_device *dev = dev_priv->dev;
+        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        struct drm_device *dev = crtc->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        int i;
+        struct intel_flip_work *work;
+        unsigned long flags;
 
-        if (work->fb_bits)
-                intel_frontbuffer_flip_complete(dev, work->fb_bits);
+        /* Ignore early vblank irqs */
+        if (!crtc)
+                return;
 
         /*
-         * Unless work->can_async_unpin is false, there's no way to ensure
-         * that work->new_crtc_state contains valid memory during unpin
-         * because intel_atomic_commit may free it before this runs.
+         * This is called both by irq handlers and the reset code (to complete
+         * lost pageflips) so needs the full irqsave spinlocks.
          */
-        if (!work->can_async_unpin) {
-                intel_crtc_post_flip_update(work, crtc);
+        spin_lock_irqsave(&dev->event_lock, flags);
+        work = intel_crtc->flip_work;
 
-                if (dev_priv->display.optimize_watermarks)
-                        dev_priv->display.optimize_watermarks(work->new_crtc_state);
-        }
+        if (work != NULL &&
+            is_mmio_work(work) &&
+            pageflip_finished(intel_crtc, work))
+                page_flip_completed(intel_crtc);
+
+        spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+                                               struct intel_flip_work *work)
+{
+        work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
 
-        if (work->fb_bits & to_intel_plane(crtc->primary)->frontbuffer_bit)
-                intel_fbc_post_update(intel_crtc);
+        /* Ensure that the work item is consistent when activating it ... */
+        smp_mb__before_atomic();
+        atomic_set(&work->pending, 1);
+}
 
-        if (work->put_power_domains)
-                modeset_put_power_domains(dev_priv, work->put_power_domains);
+static int intel_gen2_queue_flip(struct drm_device *dev,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_i915_gem_request *req,
+                                 uint32_t flags)
+{
+        struct intel_engine_cs *engine = req->engine;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        u32 flip_mask;
+        int ret;
 
-        /* Make sure mmio work is completely finished before freeing all state here. */
-        flush_work(&work->mmio_work);
+        ret = intel_ring_begin(req, 6);
+        if (ret)
+                return ret;
 
-        if (!work->can_async_unpin &&
-            (work->new_crtc_state->update_pipe ||
-             needs_modeset(&work->new_crtc_state->base))) {
-                /* This must be called before work is unpinned for serialization. */
-                intel_modeset_verify_crtc(crtc, &work->old_crtc_state->base,
-                                          &work->new_crtc_state->base);
+        /* Can't queue multiple flips, so wait for the previous
+         * one to finish before executing the next.
+         */
+        if (intel_crtc->plane)
+                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+        else
+                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+        intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+        intel_ring_emit(engine, MI_NOOP);
+        intel_ring_emit(engine, MI_DISPLAY_FLIP |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+        intel_ring_emit(engine, fb->pitches[0]);
+        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, 0); /* aux display base address, unused */
 
-        for (i = 0; i < work->num_new_connectors; i++) {
-                struct drm_connector_state *conn_state =
-                        work->new_connector_state[i];
-                struct drm_connector *con = conn_state->connector;
+        return 0;
+}
 
-                WARN_ON(!con);
+static int intel_gen3_queue_flip(struct drm_device *dev,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_i915_gem_request *req,
+                                 uint32_t flags)
+{
+        struct intel_engine_cs *engine = req->engine;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        u32 flip_mask;
+        int ret;
 
-                intel_connector_verify_state(to_intel_connector(con),
-                                             conn_state);
-                }
-        }
+        ret = intel_ring_begin(req, 6);
+        if (ret)
+                return ret;
 
-        for (i = 0; i < work->num_old_connectors; i++) {
-                struct drm_connector_state *old_con_state =
-                        work->old_connector_state[i];
-                struct drm_connector *con =
-                        old_con_state->connector;
+        if (intel_crtc->plane)
+                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+        else
+                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+        intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+        intel_ring_emit(engine, MI_NOOP);
+        intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+        intel_ring_emit(engine, fb->pitches[0]);
+        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+        intel_ring_emit(engine, MI_NOOP);
 
-                con->funcs->atomic_destroy_state(con, old_con_state);
+        return 0;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_i915_gem_request *req,
+                                 uint32_t flags)
+{
+        struct intel_engine_cs *engine = req->engine;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        uint32_t pf, pipesrc;
+        int ret;
+
+        ret = intel_ring_begin(req, 4);
+        if (ret)
+                return ret;
+
+        /* i965+ uses the linear or tiled offsets from the
+         * Display Registers (which do not change across a page-flip)
+         * so we need only reprogram the base address.
+         */
+        intel_ring_emit(engine, MI_DISPLAY_FLIP |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+        intel_ring_emit(engine, fb->pitches[0]);
+        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
+                        obj->tiling_mode);
+
+        /* XXX Enabling the panel-fitter across page-flip is so far
+         * untested on non-native modes, so ignore it for now.
+         * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+         */
+        pf = 0;
+        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+        intel_ring_emit(engine, pf | pipesrc);
+
+        return 0;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_i915_gem_request *req,
+                                 uint32_t flags)
+{
+        struct intel_engine_cs *engine = req->engine;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        uint32_t pf, pipesrc;
+        int ret;
+
+        ret = intel_ring_begin(req, 4);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(engine, MI_DISPLAY_FLIP |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+        intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+        intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+
+        /* Contrary to the suggestions in the documentation,
+         * "Enable Panel Fitter" does not seem to be required when page
+         * flipping with a non-native mode, and worse causes a normal
+         * modeset to fail.
+         * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+         */
+        pf = 0;
+        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+        intel_ring_emit(engine, pf | pipesrc);
+
+        return 0;
+}
+
+static int intel_gen7_queue_flip(struct drm_device *dev,
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_i915_gem_object *obj,
+                                 struct drm_i915_gem_request *req,
+                                 uint32_t flags)
+{
+        struct intel_engine_cs *engine = req->engine;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        uint32_t plane_bit = 0;
+        int len, ret;
+
+        switch (intel_crtc->plane) {
+        case PLANE_A:
+                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+                break;
+        case PLANE_B:
+                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+                break;
+        case PLANE_C:
+                plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+                break;
+        default:
+                WARN_ONCE(1, "unknown plane in flip command\n");
+                return -ENODEV;
         }
 
-        if (!work->can_async_unpin || !list_empty(&work->head)) {
-                spin_lock_irq(&dev->event_lock);
-                WARN(list_empty(&work->head) != work->can_async_unpin,
-                     "[CRTC:%i] Pin work %p async %i with %i planes, active %i -> %i ms %i\n",
-                     crtc->base.id, work, work->can_async_unpin, work->num_planes,
-                     work->old_crtc_state->base.active, work->new_crtc_state->base.active,
-                     needs_modeset(&work->new_crtc_state->base));
+        len = 4;
+        if (engine->id == RCS) {
+                len += 6;
+                /*
+                 * On Gen 8, SRM is now taking an extra dword to accommodate
+                 * 48bits addresses, and we need a NOOP for the batch size to
+                 * stay even.
+                 */
+                if (IS_GEN8(dev))
+                        len += 2;
+        }
 
10984 if (!list_empty(&work->head)) 11307 /*
10985 list_del(&work->head); 11308 * BSpec MI_DISPLAY_FLIP for IVB:
11309 * "The full packet must be contained within the same cache line."
11310 *
11311 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11312 * cacheline, if we ever start emitting more commands before
11313 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11314 * then do the cacheline alignment, and finally emit the
11315 * MI_DISPLAY_FLIP.
11316 */
11317 ret = intel_ring_cacheline_align(req);
11318 if (ret)
11319 return ret;
10986 11320
10987 wake_up_all(&dev_priv->pending_flip_queue); 11321 ret = intel_ring_begin(req, len);
10988 spin_unlock_irq(&dev->event_lock); 11322 if (ret)
11323 return ret;
11324
11325 /* Unmask the flip-done completion message. Note that the bspec says that
11326 * we should do this for both the BCS and RCS, and that we must not unmask
11327 * more than one flip event at any time (or ensure that one flip message
11328 * can be sent by waiting for flip-done prior to queueing new flips).
11329 * Experimentation says that BCS works despite DERRMR masking all
11330 * flip-done completion events and that unmasking all planes at once
11331 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11332 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11333 */
11334 if (engine->id == RCS) {
11335 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11336 intel_ring_emit_reg(engine, DERRMR);
11337 intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11338 DERRMR_PIPEB_PRI_FLIP_DONE |
11339 DERRMR_PIPEC_PRI_FLIP_DONE));
11340 if (IS_GEN8(dev))
11341 intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11342 MI_SRM_LRM_GLOBAL_GTT);
11343 else
11344 intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11345 MI_SRM_LRM_GLOBAL_GTT);
11346 intel_ring_emit_reg(engine, DERRMR);
11347 intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11348 if (IS_GEN8(dev)) {
11349 intel_ring_emit(engine, 0);
11350 intel_ring_emit(engine, MI_NOOP);
11351 }
10989 } 11352 }
10990 11353
10991 /* New crtc_state freed? */ 11354 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
10992 if (work->free_new_crtc_state) 11355 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
10993 intel_crtc_destroy_state(crtc, &work->new_crtc_state->base); 11356 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11357 intel_ring_emit(engine, (MI_NOOP));
10994 11358
10995 intel_crtc_destroy_state(crtc, &work->old_crtc_state->base); 11359 return 0;
11360}
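
A quick way to check the cacheline rule quoted above is to count the dwords the function actually emits. The tally below is read off the intel_ring_emit() calls in this hunk; it is a standalone sketch, not driver code, and assumes the usual 4-byte dword and 64-byte cacheline.

/* Dword budget behind the cacheline comment: a 64-byte line holds
 * 16 dwords, and even the largest (gen8 RCS) sequence fits. */
#include <assert.h>

int main(void)
{
	const int flip = 4;       /* MI_DISPLAY_FLIP, pitch, base, MI_NOOP */
	const int lri = 3;        /* MI_LOAD_REGISTER_IMM, DERRMR, mask */
	const int srm_gen7 = 3;   /* MI_STORE_REGISTER_MEM, DERRMR, addr */
	const int srm_gen8 = 5;   /* extra address dword plus padding NOOP */
	const int cacheline = 16; /* 64 bytes / 4-byte dwords */

	assert(lri + srm_gen7 + flip <= cacheline); /* len = 4 + 6 = 10 */
	assert(lri + srm_gen8 + flip <= cacheline); /* len = 4 + 6 + 2 = 12 */
	return 0;
}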
10996 11361
10997 for (i = 0; i < work->num_planes; i++) { 11362static bool use_mmio_flip(struct intel_engine_cs *engine,
10998 struct intel_plane_state *old_plane_state = 11363 struct drm_i915_gem_object *obj)
10999 work->old_plane_state[i]; 11364{
11000 struct drm_framebuffer *old_fb = old_plane_state->base.fb; 11365 /*
11001 struct drm_plane *plane = old_plane_state->base.plane; 11366 * This is not being used for older platforms, because
11002 struct drm_i915_gem_request *req; 11367 * non-availability of flip done interrupt forces us to use
11368 * CS flips. Older platforms derive flip done using some clever
11369 * tricks involving the flip_pending status bits and vblank irqs.
11370 * So using MMIO flips there would disrupt this mechanism.
11371 */
11003 11372
11004 req = old_plane_state->wait_req; 11373 if (engine == NULL)
11005 old_plane_state->wait_req = NULL; 11374 return true;
11006 if (req)
11007 i915_gem_request_unreference(req);
11008 11375
11009 fence_put(old_plane_state->base.fence); 11376 if (INTEL_GEN(engine->i915) < 5)
11010 old_plane_state->base.fence = NULL; 11377 return false;
11011 11378
11012 if (old_fb && 11379 if (i915.use_mmio_flip < 0)
11013 (plane->type != DRM_PLANE_TYPE_CURSOR || 11380 return false;
11014 !INTEL_INFO(dev_priv)->cursor_needs_physical)) { 11381 else if (i915.use_mmio_flip > 0)
11015 mutex_lock(&dev->struct_mutex); 11382 return true;
11016 intel_unpin_fb_obj(old_fb, old_plane_state->base.rotation); 11383 else if (i915.enable_execlists)
11017 mutex_unlock(&dev->struct_mutex); 11384 return true;
11018 } 11385 else if (obj->base.dma_buf &&
11386 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11387 false))
11388 return true;
11389 else
11390 return engine != i915_gem_request_get_engine(obj->last_write_req);
11391}
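
Note that the restored i915.use_mmio_flip module parameter maps onto the first two branches here: -1 forces CS flips wherever a CS engine is available, 1 always picks MMIO flips, and the default of 0 defers to the remaining heuristics (execlists enabled, an unsignaled dma-buf fence, or a pending write on a different engine all select MMIO).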
11392
11393static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11394 unsigned int rotation,
11395 struct intel_flip_work *work)
11396{
11397 struct drm_device *dev = intel_crtc->base.dev;
11398 struct drm_i915_private *dev_priv = dev->dev_private;
11399 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11400 const enum pipe pipe = intel_crtc->pipe;
11401 u32 ctl, stride, tile_height;
11402
11403 ctl = I915_READ(PLANE_CTL(pipe, 0));
11404 ctl &= ~PLANE_CTL_TILED_MASK;
11405 switch (fb->modifier[0]) {
11406 case DRM_FORMAT_MOD_NONE:
11407 break;
11408 case I915_FORMAT_MOD_X_TILED:
11409 ctl |= PLANE_CTL_TILED_X;
11410 break;
11411 case I915_FORMAT_MOD_Y_TILED:
11412 ctl |= PLANE_CTL_TILED_Y;
11413 break;
11414 case I915_FORMAT_MOD_Yf_TILED:
11415 ctl |= PLANE_CTL_TILED_YF;
11416 break;
11417 default:
11418 MISSING_CASE(fb->modifier[0]);
11419 }
11019 11420
11020 intel_plane_destroy_state(plane, &old_plane_state->base); 11421 /*
11422 * The stride is either expressed as a multiple of 64 bytes chunks for
11423 * linear buffers or in number of tiles for tiled buffers.
11424 */
11425 if (intel_rotation_90_or_270(rotation)) {
11426 /* stride = Surface height in tiles */
11427 tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11428 stride = DIV_ROUND_UP(fb->height, tile_height);
11429 } else {
11430 stride = fb->pitches[0] /
11431 intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11432 fb->pixel_format);
11021 } 11433 }
11022 11434
11023 if (!WARN_ON(atomic_read(&intel_crtc->unpin_work_count) == 0)) 11435 /*
11024 atomic_dec(&intel_crtc->unpin_work_count); 11436 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11437 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11438 */
11439 I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11440 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11025 11441
11026 intel_free_flip_work(work); 11442 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11443 POSTING_READ(PLANE_SURF(pipe, 0));
11027} 11444}
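
The stride units described above deserve a concrete example. This is a standalone sketch, assuming the classic X-tile geometry of 512 bytes per tile row; the driver derives the actual divisor via intel_fb_stride_alignment(), which is not reproduced here.

/* PLANE_STRIDE units for a 1920-pixel-wide XRGB framebuffer: linear
 * surfaces count 64-byte chunks, tiled surfaces count whole tiles. */
#include <stdio.h>

int main(void)
{
	unsigned int pitch = 1920 * 4; /* bytes per row */

	printf("linear stride:  %u chunks\n", pitch / 64);  /* 120 */
	printf("X-tiled stride: %u tiles\n", pitch / 512);  /* 15 */
	return 0;
}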
11028 11445
11446static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11447 struct intel_flip_work *work)
11448{
11449 struct drm_device *dev = intel_crtc->base.dev;
11450 struct drm_i915_private *dev_priv = dev->dev_private;
11451 struct intel_framebuffer *intel_fb =
11452 to_intel_framebuffer(intel_crtc->base.primary->fb);
11453 struct drm_i915_gem_object *obj = intel_fb->obj;
11454 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11455 u32 dspcntr;
11029 11456
11030static bool pageflip_finished(struct intel_crtc *crtc, 11457 dspcntr = I915_READ(reg);
11031 struct intel_flip_work *work) 11458
11459 if (obj->tiling_mode != I915_TILING_NONE)
11460 dspcntr |= DISPPLANE_TILED;
11461 else
11462 dspcntr &= ~DISPPLANE_TILED;
11463
11464 I915_WRITE(reg, dspcntr);
11465
11466 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11467 POSTING_READ(DSPSURF(intel_crtc->plane));
11468}
11469
11470static void intel_mmio_flip_work_func(struct work_struct *w)
11032{ 11471{
11472 struct intel_flip_work *work =
11473 container_of(w, struct intel_flip_work, mmio_work);
11474 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11475 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11476 struct intel_framebuffer *intel_fb =
11477 to_intel_framebuffer(crtc->base.primary->fb);
11478 struct drm_i915_gem_object *obj = intel_fb->obj;
11479
11480 if (work->flip_queued_req)
11481 WARN_ON(__i915_wait_request(work->flip_queued_req,
11482 false, NULL,
11483 &dev_priv->rps.mmioflips));
11484
11485 /* For framebuffer backed by dmabuf, wait for fence */
11486 if (obj->base.dma_buf)
11487 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11488 false, false,
11489 MAX_SCHEDULE_TIMEOUT) < 0);
11490
11491 intel_pipe_update_start(crtc);
11492
11493 if (INTEL_GEN(dev_priv) >= 9)
11494 skl_do_mmio_flip(crtc, work->rotation, work);
11495 else
11496 /* use_mmio_flip() restricts MMIO flips to ilk+ */
11497 ilk_do_mmio_flip(crtc, work);
11498
11499 intel_pipe_update_end(crtc, work);
11500}
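
The ordering in intel_mmio_flip_work_func() is the point of having a worker at all: it first waits for the last write request and any dma-buf fence to signal, and only then enters the vblank-evasion window, so the register writes in the skl/ilk helpers above hit an idle framebuffer atomically.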
11501
11502static int intel_default_queue_flip(struct drm_device *dev,
11503 struct drm_crtc *crtc,
11504 struct drm_framebuffer *fb,
11505 struct drm_i915_gem_object *obj,
11506 struct drm_i915_gem_request *req,
11507 uint32_t flags)
11508{
11509 return -ENODEV;
11510}
11511
11512static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11513 struct intel_crtc *intel_crtc,
11514 struct intel_flip_work *work)
11515{
11516 u32 addr, vblank;
11517
11033 if (!atomic_read(&work->pending)) 11518 if (!atomic_read(&work->pending))
11034 return false; 11519 return false;
11035 11520
11036 smp_rmb(); 11521 smp_rmb();
11037 11522
11038 /* 11523 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11039 * MMIO work completes when vblank is different from 11524 if (work->flip_ready_vblank == 0) {
11040 * flip_queued_vblank. 11525 if (work->flip_queued_req &&
11526 !i915_gem_request_completed(work->flip_queued_req, true))
11527 return false;
11528
11529 work->flip_ready_vblank = vblank;
11530 }
11531
11532 if (vblank - work->flip_ready_vblank < 3)
11533 return false;
11534
11535 /* Potential stall - if we see that the flip has happened,
11536 * assume a missed interrupt. */
11537 if (INTEL_GEN(dev_priv) >= 4)
11538 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11539 else
11540 addr = I915_READ(DSPADDR(intel_crtc->plane));
11541
11542 /* There is a potential issue here with a false positive after a flip
11543 * to the same address. We could address this by checking for a
11544 * non-incrementing frame counter.
11041 */ 11545 */
11042 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; 11546 return addr == work->gtt_offset;
11043} 11547}
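
One subtlety in __pageflip_stall_check_cs(): the "vblank - work->flip_ready_vblank < 3" test relies on unsigned subtraction, so the grace period survives frame counter wraparound. A standalone sketch of just that comparison:

/* The 3-vblank grace period, including wraparound:
 * 1 - 0xfffffffe == 3 in 32-bit unsigned arithmetic. */
#include <assert.h>

static int stall_suspected(unsigned int now, unsigned int ready)
{
	return now - ready >= 3;
}

int main(void)
{
	assert(!stall_suspected(2, 0));          /* flip only 2 vblanks old */
	assert(stall_suspected(3, 0));           /* grace period expired */
	assert(stall_suspected(1, 0xfffffffeu)); /* correct across the wrap */
	return 0;
}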
11044 11548
11045void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) 11549void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11046{ 11550{
11047 struct drm_device *dev = dev_priv->dev; 11551 struct drm_device *dev = dev_priv->dev;
11048 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11552 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11049 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11050 struct intel_flip_work *work; 11554 struct intel_flip_work *work;
11051 unsigned long flags;
11052 11555
11053 /* Ignore early vblank irqs */ 11556 WARN_ON(!in_interrupt());
11054 if (!crtc)
11055 return;
11056 11557
11057 /* 11558 if (crtc == NULL)
11058 * This is called both by irq handlers and the reset code (to complete 11559 return;
11059 * lost pageflips) so needs the full irqsave spinlocks.
11060 */
11061 spin_lock_irqsave(&dev->event_lock, flags);
11062 while (!list_empty(&intel_crtc->flip_work)) {
11063 work = list_first_entry(&intel_crtc->flip_work,
11064 struct intel_flip_work,
11065 head);
11066 11560
11067 if (!pageflip_finished(intel_crtc, work) || 11561 spin_lock(&dev->event_lock);
11068 work_busy(&work->unpin_work)) 11562 work = intel_crtc->flip_work;
11069 break;
11070 11563
11071 page_flip_completed(intel_crtc, work); 11564 if (work != NULL && !is_mmio_work(work) &&
11565 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11566 WARN_ONCE(1,
11567 "Kicking stuck page flip: queued at %d, now %d\n",
11568 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11569 page_flip_completed(intel_crtc);
11570 work = NULL;
11072 } 11571 }
11073 spin_unlock_irqrestore(&dev->event_lock, flags); 11572
11573 if (work != NULL && !is_mmio_work(work) &&
11574 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11575 intel_queue_rps_boost_for_request(work->flip_queued_req);
11576 spin_unlock(&dev->event_lock);
11074} 11577}
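
Note the two-tier recovery here: a CS flip that still has not landed after the grace period is checked against the live scanout base and, if the hardware already flipped, forcibly completed as a missed interrupt; a flip that is merely late gets its request an RPS boost so the GPU clocks up and retires it sooner.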
11075 11578
11076static void intel_mmio_flip_work_func(struct work_struct *w) 11579static int intel_crtc_page_flip(struct drm_crtc *crtc,
11580 struct drm_framebuffer *fb,
11581 struct drm_pending_vblank_event *event,
11582 uint32_t page_flip_flags)
11077{ 11583{
11078 struct intel_flip_work *work =
11079 container_of(w, struct intel_flip_work, mmio_work);
11080 struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
11081 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11082 struct intel_crtc_state *crtc_state = work->new_crtc_state;
11083 struct drm_device *dev = crtc->dev; 11584 struct drm_device *dev = crtc->dev;
11084 struct drm_i915_private *dev_priv = dev->dev_private; 11585 struct drm_i915_private *dev_priv = dev->dev_private;
11085 struct drm_i915_gem_request *req; 11586 struct drm_framebuffer *old_fb = crtc->primary->fb;
11086 int i, ret; 11587 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11588 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11589 struct drm_plane *primary = crtc->primary;
11590 enum pipe pipe = intel_crtc->pipe;
11591 struct intel_flip_work *work;
11592 struct intel_engine_cs *engine;
11593 bool mmio_flip;
11594 struct drm_i915_gem_request *request = NULL;
11595 int ret;
11596
11597 /*
11598 * drm_mode_page_flip_ioctl() should already catch this, but double
11599 * check to be safe. In the future we may enable pageflipping from
11600 * a disabled primary plane.
11601 */
11602 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11603 return -EBUSY;
11604
11605 /* Can't change pixel format via MI display flips. */
11606 if (fb->pixel_format != crtc->primary->fb->pixel_format)
11607 return -EINVAL;
11608
11609 /*
11610 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11611 * Note that pitch changes could also affect these registers.
11612 */
11613 if (INTEL_INFO(dev)->gen > 3 &&
11614 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11615 fb->pitches[0] != crtc->primary->fb->pitches[0]))
11616 return -EINVAL;
11617
11618 if (i915_terminally_wedged(&dev_priv->gpu_error))
11619 goto out_hang;
11087 11620
11088 if (!needs_modeset(&crtc_state->base) && crtc_state->update_pipe) { 11621 work = kzalloc(sizeof(*work), GFP_KERNEL);
11089 work->put_power_domains = 11622 if (work == NULL)
11090 modeset_get_crtc_power_domains(crtc, crtc_state); 11623 return -ENOMEM;
11624
11625 work->event = event;
11626 work->crtc = crtc;
11627 work->old_fb = old_fb;
11628 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11629
11630 ret = drm_crtc_vblank_get(crtc);
11631 if (ret)
11632 goto free_work;
11633
11634 /* We borrow the event spin lock for protecting flip_work */
11635 spin_lock_irq(&dev->event_lock);
11636 if (intel_crtc->flip_work) {
11637 /* Before declaring the flip queue wedged, check if
11638 * the hardware completed the operation behind our backs.
11639 */
11640 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11641 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11642 page_flip_completed(intel_crtc);
11643 } else {
11644 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11645 spin_unlock_irq(&dev->event_lock);
11646
11647 drm_crtc_vblank_put(crtc);
11648 kfree(work);
11649 return -EBUSY;
11650 }
11091 } 11651 }
11652 intel_crtc->flip_work = work;
11653 spin_unlock_irq(&dev->event_lock);
11092 11654
11093 for (i = 0; i < work->num_planes; i++) { 11655 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11094 struct intel_plane_state *old_plane_state = work->old_plane_state[i]; 11656 flush_workqueue(dev_priv->wq);
11095 11657
11096 /* For framebuffer backed by dmabuf, wait for fence */ 11658 /* Reference the objects for the scheduled work. */
11097 if (old_plane_state->base.fence) 11659 drm_framebuffer_reference(work->old_fb);
11098 WARN_ON(fence_wait(old_plane_state->base.fence, false) < 0); 11660 drm_gem_object_reference(&obj->base);
11099 11661
11100 req = old_plane_state->wait_req; 11662 crtc->primary->fb = fb;
11101 if (!req) 11663 update_state_fb(crtc->primary);
11102 continue; 11664 intel_fbc_pre_update(intel_crtc);
11103 11665
11104 WARN_ON(__i915_wait_request(req, false, NULL, 11666 work->pending_flip_obj = obj;
11105 &dev_priv->rps.mmioflips)); 11667
11668 ret = i915_mutex_lock_interruptible(dev);
11669 if (ret)
11670 goto cleanup;
11671
11672 intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11673 if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11674 ret = -EIO;
11675 goto cleanup;
11106 } 11676 }
11107 11677
11108 ret = drm_crtc_vblank_get(crtc); 11678 atomic_inc(&intel_crtc->unpin_work_count);
11109 I915_STATE_WARN(ret < 0, "enabling vblank failed with %i\n", ret); 11679
11680 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11681 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11110 11682
11111 if (work->num_planes && 11683 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11112 work->old_plane_state[0]->base.plane == crtc->primary) 11684 engine = &dev_priv->engine[BCS];
11113 intel_fbc_enable(intel_crtc, work->new_crtc_state, work->new_plane_state[0]); 11685 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11686 /* vlv: DISPLAY_FLIP fails to change tiling */
11687 engine = NULL;
11688 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11689 engine = &dev_priv->engine[BCS];
11690 } else if (INTEL_INFO(dev)->gen >= 7) {
11691 engine = i915_gem_request_get_engine(obj->last_write_req);
11692 if (engine == NULL || engine->id != RCS)
11693 engine = &dev_priv->engine[BCS];
11694 } else {
11695 engine = &dev_priv->engine[RCS];
11696 }
11114 11697
11115 intel_frontbuffer_flip_prepare(dev, work->fb_bits); 11698 mmio_flip = use_mmio_flip(engine, obj);
11116 11699
11117 intel_pipe_update_start(intel_crtc); 11700 /* When using CS flips, we want to emit semaphores between rings.
11118 if (!needs_modeset(&crtc_state->base)) { 11701 * However, when using mmio flips we will create a task to do the
11119 if (crtc_state->base.color_mgmt_changed || crtc_state->update_pipe) { 11702 * synchronisation, so all we want here is to pin the framebuffer
11120 intel_color_set_csc(&crtc_state->base); 11703 * into the display plane and skip any waits.
11121 intel_color_load_luts(&crtc_state->base); 11704 */
11705 if (!mmio_flip) {
11706 ret = i915_gem_object_sync(obj, engine, &request);
11707 if (!ret && !request) {
11708 request = i915_gem_request_alloc(engine, NULL);
11709 ret = PTR_ERR_OR_ZERO(request);
11122 } 11710 }
11123 11711
11124 if (crtc_state->update_pipe) 11712 if (ret)
11125 intel_update_pipe_config(intel_crtc, work->old_crtc_state); 11713 goto cleanup_pending;
11126 else if (INTEL_INFO(dev)->gen >= 9)
11127 skl_detach_scalers(intel_crtc);
11128 } 11714 }
11129 11715
11130 for (i = 0; i < work->num_planes; i++) { 11716 ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11131 struct intel_plane_state *new_plane_state = work->new_plane_state[i]; 11717 if (ret)
11132 struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane); 11718 goto cleanup_pending;
11719
11720 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11721 obj, 0);
11722 work->gtt_offset += intel_crtc->dspaddr_offset;
11723 work->rotation = crtc->primary->state->rotation;
11133 11724
11134 if (new_plane_state->visible) 11725 if (mmio_flip) {
11135 plane->update_plane(&plane->base, crtc_state, new_plane_state); 11726 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11136 else 11727
11137 plane->disable_plane(&plane->base, crtc); 11728 i915_gem_request_assign(&work->flip_queued_req,
11729 obj->last_write_req);
11730
11731 schedule_work(&work->mmio_work);
11732 } else {
11733 i915_gem_request_assign(&work->flip_queued_req, request);
11734 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11735 page_flip_flags);
11736 if (ret)
11737 goto cleanup_unpin;
11738
11739 intel_mark_page_flip_active(intel_crtc, work);
11740
11741 i915_add_request_no_flush(request);
11138 } 11742 }
11139 11743
11140 intel_pipe_update_end(intel_crtc, work); 11744 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11745 to_intel_plane(primary)->frontbuffer_bit);
11746 mutex_unlock(&dev->struct_mutex);
11747
11748 intel_frontbuffer_flip_prepare(dev,
11749 to_intel_plane(primary)->frontbuffer_bit);
11750
11751 trace_i915_flip_request(intel_crtc->plane, obj);
11752
11753 return 0;
11754
11755cleanup_unpin:
11756 intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11757cleanup_pending:
11758 if (!IS_ERR_OR_NULL(request))
11759 i915_add_request_no_flush(request);
11760 atomic_dec(&intel_crtc->unpin_work_count);
11761 mutex_unlock(&dev->struct_mutex);
11762cleanup:
11763 crtc->primary->fb = old_fb;
11764 update_state_fb(crtc->primary);
11765
11766 drm_gem_object_unreference_unlocked(&obj->base);
11767 drm_framebuffer_unreference(work->old_fb);
11768
11769 spin_lock_irq(&dev->event_lock);
11770 intel_crtc->flip_work = NULL;
11771 spin_unlock_irq(&dev->event_lock);
11772
11773 drm_crtc_vblank_put(crtc);
11774free_work:
11775 kfree(work);
11776
11777 if (ret == -EIO) {
11778 struct drm_atomic_state *state;
11779 struct drm_plane_state *plane_state;
11780
11781out_hang:
11782 state = drm_atomic_state_alloc(dev);
11783 if (!state)
11784 return -ENOMEM;
11785 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11786
11787retry:
11788 plane_state = drm_atomic_get_plane_state(state, primary);
11789 ret = PTR_ERR_OR_ZERO(plane_state);
11790 if (!ret) {
11791 drm_atomic_set_fb_for_plane(plane_state, fb);
11792
11793 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11794 if (!ret)
11795 ret = drm_atomic_commit(state);
11796 }
11797
11798 if (ret == -EDEADLK) {
11799 drm_modeset_backoff(state->acquire_ctx);
11800 drm_atomic_state_clear(state);
11801 goto retry;
11802 }
11803
11804 if (ret)
11805 drm_atomic_state_free(state);
11806
11807 if (ret == 0 && event) {
11808 spin_lock_irq(&dev->event_lock);
11809 drm_crtc_send_vblank_event(crtc, event);
11810 spin_unlock_irq(&dev->event_lock);
11811 }
11812 }
11813 return ret;
11141} 11814}
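
For reference, this restored function is where the legacy userspace path ends up: drm_mode_page_flip_ioctl() invokes the crtc's .page_flip hook, which the funcs table further down points back at intel_crtc_page_flip(). A minimal libdrm sketch of the calling side, assuming crtc_id and fb_id are valid handles the application already owns:

/* Queue a flip to fb_id and ask for a completion event, which is
 * later delivered through drmHandleEvent() on the same fd. */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	return drmModePageFlip(fd, crtc_id, fb_id,
			       DRM_MODE_PAGE_FLIP_EVENT, NULL);
}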
11142 11815
11816
11143/** 11817/**
11144 * intel_wm_need_update - Check whether watermarks need updating 11818 * intel_wm_need_update - Check whether watermarks need updating
11145 * @plane: drm plane 11819 * @plane: drm plane
@@ -11416,6 +12090,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11416 12090
11417static const struct drm_crtc_helper_funcs intel_helper_funcs = { 12091static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11418 .mode_set_base_atomic = intel_pipe_set_base_atomic, 12092 .mode_set_base_atomic = intel_pipe_set_base_atomic,
12093 .atomic_begin = intel_begin_crtc_commit,
12094 .atomic_flush = intel_finish_crtc_commit,
11419 .atomic_check = intel_crtc_atomic_check, 12095 .atomic_check = intel_crtc_atomic_check,
11420}; 12096};
11421 12097
@@ -12255,8 +12931,7 @@ verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12255 if (state->crtc != crtc) 12931 if (state->crtc != crtc)
12256 continue; 12932 continue;
12257 12933
12258 intel_connector_verify_state(to_intel_connector(connector), 12934 intel_connector_verify_state(to_intel_connector(connector));
12259 connector->state);
12260 12935
12261 I915_STATE_WARN(state->best_encoder != encoder, 12936 I915_STATE_WARN(state->best_encoder != encoder,
12262 "connector's atomic encoder doesn't match legacy encoder\n"); 12937 "connector's atomic encoder doesn't match legacy encoder\n");
@@ -12458,7 +13133,12 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
12458 struct drm_crtc_state *old_state, 13133 struct drm_crtc_state *old_state,
12459 struct drm_crtc_state *new_state) 13134 struct drm_crtc_state *new_state)
12460{ 13135{
13136 if (!needs_modeset(new_state) &&
13137 !to_intel_crtc_state(new_state)->update_pipe)
13138 return;
13139
12461 verify_wm_state(crtc, new_state); 13140 verify_wm_state(crtc, new_state);
13141 verify_connector_state(crtc->dev, crtc);
12462 verify_crtc_state(crtc, old_state, new_state); 13142 verify_crtc_state(crtc, old_state, new_state);
12463 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); 13143 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12464} 13144}
@@ -12810,83 +13490,32 @@ static int intel_atomic_check(struct drm_device *dev,
12810 return calc_watermark_data(state); 13490 return calc_watermark_data(state);
12811} 13491}
12812 13492
12813static bool needs_work(struct drm_crtc_state *crtc_state)
12814{
12815 /* hw state checker needs to run */
12816 if (needs_modeset(crtc_state))
12817 return true;
12818
12819 /* unpin old fb's, possibly vblank update */
12820 if (crtc_state->planes_changed)
12821 return true;
12822
12823 /* pipe parameters need to be updated, and hw state checker */
12824 if (to_intel_crtc_state(crtc_state)->update_pipe)
12825 return true;
12826
12827 /* vblank event requested? */
12828 if (crtc_state->event)
12829 return true;
12830
12831 return false;
12832}
12833
12834static int intel_atomic_prepare_commit(struct drm_device *dev, 13493static int intel_atomic_prepare_commit(struct drm_device *dev,
12835 struct drm_atomic_state *state, 13494 struct drm_atomic_state *state,
12836 bool nonblock) 13495 bool nonblock)
12837{ 13496{
12838 struct drm_i915_private *dev_priv = dev->dev_private; 13497 struct drm_i915_private *dev_priv = dev->dev_private;
12839 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12840 struct drm_plane_state *plane_state; 13498 struct drm_plane_state *plane_state;
12841 struct drm_crtc_state *crtc_state; 13499 struct drm_crtc_state *crtc_state;
12842 struct drm_plane *plane; 13500 struct drm_plane *plane;
12843 struct drm_crtc *crtc; 13501 struct drm_crtc *crtc;
12844 int i, ret; 13502 int i, ret;
12845 13503
12846 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13504 if (nonblock) {
12847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13505 DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
12848 struct intel_flip_work *work; 13506 return -EINVAL;
12849 13507 }
12850 if (!state->legacy_cursor_update) {
12851 ret = intel_crtc_wait_for_pending_flips(crtc);
12852 if (ret)
12853 return ret;
12854
12855 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
12856 flush_workqueue(dev_priv->wq);
12857 }
12858 13508
12859 /* test if we need to update something */ 13509 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12860 if (!needs_work(crtc_state)) 13510 if (state->legacy_cursor_update)
12861 continue; 13511 continue;
12862 13512
12863 intel_state->work[i] = work = 13513 ret = intel_crtc_wait_for_pending_flips(crtc);
12864 kzalloc(sizeof(**intel_state->work), GFP_KERNEL); 13514 if (ret)
12865 13515 return ret;
12866 if (!work)
12867 return -ENOMEM;
12868
12869 if (needs_modeset(crtc_state) ||
12870 to_intel_crtc_state(crtc_state)->update_pipe) {
12871 work->num_old_connectors = hweight32(crtc->state->connector_mask);
12872
12873 work->old_connector_state = kcalloc(work->num_old_connectors,
12874 sizeof(*work->old_connector_state),
12875 GFP_KERNEL);
12876
12877 work->num_new_connectors = hweight32(crtc_state->connector_mask);
12878 work->new_connector_state = kcalloc(work->num_new_connectors,
12879 sizeof(*work->new_connector_state),
12880 GFP_KERNEL);
12881
12882 if (!work->old_connector_state || !work->new_connector_state)
12883 return -ENOMEM;
12884 }
12885 }
12886 13516
12887 if (intel_state->modeset && nonblock) { 13517 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
12888 DRM_DEBUG_ATOMIC("Nonblock modesets are not yet supported!\n"); 13518 flush_workqueue(dev_priv->wq);
12889 return -EINVAL;
12890 } 13519 }
12891 13520
12892 ret = mutex_lock_interruptible(&dev->struct_mutex); 13521 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -12901,15 +13530,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
12901 struct intel_plane_state *intel_plane_state = 13530 struct intel_plane_state *intel_plane_state =
12902 to_intel_plane_state(plane_state); 13531 to_intel_plane_state(plane_state);
12903 13532
12904 if (plane_state->fence) {
12905 long lret = fence_wait(plane_state->fence, true);
12906
12907 if (lret < 0) {
12908 ret = lret;
12909 break;
12910 }
12911 }
12912
12913 if (!intel_plane_state->wait_req) 13533 if (!intel_plane_state->wait_req)
12914 continue; 13534 continue;
12915 13535
@@ -12939,157 +13559,69 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12939 return dev->driver->get_vblank_counter(dev, crtc->pipe); 13559 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12940} 13560}
12941 13561
12942static void intel_prepare_work(struct drm_crtc *crtc, 13562static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
12943 struct intel_flip_work *work, 13563 struct drm_i915_private *dev_priv,
12944 struct drm_atomic_state *state, 13564 unsigned crtc_mask)
12945 struct drm_crtc_state *old_crtc_state)
12946{ 13565{
12947 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13566 unsigned last_vblank_count[I915_MAX_PIPES];
12948 struct drm_plane_state *old_plane_state; 13567 enum pipe pipe;
12949 struct drm_plane *plane; 13568 int ret;
12950 int i, j = 0;
12951 13569
12952 INIT_WORK(&work->unpin_work, intel_unpin_work_fn); 13570 if (!crtc_mask)
12953 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); 13571 return;
12954 atomic_inc(&intel_crtc->unpin_work_count);
12955 13572
12956 for_each_plane_in_state(state, plane, old_plane_state, i) { 13573 for_each_pipe(dev_priv, pipe) {
12957 struct intel_plane_state *old_state = to_intel_plane_state(old_plane_state); 13574 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
12958 struct intel_plane_state *new_state = to_intel_plane_state(plane->state);
12959 13575
12960 if (old_state->base.crtc != crtc && 13576 if (!((1 << pipe) & crtc_mask))
12961 new_state->base.crtc != crtc)
12962 continue; 13577 continue;
12963 13578
12964 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 13579 ret = drm_crtc_vblank_get(crtc);
12965 plane->fb = new_state->base.fb; 13580 if (WARN_ON(ret != 0)) {
12966 crtc->x = new_state->base.src_x >> 16; 13581 crtc_mask &= ~(1 << pipe);
12967 crtc->y = new_state->base.src_y >> 16; 13582 continue;
12968 } 13583 }
12969 13584
12970 old_state->wait_req = new_state->wait_req; 13585 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
12971 new_state->wait_req = NULL;
12972
12973 old_state->base.fence = new_state->base.fence;
12974 new_state->base.fence = NULL;
12975
12976 /* remove plane state from the atomic state and move it to work */
12977 old_plane_state->state = NULL;
12978 state->planes[i] = NULL;
12979 state->plane_states[i] = NULL;
12980
12981 work->old_plane_state[j] = old_state;
12982 work->new_plane_state[j++] = new_state;
12983 } 13586 }
12984 13587
12985 old_crtc_state->state = NULL; 13588 for_each_pipe(dev_priv, pipe) {
12986 state->crtcs[drm_crtc_index(crtc)] = NULL; 13589 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
12987 state->crtc_states[drm_crtc_index(crtc)] = NULL; 13590 long lret;
12988
12989 work->old_crtc_state = to_intel_crtc_state(old_crtc_state);
12990 work->new_crtc_state = to_intel_crtc_state(crtc->state);
12991 work->num_planes = j;
12992
12993 work->event = crtc->state->event;
12994 crtc->state->event = NULL;
12995
12996 if (needs_modeset(crtc->state) || work->new_crtc_state->update_pipe) {
12997 struct drm_connector *conn;
12998 struct drm_connector_state *old_conn_state;
12999 int k = 0;
13000
13001 j = 0;
13002
13003 /*
13004 * intel_unpin_work_fn cannot depend on the connector list
13005 * because it may be freed from underneath it, so add
13006 * them all to the work struct while we're holding locks.
13007 */
13008 for_each_connector_in_state(state, conn, old_conn_state, i) {
13009 if (old_conn_state->crtc == crtc) {
13010 work->old_connector_state[j++] = old_conn_state;
13011
13012 state->connectors[i] = NULL;
13013 state->connector_states[i] = NULL;
13014 }
13015 }
13016
13017 /* If another crtc has stolen the connector from state,
13018 * then for_each_connector_in_state is no longer reliable,
13019 * so use drm_for_each_connector here.
13020 */
13021 drm_for_each_connector(conn, state->dev)
13022 if (conn->state->crtc == crtc)
13023 work->new_connector_state[k++] = conn->state;
13024
13025 WARN(j != work->num_old_connectors, "j = %i, expected %i\n", j, work->num_old_connectors);
13026 WARN(k != work->num_new_connectors, "k = %i, expected %i\n", k, work->num_new_connectors);
13027 } else if (!work->new_crtc_state->update_wm_post)
13028 work->can_async_unpin = true;
13029
13030 work->fb_bits = work->new_crtc_state->fb_bits;
13031}
13032
13033static void intel_schedule_unpin(struct drm_crtc *crtc,
13034 struct intel_atomic_state *state,
13035 struct intel_flip_work *work)
13036{
13037 struct drm_device *dev = crtc->dev;
13038 struct drm_i915_private *dev_priv = dev->dev_private;
13039
13040 to_intel_crtc(crtc)->config = work->new_crtc_state;
13041
13042 queue_work(dev_priv->wq, &work->unpin_work);
13043}
13044 13591
13045static void intel_schedule_flip(struct drm_crtc *crtc, 13592 if (!((1 << pipe) & crtc_mask))
13046 struct intel_atomic_state *state, 13593 continue;
13047 struct intel_flip_work *work,
13048 bool nonblock)
13049{
13050 struct intel_crtc_state *crtc_state = work->new_crtc_state;
13051 13594
13052 if (crtc_state->base.planes_changed || 13595 lret = wait_event_timeout(dev->vblank[pipe].queue,
13053 needs_modeset(&crtc_state->base) || 13596 last_vblank_count[pipe] !=
13054 crtc_state->update_pipe) { 13597 drm_crtc_vblank_count(crtc),
13055 if (nonblock) 13598 msecs_to_jiffies(50));
13056 schedule_work(&work->mmio_work);
13057 else
13058 intel_mmio_flip_work_func(&work->mmio_work);
13059 } else {
13060 int ret;
13061 13599
13062 ret = drm_crtc_vblank_get(crtc); 13600 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
13063 I915_STATE_WARN(ret < 0, "enabling vblank failed with %i\n", ret);
13064 13601
13065 work->flip_queued_vblank = intel_crtc_get_vblank_counter(to_intel_crtc(crtc)); 13602 drm_crtc_vblank_put(crtc);
13066 smp_mb__before_atomic();
13067 atomic_set(&work->pending, 1);
13068 } 13603 }
13069} 13604}
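
intel_atomic_wait_for_vblanks() is deliberately simple: it samples drm_crtc_vblank_count() for every pipe in the mask, then waits up to 50 ms per pipe for the counter to advance, warning instead of failing if a vblank never arrives.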
13070 13605
13071static void intel_schedule_update(struct drm_crtc *crtc, 13606static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13072 struct intel_atomic_state *state,
13073 struct intel_flip_work *work,
13074 bool nonblock)
13075{ 13607{
13076 struct drm_device *dev = crtc->dev; 13608 /* fb updated, need to unpin old fb */
13077 struct intel_crtc_state *pipe_config = work->new_crtc_state; 13609 if (crtc_state->fb_changed)
13610 return true;
13078 13611
13079 if (!pipe_config->base.active && work->can_async_unpin) { 13612 /* wm changes, need vblank before final wm's */
13080 INIT_LIST_HEAD(&work->head); 13613 if (crtc_state->update_wm_post)
13081 intel_schedule_unpin(crtc, state, work); 13614 return true;
13082 return;
13083 }
13084 13615
13085 spin_lock_irq(&dev->event_lock); 13616 /*
13086 list_add_tail(&work->head, &to_intel_crtc(crtc)->flip_work); 13617 * cxsr is re-enabled after vblank.
13087 spin_unlock_irq(&dev->event_lock); 13618 * This is already handled by crtc_state->update_wm_post,
13619 * but added for clarity.
13620 */
13621 if (crtc_state->disable_cxsr)
13622 return true;
13088 13623
13089 if (!pipe_config->base.active) 13624 return false;
13090 intel_schedule_unpin(crtc, state, work);
13091 else
13092 intel_schedule_flip(crtc, state, work, nonblock);
13093} 13625}
13094 13626
13095/** 13627/**
@@ -13116,7 +13648,11 @@ static int intel_atomic_commit(struct drm_device *dev,
13116 struct drm_i915_private *dev_priv = dev->dev_private; 13648 struct drm_i915_private *dev_priv = dev->dev_private;
13117 struct drm_crtc_state *old_crtc_state; 13649 struct drm_crtc_state *old_crtc_state;
13118 struct drm_crtc *crtc; 13650 struct drm_crtc *crtc;
13651 struct intel_crtc_state *intel_cstate;
13119 int ret = 0, i; 13652 int ret = 0, i;
13653 bool hw_check = intel_state->modeset;
13654 unsigned long put_domains[I915_MAX_PIPES] = {};
13655 unsigned crtc_vblank_mask = 0;
13120 13656
13121 ret = intel_atomic_prepare_commit(dev, state, nonblock); 13657 ret = intel_atomic_prepare_commit(dev, state, nonblock);
13122 if (ret) { 13658 if (ret) {
@@ -13134,20 +13670,27 @@ static int intel_atomic_commit(struct drm_device *dev,
13134 sizeof(intel_state->min_pixclk)); 13670 sizeof(intel_state->min_pixclk));
13135 dev_priv->active_crtcs = intel_state->active_crtcs; 13671 dev_priv->active_crtcs = intel_state->active_crtcs;
13136 dev_priv->atomic_cdclk_freq = intel_state->cdclk; 13672 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13673
13674 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13137 } 13675 }
13138 13676
13139 for_each_crtc_in_state(state, crtc, old_crtc_state, i) { 13677 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13141 13679
13680 if (needs_modeset(crtc->state) ||
13681 to_intel_crtc_state(crtc->state)->update_pipe) {
13682 hw_check = true;
13683
13684 put_domains[to_intel_crtc(crtc)->pipe] =
13685 modeset_get_crtc_power_domains(crtc,
13686 to_intel_crtc_state(crtc->state));
13687 }
13688
13142 if (!needs_modeset(crtc->state)) 13689 if (!needs_modeset(crtc->state))
13143 continue; 13690 continue;
13144 13691
13145 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); 13692 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13146 13693
13147 intel_state->work[i]->put_power_domains =
13148 modeset_get_crtc_power_domains(crtc,
13149 to_intel_crtc_state(crtc->state));
13150
13151 if (old_crtc_state->active) { 13694 if (old_crtc_state->active) {
13152 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); 13695 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13153 dev_priv->display.crtc_disable(crtc); 13696 dev_priv->display.crtc_disable(crtc);
@@ -13184,9 +13727,11 @@ static int intel_atomic_commit(struct drm_device *dev,
13184 13727
13185 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 13728 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13186 for_each_crtc_in_state(state, crtc, old_crtc_state, i) { 13729 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13187 struct intel_flip_work *work = intel_state->work[i];
13188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13730 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13189 bool modeset = needs_modeset(crtc->state); 13731 bool modeset = needs_modeset(crtc->state);
13732 struct intel_crtc_state *pipe_config =
13733 to_intel_crtc_state(crtc->state);
13734 bool update_pipe = !modeset && pipe_config->update_pipe;
13190 13735
13191 if (modeset && crtc->state->active) { 13736 if (modeset && crtc->state->active) {
13192 update_scanline_offset(to_intel_crtc(crtc)); 13737 update_scanline_offset(to_intel_crtc(crtc));
@@ -13196,30 +13741,53 @@ static int intel_atomic_commit(struct drm_device *dev,
13196 if (!modeset) 13741 if (!modeset)
13197 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); 13742 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13198 13743
13199 if (!work) { 13744 if (crtc->state->active &&
13200 if (!list_empty_careful(&intel_crtc->flip_work)) { 13745 drm_atomic_get_existing_plane_state(state, crtc->primary))
13201 spin_lock_irq(&dev->event_lock); 13746 intel_fbc_enable(intel_crtc);
13202 if (!list_empty(&intel_crtc->flip_work))
13203 work = list_last_entry(&intel_crtc->flip_work,
13204 struct intel_flip_work, head);
13205
13206 if (work && work->new_crtc_state == to_intel_crtc_state(old_crtc_state)) {
13207 work->free_new_crtc_state = true;
13208 state->crtc_states[i] = NULL;
13209 state->crtcs[i] = NULL;
13210 }
13211 spin_unlock_irq(&dev->event_lock);
13212 }
13213 continue;
13214 }
13215 13747
13216 intel_state->work[i] = NULL; 13748 if (crtc->state->active &&
13217 intel_prepare_work(crtc, work, state, old_crtc_state); 13749 (crtc->state->planes_changed || update_pipe))
13218 intel_schedule_update(crtc, intel_state, work, nonblock); 13750 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13751
13752 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13753 crtc_vblank_mask |= 1 << i;
13219 } 13754 }
13220 13755
13221 /* FIXME: add subpixel order */ 13756 /* FIXME: add subpixel order */
13222 13757
13758 if (!state->legacy_cursor_update)
13759 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13760
13761 /*
13762 * Now that the vblank has passed, we can go ahead and program the
13763 * optimal watermarks on platforms that need two-step watermark
13764 * programming.
13765 *
13766 * TODO: Move this (and other cleanup) to an async worker eventually.
13767 */
13768 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13769 intel_cstate = to_intel_crtc_state(crtc->state);
13770
13771 if (dev_priv->display.optimize_watermarks)
13772 dev_priv->display.optimize_watermarks(intel_cstate);
13773 }
13774
13775 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13776 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13777
13778 if (put_domains[i])
13779 modeset_put_power_domains(dev_priv, put_domains[i]);
13780
13781 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13782 }
13783
13784 if (intel_state->modeset)
13785 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13786
13787 mutex_lock(&dev->struct_mutex);
13788 drm_atomic_helper_cleanup_planes(dev, state);
13789 mutex_unlock(&dev->struct_mutex);
13790
13223 drm_atomic_state_free(state); 13791 drm_atomic_state_free(state);
13224 13792
13225 /* As one of the primary mmio accessors, KMS has a high likelihood 13793 /* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13283,38 +13851,11 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
13283 .set_config = drm_atomic_helper_set_config, 13851 .set_config = drm_atomic_helper_set_config,
13284 .set_property = drm_atomic_helper_crtc_set_property, 13852 .set_property = drm_atomic_helper_crtc_set_property,
13285 .destroy = intel_crtc_destroy, 13853 .destroy = intel_crtc_destroy,
13286 .page_flip = drm_atomic_helper_page_flip, 13854 .page_flip = intel_crtc_page_flip,
13287 .atomic_duplicate_state = intel_crtc_duplicate_state, 13855 .atomic_duplicate_state = intel_crtc_duplicate_state,
13288 .atomic_destroy_state = intel_crtc_destroy_state, 13856 .atomic_destroy_state = intel_crtc_destroy_state,
13289}; 13857};
13290 13858
13291static struct fence *intel_get_excl_fence(struct drm_i915_gem_object *obj)
13292{
13293 struct reservation_object *resv;
13294
13295
13296 if (!obj->base.dma_buf)
13297 return NULL;
13298
13299 resv = obj->base.dma_buf->resv;
13300
13301 /* For framebuffer backed by dmabuf, wait for fence */
13302 while (1) {
13303 struct fence *fence_excl, *ret = NULL;
13304
13305 rcu_read_lock();
13306
13307 fence_excl = rcu_dereference(resv->fence_excl);
13308 if (fence_excl)
13309 ret = fence_get_rcu(fence_excl);
13310
13311 rcu_read_unlock();
13312
13313 if (ret == fence_excl)
13314 return ret;
13315 }
13316}
13317
13318/** 13859/**
13319 * intel_prepare_plane_fb - Prepare fb for usage on plane 13860 * intel_prepare_plane_fb - Prepare fb for usage on plane
13320 * @plane: drm plane to prepare for 13861 * @plane: drm plane to prepare for
@@ -13338,20 +13879,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13338 struct intel_plane *intel_plane = to_intel_plane(plane); 13879 struct intel_plane *intel_plane = to_intel_plane(plane);
13339 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13880 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13340 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 13881 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13341 struct drm_crtc *crtc = new_state->crtc ?: plane->state->crtc;
13342 int ret = 0; 13882 int ret = 0;
13343 13883
13344 if (!obj && !old_obj) 13884 if (!obj && !old_obj)
13345 return 0; 13885 return 0;
13346 13886
13347 if (WARN_ON(!new_state->state) || WARN_ON(!crtc) ||
13348 WARN_ON(!to_intel_atomic_state(new_state->state)->work[to_intel_crtc(crtc)->pipe])) {
13349 if (WARN_ON(old_obj != obj))
13350 return -EINVAL;
13351
13352 return 0;
13353 }
13354
13355 if (old_obj) { 13887 if (old_obj) {
13356 struct drm_crtc_state *crtc_state = 13888 struct drm_crtc_state *crtc_state =
13357 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc); 13889 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
@@ -13376,6 +13908,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13376 } 13908 }
13377 } 13909 }
13378 13910
13911 /* For framebuffer backed by dmabuf, wait for fence */
13912 if (obj && obj->base.dma_buf) {
13913 long lret;
13914
13915 lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13916 false, true,
13917 MAX_SCHEDULE_TIMEOUT);
13918 if (lret == -ERESTARTSYS)
13919 return lret;
13920
13921 WARN(lret < 0, "waiting returns %li\n", lret);
13922 }
13923
13379 if (!obj) { 13924 if (!obj) {
13380 ret = 0; 13925 ret = 0;
13381 } else if (plane->type == DRM_PLANE_TYPE_CURSOR && 13926 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13395,8 +13940,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13395 13940
13396 i915_gem_request_assign(&plane_state->wait_req, 13941 i915_gem_request_assign(&plane_state->wait_req,
13397 obj->last_write_req); 13942 obj->last_write_req);
13398
13399 plane_state->base.fence = intel_get_excl_fence(obj);
13400 } 13943 }
13401 13944
13402 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); 13945 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
@@ -13439,9 +13982,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
13439 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); 13982 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13440 13983
13441 i915_gem_request_assign(&old_intel_state->wait_req, NULL); 13984 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13442
13443 fence_put(old_intel_state->base.fence);
13444 old_intel_state->base.fence = NULL;
13445} 13985}
13446 13986
13447int 13987int
@@ -13501,6 +14041,40 @@ intel_check_primary_plane(struct drm_plane *plane,
13501 &state->visible); 14041 &state->visible);
13502} 14042}
13503 14043
14044static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14045 struct drm_crtc_state *old_crtc_state)
14046{
14047 struct drm_device *dev = crtc->dev;
14048 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14049 struct intel_crtc_state *old_intel_state =
14050 to_intel_crtc_state(old_crtc_state);
14051 bool modeset = needs_modeset(crtc->state);
14052
14053 /* Perform vblank evasion around commit operation */
14054 intel_pipe_update_start(intel_crtc);
14055
14056 if (modeset)
14057 return;
14058
14059 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14060 intel_color_set_csc(crtc->state);
14061 intel_color_load_luts(crtc->state);
14062 }
14063
14064 if (to_intel_crtc_state(crtc->state)->update_pipe)
14065 intel_update_pipe_config(intel_crtc, old_intel_state);
14066 else if (INTEL_INFO(dev)->gen >= 9)
14067 skl_detach_scalers(intel_crtc);
14068}
14069
14070static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14071 struct drm_crtc_state *old_crtc_state)
14072{
14073 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14074
14075 intel_pipe_update_end(intel_crtc, NULL);
14076}
14077
13504/** 14078/**
13505 * intel_plane_destroy - destroy a plane 14079 * intel_plane_destroy - destroy a plane
13506 * @plane: plane to destroy 14080 * @plane: plane to destroy
@@ -13811,8 +14385,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
13811 intel_crtc->base.state = &crtc_state->base; 14385 intel_crtc->base.state = &crtc_state->base;
13812 crtc_state->base.crtc = &intel_crtc->base; 14386 crtc_state->base.crtc = &intel_crtc->base;
13813 14387
13814 INIT_LIST_HEAD(&intel_crtc->flip_work);
13815
13816 /* initialize shared scalers */ 14388 /* initialize shared scalers */
13817 if (INTEL_INFO(dev)->gen >= 9) { 14389 if (INTEL_INFO(dev)->gen >= 9) {
13818 if (pipe == PIPE_C) 14390 if (pipe == PIPE_C)
@@ -14568,6 +15140,34 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14568 dev_priv->display.modeset_calc_cdclk = 15140 dev_priv->display.modeset_calc_cdclk =
14569 skl_modeset_calc_cdclk; 15141 skl_modeset_calc_cdclk;
14570 } 15142 }
15143
15144 switch (INTEL_INFO(dev_priv)->gen) {
15145 case 2:
15146 dev_priv->display.queue_flip = intel_gen2_queue_flip;
15147 break;
15148
15149 case 3:
15150 dev_priv->display.queue_flip = intel_gen3_queue_flip;
15151 break;
15152
15153 case 4:
15154 case 5:
15155 dev_priv->display.queue_flip = intel_gen4_queue_flip;
15156 break;
15157
15158 case 6:
15159 dev_priv->display.queue_flip = intel_gen6_queue_flip;
15160 break;
15161 case 7:
15162 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15163 dev_priv->display.queue_flip = intel_gen7_queue_flip;
15164 break;
15165 case 9:
15166 /* Drop through - unsupported since execlist only. */
15167 default:
15168 /* Default just returns -ENODEV to indicate unsupported */
15169 dev_priv->display.queue_flip = intel_default_queue_flip;
15170 }
14571} 15171}
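
The restored queue_flip vtable doubles as the fallback story for CS flips: gen2 through gen8 each get a ring-specific emitter, while gen9 and anything unknown land in intel_default_queue_flip() and its -ENODEV. In practice gen9 never reaches it, since use_mmio_flip() always chooses the MMIO path when execlists are enabled.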
14572 15172
14573/* 15173/*
@@ -15526,9 +16126,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
15526 DRM_ERROR("failed to pin boot fb on pipe %d\n", 16126 DRM_ERROR("failed to pin boot fb on pipe %d\n",
15527 to_intel_crtc(c)->pipe); 16127 to_intel_crtc(c)->pipe);
15528 drm_framebuffer_unreference(c->primary->fb); 16128 drm_framebuffer_unreference(c->primary->fb);
15529 drm_framebuffer_unreference(c->primary->state->fb); 16129 c->primary->fb = NULL;
15530 c->primary->fb = c->primary->state->fb = NULL;
15531 c->primary->crtc = c->primary->state->crtc = NULL; 16130 c->primary->crtc = c->primary->state->crtc = NULL;
16131 update_state_fb(c->primary);
15532 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); 16132 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
15533 } 16133 }
15534 } 16134 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 03d4b1ade2d1..9b5f6634c558 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -304,8 +304,6 @@ struct intel_atomic_state {
304 unsigned int active_crtcs; 304 unsigned int active_crtcs;
305 unsigned int min_pixclk[I915_MAX_PIPES]; 305 unsigned int min_pixclk[I915_MAX_PIPES];
306 306
307 struct intel_flip_work *work[I915_MAX_PIPES];
308
309 /* SKL/KBL Only */ 307 /* SKL/KBL Only */
310 unsigned int cdclk_pll_vco; 308 unsigned int cdclk_pll_vco;
311 309
@@ -646,7 +644,7 @@ struct intel_crtc {
646 unsigned long enabled_power_domains; 644 unsigned long enabled_power_domains;
647 bool lowfreq_avail; 645 bool lowfreq_avail;
648 struct intel_overlay *overlay; 646 struct intel_overlay *overlay;
649 struct list_head flip_work; 647 struct intel_flip_work *flip_work;
650 648
651 atomic_t unpin_work_count; 649 atomic_t unpin_work_count;
652 650
@@ -664,6 +662,9 @@ struct intel_crtc {
664 662
665 struct intel_crtc_state *config; 663 struct intel_crtc_state *config;
666 664
665 /* reset counter value when the last flip was submitted */
666 unsigned int reset_counter;
667
667 /* Access to these should be protected by dev_priv->irq_lock. */ 668 /* Access to these should be protected by dev_priv->irq_lock. */
668 bool cpu_fifo_underrun_disabled; 669 bool cpu_fifo_underrun_disabled;
669 bool pch_fifo_underrun_disabled; 670 bool pch_fifo_underrun_disabled;
@@ -972,28 +973,20 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
972} 973}
973 974
974struct intel_flip_work { 975struct intel_flip_work {
975 struct list_head head;
976
977 struct work_struct unpin_work; 976 struct work_struct unpin_work;
978 struct work_struct mmio_work; 977 struct work_struct mmio_work;
979 978
979 struct drm_crtc *crtc;
980 struct drm_framebuffer *old_fb;
981 struct drm_i915_gem_object *pending_flip_obj;
980 struct drm_pending_vblank_event *event; 982 struct drm_pending_vblank_event *event;
981 atomic_t pending; 983 atomic_t pending;
984 u32 flip_count;
985 u32 gtt_offset;
986 struct drm_i915_gem_request *flip_queued_req;
982 u32 flip_queued_vblank; 987 u32 flip_queued_vblank;
983 988 u32 flip_ready_vblank;
984 unsigned put_power_domains; 989 unsigned int rotation;
985 unsigned num_planes;
986
987 bool can_async_unpin, free_new_crtc_state;
988 unsigned fb_bits;
989
990 unsigned num_old_connectors, num_new_connectors;
991 struct drm_connector_state **old_connector_state;
992 struct drm_connector_state **new_connector_state;
993
994 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
995 struct intel_plane_state *old_plane_state[I915_MAX_PLANES + 1];
996 struct intel_plane_state *new_plane_state[I915_MAX_PLANES + 1];
997}; 990};
998 991
999struct intel_load_detect_pipe { 992struct intel_load_detect_pipe {
@@ -1153,7 +1146,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
1153bool intel_has_pending_fb_unpin(struct drm_device *dev); 1146bool intel_has_pending_fb_unpin(struct drm_device *dev);
1154void intel_mark_busy(struct drm_i915_private *dev_priv); 1147void intel_mark_busy(struct drm_i915_private *dev_priv);
1155void intel_mark_idle(struct drm_i915_private *dev_priv); 1148void intel_mark_idle(struct drm_i915_private *dev_priv);
1156void intel_free_flip_work(struct intel_flip_work *work);
1157void intel_crtc_restore_mode(struct drm_crtc *crtc); 1149void intel_crtc_restore_mode(struct drm_crtc *crtc);
1158int intel_display_suspend(struct drm_device *dev); 1150int intel_display_suspend(struct drm_device *dev);
1159void intel_encoder_destroy(struct drm_encoder *encoder); 1151void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1206,8 +1198,9 @@ struct drm_framebuffer *
1206__intel_framebuffer_create(struct drm_device *dev, 1198__intel_framebuffer_create(struct drm_device *dev,
1207 struct drm_mode_fb_cmd2 *mode_cmd, 1199 struct drm_mode_fb_cmd2 *mode_cmd,
1208 struct drm_i915_gem_object *obj); 1200 struct drm_i915_gem_object *obj);
1201void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
1209void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe); 1202void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
1210 1203void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
1211int intel_prepare_plane_fb(struct drm_plane *plane, 1204int intel_prepare_plane_fb(struct drm_plane *plane,
1212 const struct drm_plane_state *new_state); 1205 const struct drm_plane_state *new_state);
1213void intel_cleanup_plane_fb(struct drm_plane *plane, 1206void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1430,15 +1423,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
1430void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, 1423void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1431 struct drm_atomic_state *state); 1424 struct drm_atomic_state *state);
1432bool intel_fbc_is_active(struct drm_i915_private *dev_priv); 1425bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
1433void intel_fbc_pre_update(struct intel_crtc *crtc, 1426void intel_fbc_pre_update(struct intel_crtc *crtc);
1434 struct intel_crtc_state *crtc_state,
1435 struct intel_plane_state *plane_state);
1436void intel_fbc_post_update(struct intel_crtc *crtc); 1427void intel_fbc_post_update(struct intel_crtc *crtc);
1437void intel_fbc_init(struct drm_i915_private *dev_priv); 1428void intel_fbc_init(struct drm_i915_private *dev_priv);
1438void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); 1429void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
1439void intel_fbc_enable(struct intel_crtc *crtc, 1430void intel_fbc_enable(struct intel_crtc *crtc);
1440 struct intel_crtc_state *crtc_state,
1441 struct intel_plane_state *plane_state);
1442void intel_fbc_disable(struct intel_crtc *crtc); 1431void intel_fbc_disable(struct intel_crtc *crtc);
1443void intel_fbc_global_disable(struct drm_i915_private *dev_priv); 1432void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
1444void intel_fbc_invalidate(struct drm_i915_private *dev_priv, 1433void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d2b0269b2fe4..0dea5fbcd8aa 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -480,10 +480,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
480 intel_fbc_hw_deactivate(dev_priv); 480 intel_fbc_hw_deactivate(dev_priv);
481} 481}
482 482
483static bool multiple_pipes_ok(struct intel_crtc *crtc, 483static bool multiple_pipes_ok(struct intel_crtc *crtc)
484 struct intel_plane_state *plane_state)
485{ 484{
486 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 485 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
486 struct drm_plane *primary = crtc->base.primary;
487 struct intel_fbc *fbc = &dev_priv->fbc; 487 struct intel_fbc *fbc = &dev_priv->fbc;
488 enum pipe pipe = crtc->pipe; 488 enum pipe pipe = crtc->pipe;
489 489
@@ -491,7 +491,9 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc,
491 if (!no_fbc_on_multiple_pipes(dev_priv)) 491 if (!no_fbc_on_multiple_pipes(dev_priv))
492 return true; 492 return true;
493 493
494 if (plane_state->visible) 494 WARN_ON(!drm_modeset_is_locked(&primary->mutex));
495
496 if (to_intel_plane_state(primary->state)->visible)
495 fbc->visible_pipes_mask |= (1 << pipe); 497 fbc->visible_pipes_mask |= (1 << pipe);
496 else 498 else
497 fbc->visible_pipes_mask &= ~(1 << pipe); 499 fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -706,16 +708,21 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
-					 struct intel_crtc_state *crtc_state,
-					 struct intel_plane_state *plane_state)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	struct intel_plane_state *plane_state =
+		to_intel_plane_state(crtc->base.primary->state);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj;
 
+	WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
+
 	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		cache->crtc.hsw_bdw_pixel_rate =
@@ -880,9 +887,7 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
 	return memcmp(params1, params2, sizeof(*params1)) == 0;
 }
 
-void intel_fbc_pre_update(struct intel_crtc *crtc,
-			  struct intel_crtc_state *crtc_state,
-			  struct intel_plane_state *plane_state)
+void intel_fbc_pre_update(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_fbc *fbc = &dev_priv->fbc;
@@ -892,7 +897,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
 
 	mutex_lock(&fbc->lock);
 
-	if (!multiple_pipes_ok(crtc, plane_state)) {
+	if (!multiple_pipes_ok(crtc)) {
 		fbc->no_fbc_reason = "more than one pipe active";
 		goto deactivate;
 	}
@@ -900,7 +905,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
 	if (!fbc->enabled || fbc->crtc != crtc)
 		goto unlock;
 
-	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+	intel_fbc_update_state_cache(crtc);
 
 deactivate:
 	intel_fbc_deactivate(dev_priv);
@@ -1084,9 +1089,7 @@ out:
  * intel_fbc_enable multiple times for the same pipe without an
  * intel_fbc_disable in the middle, as long as it is deactivated.
  */
-void intel_fbc_enable(struct intel_crtc *crtc,
-		      struct intel_crtc_state *crtc_state,
-		      struct intel_plane_state *plane_state)
+void intel_fbc_enable(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_fbc *fbc = &dev_priv->fbc;
@@ -1099,19 +1102,19 @@ void intel_fbc_enable(struct intel_crtc *crtc,
 	if (fbc->enabled) {
 		WARN_ON(fbc->crtc == NULL);
 		if (fbc->crtc == crtc) {
-			WARN_ON(!crtc_state->enable_fbc);
+			WARN_ON(!crtc->config->enable_fbc);
 			WARN_ON(fbc->active);
 		}
 		goto out;
 	}
 
-	if (!crtc_state->enable_fbc)
+	if (!crtc->config->enable_fbc)
 		goto out;
 
 	WARN_ON(fbc->active);
 	WARN_ON(fbc->crtc != NULL);
 
-	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+	intel_fbc_update_state_cache(crtc);
 	if (intel_fbc_alloc_cfb(crtc)) {
 		fbc->no_fbc_reason = "not enough stolen memory";
 		goto out;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 53715037ab54..5c191a1afaaf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -260,7 +260,9 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
 	if (enable_execlists == 0)
 		return 0;
 
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && USES_PPGTT(dev_priv))
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+	    USES_PPGTT(dev_priv) &&
+	    i915.use_mmio_flip >= 0)
 		return 1;
 
 	return 0;