author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2016-05-17 09:07:54 -0400
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2016-05-19 08:38:06 -0400
commit     143f73b3bf48c089b40f58462dd7f7c199fd4f0f (patch)
tree       471125f7551e8d448b30d3c20ad738de0d73b1d9
parent     84fc494b64e8c591be446a966b7447a9db519c88 (diff)
drm/i915: Rework intel_crtc_page_flip to be almost atomic, v3.
Create a work structure that will be used for all changes. This will
be used later on in the atomic commit function.

Changes since v1:
- Free old_crtc_state from unpin_work_fn properly.
Changes since v2:
- Add hunk for calling hw state verifier.
- Add missing support for color spaces.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-12-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
 drivers/gpu/drm/i915/i915_debugfs.c  (-rw-r--r--) |  36
 drivers/gpu/drm/i915/intel_display.c (-rw-r--r--) | 672
 drivers/gpu/drm/i915/intel_drv.h     (-rw-r--r--) |  13
 3 files changed, 441 insertions(+), 280 deletions(-)
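The heart of the rework shows in the intel_drv.h hunk at the end: intel_flip_work no longer tracks a single pending_flip_obj/old_fb pair but snapshots old and new state for every plane touched by the flip, and all unpinning is deferred to one worker. The toy program below models that lifecycle in isolation; every type and helper in it is a simplified stand-in invented for illustration, not the actual i915 code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PLANES 4

/* Simplified stand-ins for intel_plane_state / intel_flip_work. */
struct plane_state {
        int fb_id;              /* framebuffer bound in this state */
        bool visible;
};

struct flip_work {
        unsigned int num_planes;
        bool can_async_unpin;   /* legacy single-plane flip may complete early */
        struct plane_state old_state[MAX_PLANES];
        struct plane_state new_state[MAX_PLANES];
};

/* Snapshot one plane transition into the work structure. */
static void flip_work_add_plane(struct flip_work *work,
                                struct plane_state from, struct plane_state to)
{
        work->old_state[work->num_planes] = from;
        work->new_state[work->num_planes] = to;
        work->num_planes++;
}

/* Deferred unpin worker: one pass over all planes, then free the work. */
static void unpin_work_fn(struct flip_work *work)
{
        unsigned int i;

        for (i = 0; i < work->num_planes; i++)
                printf("unpinning old fb %d\n", work->old_state[i].fb_id);
        free(work);
}

int main(void)
{
        struct flip_work *work = calloc(1, sizeof(*work));

        if (!work)
                return 1;

        work->can_async_unpin = true;   /* page-flip ioctl path */
        flip_work_add_plane(work,
                            (struct plane_state){ .fb_id = 1, .visible = true },
                            (struct plane_state){ .fb_id = 2, .visible = true });

        /* ... flip queued (CS or MMIO), vblank passes, flip completes ... */
        unpin_work_fn(work);
        return 0;
}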
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 220ec15e9864..6bce4fd8aaf4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -598,29 +598,43 @@ static void i915_dump_pageflip(struct seq_file *m,
                              struct intel_flip_work *work)
 {
         const char pipe = pipe_name(crtc->pipe);
-        const char plane = plane_name(crtc->plane);
         u32 pending;
         u32 addr;
+        int i;
 
         pending = atomic_read(&work->pending);
         if (pending) {
                 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
-                           pipe, plane);
+                           pipe, plane_name(crtc->plane));
         } else {
                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
-                           pipe, plane);
+                           pipe, plane_name(crtc->plane));
         }
-        if (work->flip_queued_req) {
-                struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 
-                seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+
+        for (i = 0; i < work->num_planes; i++) {
+                struct intel_plane_state *old_plane_state = work->old_plane_state[i];
+                struct drm_plane *plane = old_plane_state->base.plane;
+                struct drm_i915_gem_request *req = old_plane_state->wait_req;
+                struct intel_engine_cs *engine;
+
+                seq_printf(m, "[PLANE:%i] part of flip.\n", plane->base.id);
+
+                if (!req) {
+                        seq_printf(m, "Plane not associated with any engine\n");
+                        continue;
+                }
+
+                engine = i915_gem_request_get_engine(req);
+
+                seq_printf(m, "Plane blocked on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
                            engine->name,
-                           i915_gem_request_get_seqno(work->flip_queued_req),
+                           i915_gem_request_get_seqno(req),
                            dev_priv->next_seqno,
                            engine->get_seqno(engine),
-                           i915_gem_request_completed(work->flip_queued_req, true));
-        } else
-                seq_printf(m, "Flip not associated with any ring\n");
+                           i915_gem_request_completed(req, true));
+        }
+
         seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
                    work->flip_queued_vblank,
                    work->flip_ready_vblank,
@@ -633,7 +647,7 @@ static void i915_dump_pageflip(struct seq_file *m,
         addr = I915_READ(DSPADDR(crtc->plane));
         seq_printf(m, "Current scanout address 0x%08x\n", addr);
 
-        if (work->pending_flip_obj) {
+        if (work->flip_queued_req) {
                 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
                 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
         }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9900050ff30a..59881116beec 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -50,7 +50,7 @@
 
 static bool is_mmio_work(struct intel_flip_work *work)
 {
-        return work->mmio_work.func;
+        return !work->flip_queued_req;
 }
 
 /* Primary plane formats for gen <= 3 */
@@ -123,6 +123,9 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static void intel_modeset_verify_crtc(struct drm_crtc *crtc,
+                                      struct drm_crtc_state *old_state,
+                                      struct drm_crtc_state *new_state);
 
 struct intel_limit {
         struct {
@@ -2527,20 +2530,6 @@ out_unref_obj:
         return false;
 }
 
-/* Update plane->state->fb to match plane->fb after driver-internal updates */
-static void
-update_state_fb(struct drm_plane *plane)
-{
-        if (plane->fb == plane->state->fb)
-                return;
-
-        if (plane->state->fb)
-                drm_framebuffer_unreference(plane->state->fb);
-        plane->state->fb = plane->fb;
-        if (plane->state->fb)
-                drm_framebuffer_reference(plane->state->fb);
-}
-
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                              struct intel_initial_plane_config *plane_config)
@@ -3806,19 +3795,27 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 static void page_flip_completed(struct intel_crtc *intel_crtc, struct intel_flip_work *work)
 {
         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-
-        list_del_init(&work->head);
+        struct drm_plane_state *new_plane_state;
+        struct drm_plane *primary = intel_crtc->base.primary;
 
         if (work->event)
                 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
 
         drm_crtc_vblank_put(&intel_crtc->base);
 
-        wake_up_all(&dev_priv->pending_flip_queue);
-        queue_work(dev_priv->wq, &work->unpin_work);
+        new_plane_state = &work->old_plane_state[0]->base;
+        if (work->num_planes >= 1 &&
+            new_plane_state->plane == primary &&
+            new_plane_state->fb)
+                trace_i915_flip_complete(intel_crtc->plane,
+                                         intel_fb_obj(new_plane_state->fb));
 
-        trace_i915_flip_complete(intel_crtc->plane,
-                                 work->pending_flip_obj);
+        if (work->can_async_unpin) {
+                list_del_init(&work->head);
+                wake_up_all(&dev_priv->pending_flip_queue);
+        }
+
+        queue_work(dev_priv->wq, &work->unpin_work);
 }
 
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -3849,7 +3846,9 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
          */
         work = list_first_entry_or_null(&intel_crtc->flip_work,
                                         struct intel_flip_work, head);
-        if (work && !is_mmio_work(work)) {
+
+        if (work && !is_mmio_work(work) &&
+            !work_busy(&work->unpin_work)) {
                 WARN_ONCE(1, "Removing stuck page flip\n");
                 page_flip_completed(intel_crtc, work);
         }
@@ -10850,31 +10849,112 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
         kfree(intel_crtc);
 }
 
+static void intel_crtc_post_flip_update(struct intel_flip_work *work,
+                                        struct drm_crtc *crtc)
+{
+        struct intel_crtc_state *crtc_state = work->new_crtc_state;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+        if (crtc_state->disable_cxsr)
+                intel_crtc->wm.cxsr_allowed = true;
+
+        if (crtc_state->update_wm_post && crtc_state->base.active)
+                intel_update_watermarks(crtc);
+
+        if (work->num_planes > 0 &&
+            work->old_plane_state[0]->base.plane == crtc->primary) {
+                struct intel_plane_state *plane_state =
+                        work->new_plane_state[0];
+
+                if (plane_state->visible &&
+                    (needs_modeset(&crtc_state->base) ||
+                     !work->old_plane_state[0]->visible))
+                        intel_post_enable_primary(crtc);
+        }
+}
+
 static void intel_unpin_work_fn(struct work_struct *__work)
 {
         struct intel_flip_work *work =
                 container_of(__work, struct intel_flip_work, unpin_work);
-        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
-        struct drm_device *dev = crtc->base.dev;
-        struct drm_plane *primary = crtc->base.primary;
+        struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        int i;
 
-        if (is_mmio_work(work))
-                flush_work(&work->mmio_work);
+        if (work->fb_bits)
+                intel_frontbuffer_flip_complete(dev, work->fb_bits);
 
-        mutex_lock(&dev->struct_mutex);
-        intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
-        drm_gem_object_unreference(&work->pending_flip_obj->base);
+        /*
+         * Unless work->can_async_unpin is false, there's no way to ensure
+         * that work->new_crtc_state contains valid memory during unpin
+         * because intel_atomic_commit may free it before this runs.
+         */
+        if (!work->can_async_unpin)
+                intel_crtc_post_flip_update(work, crtc);
+
+        if (work->fb_bits & to_intel_plane(crtc->primary)->frontbuffer_bit)
+                intel_fbc_post_update(intel_crtc);
+
+        if (work->put_power_domains)
+                modeset_put_power_domains(dev_priv, work->put_power_domains);
+
+        /* Make sure mmio work is completely finished before freeing all state here. */
+        flush_work(&work->mmio_work);
+
+        if (!work->can_async_unpin)
+                /* This must be called before work is unpinned for serialization. */
+                intel_modeset_verify_crtc(crtc, &work->old_crtc_state->base,
+                                          &work->new_crtc_state->base);
+
+        if (!work->can_async_unpin || !list_empty(&work->head)) {
+                spin_lock_irq(&dev->event_lock);
+                WARN(list_empty(&work->head) != work->can_async_unpin,
+                     "[CRTC:%i] Pin work %p async %i with %i planes, active %i -> %i ms %i\n",
+                     crtc->base.id, work, work->can_async_unpin, work->num_planes,
+                     work->old_crtc_state->base.active, work->new_crtc_state->base.active,
+                     needs_modeset(&work->new_crtc_state->base));
+
+                if (!list_empty(&work->head))
+                        list_del(&work->head);
+
+                wake_up_all(&dev_priv->pending_flip_queue);
+                spin_unlock_irq(&dev->event_lock);
+        }
+
+        intel_crtc_destroy_state(crtc, &work->old_crtc_state->base);
 
         if (work->flip_queued_req)
-                i915_gem_request_assign(&work->flip_queued_req, NULL);
-        mutex_unlock(&dev->struct_mutex);
+                i915_gem_request_unreference(work->flip_queued_req);
+
+        for (i = 0; i < work->num_planes; i++) {
+                struct intel_plane_state *old_plane_state =
+                        work->old_plane_state[i];
+                struct drm_framebuffer *old_fb = old_plane_state->base.fb;
+                struct drm_plane *plane = old_plane_state->base.plane;
+                struct drm_i915_gem_request *req;
+
+                req = old_plane_state->wait_req;
+                old_plane_state->wait_req = NULL;
+                i915_gem_request_unreference(req);
+
+                fence_put(old_plane_state->base.fence);
+                old_plane_state->base.fence = NULL;
+
+                if (old_fb &&
+                    (plane->type != DRM_PLANE_TYPE_CURSOR ||
+                     !INTEL_INFO(dev_priv)->cursor_needs_physical)) {
+                        mutex_lock(&dev->struct_mutex);
+                        intel_unpin_fb_obj(old_fb, old_plane_state->base.rotation);
+                        mutex_unlock(&dev->struct_mutex);
+                }
 
-        intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
-        intel_fbc_post_update(crtc);
-        drm_framebuffer_unreference(work->old_fb);
+                intel_plane_destroy_state(plane, &old_plane_state->base);
+        }
 
-        BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
-        atomic_dec(&crtc->unpin_work_count);
+        if (!WARN_ON(atomic_read(&intel_crtc->unpin_work_count) == 0))
+                atomic_dec(&intel_crtc->unpin_work_count);
 
         kfree(work);
 }
@@ -10988,7 +11068,8 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
                 if (is_mmio_work(work))
                         break;
 
-                if (!pageflip_finished(intel_crtc, work))
+                if (!pageflip_finished(intel_crtc, work) ||
+                    work_busy(&work->unpin_work))
                         break;
 
                 page_flip_completed(intel_crtc, work);
@@ -11021,7 +11102,8 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
                 if (!is_mmio_work(work))
                         break;
 
-                if (!pageflip_finished(intel_crtc, work))
+                if (!pageflip_finished(intel_crtc, work) ||
+                    work_busy(&work->unpin_work))
                         break;
 
                 page_flip_completed(intel_crtc, work);
@@ -11270,70 +11352,204 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
         return 0;
 }
 
-static bool use_mmio_flip(struct intel_engine_cs *engine,
-                          struct drm_i915_gem_object *obj)
-{
-        /*
-         * This is not being used for older platforms, because
-         * non-availability of flip done interrupt forces us to use
-         * CS flips. Older platforms derive flip done using some clever
-         * tricks involving the flip_pending status bits and vblank irqs.
-         * So using MMIO flips there would disrupt this mechanism.
-         */
-
-        if (engine == NULL)
-                return true;
-
-        if (i915.use_mmio_flip < 0)
-                return false;
-        else if (i915.use_mmio_flip > 0)
-                return true;
-        else if (i915.enable_execlists)
-                return true;
-        else if (obj->base.dma_buf &&
-                 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
-                                                       false))
-                return true;
-        else
-                return engine != i915_gem_request_get_engine(obj->last_write_req);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *w)
-{
-        struct intel_flip_work *work =
-                container_of(w, struct intel_flip_work, mmio_work);
-        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
-        struct drm_device *dev = crtc->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_plane *primary = to_intel_plane(crtc->base.primary);
-        struct drm_i915_gem_object *obj = intel_fb_obj(primary->base.state->fb);
-
-        if (work->flip_queued_req)
-                WARN_ON(__i915_wait_request(work->flip_queued_req,
-                                            false, NULL,
-                                            &dev_priv->rps.mmioflips));
-
-        /* For framebuffer backed by dmabuf, wait for fence */
-        if (obj->base.dma_buf)
-                WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-                                                            false, false,
-                                                            MAX_SCHEDULE_TIMEOUT) < 0);
-
-        intel_pipe_update_start(crtc);
-        primary->update_plane(&primary->base,
-                              crtc->config,
-                              to_intel_plane_state(primary->base.state));
-        intel_pipe_update_end(crtc, work);
-}
-
-static int intel_default_queue_flip(struct drm_device *dev,
-                                    struct drm_crtc *crtc,
-                                    struct drm_framebuffer *fb,
-                                    struct drm_i915_gem_object *obj,
-                                    struct drm_i915_gem_request *req,
-                                    uint64_t gtt_offset)
-{
-        return -ENODEV;
-}
-
+static struct intel_engine_cs *
+intel_get_flip_engine(struct drm_device *dev,
+                      struct drm_i915_private *dev_priv,
+                      struct drm_i915_gem_object *obj)
+{
+        if (IS_VALLEYVIEW(dev) || IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+                return &dev_priv->engine[BCS];
+
+        if (dev_priv->info.gen >= 7) {
+                struct intel_engine_cs *engine;
+
+                engine = i915_gem_request_get_engine(obj->last_write_req);
+                if (engine && engine->id == RCS)
+                        return engine;
+
+                return &dev_priv->engine[BCS];
+        } else
+                return &dev_priv->engine[RCS];
+}
+
+static bool
+flip_fb_compatible(struct drm_device *dev,
+                   struct drm_framebuffer *fb,
+                   struct drm_framebuffer *old_fb)
+{
+        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+        struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+
+        if (old_fb->pixel_format != fb->pixel_format)
+                return false;
+
+        if (INTEL_INFO(dev)->gen > 3 &&
+            (fb->offsets[0] != old_fb->offsets[0] ||
+             fb->pitches[0] != old_fb->pitches[0]))
+                return false;
+
+        /* vlv: DISPLAY_FLIP fails to change tiling */
+        if (IS_VALLEYVIEW(dev) && obj->tiling_mode != old_obj->tiling_mode)
+                return false;
+
+        return true;
+}
+
+static void
+intel_display_flip_prepare(struct drm_device *dev, struct drm_crtc *crtc,
+                           struct intel_flip_work *work)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+        if (work->flip_prepared)
+                return;
+
+        work->flip_prepared = true;
+
+        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+                work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(intel_crtc->pipe)) + 1;
+        work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
+
+        intel_frontbuffer_flip_prepare(dev, work->new_crtc_state->fb_bits);
+}
+
+static void intel_flip_schedule_request(struct intel_flip_work *work, struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_plane_state *new_state = work->new_plane_state[0];
+        struct intel_plane_state *old_state = work->old_plane_state[0];
+        struct drm_framebuffer *fb, *old_fb;
+        struct drm_i915_gem_request *request = NULL;
+        struct intel_engine_cs *engine;
+        struct drm_i915_gem_object *obj;
+        struct fence *fence;
+        int ret;
+
+        to_intel_crtc(crtc)->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+        if (__i915_reset_in_progress_or_wedged(to_intel_crtc(crtc)->reset_counter))
+                goto mmio;
+
+        if (i915_terminally_wedged(&dev_priv->gpu_error) ||
+            i915_reset_in_progress(&dev_priv->gpu_error) ||
+            i915.enable_execlists || i915.use_mmio_flip > 0 ||
+            !dev_priv->display.queue_flip)
+                goto mmio;
+
+        /* Not right after modesetting, surface parameters need to be updated */
+        if (needs_modeset(crtc->state) ||
+            to_intel_crtc_state(crtc->state)->update_pipe)
+                goto mmio;
+
+        /* Only allow a mmio flip for a primary plane without a dma-buf fence */
+        if (work->num_planes != 1 ||
+            new_state->base.plane != crtc->primary ||
+            new_state->base.fence)
+                goto mmio;
+
+        fence = work->old_plane_state[0]->base.fence;
+        if (fence && !fence_is_signaled(fence))
+                goto mmio;
+
+        old_fb = old_state->base.fb;
+        fb = new_state->base.fb;
+        obj = intel_fb_obj(fb);
+
+        trace_i915_flip_request(to_intel_crtc(crtc)->plane, obj);
+
+        /* Only when updating a already visible fb. */
+        if (!new_state->visible || !old_state->visible)
+                goto mmio;
+
+        if (!flip_fb_compatible(dev, fb, old_fb))
+                goto mmio;
+
+        engine = intel_get_flip_engine(dev, dev_priv, obj);
+        if (i915.use_mmio_flip == 0 && obj->last_write_req &&
+            i915_gem_request_get_engine(obj->last_write_req) != engine)
+                goto mmio;
+
+        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj, 0);
+        work->gtt_offset += to_intel_crtc(crtc)->dspaddr_offset;
+
+        ret = i915_gem_object_sync(obj, engine, &request);
+        if (!ret && !request) {
+                request = i915_gem_request_alloc(engine, NULL);
+                ret = PTR_ERR_OR_ZERO(request);
+
+                if (ret)
+                        request = NULL;
+        }
+
+        intel_display_flip_prepare(dev, crtc, work);
+
+        if (!ret)
+                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 0);
+
+        if (!ret) {
+                i915_gem_request_assign(&work->flip_queued_req, request);
+                intel_mark_page_flip_active(to_intel_crtc(crtc), work);
+                i915_add_request_no_flush(request);
+                return;
+        }
+        if (request)
+                i915_add_request_no_flush(request);
+
+mmio:
+        schedule_work(&work->mmio_work);
+}
+
+static void intel_mmio_flip_work_func(struct work_struct *w)
+{
+        struct intel_flip_work *work =
+                container_of(w, struct intel_flip_work, mmio_work);
+        struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        struct intel_crtc_state *crtc_state = work->new_crtc_state;
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_gem_request *req;
+        int i;
+
+        for (i = 0; i < work->num_planes; i++) {
+                struct intel_plane_state *old_plane_state = work->old_plane_state[i];
+
+                /* For framebuffer backed by dmabuf, wait for fence */
+                if (old_plane_state->base.fence)
+                        WARN_ON(fence_wait(old_plane_state->base.fence, false) < 0);
+
+                req = old_plane_state->wait_req;
+                if (!req)
+                        continue;
+
+                WARN_ON(__i915_wait_request(req, false, NULL,
+                                            &dev_priv->rps.mmioflips));
+        }
+
+        intel_display_flip_prepare(dev, crtc, work);
+
+        intel_pipe_update_start(intel_crtc);
+        if (!needs_modeset(&crtc_state->base)) {
+                if (crtc_state->base.color_mgmt_changed || crtc_state->update_pipe) {
+                        intel_color_set_csc(&crtc_state->base);
+                        intel_color_load_luts(&crtc_state->base);
+                }
+
+                if (crtc_state->update_pipe)
+                        intel_update_pipe_config(intel_crtc, work->old_crtc_state);
+                else if (INTEL_INFO(dev)->gen >= 9)
+                        skl_detach_scalers(intel_crtc);
+        }
+
+        for (i = 0; i < work->num_planes; i++) {
+                struct intel_plane_state *new_plane_state = work->new_plane_state[i];
+                struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+
+                plane->update_plane(&plane->base, crtc_state, new_plane_state);
+        }
+
+        intel_pipe_update_end(intel_crtc, work);
+}
 
 static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
@@ -11342,7 +11558,8 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
 {
         u32 addr, vblank;
 
-        if (!atomic_read(&work->pending))
+        if (!atomic_read(&work->pending) ||
+            work_busy(&work->unpin_work))
                 return false;
 
         smp_rmb();
@@ -11409,6 +11626,33 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
         spin_unlock(&dev->event_lock);
 }
 
+static struct fence *intel_get_excl_fence(struct drm_i915_gem_object *obj)
+{
+        struct reservation_object *resv;
+
+
+        if (!obj->base.dma_buf)
+                return NULL;
+
+        resv = obj->base.dma_buf->resv;
+
+        /* For framebuffer backed by dmabuf, wait for fence */
+        while (1) {
+                struct fence *fence_excl, *ret = NULL;
+
+                rcu_read_lock();
+
+                fence_excl = rcu_dereference(resv->fence_excl);
+                if (fence_excl)
+                        ret = fence_get_rcu(fence_excl);
+
+                rcu_read_unlock();
+
+                if (ret == fence_excl)
+                        return ret;
+        }
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_pending_vblank_event *event,
@@ -11416,17 +11660,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 {
         struct drm_device *dev = crtc->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
-        struct drm_framebuffer *old_fb = crtc->primary->fb;
+        struct drm_plane_state *old_state, *new_state = NULL;
+        struct drm_crtc_state *new_crtc_state = NULL;
+        struct drm_framebuffer *old_fb = crtc->primary->state->fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         struct drm_plane *primary = crtc->primary;
-        enum pipe pipe = intel_crtc->pipe;
         struct intel_flip_work *work;
-        struct intel_engine_cs *engine;
-        bool mmio_flip;
-        struct drm_i915_gem_request *request = NULL;
         int ret;
 
+        old_state = crtc->primary->state;
+
+        if (!crtc->state->active)
+                return -EINVAL;
+
         /*
          * drm_mode_page_flip_ioctl() should already catch this, but double
          * check to be safe. In the future we may enable pageflipping from
@@ -11436,7 +11683,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                 return -EBUSY;
 
         /* Can't change pixel format via MI display flips. */
-        if (fb->pixel_format != crtc->primary->fb->pixel_format)
+        if (fb->pixel_format != old_fb->pixel_format)
                 return -EINVAL;
 
         /*
@@ -11444,25 +11691,44 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
          * Note that pitch changes could also affect these register.
          */
         if (INTEL_INFO(dev)->gen > 3 &&
-            (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
-             fb->pitches[0] != crtc->primary->fb->pitches[0]))
+            (fb->offsets[0] != old_fb->offsets[0] ||
+             fb->pitches[0] != old_fb->pitches[0]))
                 return -EINVAL;
 
-        if (i915_terminally_wedged(&dev_priv->gpu_error))
-                goto out_hang;
-
         work = kzalloc(sizeof(*work), GFP_KERNEL);
-        if (work == NULL)
-                return -ENOMEM;
+        new_crtc_state = intel_crtc_duplicate_state(crtc);
+        new_state = intel_plane_duplicate_state(primary);
+
+        if (!work || !new_crtc_state || !new_state) {
+                ret = -ENOMEM;
+                goto cleanup;
+        }
+
+        drm_framebuffer_unreference(new_state->fb);
+        drm_framebuffer_reference(fb);
+        new_state->fb = fb;
 
         work->event = event;
-        work->crtc = crtc;
-        work->old_fb = old_fb;
         INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
+        INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
+
+        work->new_crtc_state = to_intel_crtc_state(new_crtc_state);
+        work->old_crtc_state = intel_crtc->config;
+
+        work->fb_bits = to_intel_plane(primary)->frontbuffer_bit;
+        work->new_crtc_state->fb_bits = work->fb_bits;
 
+        work->can_async_unpin = true;
+        work->num_planes = 1;
+        work->old_plane_state[0] = to_intel_plane_state(old_state);
+        work->new_plane_state[0] = to_intel_plane_state(new_state);
+
+        /* Step 1: vblank waiting and workqueue throttling,
+         * similar to intel_atomic_prepare_commit
+         */
         ret = drm_crtc_vblank_get(crtc);
         if (ret)
-                goto free_work;
+                goto cleanup;
 
         /* We borrow the event spin lock for protecting flip_work */
         spin_lock_irq(&dev->event_lock);
@@ -11482,9 +11748,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                         DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                         spin_unlock_irq(&dev->event_lock);
 
-                        drm_crtc_vblank_put(crtc);
-                        kfree(work);
-                        return -EBUSY;
+                        ret = -EBUSY;
+                        goto cleanup_vblank;
                 }
         }
         list_add_tail(&work->head, &intel_crtc->flip_work);
@@ -11493,160 +11758,62 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                 flush_workqueue(dev_priv->wq);
 
-        /* Reference the objects for the scheduled work. */
-        drm_framebuffer_reference(work->old_fb);
-        drm_gem_object_reference(&obj->base);
-
-        crtc->primary->fb = fb;
-        update_state_fb(crtc->primary);
-        intel_fbc_pre_update(intel_crtc);
-
-        work->pending_flip_obj = obj;
-
-        ret = i915_mutex_lock_interruptible(dev);
+        /* step 2, similar to intel_prepare_plane_fb */
+        ret = mutex_lock_interruptible(&dev->struct_mutex);
         if (ret)
-                goto cleanup;
-
-        intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-        if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
-                ret = -EIO;
-                goto cleanup;
-        }
-
-        atomic_inc(&intel_crtc->unpin_work_count);
-
-        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-                work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
-
-        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-                engine = &dev_priv->engine[BCS];
-                if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
-                        /* vlv: DISPLAY_FLIP fails to change tiling */
-                        engine = NULL;
-        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-                engine = &dev_priv->engine[BCS];
-        } else if (INTEL_INFO(dev)->gen >= 7) {
-                engine = i915_gem_request_get_engine(obj->last_write_req);
-                if (engine == NULL || engine->id != RCS)
-                        engine = &dev_priv->engine[BCS];
-        } else {
-                engine = &dev_priv->engine[RCS];
-        }
-
-        mmio_flip = use_mmio_flip(engine, obj);
+                goto cleanup_work;
 
-        /* When using CS flips, we want to emit semaphores between rings.
-         * However, when using mmio flips we will create a task to do the
-         * synchronisation, so all we want here is to pin the framebuffer
-         * into the display plane and skip any waits.
-         */
-        if (!mmio_flip) {
-                ret = i915_gem_object_sync(obj, engine, &request);
-                if (!ret && !request) {
-                        request = i915_gem_request_alloc(engine, NULL);
-                        ret = PTR_ERR_OR_ZERO(request);
-                }
-
-                if (ret)
-                        goto cleanup_pending;
-        }
-
-        ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+        ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
         if (ret)
-                goto cleanup_pending;
+                goto cleanup_unlock;
 
-        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
-                                                  obj, 0);
-        work->gtt_offset += intel_crtc->dspaddr_offset;
+        i915_gem_track_fb(intel_fb_obj(old_fb), obj,
+                          to_intel_plane(primary)->frontbuffer_bit);
 
-        if (mmio_flip) {
-                INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
+        /* point of no return, swap state */
+        primary->state = new_state;
+        crtc->state = new_crtc_state;
+        intel_crtc->config = to_intel_crtc_state(new_crtc_state);
+        primary->fb = fb;
 
-                i915_gem_request_assign(&work->flip_queued_req,
+        /* scheduling flip work */
+        atomic_inc(&intel_crtc->unpin_work_count);
+
+        if (obj->last_write_req &&
+            !i915_gem_request_completed(obj->last_write_req, true))
+                i915_gem_request_assign(&work->old_plane_state[0]->wait_req,
                                         obj->last_write_req);
 
-                schedule_work(&work->mmio_work);
-        } else {
-                i915_gem_request_assign(&work->flip_queued_req, request);
-                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
-                                                   work->gtt_offset);
-                if (ret)
-                        goto cleanup_unpin;
+        if (obj->base.dma_buf)
+                work->old_plane_state[0]->base.fence = intel_get_excl_fence(obj);
 
-                intel_mark_page_flip_active(intel_crtc, work);
+        intel_fbc_pre_update(intel_crtc);
 
-                i915_add_request_no_flush(request);
-        }
+        intel_flip_schedule_request(work, crtc);
 
-        i915_gem_track_fb(intel_fb_obj(old_fb), obj,
-                          to_intel_plane(primary)->frontbuffer_bit);
         mutex_unlock(&dev->struct_mutex);
 
-        intel_frontbuffer_flip_prepare(dev,
-                                       to_intel_plane(primary)->frontbuffer_bit);
-
         trace_i915_flip_request(intel_crtc->plane, obj);
 
         return 0;
 
-cleanup_unpin:
-        intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
-cleanup_pending:
-        if (!IS_ERR_OR_NULL(request))
-                i915_add_request_no_flush(request);
-        atomic_dec(&intel_crtc->unpin_work_count);
+cleanup_unlock:
         mutex_unlock(&dev->struct_mutex);
-cleanup:
-        crtc->primary->fb = old_fb;
-        update_state_fb(crtc->primary);
-
-        drm_gem_object_unreference_unlocked(&obj->base);
-        drm_framebuffer_unreference(work->old_fb);
-
+cleanup_work:
         spin_lock_irq(&dev->event_lock);
         list_del(&work->head);
         spin_unlock_irq(&dev->event_lock);
 
+cleanup_vblank:
         drm_crtc_vblank_put(crtc);
-free_work:
-        kfree(work);
-
-        if (ret == -EIO) {
-                struct drm_atomic_state *state;
-                struct drm_plane_state *plane_state;
-
-out_hang:
-                state = drm_atomic_state_alloc(dev);
-                if (!state)
-                        return -ENOMEM;
-                state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
-
-retry:
-                plane_state = drm_atomic_get_plane_state(state, primary);
-                ret = PTR_ERR_OR_ZERO(plane_state);
-                if (!ret) {
-                        drm_atomic_set_fb_for_plane(plane_state, fb);
-
-                        ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
-                        if (!ret)
-                                ret = drm_atomic_commit(state);
-                }
-
-                if (ret == -EDEADLK) {
-                        drm_modeset_backoff(state->acquire_ctx);
-                        drm_atomic_state_clear(state);
-                        goto retry;
-                }
+cleanup:
+        if (new_state)
+                intel_plane_destroy_state(primary, new_state);
 
-                if (ret)
-                        drm_atomic_state_free(state);
+        if (new_crtc_state)
+                intel_crtc_destroy_state(crtc, new_crtc_state);
 
-                if (ret == 0 && event) {
-                        spin_lock_irq(&dev->event_lock);
-                        drm_crtc_send_vblank_event(crtc, event);
-                        spin_unlock_irq(&dev->event_lock);
-                }
-        }
+        kfree(work);
         return ret;
 }
 
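The rewritten error path in the hunk above replaces the old cleanup_unpin/cleanup_pending/out_hang tangle with a strict acquire-in-order, release-in-reverse goto ladder (cleanup_unlock -> cleanup_work -> cleanup_vblank -> cleanup). A minimal generic sketch of that idiom, condensed to three resources and using hypothetical helpers rather than the real vblank/mutex/pin calls:

#include <stdio.h>

/* Hypothetical acquire/release helpers standing in for the real
 * drm_crtc_vblank_get(), mutex_lock_interruptible() and
 * intel_pin_and_fence_fb_obj() calls. */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int do_flip(void)
{
        int ret;

        ret = acquire("vblank reference");
        if (ret)
                goto cleanup;

        ret = acquire("struct mutex");
        if (ret)
                goto cleanup_vblank;

        ret = acquire("pinned framebuffer");
        if (ret)
                goto cleanup_unlock;

        /* point of no return: state swapped, flip work handed off */
        release("struct mutex");        /* dropped on success as well */
        return 0;

cleanup_unlock:
        release("struct mutex");
cleanup_vblank:
        release("vblank reference");
cleanup:
        return ret;
}

int main(void)
{
        return do_flip();
}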
@@ -13690,33 +13857,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
         .atomic_destroy_state = intel_crtc_destroy_state,
 };
 
-static struct fence *intel_get_excl_fence(struct drm_i915_gem_object *obj)
-{
-        struct reservation_object *resv;
-
-
-        if (!obj->base.dma_buf)
-                return NULL;
-
-        resv = obj->base.dma_buf->resv;
-
-        /* For framebuffer backed by dmabuf, wait for fence */
-        while (1) {
-                struct fence *fence_excl, *ret = NULL;
-
-                rcu_read_lock();
-
-                fence_excl = rcu_dereference(resv->fence_excl);
-                if (fence_excl)
-                        ret = fence_get_rcu(fence_excl);
-
-                rcu_read_unlock();
-
-                if (ret == fence_excl)
-                        return ret;
-        }
-}
-
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
@@ -15016,7 +15156,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
         /* Drop through - unsupported since execlist only. */
         default:
                 /* Default just returns -ENODEV to indicate unsupported */
-                dev_priv->display.queue_flip = intel_default_queue_flip;
+                break;
         }
 }
 
@@ -15975,9 +16115,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
                         DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                   to_intel_crtc(c)->pipe);
                         drm_framebuffer_unreference(c->primary->fb);
-                        c->primary->fb = NULL;
+                        drm_framebuffer_unreference(c->primary->state->fb);
+                        c->primary->fb = c->primary->state->fb = NULL;
                         c->primary->crtc = c->primary->state->crtc = NULL;
-                        update_state_fb(c->primary);
                         c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                 }
         }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cec2deb438af..8d16337a40fc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -975,9 +975,6 @@ struct intel_flip_work {
         struct work_struct unpin_work;
         struct work_struct mmio_work;
 
-        struct drm_crtc *crtc;
-        struct drm_framebuffer *old_fb;
-        struct drm_i915_gem_object *pending_flip_obj;
         struct drm_pending_vblank_event *event;
         atomic_t pending;
         u32 flip_count;
@@ -985,6 +982,16 @@ struct intel_flip_work {
         struct drm_i915_gem_request *flip_queued_req;
         u32 flip_queued_vblank;
         u32 flip_ready_vblank;
+
+        unsigned put_power_domains;
+        unsigned num_planes;
+
+        bool can_async_unpin, flip_prepared;
+        unsigned fb_bits;
+
+        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+        struct intel_plane_state *old_plane_state[I915_MAX_PLANES + 1];
+        struct intel_plane_state *new_plane_state[I915_MAX_PLANES + 1];
 };
 
 struct intel_load_detect_pipe {