-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c     |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c  |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c    |  33
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        |  30
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |  70
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c       |  21
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c      |  15
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            | 122
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          |  32
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             | 218
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c         |   7
20 files changed, 432 insertions(+), 247 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f4e8adec8a8..bfb2efd8d4d4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1786,15 +1786,6 @@ void i915_reset(struct drm_i915_private *dev_priv)
 		goto error;
 	}
 
-	/*
-	 * rps/rc6 re-init is necessary to restore state lost after the
-	 * reset and the re-install of gt irqs. Skip for ironlake per
-	 * previous concerns that it doesn't respond well to some forms
-	 * of re-init after reset.
-	 */
-	intel_sanitize_gt_powersave(dev_priv);
-	intel_autoenable_gt_powersave(dev_priv);
-
 wakeup:
 	wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
 	return;
@@ -1872,7 +1863,17 @@ static int i915_pm_resume(struct device *kdev)
 /* freeze: before creating the hibernation_image */
 static int i915_pm_freeze(struct device *kdev)
 {
-	return i915_pm_suspend(kdev);
+	int ret;
+
+	ret = i915_pm_suspend(kdev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_freeze(kdev_to_i915(kdev));
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
 static int i915_pm_freeze_late(struct device *kdev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4dd307ed4336..8b9ee4e390c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1984,11 +1984,11 @@ struct drm_i915_private {
 	struct vlv_s0ix_state vlv_s0ix_state;
 
 	enum {
-		I915_SKL_SAGV_UNKNOWN = 0,
-		I915_SKL_SAGV_DISABLED,
-		I915_SKL_SAGV_ENABLED,
-		I915_SKL_SAGV_NOT_CONTROLLED
-	} skl_sagv_status;
+		I915_SAGV_UNKNOWN = 0,
+		I915_SAGV_DISABLED,
+		I915_SAGV_ENABLED,
+		I915_SAGV_NOT_CONTROLLED
+	} sagv_status;
 
 	struct {
 		/*
@@ -2276,21 +2276,19 @@ struct drm_i915_gem_object {
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-	union {
-		/** for phy allocated objects */
-		struct drm_dma_handle *phys_handle;
-
-		struct i915_gem_userptr {
-			uintptr_t ptr;
-			unsigned read_only :1;
-			unsigned workers :4;
+	struct i915_gem_userptr {
+		uintptr_t ptr;
+		unsigned read_only :1;
+		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-			struct i915_mm_struct *mm;
-			struct i915_mmu_object *mmu_object;
-			struct work_struct *work;
-		} userptr;
-	};
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
+		struct work_struct *work;
+	} userptr;
+
+	/** for phys allocated objects */
+	struct drm_dma_handle *phys_handle;
 };
 
 static inline struct drm_i915_gem_object *
@@ -3076,6 +3074,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 void *i915_gem_object_alloc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2c8106758922..1418c1c522cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2616,8 +2616,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	list_for_each_entry_continue(request, &engine->request_list, link)
 		if (request->ctx == incomplete_ctx)
 			reset_request(request);
-
-	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
@@ -2628,9 +2626,15 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 	for_each_engine(engine, dev_priv)
 		i915_gem_reset_engine(engine);
-	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
 	i915_gem_restore_fences(&dev_priv->drm);
+
+	if (dev_priv->gt.awake) {
+		intel_sanitize_gt_powersave(dev_priv);
+		intel_enable_gt_powersave(dev_priv);
+		if (INTEL_GEN(dev_priv) >= 6)
+			gen6_rps_busy(dev_priv);
+	}
 }
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
@@ -4589,6 +4593,19 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 	rcu_barrier();
 }
 
+int i915_gem_freeze(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrink_all(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj;
@@ -4612,7 +4629,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	 * the objects as well.
 	 */
 
-	i915_gem_shrink_all(dev_priv);
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
 
 	for (p = phases; *p; p++) {
 		list_for_each_entry(obj, *p, global_list) {
@@ -4620,6 +4638,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 		}
 	}
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33c85227643d..222796f5afb2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -451,8 +451,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 					       0, ggtt->mappable_end,
 					       DRM_MM_SEARCH_DEFAULT,
 					       DRM_MM_CREATE_DEFAULT);
-		if (ret)
-			return ERR_PTR(ret);
+		if (ret) /* no inactive aperture space, use cpu reloc */
+			return NULL;
 	} else {
 		ret = i915_vma_put_fence(vma);
 		if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 40978bc12ceb..8832f8ec1583 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -328,6 +328,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
 	switch (state) {
 	case FENCE_COMPLETE:
+		request->engine->last_submitted_seqno = request->fence.seqno;
 		request->engine->submit_request(request);
 		break;
 
@@ -641,8 +642,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 					     &request->submitq);
 
 	request->emitted_jiffies = jiffies;
-	request->previous_seqno = engine->last_submitted_seqno;
-	engine->last_submitted_seqno = request->fence.seqno;
+	request->previous_seqno = engine->last_pending_seqno;
+	engine->last_pending_seqno = request->fence.seqno;
 	i915_gem_active_set(&engine->last_request, request);
 	list_add_tail(&request->link, &engine->request_list);
 	list_add_tail(&request->ring_link, &ring->request_list);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 43358e18d34c..3106dcc06fe9 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -451,6 +451,18 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 	return ret;
 }
 
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
+{
+	const size_t wqi_size = sizeof(struct guc_wq_item);
+	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
+
+	GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
+
+	spin_lock(&gc->wq_lock);
+	gc->wq_rsvd -= wqi_size;
+	spin_unlock(&gc->wq_lock);
+}
+
 /* Construct a Work Item and append it to the GuC's Work Queue */
 static void guc_wq_item_append(struct i915_guc_client *gc,
 			       struct drm_i915_gem_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c128fdbd24e4..3fc286cd1157 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,6 +350,9 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(dev_priv->rps.pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -368,6 +371,9 @@ u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
 
@@ -2816,7 +2822,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[engine->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
 				return signaller;
 		}
 	} else {
@@ -2826,13 +2832,13 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 			if(engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
 				return signaller;
 		}
 	}
 
-	DRM_DEBUG_DRIVER("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-			 engine->id, ipehr, offset);
+	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+			 engine->name, ipehr, offset);
 
 	return ERR_PTR(-ENODEV);
 }
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 9bad14d22c95..495611b7068d 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -578,6 +578,36 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 	return 0;
 }
 
+static void cancel_fake_irq(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	del_timer_sync(&b->hangcheck);
+	del_timer_sync(&b->fake_irq);
+	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+}
+
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	cancel_fake_irq(engine);
+	spin_lock(&b->lock);
+
+	__intel_breadcrumbs_disable_irq(b);
+	if (intel_engine_has_waiter(engine)) {
+		b->timeout = wait_timeout();
+		__intel_breadcrumbs_enable_irq(b);
+		if (READ_ONCE(b->irq_posted))
+			wake_up_process(b->first_wait->tsk);
+	} else {
+		/* sanitize the IMR and unmask any auxiliary interrupts */
+		irq_disable(engine);
+	}
+
+	spin_unlock(&b->lock);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -585,8 +615,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 	if (!IS_ERR_OR_NULL(b->signaler))
 		kthread_stop(b->signaler);
 
-	del_timer_sync(&b->hangcheck);
-	del_timer_sync(&b->fake_irq);
+	cancel_fake_irq(engine);
 }
 
 unsigned int intel_kick_waiters(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ad8d712ae84c..fbcfed63a76e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3408,6 +3408,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	dst_w--;
 	dst_h--;
 
+	intel_crtc->dspaddr_offset = surf_addr;
+
 	intel_crtc->adjusted_x = src_x;
 	intel_crtc->adjusted_y = src_y;
 
@@ -3629,6 +3631,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
+	intel_pps_unlock_regs_wa(dev_priv);
 	intel_modeset_init_hw(dev);
 
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -9509,6 +9512,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	if (intel_crtc_has_dp_encoder(crtc_state))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
+	/*
+	 * The high speed IO clock is only really required for
+	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
+	 * possible to share the DPLL between CRT and HDMI. Enabling
+	 * the clock needlessly does no real harm, except use up a
+	 * bit of power potentially.
+	 *
+	 * We'll limit this to IVB with 3 pipes, since it has only two
+	 * DPLLs and so DPLL sharing is the only way to get three pipes
+	 * driving PCH ports at the same time. On SNB we could do this,
+	 * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it's a win or loss power wise. No point in doing
+	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+	 */
+	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
 	/* compute bitmask from p1 value */
 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
@@ -14364,8 +14385,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		 * SKL workaround: bspec recommends we disable the SAGV when we
 		 * have more then one pipe enabled
 		 */
-		if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
-			skl_disable_sagv(dev_priv);
+		if (!intel_can_enable_sagv(state))
+			intel_disable_sagv(dev_priv);
 
 		intel_modeset_verify_disabled(dev);
 	}
@@ -14422,9 +14443,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
-	if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
-	    skl_can_enable_sagv(state))
-		skl_enable_sagv(dev_priv);
+	if (intel_state->modeset && intel_can_enable_sagv(state))
+		intel_enable_sagv(dev_priv);
 
 	drm_atomic_helper_commit_hw_done(state);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index acd0c51f74d5..14a3cf0b7213 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4337,7 +4337,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 	intel_dp->has_audio = false;
 }
 
-static void
+static enum drm_connector_status
 intel_dp_long_pulse(struct intel_connector *intel_connector)
 {
 	struct drm_connector *connector = &intel_connector->base;
@@ -4361,7 +4361,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 	else
 		status = connector_status_disconnected;
 
-	if (status != connector_status_connected) {
+	if (status == connector_status_disconnected) {
 		intel_dp->compliance_test_active = 0;
 		intel_dp->compliance_test_type = 0;
 		intel_dp->compliance_test_data = 0;
@@ -4423,8 +4423,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 	intel_dp->aux.i2c_defer_count = 0;
 
 	intel_dp_set_edid(intel_dp);
-
-	status = connector_status_connected;
+	if (is_edp(intel_dp) || intel_connector->detect_edid)
+		status = connector_status_connected;
 	intel_dp->detect_done = true;
 
 	/* Try to read the source of the interrupt */
@@ -4443,12 +4443,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 	}
 
 out:
-	if ((status != connector_status_connected) &&
-	    (intel_dp->is_mst == false))
+	if (status != connector_status_connected && !intel_dp->is_mst)
 		intel_dp_unset_edid(intel_dp);
 
 	intel_display_power_put(to_i915(dev), power_domain);
-	return;
+	return status;
 }
 
 static enum drm_connector_status
@@ -4457,7 +4456,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
+	enum drm_connector_status status = connector->status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
@@ -4472,14 +4471,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
 	/* If full detect is not performed yet, do a full detect */
 	if (!intel_dp->detect_done)
-		intel_dp_long_pulse(intel_dp->attached_connector);
+		status = intel_dp_long_pulse(intel_dp->attached_connector);
 
 	intel_dp->detect_done = false;
 
-	if (is_edp(intel_dp) || intel_connector->detect_edid)
-		return connector_status_connected;
-	else
-		return connector_status_disconnected;
+	return status;
 }
 
 static void
@@ -4831,36 +4827,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
+	if (long_hpd) {
+		intel_dp->detect_done = false;
+		return IRQ_NONE;
+	}
+
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
-	if (long_hpd) {
-		intel_dp_long_pulse(intel_dp->attached_connector);
-		if (intel_dp->is_mst)
-			ret = IRQ_HANDLED;
-		goto put_power;
-
-	} else {
-		if (intel_dp->is_mst) {
-			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
-				/*
-				 * If we were in MST mode, and device is not
-				 * there, get out of MST mode
-				 */
-				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-				intel_dp->is_mst = false;
-				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
-								intel_dp->is_mst);
-				goto put_power;
-			}
+	if (intel_dp->is_mst) {
+		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+			/*
+			 * If we were in MST mode, and device is not
+			 * there, get out of MST mode
+			 */
+			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+			intel_dp->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+							intel_dp->is_mst);
+			intel_dp->detect_done = false;
+			goto put_power;
 		}
+	}
 
-		if (!intel_dp->is_mst) {
-			if (!intel_dp_short_pulse(intel_dp)) {
-				intel_dp_long_pulse(intel_dp->attached_connector);
-				goto put_power;
-			}
+	if (!intel_dp->is_mst) {
+		if (!intel_dp_short_pulse(intel_dp)) {
+			intel_dp->detect_done = false;
+			goto put_power;
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index c26d18a574b6..1c59ca50c430 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1694,21 +1694,32 @@ bool bxt_ddi_dp_set_dpll_hw_state(int clock,
 	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
 }
 
+static bool
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
+			       struct intel_crtc_state *crtc_state, int clock,
+			       struct intel_dpll_hw_state *dpll_hw_state)
+{
+	struct bxt_clk_div clk_div = { };
+
+	bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+
+	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
 static struct intel_shared_dpll *
 bxt_get_dpll(struct intel_crtc *crtc,
 	     struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct bxt_clk_div clk_div = {0};
-	struct intel_dpll_hw_state dpll_hw_state = {0};
+	struct intel_dpll_hw_state dpll_hw_state = { };
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_digital_port *intel_dig_port;
 	struct intel_shared_dpll *pll;
 	int i, clock = crtc_state->port_clock;
 
-	if (encoder->type == INTEL_OUTPUT_HDMI
-	    && !bxt_ddi_hdmi_pll_dividers(crtc, crtc_state,
-					  clock, &clk_div))
+	if (encoder->type == INTEL_OUTPUT_HDMI &&
+	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
+					    &dpll_hw_state))
 		return NULL;
 
 	if ((encoder->type == INTEL_OUTPUT_DP ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8fd16adf069b..a19ec06f9e42 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -263,6 +263,7 @@ struct intel_panel {
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
 		bool active_low_pwm;
+		bool alternate_pwm_increment;	/* lpt+ */
 
 		/* PWM chip */
 		bool util_pin_active_low;	/* bxt+ */
@@ -1741,9 +1742,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
-bool skl_can_enable_sagv(struct drm_atomic_state *state);
-int skl_enable_sagv(struct drm_i915_private *dev_priv);
-int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
 			       const struct skl_ddb_allocation *new,
 			       enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index e405f1080296..025e232a4205 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -210,9 +210,6 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
 	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-	if (intel_engine_has_waiter(engine))
-		i915_queue_hangcheck(engine->i915);
 }
 
 static void intel_engine_init_requests(struct intel_engine_cs *engine)
@@ -307,18 +304,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_engine_reset_irq(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (intel_engine_has_waiter(engine))
-		engine->irq_enable(engine);
-	else
-		engine->irq_disable(engine);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
 /**
  * intel_engines_cleanup_common - cleans up the engine state created by
  * the common initiailizers.
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index b1ba86958811..5cdf7aa75be5 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -160,6 +160,7 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 251143361f31..0adb879833ff 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -226,10 +226,16 @@ enum {
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 
+#define WA_TAIL_DWORDS 2
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine);
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine);
+static void execlists_init_reg_state(u32 *reg_state,
+				     struct i915_gem_context *ctx,
+				     struct intel_engine_cs *engine,
+				     struct intel_ring *ring);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -621,6 +627,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 	request->ring = ce->ring;
 
+	ret = intel_lr_context_pin(request->ctx, engine);
+	if (ret)
+		return ret;
+
 	if (i915.enable_guc_submission) {
 		/*
 		 * Check that the GuC has space for the request before
@@ -629,21 +639,17 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 		 */
 		ret = i915_guc_wq_reserve(request);
 		if (ret)
-			return ret;
+			goto err_unpin;
 	}
 
-	ret = intel_lr_context_pin(request->ctx, engine);
-	if (ret)
-		return ret;
-
 	ret = intel_ring_begin(request, 0);
 	if (ret)
-		goto err_unpin;
+		goto err_unreserve;
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
 		if (ret)
-			goto err_unpin;
+			goto err_unreserve;
 
 		ce->initialised = true;
 	}
@@ -658,6 +664,9 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 
+err_unreserve:
+	if (i915.enable_guc_submission)
+		i915_guc_wq_unreserve(request);
 err_unpin:
 	intel_lr_context_unpin(request->ctx, engine);
 	return ret;
@@ -708,7 +717,6 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
-	u32 *lrc_reg_state;
 	int ret;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -727,17 +735,16 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 		goto unpin_vma;
 	}
 
-	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
 	ret = intel_ring_pin(ce->ring);
 	if (ret)
 		goto unpin_map;
 
 	intel_lr_context_descriptor_update(ctx, engine);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] =
+	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
 		i915_ggtt_offset(ce->ring->vma);
-	ce->lrc_reg_state = lrc_reg_state;
+
 	ce->state->obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1231,7 +1238,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 	lrc_init_hws(engine);
 
-	intel_engine_reset_irq(engine);
+	intel_engine_reset_breadcrumbs(engine);
 
 	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
@@ -1289,8 +1296,21 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	struct execlist_port *port = engine->execlist_port;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
 
+	/* We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+	 */
+	execlists_init_reg_state(ce->lrc_reg_state,
+				 request->ctx, engine, ce->ring);
+
 	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+		i915_ggtt_offset(ce->ring->vma);
 	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+
 	request->ring->head = request->postfix;
 	request->ring->last_retired_head = -1;
 	intel_ring_update_space(request->ring);
@@ -1310,6 +1330,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	GEM_BUG_ON(request->ctx != port[0].request->ctx);
 	port[0].count = 0;
 	port[1].count = 0;
+
+	/* Reset WaIdleLiteRestore:bdw,skl as well */
+	request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1315static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1338static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1547,7 +1570,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1547 * used as a workaround for not being allowed to do lite 1570 * used as a workaround for not being allowed to do lite
1548 * restore with HEAD==TAIL (WaIdleLiteRestore). 1571 * restore with HEAD==TAIL (WaIdleLiteRestore).
1549 */ 1572 */
1550#define WA_TAIL_DWORDS 2
1551 1573
1552static int gen8_emit_request(struct drm_i915_gem_request *request) 1574static int gen8_emit_request(struct drm_i915_gem_request *request)
1553{ 1575{
@@ -1894,38 +1916,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 	return indirect_ctx_offset;
 }
 
-static int
-populate_lr_context(struct i915_gem_context *ctx,
-		    struct drm_i915_gem_object *ctx_obj,
-		    struct intel_engine_cs *engine,
-		    struct intel_ring *ring)
+static void execlists_init_reg_state(u32 *reg_state,
+				     struct i915_gem_context *ctx,
+				     struct intel_engine_cs *engine,
+				     struct intel_ring *ring)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	void *vaddr;
-	u32 *reg_state;
-	int ret;
-
-	if (!ppgtt)
-		ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-		return ret;
-	}
-
-	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
-		return ret;
-	}
-	ctx_obj->dirty = true;
-
-	/* The second page of the context object contains some fields which must
-	 * be set up prior to the first execution. */
-	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
 
 	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
 	 * commands followed by (reg, value) pairs. The values we are setting here are
@@ -1939,14 +1936,11 @@ populate_lr_context(struct i915_gem_context *ctx,
 		   _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 				      CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 				      (HAS_RESOURCE_STREAMER(dev_priv) ?
 				       CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
 		       0);
-	/* Ring buffer start address is not known until the buffer is pinned.
-	 * It is written to the context image in execlists_update_context()
-	 */
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
 		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
@@ -2029,6 +2023,36 @@ populate_lr_context(struct i915_gem_context *ctx,
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
 			       make_rpcs(dev_priv));
 	}
+}
+
+static int
+populate_lr_context(struct i915_gem_context *ctx,
+		    struct drm_i915_gem_object *ctx_obj,
+		    struct intel_engine_cs *engine,
+		    struct intel_ring *ring)
+{
+	void *vaddr;
+	int ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+		return ret;
+	}
+
+	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+		return ret;
+	}
+	ctx_obj->dirty = true;
+
+	/* The second page of the context object contains some fields which must
+	 * be set up prior to the first execution. */
+
+	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+				 ctx, engine, ring);
 
 	i915_gem_object_unpin_map(ctx_obj);
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c10e9b0405e8..be4b4d546fd9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 pch_ctl1, pch_ctl2;
+	u32 pch_ctl1, pch_ctl2, schicken;
 
 	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
 	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
@@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector)
 		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
 	}
 
+	if (HAS_PCH_LPT(dev_priv)) {
+		schicken = I915_READ(SOUTH_CHICKEN2);
+		if (panel->backlight.alternate_pwm_increment)
+			schicken |= LPT_PWM_GRANULARITY;
+		else
+			schicken &= ~LPT_PWM_GRANULARITY;
+		I915_WRITE(SOUTH_CHICKEN2, schicken);
+	} else {
+		schicken = I915_READ(SOUTH_CHICKEN1);
+		if (panel->backlight.alternate_pwm_increment)
+			schicken |= SPT_PWM_GRANULARITY;
+		else
+			schicken &= ~SPT_PWM_GRANULARITY;
+		I915_WRITE(SOUTH_CHICKEN1, schicken);
+	}
+
 	pch_ctl2 = panel->backlight.max << 16;
 	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
@@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  */
 static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
 	u32 mul;
 
-	if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+	if (panel->backlight.alternate_pwm_increment)
 		mul = 128;
 	else
 		mul = 16;
@@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
 	u32 mul, clock;
 
-	if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+	if (panel->backlight.alternate_pwm_increment)
 		mul = 16;
 	else
 		mul = 128;
@@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 	u32 pch_ctl1, pch_ctl2, val;
+	bool alt;
+
+	if (HAS_PCH_LPT(dev_priv))
+		alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+	else
+		alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+	panel->backlight.alternate_pwm_increment = alt;
 
 	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2df06b703e3d..a2f751cd187a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2127,32 +2127,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 				GEN9_MEM_LATENCY_LEVEL_MASK;
 
 		/*
+		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+		 * need to be disabled. We make sure to sanitize the values out
+		 * of the punit to satisfy this requirement.
+		 */
+		for (level = 1; level <= max_level; level++) {
+			if (wm[level] == 0) {
+				for (i = level + 1; i <= max_level; i++)
+					wm[i] = 0;
+				break;
+			}
+		}
+
+		/*
 		 * WaWmMemoryReadLatency:skl
 		 *
 		 * punit doesn't take into account the read latency so we need
-		 * to add 2us to the various latency levels we retrieve from
-		 * the punit.
-		 * - W0 is a bit special in that it's the only level that
-		 *   can't be disabled if we want to have display working, so
-		 *   we always add 2us there.
-		 * - For levels >=1, punit returns 0us latency when they are
-		 *   disabled, so we respect that and don't add 2us then
-		 *
-		 * Additionally, if a level n (n > 1) has a 0us latency, all
-		 * levels m (m >= n) need to be disabled. We make sure to
-		 * sanitize the values out of the punit to satisfy this
-		 * requirement.
+		 * to add 2us to the various latency levels we retrieve from the
+		 * punit when level 0 response data us 0us.
 		 */
-		wm[0] += 2;
-		for (level = 1; level <= max_level; level++)
-			if (wm[level] != 0)
+		if (wm[0] == 0) {
+			wm[0] += 2;
+			for (level = 1; level <= max_level; level++) {
+				if (wm[level] == 0)
+					break;
 				wm[level] += 2;
-			else {
-				for (i = level + 1; i <= max_level; i++)
-					wm[i] = 0;
-
-				break;
 			}
+		}
 
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
@@ -2877,6 +2879,19 @@ skl_wm_plane_id(const struct intel_plane *plane)
 	}
 }
 
+static bool
+intel_has_sagv(struct drm_i915_private *dev_priv)
+{
+	if (IS_KABYLAKE(dev_priv))
+		return true;
+
+	if (IS_SKYLAKE(dev_priv) &&
+	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
+		return true;
+
+	return false;
+}
+
 /*
  * SAGV dynamically adjusts the system agent voltage and clock frequencies
  * depending on power and performance requirements. The display engine access
@@ -2889,12 +2904,14 @@ skl_wm_plane_id(const struct intel_plane *plane)
  * - We're not using an interlaced display configuration
  */
 int
-skl_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+	if (!intel_has_sagv(dev_priv))
+		return 0;
+
+	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
 		return 0;
 
 	DRM_DEBUG_KMS("Enabling the SAGV\n");
@@ -2910,21 +2927,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv)
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (ret == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (ret < 0) {
 		DRM_ERROR("Failed to enable the SAGV\n");
 		return ret;
 	}
 
-	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+	dev_priv->sagv_status = I915_SAGV_ENABLED;
 	return 0;
 }
 
 static int
-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+intel_do_sagv_disable(struct drm_i915_private *dev_priv)
 {
 	int ret;
 	uint32_t temp = GEN9_SAGV_DISABLE;
@@ -2938,19 +2955,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv)
 }
 
 int
-skl_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
 	int ret, result;
 
-	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+	if (!intel_has_sagv(dev_priv))
+		return 0;
+
+	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
 		return 0;
 
 	DRM_DEBUG_KMS("Disabling the SAGV\n");
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret == -ETIMEDOUT) {
@@ -2962,20 +2981,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv)
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (result == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (result < 0) {
 		DRM_ERROR("Failed to disable the SAGV\n");
 		return result;
 	}
 
-	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+	dev_priv->sagv_status = I915_SAGV_DISABLED;
 	return 0;
 }
 
-bool skl_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2984,6 +3003,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state)
 	enum pipe pipe;
 	int level, plane;
 
+	if (!intel_has_sagv(dev_priv))
+		return false;
+
 	/*
 	 * SKL workaround: bspec recommends we disable the SAGV when we have
 	 * more then one pipe enabled
@@ -3472,29 +3494,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 }
 
 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t cpp,
-			       uint64_t tiling, uint32_t latency)
+			       uint32_t latency, uint32_t plane_blocks_per_line)
 {
 	uint32_t ret;
-	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t wm_intermediate_val;
 
 	if (latency == 0)
 		return UINT_MAX;
 
-	plane_bytes_per_line = horiz_pixels * cpp;
-
-	if (tiling == I915_FORMAT_MOD_Y_TILED ||
-	    tiling == I915_FORMAT_MOD_Yf_TILED) {
-		plane_bytes_per_line *= 4;
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-		plane_blocks_per_line /= 4;
-	} else if (tiling == DRM_FORMAT_MOD_NONE) {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
-	} else {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-	}
-
 	wm_intermediate_val = latency * pixel_rate;
 	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
 	      plane_blocks_per_line;
@@ -3545,6 +3552,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	uint8_t cpp;
 	uint32_t width = 0, height = 0;
 	uint32_t plane_pixel_rate;
+	uint32_t y_tile_minimum, y_min_scanlines;
 
 	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
 		*enabled = false;
@@ -3560,38 +3568,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3560 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3568 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3561 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); 3569 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3562 3570
3571 if (intel_rotation_90_or_270(pstate->rotation)) {
3572 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3573 drm_format_plane_cpp(fb->pixel_format, 1) :
3574 drm_format_plane_cpp(fb->pixel_format, 0);
3575
3576 switch (cpp) {
3577 case 1:
3578 y_min_scanlines = 16;
3579 break;
3580 case 2:
3581 y_min_scanlines = 8;
3582 break;
3583 default:
3584 WARN(1, "Unsupported pixel depth for rotation");
3585 case 4:
3586 y_min_scanlines = 4;
3587 break;
3588 }
3589 } else {
3590 y_min_scanlines = 4;
3591 }
3592
3593 plane_bytes_per_line = width * cpp;
3594 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3595 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3596 plane_blocks_per_line =
3597 DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
3598 plane_blocks_per_line /= y_min_scanlines;
3599 } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
3600 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
3601 + 1;
3602 } else {
3603 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3604 }
3605
3563 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); 3606 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3564 method2 = skl_wm_method2(plane_pixel_rate, 3607 method2 = skl_wm_method2(plane_pixel_rate,
3565 cstate->base.adjusted_mode.crtc_htotal, 3608 cstate->base.adjusted_mode.crtc_htotal,
3566 width, 3609 latency,
3567 cpp, 3610 plane_blocks_per_line);
3568 fb->modifier[0],
3569 latency);
3570 3611
3571 plane_bytes_per_line = width * cpp; 3612 y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
3572 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3573 3613
3574 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3614 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3575 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3615 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3576 uint32_t min_scanlines = 4;
3577 uint32_t y_tile_minimum;
3578 if (intel_rotation_90_or_270(pstate->rotation)) {
3579 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3580 drm_format_plane_cpp(fb->pixel_format, 1) :
3581 drm_format_plane_cpp(fb->pixel_format, 0);
3582
3583 switch (cpp) {
3584 case 1:
3585 min_scanlines = 16;
3586 break;
3587 case 2:
3588 min_scanlines = 8;
3589 break;
3590 case 8:
3591 WARN(1, "Unsupported pixel depth for rotation");
3592 }
3593 }
3594 y_tile_minimum = plane_blocks_per_line * min_scanlines;
3595 selected_result = max(method2, y_tile_minimum); 3616 selected_result = max(method2, y_tile_minimum);
3596 } else { 3617 } else {
3597 if ((ddb_allocation / plane_blocks_per_line) >= 1) 3618 if ((ddb_allocation / plane_blocks_per_line) >= 1)
@@ -3605,10 +3626,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3605 3626
3606 if (level >= 1 && level <= 7) { 3627 if (level >= 1 && level <= 7) {
3607 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3628 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3608 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) 3629 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3609 res_lines += 4; 3630 res_blocks += y_tile_minimum;
3610 else 3631 res_lines += y_min_scanlines;
3632 } else {
3611 res_blocks++; 3633 res_blocks++;
3634 }
3612 } 3635 }
3613 3636
3614 if (res_blocks >= ddb_allocation || res_lines > 31) { 3637 if (res_blocks >= ddb_allocation || res_lines > 31) {
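
The hoisted y_min_scanlines selection and the Y-tile rounding above are easy to sanity-check in isolation. A standalone sketch of the same arithmetic (the 1920-wide, 4-byte-per-pixel example plane is arbitrary; the cpp cases and 512-byte block size mirror the hunk):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors the hunk: rotated planes fetch 16/8/4 scanlines per Y-tile
 * fetch for 1/2/4 bytes per pixel; unrotated planes always use 4. */
static uint32_t min_scanlines(uint32_t cpp, int rotated)
{
	if (!rotated)
		return 4;

	switch (cpp) {
	case 1:  return 16;
	case 2:  return 8;
	default: return 4;
	}
}

int main(void)
{
	uint32_t width = 1920, cpp = 4;			/* example plane */
	uint32_t y_min = min_scanlines(cpp, 1);		/* 4 */
	uint32_t bytes_per_line = width * cpp;		/* 7680 */

	/* Y-tiled: amortise one y_min-tall fetch over its scanlines so
	 * partial 512-byte blocks aren't charged to every line. */
	uint32_t blocks_per_line =
		DIV_ROUND_UP(bytes_per_line * y_min, 512) / y_min;

	printf("blocks/line=%u y_tile_minimum=%u\n",
	       blocks_per_line, blocks_per_line * y_min);	/* 15, 60 */
	return 0;
}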
@@ -3939,6 +3962,41 @@ pipes_modified(struct drm_atomic_state *state)
 	return ret;
 }
 
+int
+skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+{
+	struct drm_atomic_state *state = cstate->base.state;
+	struct drm_device *dev = state->dev;
+	struct drm_crtc *crtc = cstate->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	enum pipe pipe = intel_crtc->pipe;
+	int id;
+
+	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+		id = skl_wm_plane_id(to_intel_plane(plane));
+
+		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
+					&new_ddb->plane[pipe][id]) &&
+		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
+					&new_ddb->y_plane[pipe][id]))
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
@@ -4003,7 +4061,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		if (ret)
 			return ret;
 
-		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		ret = skl_ddb_add_affected_planes(cstate);
 		if (ret)
 			return ret;
 	}
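
skl_ddb_add_affected_planes() only drags a plane into the atomic state when its DDB slice actually moved; the skl_ddb_entry_equal() test it leans on is defined outside this diff. Presumably it is a plain endpoint comparison, along the lines of this hypothetical sketch:

/* Hypothetical sketch, not the committed helper: two DDB entries
 * describe the same allocation iff both endpoints match. */
static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	return e1->start == e2->start && e1->end == e2->end;
}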
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7a74750076c5..ed9955dce156 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -564,7 +564,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	else
 		intel_ring_setup_status_page(engine);
 
-	intel_engine_reset_irq(engine);
+	intel_engine_reset_breadcrumbs(engine);
 
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7f64d611159b..ec0b4a0c605d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -328,6 +328,7 @@ struct intel_engine_cs {
 	 * inspecting request list.
 	 */
 	u32 last_submitted_seqno;
+	u32 last_pending_seqno;
 
 	/* An RCU guarded pointer to the last request. No reference is
 	 * held to the request, users must carefully acquire a reference to
@@ -492,7 +493,6 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-void intel_engine_reset_irq(struct intel_engine_cs *engine);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
@@ -584,6 +584,7 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 	return wakeup;
 }
 
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned int intel_kick_waiters(struct drm_i915_private *i915);
 unsigned int intel_kick_signalers(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a9b6c936aadd..ee2306a79747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 		      const bool read,
 		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(dev_priv),
-		 "Unclaimed register detected %s %s register 0x%x\n",
-		 before ? "before" : "after",
-		 read ? "reading" : "writing to",
+	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+		 "Unclaimed %s register 0x%x\n",
+		 read ? "read from" : "write to",
 		 i915_mmio_reg_offset(reg)))
 		i915.mmio_debug--; /* Only report the first N failures */
 }
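
With the WARN now gated on !before, the probe taken before an MMIO access only flushes stale unclaimed state; a warning can fire only on the probe taken after it, so blame lands on the bracketed access. A paraphrased sketch of that bracketing (the wrapper name and readl() plumbing are stand-ins, not the i915 read macros):

static u32 debug_mmio_read(struct drm_i915_private *dev_priv,
			   i915_reg_t reg)
{
	u32 val;

	/* before: silently consume anything left over from earlier */
	__unclaimed_reg_debug(dev_priv, reg, true, true);
	val = readl(dev_priv->regs + i915_mmio_reg_offset(reg));
	/* after: any unclaimed access now belongs to this read */
	__unclaimed_reg_debug(dev_priv, reg, true, false);

	return val;
}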