author		Dave Airlie <airlied@redhat.com>	2014-10-27 22:37:58 -0400
committer	Dave Airlie <airlied@redhat.com>	2014-10-27 22:37:58 -0400
commit		bbf0ef0334f2267687a92ec6d8114fd67b8157a3 (patch)
tree		668abebfb74a9221b47e93518b679b2a4eec156f /drivers/gpu
parent		cac7f2429872d3733dc3f9915857b1691da2eb2f (diff)
parent		cacc6c837b799b058d59d2af02c11140640cc1d2 (diff)
Merge tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm-intel into drm-next
Ok, new attempt, this time around with full ppgtt disabled again.

drm-intel-next-2014-10-03:
- first batch of skl stage 1 enabling
- fixes from Rodrigo to the PSR, fbc and sink crc code
- kerneldoc for the frontbuffer tracking code, runtime pm code and the
  basic interrupt enable/disable functions
- smaller stuff all over

drm-intel-next-2014-09-19:
- bunch more i830M fixes from Ville
- full ppgtt now again enabled by default
- more ppgtt fixes from Michel Thierry and Chris Wilson
- plane config work from Gustavo Padovan
- spinlock clarifications
- piles of smaller improvements all over, as usual

* tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm-intel: (114 commits)
  Revert "drm/i915: Enable full PPGTT on gen7"
  drm/i915: Update DRIVER_DATE to 20141003
  drm/i915: Remove the duplicated logic between the two shrink phases
  drm/i915: kerneldoc for interrupt enable/disable functions
  drm/i915: Use dev_priv instead of dev in irq setup functions
  drm/i915: s/pm._irqs_disabled/pm.irqs_enabled/
  drm/i915: Clear TX FIFO reset master override bits on chv
  drm/i915: Make sure hardware uses the correct swing margin/deemph bits on chv
  drm/i915: make sink_crc return -EIO on aux read/write failure
  drm/i915: Constify send buffer for intel_dp_aux_ch
  drm/i915: De-magic the PSR AUX message
  drm/i915: Reinstate error level message for non-simulated gpu hangs
  drm/i915: Kerneldoc for intel_runtime_pm.c
  drm/i915: Call runtime_pm_disable directly
  drm/i915: Move intel_display_set_init_power to intel_runtime_pm.c
  drm/i915: Bikeshed rpm functions name a bit.
  drm/i915: Extract intel_runtime_pm.c
  drm/i915: Remove intel_modeset_suspend_hw
  drm/i915: spelling fixes for frontbuffer tracking kerneldoc
  drm/i915: Tighting frontbuffer tracking around flips
  ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/Makefile			5
-rw-r--r--	drivers/gpu/drm/i915/i915_cmd_parser.c		11
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c		9
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c			29
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c			49
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h			48
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c			101
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c		36
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c		13
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c			274
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h			190
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.h		10
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c		2
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c		52
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c		895
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c			194
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h		90
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.c		2
-rw-r--r--	drivers/gpu/drm/i915/intel_frontbuffer.c	279
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c		25
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c		2
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c		2
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c		32
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c			1247
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c		34
-rw-r--r--	drivers/gpu/drm/i915/intel_runtime_pm.c		1375
-rw-r--r--	drivers/gpu/drm/i915/intel_sprite.c		473
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c			9
-rw-r--r--	drivers/gpu/drm/i915/intel_uncore.c		19
29 files changed, 3332 insertions(+), 2175 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c1dd485aeb6c..3a6bce047f6f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
 	  i915_params.o \
 	  i915_suspend.o \
 	  i915_sysfs.o \
-	  intel_pm.o
+	  intel_pm.o \
+	  intel_runtime_pm.o
+
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 
@@ -43,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
 # modesetting core code
 i915-y += intel_bios.o \
 	  intel_display.o \
+	  intel_frontbuffer.o \
 	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_sideband.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 593b657d3e59..86b3ae0934a7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -847,12 +847,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 	if (!ring->needs_cmd_parser)
 		return false;
 
-	/*
-	 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
-	 * disabled. That will cause all of the parser's PPGTT checks to
-	 * fail. For now, disable parsing when PPGTT is off.
-	 */
-	if (USES_PPGTT(ring->dev))
+	if (!USES_PPGTT(ring->dev))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
@@ -888,8 +883,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 	 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
 	 */
 	if (reg_addr == OACONTROL) {
-		if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+		if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+			DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 			return false;
+		}
 
 		if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
 			*oacontrol_set = (cmd[2] != 0);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b44817e08..da4036d0bab9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 	struct intel_crtc *crtc;
 	int ret;
 
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
 
-		spin_lock_irqsave(&dev->event_lock, flags);
+		spin_lock_irq(&dev->event_lock);
 		work = crtc->unpin_work;
 		if (work == NULL) {
 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
 			}
 		}
-		spin_unlock_irqrestore(&dev->event_lock, flags);
+		spin_unlock_irq(&dev->event_lock);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		if (!intel_display_power_enabled(dev_priv,
+		if (!intel_display_power_is_enabled(dev_priv,
 						 POWER_DOMAIN_PIPE(pipe))) {
 			seq_printf(m, "Pipe %c power disabled\n",
 				   pipe_name(pipe));
@@ -1986,7 +1985,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		   I915_READ(MAD_DIMM_C2));
 	seq_printf(m, "TILECTL = 0x%08x\n",
 		   I915_READ(TILECTL));
-	if (IS_GEN8(dev))
+	if (INTEL_INFO(dev)->gen >= 8)
 		seq_printf(m, "GAMTARBMODE = 0x%08x\n",
 			   I915_READ(GAMTARBMODE));
 	else
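
A pattern that recurs throughout this pull: spin_lock_irqsave()/spin_unlock_irqrestore() pairs are replaced by plain spin_lock_irq()/spin_unlock_irq() wherever the caller is known to run with interrupts enabled. A minimal sketch of the difference, in generic kernel-style C (illustrative only, not part of the patch):

	#include <linux/spinlock.h>

	/*
	 * spin_lock_irqsave() snapshots the current IRQ state into 'flags'
	 * and restores exactly that state on unlock, so it is safe even if
	 * the caller already runs with interrupts disabled.
	 */
	static void update_irqsave(spinlock_t *lock, int *shared)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		(*shared)++;
		spin_unlock_irqrestore(lock, flags);
	}

	/*
	 * spin_lock_irq() unconditionally re-enables interrupts on unlock,
	 * so it is only correct in contexts guaranteed to have IRQs enabled,
	 * e.g. process context such as the debugfs and work-queue paths
	 * converted in this series.
	 */
	static void update_irq(spinlock_t *lock, int *shared)
	{
		spin_lock_irq(lock);
		(*shared)++;
		spin_unlock_irq(lock);
	}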
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1403b01e8216..85d14e169409 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1338,14 +1338,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
-	/*
-	 * We enable some interrupt sources in our postinstall hooks, so mark
-	 * interrupts as enabled _before_ actually enabling them to avoid
-	 * special cases in our ordering checks.
-	 */
-	dev_priv->pm._irqs_disabled = false;
-
-	ret = drm_irq_install(dev, dev->pdev->irq);
+	ret = intel_irq_install(dev_priv);
 	if (ret)
 		goto cleanup_gem_stolen;
 
@@ -1370,7 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 		goto cleanup_gem;
 
 	/* Only enable hotplug handling once the fbdev is fully set up. */
-	intel_hpd_init(dev);
+	intel_hpd_init(dev_priv);
 
 	/*
 	 * Some ports require correctly set-up hpd registers for detection to
@@ -1534,7 +1527,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ -1614,7 +1607,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->backlight_lock);
+	mutex_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1740,7 +1733,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_freewq;
 	}
 
-	intel_irq_init(dev);
+	intel_irq_init(dev_priv);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1798,12 +1791,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
-	intel_init_runtime_pm(dev_priv);
+	intel_runtime_pm_enable(dev_priv);
 
 	return 0;
 
 out_power_well:
-	intel_power_domains_remove(dev_priv);
+	intel_power_domains_fini(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
 	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1846,16 +1839,10 @@ int i915_driver_unload(struct drm_device *dev)
 		return ret;
 	}
 
-	intel_fini_runtime_pm(dev_priv);
+	intel_power_domains_fini(dev_priv);
 
 	intel_gpu_ips_teardown();
 
-	/* The i915.ko module is still not prepared to be loaded when
-	 * the power well is not enabled, so just enable it in case
-	 * we're going to unload/reload. */
-	intel_display_set_init_power(dev_priv, true);
-	intel_power_domains_remove(dev_priv);
-
 	i915_teardown_sysfs(dev);
 
 	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 055d5e7fbf12..bd7978cb094f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
 	CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_info = {
+	.is_preliminary = 1,
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-	INTEL_CHV_IDS(&intel_cherryview_info)
+	INTEL_CHV_IDS(&intel_cherryview_info), \
+	INTEL_SKL_IDS(&intel_skylake_info)
 
 static const struct pci_device_id pciidlist[] = {	/* aka */
 	INTEL_PCI_IDS,
@@ -461,6 +475,16 @@ void intel_detect_pch(struct drm_device *dev)
 			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 			WARN_ON(!IS_HASWELL(dev));
 			WARN_ON(!IS_ULT(dev));
+		} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_SPT;
+			DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+			WARN_ON(!IS_SKYLAKE(dev));
+			WARN_ON(IS_ULT(dev));
+		} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_SPT;
+			DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+			WARN_ON(!IS_SKYLAKE(dev));
+			WARN_ON(!IS_ULT(dev));
 		} else
 			continue;
 
@@ -575,14 +599,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
-		intel_runtime_pm_disable_interrupts(dev);
+		intel_runtime_pm_disable_interrupts(dev_priv);
 		intel_hpd_cancel_work(dev_priv);
 
 		intel_suspend_encoders(dev_priv);
 
 		intel_suspend_gt_powersave(dev);
 
-		intel_modeset_suspend_hw(dev);
+		intel_suspend_hw(dev);
 	}
 
 	i915_gem_suspend_gtt_mappings(dev);
@@ -680,16 +704,16 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		}
 		mutex_unlock(&dev->struct_mutex);
 
-		intel_runtime_pm_restore_interrupts(dev);
+		/* We need working interrupts for modeset enabling ... */
+		intel_runtime_pm_enable_interrupts(dev_priv);
 
 		intel_modeset_init_hw(dev);
 
 		{
-			unsigned long irqflags;
-			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			spin_lock_irq(&dev_priv->irq_lock);
 			if (dev_priv->display.hpd_irq_setup)
 				dev_priv->display.hpd_irq_setup(dev);
-			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+			spin_unlock_irq(&dev_priv->irq_lock);
 		}
 
 		intel_dp_mst_resume(dev);
@@ -703,7 +727,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		 * bother with the tiny race here where we might loose hotplug
 		 * notifications.
 		 * */
-		intel_hpd_init(dev);
+		intel_hpd_init(dev_priv);
 		/* Config may have changed between suspend and resume */
 		drm_helper_hpd_irq_event(dev);
 	}
@@ -820,6 +844,9 @@ int i915_reset(struct drm_device *dev)
 		}
 	}
 
+	if (i915_stop_ring_allow_warn(dev_priv))
+		pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
 	if (ret) {
 		DRM_ERROR("Failed to reset chip: %i\n", ret);
 		mutex_unlock(&dev->struct_mutex);
@@ -1446,12 +1473,12 @@ static int intel_runtime_suspend(struct device *device)
 	 * intel_mark_idle().
 	 */
 	cancel_work_sync(&dev_priv->rps.work);
-	intel_runtime_pm_disable_interrupts(dev);
+	intel_runtime_pm_disable_interrupts(dev_priv);
 
 	ret = intel_suspend_complete(dev_priv);
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-		intel_runtime_pm_restore_interrupts(dev);
+		intel_runtime_pm_enable_interrupts(dev_priv);
 
 		return ret;
 	}
@@ -1511,7 +1538,7 @@ static int intel_runtime_resume(struct device *device)
 	i915_gem_init_swizzling(dev);
 	gen6_update_ring_freq(dev);
 
-	intel_runtime_pm_restore_interrupts(dev);
+	intel_runtime_pm_enable_interrupts(dev_priv);
 	intel_reset_gt_powersave(dev);
 
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d187a1..9962da202456 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20140905"
+#define DRIVER_DATE		"20141003"
 
 enum pipe {
 	INVALID_PIPE = -1,
@@ -76,6 +76,14 @@ enum transcoder {
 };
 #define transcoder_name(t) ((t) + 'A')
 
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES	3
+
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
@@ -551,6 +559,7 @@ struct intel_uncore {
 	func(is_ivybridge) sep \
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
+	func(is_skylake) sep \
 	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
@@ -663,6 +672,18 @@ struct i915_fbc {
 
 	bool false_color;
 
+	/* Tracks whether the HW is actually enabled, not whether the feature is
+	 * possible. */
+	bool enabled;
+
+	/* On gen8 some rings cannont perform fbc clean operation so for now
+	 * we are doing this on SW with mmio.
+	 * This variable works in the opposite information direction
+	 * of ring->fbc_dirty telling software on frontbuffer tracking
+	 * to perform the cache clean on sw side.
+	 */
+	bool need_sw_cache_clean;
+
 	struct intel_fbc_work {
 		struct delayed_work work;
 		struct drm_crtc *crtc;
@@ -704,6 +725,7 @@ enum intel_pch {
 	PCH_IBX,	/* Ibexpeak PCH */
 	PCH_CPT,	/* Cougarpoint PCH */
 	PCH_LPT,	/* Lynxpoint PCH */
+	PCH_SPT,	/* Sunrisepoint PCH */
 	PCH_NOP,
 };
 
@@ -1369,7 +1391,7 @@ struct ilk_wm_values {
  *
  * Our driver uses the autosuspend delay feature, which means we'll only really
  * suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1404,7 @@ struct ilk_wm_values {
  */
 struct i915_runtime_pm {
 	bool suspended;
-	bool _irqs_disabled;
+	bool irqs_enabled;
 };
 
 enum intel_pipe_crc_source {
@@ -1509,7 +1531,7 @@ struct drm_i915_private {
 	struct intel_overlay *overlay;
 
 	/* backlight registers and fields in struct intel_panel */
-	spinlock_t backlight_lock;
+	struct mutex backlight_lock;
 
 	/* LVDS info */
 	bool no_aux_handshake;
@@ -2073,6 +2095,7 @@ struct drm_i915_cmd_table {
 #define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,6 +2103,8 @@ struct drm_i915_cmd_table {
 				 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
 				  (INTEL_DEVID(dev) & 0xf) == 0x6 || \
 				  (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
 #define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
@@ -2103,6 +2128,7 @@ struct drm_i915_cmd_table {
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
 #define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)
 
 #define RENDER_RING		(1<<RCS)
 #define BSD_RING		(1<<VCS)
@@ -2120,8 +2146,6 @@ struct drm_i915_cmd_table {
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)
 
@@ -2168,8 +2192,11 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 
 #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2262,8 +2289,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 
 void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
 							int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2793,7 +2822,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2832,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
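
A quick worked example for the new IS_BDW_GT3() macro above (the device ids here are used for illustration and are not part of the patch): a Broadwell GT3 part such as 0x1622 satisfies (0x1622 & 0x00F0) == 0x0020, while a GT2 part such as 0x1616 yields 0x0010 and is rejected — the macro keys purely off bits 4-7 of the PCI device ID.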
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604d..2719c25588cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1945,7 +1945,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
 		long target, unsigned flags)
 {
-	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+	const struct {
+		struct list_head *list;
+		unsigned int bit;
+	} phases[] = {
+		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ NULL, 0 },
+	}, *phase;
 	unsigned long count = 0;
 
 	/*
@@ -1967,48 +1974,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	if (flags & I915_SHRINK_UNBOUND) {
+	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
 
-		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-			struct drm_i915_gem_object *obj;
-
-			obj = list_first_entry(&dev_priv->mm.unbound_list,
-					       typeof(*obj), global_list);
-			list_move_tail(&obj->global_list, &still_in_list);
-
-			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-				continue;
-
-			drm_gem_object_reference(&obj->base);
-
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			drm_gem_object_unreference(&obj->base);
-		}
-		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-	}
-
-	if (flags & I915_SHRINK_BOUND) {
-		struct list_head still_in_list;
+		if ((flags & phase->bit) == 0)
+			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+		while (count < target && !list_empty(phase->list)) {
 			struct drm_i915_gem_object *obj;
 			struct i915_vma *vma, *v;
 
-			obj = list_first_entry(&dev_priv->mm.bound_list,
+			obj = list_first_entry(phase->list,
 					       typeof(*obj), global_list);
 			list_move_tail(&obj->global_list, &still_in_list);
 
-			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+			if (flags & I915_SHRINK_PURGEABLE &&
+			    !i915_gem_object_is_purgeable(obj))
 				continue;
 
 			drm_gem_object_reference(&obj->base);
 
-			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+			/* For the unbound phase, this should be a no-op! */
+			list_for_each_entry_safe(vma, v,
+						 &obj->vma_list, vma_link)
 				if (i915_vma_unbind(vma))
 					break;
 
@@ -2017,7 +2006,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			drm_gem_object_unreference(&obj->base);
 		}
-		list_splice(&still_in_list, &dev_priv->mm.bound_list);
+		list_splice(&still_in_list, phase->list);
 	}
 
 	return count;
@@ -3166,6 +3155,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 		   obj->stride, obj->tiling_mode);
 
 	switch (INTEL_INFO(dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
@@ -3384,46 +3374,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 	return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	int err = 0;
-
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-		if (obj->gtt_space == NULL) {
-			printk(KERN_ERR "object found on GTT list with no space reserved\n");
-			err++;
-			continue;
-		}
-
-		if (obj->cache_level != obj->gtt_space->color) {
-			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level,
-			       obj->gtt_space->color);
-			err++;
-			continue;
-		}
-
-		if (!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
-					      obj->cache_level)) {
-			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level);
-			err++;
-			continue;
-		}
-	}
-
-	WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3532,7 +3482,6 @@ search_free:
 	vma->bind_vma(vma, obj->cache_level,
 		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
 
-	i915_gem_verify_gtt(dev);
 	return vma;
 
 err_remove_node:
@@ -3769,7 +3718,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 					    old_write_domain);
 	}
 
-	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
@@ -5119,6 +5067,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * old: current GEM buffer for the frontbuffer slots
+ * new: new GEM buffer for the frontbuffer slots
+ * frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
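
The i915_gem_shrink() rewrite above replaces two nearly identical loops with a single loop over a sentinel-terminated phase table. A self-contained sketch of that table-driven pattern in plain C (names invented for illustration, not from the driver):

	#include <stdio.h>

	#define SCAN_UNBOUND 0x1
	#define SCAN_BOUND   0x2

	struct phase {
		const char *list;	/* stands in for the object list */
		unsigned int bit;	/* flag that selects this phase */
	};

	static void scan(unsigned int flags)
	{
		static const struct phase phases[] = {
			{ "unbound", SCAN_UNBOUND },
			{ "bound",   SCAN_BOUND },
			{ NULL, 0 },	/* sentinel ends the walk */
		};
		const struct phase *phase;

		for (phase = phases; phase->list; phase++) {
			if ((flags & phase->bit) == 0)
				continue;	/* phase not requested */
			printf("scanning %s list\n", phase->list);
		}
	}

	int main(void)
	{
		scan(SCAN_UNBOUND | SCAN_BOUND);	/* both phases, in order */
		return 0;
	}

Keeping the per-object body in one place is what lets the PURGEABLE filter and the vma-unbind step apply uniformly to both lists.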
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b672b843fd5e..ae82ef5e7df4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,13 +35,21 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
-	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+	bool has_aliasing_ppgtt;
+	bool has_full_ppgtt;
+
+	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+	if (IS_GEN8(dev))
+		has_full_ppgtt = false; /* XXX why? */
+
+	if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
 		return 0;
 
 	if (enable_ppgtt == 1)
 		return 1;
 
-	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+	if (enable_ppgtt == 2 && has_full_ppgtt)
 		return 2;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +67,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 		return 0;
 	}
 
-	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+	return has_aliasing_ppgtt ? 1 : 0;
 }
 
 
@@ -1092,7 +1100,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
-	else if (IS_GEN8(dev))
+	else if (IS_GEN8(dev) || IS_GEN9(dev))
 		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
 		BUG();
@@ -1764,7 +1772,6 @@ static int setup_scratch_page(struct drm_device *dev)
 	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 	if (page == NULL)
 		return -ENOMEM;
-	get_page(page);
 	set_pages_uc(page, 1);
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1796,6 @@ static void teardown_scratch_page(struct drm_device *dev)
 	set_pages_wb(page, 1);
 	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	put_page(page);
 	__free_page(page);
 }
 
@@ -1859,6 +1865,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
 	return (gmch_ctrl - 0x17 + 9) << 22;
 }
 
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+	if (gen9_gmch_ctl < 0xf0)
+		return gen9_gmch_ctl << 25; /* 32 MB units */
+	else
+		/* 4MB increments starting at 0xf0 for 4MB */
+		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
 static int ggtt_probe_common(struct drm_device *dev,
 			     size_t gtt_size)
 {
@@ -1955,7 +1973,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (INTEL_INFO(dev)->gen >= 9) {
+		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
+		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	} else if (IS_CHERRYVIEW(dev)) {
 		*stolen = chv_get_stolen_size(snb_gmch_ctl);
 		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
 	} else {
@@ -2127,6 +2148,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->obj = obj;
 
 	switch (INTEL_INFO(vm->dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
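
Worked through, the gen9_get_stolen_size() decoding above covers two ranges: for field values below 0xf0 the stolen size is the value in 32 MiB units (value << 25, so 0x10 gives 512 MiB), and from 0xf0 upward it switches to 4 MiB steps ((value - 0xf0 + 1) << 22, so 0xf0 gives 4 MiB and 0xf1 gives 8 MiB).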
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2c87a797213f..e664599de6e7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -765,6 +765,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
@@ -923,6 +924,7 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
 		switch (INTEL_INFO(dev)->gen) {
+		case 9:
 		case 8:
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
@@ -1326,13 +1328,12 @@ void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	spin_lock_irq(&dev_priv->gpu_error.lock);
 	error_priv->error = dev_priv->gpu_error.first_error;
 	if (error_priv->error)
 		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+	spin_unlock_irq(&dev_priv->gpu_error.lock);
 
 }
 
@@ -1346,12 +1347,11 @@ void i915_destroy_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	spin_lock_irq(&dev_priv->gpu_error.lock);
 	error = dev_priv->gpu_error.first_error;
 	dev_priv->gpu_error.first_error = NULL;
-	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+	spin_unlock_irq(&dev_priv->gpu_error.lock);
 
 	if (error)
 		kref_put(&error->ref, i915_error_state_free);
@@ -1389,6 +1389,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
 		WARN_ONCE(1, "Unsupported platform\n");
 	case 7:
 	case 8:
+	case 9:
 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b6e287..f17bbf3ac136 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,14 @@
37#include "i915_trace.h" 37#include "i915_trace.h"
38#include "intel_drv.h" 38#include "intel_drv.h"
39 39
40/**
41 * DOC: interrupt handling
42 *
43 * These functions provide the basic support for enabling and disabling the
44 * interrupt handling support. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
47
40static const u32 hpd_ibx[] = { 48static const u32 hpd_ibx[] = {
41 [HPD_CRT] = SDE_CRT_HOTPLUG, 49 [HPD_CRT] = SDE_CRT_HOTPLUG,
42 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -310,9 +318,8 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
310{ 318{
311 struct drm_i915_private *dev_priv = dev->dev_private; 319 struct drm_i915_private *dev_priv = dev->dev_private;
312 struct intel_crtc *crtc; 320 struct intel_crtc *crtc;
313 unsigned long flags;
314 321
315 spin_lock_irqsave(&dev_priv->irq_lock, flags); 322 spin_lock_irq(&dev_priv->irq_lock);
316 323
317 for_each_intel_crtc(dev, crtc) { 324 for_each_intel_crtc(dev, crtc) {
318 u32 reg = PIPESTAT(crtc->pipe); 325 u32 reg = PIPESTAT(crtc->pipe);
@@ -331,7 +338,7 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
331 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 338 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
332 } 339 }
333 340
334 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 341 spin_unlock_irq(&dev_priv->irq_lock);
335} 342}
336 343
337static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 344static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -503,7 +510,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 510 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
504 else if (IS_GEN7(dev)) 511 else if (IS_GEN7(dev))
505 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 512 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
506 else if (IS_GEN8(dev)) 513 else if (IS_GEN8(dev) || IS_GEN9(dev))
507 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 514 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
508 515
509 return old; 516 return old;
@@ -589,6 +596,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
589 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 596 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
590 597
591 assert_spin_locked(&dev_priv->irq_lock); 598 assert_spin_locked(&dev_priv->irq_lock);
599 WARN_ON(!intel_irqs_enabled(dev_priv));
592 600
593 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 601 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
594 status_mask & ~PIPESTAT_INT_STATUS_MASK, 602 status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +623,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
615 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 623 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
616 624
617 assert_spin_locked(&dev_priv->irq_lock); 625 assert_spin_locked(&dev_priv->irq_lock);
626 WARN_ON(!intel_irqs_enabled(dev_priv));
618 627
619 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 628 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
620 status_mask & ~PIPESTAT_INT_STATUS_MASK, 629 status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +703,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
694static void i915_enable_asle_pipestat(struct drm_device *dev) 703static void i915_enable_asle_pipestat(struct drm_device *dev)
695{ 704{
696 struct drm_i915_private *dev_priv = dev->dev_private; 705 struct drm_i915_private *dev_priv = dev->dev_private;
697 unsigned long irqflags;
698 706
699 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 707 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
700 return; 708 return;
701 709
702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 710 spin_lock_irq(&dev_priv->irq_lock);
703 711
704 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 712 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
705 if (INTEL_INFO(dev)->gen >= 4) 713 if (INTEL_INFO(dev)->gen >= 4)
706 i915_enable_pipestat(dev_priv, PIPE_A, 714 i915_enable_pipestat(dev_priv, PIPE_A,
707 PIPE_LEGACY_BLC_EVENT_STATUS); 715 PIPE_LEGACY_BLC_EVENT_STATUS);
708 716
709 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 717 spin_unlock_irq(&dev_priv->irq_lock);
710} 718}
711 719
712/** 720/**
@@ -1094,18 +1102,17 @@ static void i915_digport_work_func(struct work_struct *work)
1094{ 1102{
1095 struct drm_i915_private *dev_priv = 1103 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work); 1104 container_of(work, struct drm_i915_private, dig_port_work);
1097 unsigned long irqflags;
1098 u32 long_port_mask, short_port_mask; 1105 u32 long_port_mask, short_port_mask;
1099 struct intel_digital_port *intel_dig_port; 1106 struct intel_digital_port *intel_dig_port;
1100 int i, ret; 1107 int i, ret;
1101 u32 old_bits = 0; 1108 u32 old_bits = 0;
1102 1109
1103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1110 spin_lock_irq(&dev_priv->irq_lock);
1104 long_port_mask = dev_priv->long_hpd_port_mask; 1111 long_port_mask = dev_priv->long_hpd_port_mask;
1105 dev_priv->long_hpd_port_mask = 0; 1112 dev_priv->long_hpd_port_mask = 0;
1106 short_port_mask = dev_priv->short_hpd_port_mask; 1113 short_port_mask = dev_priv->short_hpd_port_mask;
1107 dev_priv->short_hpd_port_mask = 0; 1114 dev_priv->short_hpd_port_mask = 0;
1108 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1115 spin_unlock_irq(&dev_priv->irq_lock);
1109 1116
1110 for (i = 0; i < I915_MAX_PORTS; i++) { 1117 for (i = 0; i < I915_MAX_PORTS; i++) {
1111 bool valid = false; 1118 bool valid = false;
@@ -1130,9 +1137,9 @@ static void i915_digport_work_func(struct work_struct *work)
1130 } 1137 }
1131 1138
1132 if (old_bits) { 1139 if (old_bits) {
1133 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1140 spin_lock_irq(&dev_priv->irq_lock);
1134 dev_priv->hpd_event_bits |= old_bits; 1141 dev_priv->hpd_event_bits |= old_bits;
1135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1142 spin_unlock_irq(&dev_priv->irq_lock);
1136 schedule_work(&dev_priv->hotplug_work); 1143 schedule_work(&dev_priv->hotplug_work);
1137 } 1144 }
1138} 1145}
@@ -1151,7 +1158,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
1151 struct intel_connector *intel_connector; 1158 struct intel_connector *intel_connector;
1152 struct intel_encoder *intel_encoder; 1159 struct intel_encoder *intel_encoder;
1153 struct drm_connector *connector; 1160 struct drm_connector *connector;
1154 unsigned long irqflags;
1155 bool hpd_disabled = false; 1161 bool hpd_disabled = false;
1156 bool changed = false; 1162 bool changed = false;
1157 u32 hpd_event_bits; 1163 u32 hpd_event_bits;
@@ -1159,7 +1165,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
1159 mutex_lock(&mode_config->mutex); 1165 mutex_lock(&mode_config->mutex);
1160 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 1166 DRM_DEBUG_KMS("running encoder hotplug functions\n");
1161 1167
1162 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1168 spin_lock_irq(&dev_priv->irq_lock);
1163 1169
1164 hpd_event_bits = dev_priv->hpd_event_bits; 1170 hpd_event_bits = dev_priv->hpd_event_bits;
1165 dev_priv->hpd_event_bits = 0; 1171 dev_priv->hpd_event_bits = 0;
@@ -1193,7 +1199,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
1193 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 1199 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1194 } 1200 }
1195 1201
1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1202 spin_unlock_irq(&dev_priv->irq_lock);
1197 1203
1198 list_for_each_entry(connector, &mode_config->connector_list, head) { 1204 list_for_each_entry(connector, &mode_config->connector_list, head) {
1199 intel_connector = to_intel_connector(connector); 1205 intel_connector = to_intel_connector(connector);
@@ -1488,7 +1494,6 @@ static void ivybridge_parity_work(struct work_struct *work)
1488 u32 error_status, row, bank, subbank; 1494 u32 error_status, row, bank, subbank;
1489 char *parity_event[6]; 1495 char *parity_event[6];
1490 uint32_t misccpctl; 1496 uint32_t misccpctl;
1491 unsigned long flags;
1492 uint8_t slice = 0; 1497 uint8_t slice = 0;
1493 1498
1494 /* We must turn off DOP level clock gating to access the L3 registers. 1499 /* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1552,9 @@ static void ivybridge_parity_work(struct work_struct *work)
1547 1552
1548out: 1553out:
1549 WARN_ON(dev_priv->l3_parity.which_slice); 1554 WARN_ON(dev_priv->l3_parity.which_slice);
1550 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1555 spin_lock_irq(&dev_priv->irq_lock);
1551 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1556 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1552 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1557 spin_unlock_irq(&dev_priv->irq_lock);
1553 1558
1554 mutex_unlock(&dev_priv->dev->struct_mutex); 1559 mutex_unlock(&dev_priv->dev->struct_mutex);
1555} 1560}
@@ -2566,7 +2571,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2566 } 2571 }
2567 2572
2568 for_each_pipe(dev_priv, pipe) { 2573 for_each_pipe(dev_priv, pipe) {
2569 uint32_t pipe_iir; 2574 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2570 2575
2571 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2576 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2572 continue; 2577 continue;
@@ -2575,11 +2580,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2575 if (pipe_iir) { 2580 if (pipe_iir) {
2576 ret = IRQ_HANDLED; 2581 ret = IRQ_HANDLED;
2577 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2582 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2583
2578 if (pipe_iir & GEN8_PIPE_VBLANK && 2584 if (pipe_iir & GEN8_PIPE_VBLANK &&
2579 intel_pipe_handle_vblank(dev, pipe)) 2585 intel_pipe_handle_vblank(dev, pipe))
2580 intel_check_page_flip(dev, pipe); 2586 intel_check_page_flip(dev, pipe);
2581 2587
2582 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2588 if (IS_GEN9(dev))
2589 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2590 else
2591 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2592
2593 if (flip_done) {
2583 intel_prepare_page_flip(dev, pipe); 2594 intel_prepare_page_flip(dev, pipe);
2584 intel_finish_page_flip_plane(dev, pipe); 2595 intel_finish_page_flip_plane(dev, pipe);
2585 } 2596 }
@@ -2594,11 +2605,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2594 pipe_name(pipe)); 2605 pipe_name(pipe));
2595 } 2606 }
2596 2607
2597 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2608
2609 if (IS_GEN9(dev))
2610 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2611 else
2612 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2613
2614 if (fault_errors)
2598 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2615 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2599 pipe_name(pipe), 2616 pipe_name(pipe),
2600 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2617 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2601 }
2602 } else 2618 } else
2603 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2619 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2604 } 2620 }
@@ -3444,8 +3460,8 @@ static void gen8_irq_reset(struct drm_device *dev)
3444 gen8_gt_irq_reset(dev_priv); 3460 gen8_gt_irq_reset(dev_priv);
3445 3461
3446 for_each_pipe(dev_priv, pipe) 3462 for_each_pipe(dev_priv, pipe)
3447 if (intel_display_power_enabled(dev_priv, 3463 if (intel_display_power_is_enabled(dev_priv,
3448 POWER_DOMAIN_PIPE(pipe))) 3464 POWER_DOMAIN_PIPE(pipe)))
3449 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3465 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3450 3466
3451 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3467 GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,15 +3473,14 @@ static void gen8_irq_reset(struct drm_device *dev)
3457 3473
3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3474void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3459{ 3475{
3460 unsigned long irqflags;
3461 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3476 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3462 3477
3463 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3478 spin_lock_irq(&dev_priv->irq_lock);
3464 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3479 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3465 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3480 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3466 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3481 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3467 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3482 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3468 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3483 spin_unlock_irq(&dev_priv->irq_lock);
3469} 3484}
3470 3485
3471static void cherryview_irq_preinstall(struct drm_device *dev) 3486static void cherryview_irq_preinstall(struct drm_device *dev)
@@ -3584,7 +3599,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3584 3599
3585static int ironlake_irq_postinstall(struct drm_device *dev) 3600static int ironlake_irq_postinstall(struct drm_device *dev)
3586{ 3601{
3587 unsigned long irqflags;
3588 struct drm_i915_private *dev_priv = dev->dev_private; 3602 struct drm_i915_private *dev_priv = dev->dev_private;
3589 u32 display_mask, extra_mask; 3603 u32 display_mask, extra_mask;
3590 3604
@@ -3623,9 +3637,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3623 * spinlocking not required here for correctness since interrupt 3637 * spinlocking not required here for correctness since interrupt
3624 * setup is guaranteed to run in single-threaded context. But we 3638 * setup is guaranteed to run in single-threaded context. But we
3625 * need it to make the assert_spin_locked happy. */ 3639 * need it to make the assert_spin_locked happy. */
3626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3640 spin_lock_irq(&dev_priv->irq_lock);
3627 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3641 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3628 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3642 spin_unlock_irq(&dev_priv->irq_lock);
3629 } 3643 }
3630 3644
3631 return 0; 3645 return 0;
@@ -3701,7 +3715,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3701 3715
3702 dev_priv->display_irqs_enabled = true; 3716 dev_priv->display_irqs_enabled = true;
3703 3717
3704 if (dev_priv->dev->irq_enabled) 3718 if (intel_irqs_enabled(dev_priv))
3705 valleyview_display_irqs_install(dev_priv); 3719 valleyview_display_irqs_install(dev_priv);
3706} 3720}
3707 3721
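
dev->irq_enabled is drm core state; elsewhere in this diff the driver-owned dev_priv->pm.irqs_enabled flag replaces pm._irqs_disabled, and intel_irqs_enabled() reads that flag instead. The accessor is presumably a trivial inline along these lines (a sketch, not quoted from the patch):

static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->pm.irqs_enabled;
}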
@@ -3714,14 +3728,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3714 3728
3715 dev_priv->display_irqs_enabled = false; 3729 dev_priv->display_irqs_enabled = false;
3716 3730
3717 if (dev_priv->dev->irq_enabled) 3731 if (intel_irqs_enabled(dev_priv))
3718 valleyview_display_irqs_uninstall(dev_priv); 3732 valleyview_display_irqs_uninstall(dev_priv);
3719} 3733}
3720 3734
3721static int valleyview_irq_postinstall(struct drm_device *dev) 3735static int valleyview_irq_postinstall(struct drm_device *dev)
3722{ 3736{
3723 struct drm_i915_private *dev_priv = dev->dev_private; 3737 struct drm_i915_private *dev_priv = dev->dev_private;
3724 unsigned long irqflags;
3725 3738
3726 dev_priv->irq_mask = ~0; 3739 dev_priv->irq_mask = ~0;
3727 3740
@@ -3735,10 +3748,10 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
3735 3748
3736 /* Interrupt setup is already guaranteed to be single-threaded, this is 3749 /* Interrupt setup is already guaranteed to be single-threaded, this is
3737 * just to make the assert_spin_locked check happy. */ 3750 * just to make the assert_spin_locked check happy. */
3738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3751 spin_lock_irq(&dev_priv->irq_lock);
3739 if (dev_priv->display_irqs_enabled) 3752 if (dev_priv->display_irqs_enabled)
3740 valleyview_display_irqs_install(dev_priv); 3753 valleyview_display_irqs_install(dev_priv);
3741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3754 spin_unlock_irq(&dev_priv->irq_lock);
3742 3755
3743 I915_WRITE(VLV_IIR, 0xffffffff); 3756 I915_WRITE(VLV_IIR, 0xffffffff);
3744 I915_WRITE(VLV_IIR, 0xffffffff); 3757 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3783,18 +3796,26 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3783 3796
3784static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3797static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3785{ 3798{
3786 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3799 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3787 GEN8_PIPE_CDCLK_CRC_DONE | 3800 uint32_t de_pipe_enables;
3788 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3789 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3790 GEN8_PIPE_FIFO_UNDERRUN;
3791 int pipe; 3801 int pipe;
3802
3803 if (IS_GEN9(dev_priv))
3804 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3805 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3806 else
3807 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3808 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3809
3810 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3811 GEN8_PIPE_FIFO_UNDERRUN;
3812
3792 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3813 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3793 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3814 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3794 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3815 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3795 3816
3796 for_each_pipe(dev_priv, pipe) 3817 for_each_pipe(dev_priv, pipe)
3797 if (intel_display_power_enabled(dev_priv, 3818 if (intel_display_power_is_enabled(dev_priv,
3798 POWER_DOMAIN_PIPE(pipe))) 3819 POWER_DOMAIN_PIPE(pipe)))
3799 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3820 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3800 dev_priv->de_irq_mask[pipe], 3821 dev_priv->de_irq_mask[pipe],
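
The masked/enables pair feeds GEN8_IRQ_INIT_NDX(): IMR unmasks only the events the driver consumes, while IER also enables vblank and FIFO underrun. Expanded by hand for one pipe, assuming the macro's usual IMR-then-IER ordering with a posting read (a sketch, not literal macro output):

I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
POSTING_READ(GEN8_DE_PIPE_IER(pipe));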
@@ -3829,7 +3850,6 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3829 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3850 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3830 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 3851 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3831 PIPE_CRC_DONE_INTERRUPT_STATUS; 3852 PIPE_CRC_DONE_INTERRUPT_STATUS;
3832 unsigned long irqflags;
3833 int pipe; 3853 int pipe;
3834 3854
3835 /* 3855 /*
@@ -3841,11 +3861,11 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3841 for_each_pipe(dev_priv, pipe) 3861 for_each_pipe(dev_priv, pipe)
3842 I915_WRITE(PIPESTAT(pipe), 0xffff); 3862 I915_WRITE(PIPESTAT(pipe), 0xffff);
3843 3863
3844 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3864 spin_lock_irq(&dev_priv->irq_lock);
3845 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3865 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3846 for_each_pipe(dev_priv, pipe) 3866 for_each_pipe(dev_priv, pipe)
3847 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3867 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3848 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3868 spin_unlock_irq(&dev_priv->irq_lock);
3849 3869
3850 I915_WRITE(VLV_IIR, 0xffffffff); 3870 I915_WRITE(VLV_IIR, 0xffffffff);
3851 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3871 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
@@ -3872,7 +3892,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3872static void valleyview_irq_uninstall(struct drm_device *dev) 3892static void valleyview_irq_uninstall(struct drm_device *dev)
3873{ 3893{
3874 struct drm_i915_private *dev_priv = dev->dev_private; 3894 struct drm_i915_private *dev_priv = dev->dev_private;
3875 unsigned long irqflags;
3876 int pipe; 3895 int pipe;
3877 3896
3878 if (!dev_priv) 3897 if (!dev_priv)
@@ -3887,10 +3906,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3887 I915_WRITE(PORT_HOTPLUG_EN, 0); 3906 I915_WRITE(PORT_HOTPLUG_EN, 0);
3888 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3907 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3889 3908
3890 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3909 /* Interrupt setup is already guaranteed to be single-threaded, this is
3910 * just to make the assert_spin_locked check happy. */
3911 spin_lock_irq(&dev_priv->irq_lock);
3891 if (dev_priv->display_irqs_enabled) 3912 if (dev_priv->display_irqs_enabled)
3892 valleyview_display_irqs_uninstall(dev_priv); 3913 valleyview_display_irqs_uninstall(dev_priv);
3893 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3914 spin_unlock_irq(&dev_priv->irq_lock);
3894 3915
3895 dev_priv->irq_mask = 0; 3916 dev_priv->irq_mask = 0;
3896 3917
@@ -3976,7 +3997,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3976static int i8xx_irq_postinstall(struct drm_device *dev) 3997static int i8xx_irq_postinstall(struct drm_device *dev)
3977{ 3998{
3978 struct drm_i915_private *dev_priv = dev->dev_private; 3999 struct drm_i915_private *dev_priv = dev->dev_private;
3979 unsigned long irqflags;
3980 4000
3981 I915_WRITE16(EMR, 4001 I915_WRITE16(EMR,
3982 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4002 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +4019,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3999 4019
4000 /* Interrupt setup is already guaranteed to be single-threaded, this is 4020 /* Interrupt setup is already guaranteed to be single-threaded, this is
4001 * just to make the assert_spin_locked check happy. */ 4021 * just to make the assert_spin_locked check happy. */
4002 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4022 spin_lock_irq(&dev_priv->irq_lock);
4003 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4023 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4004 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4024 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4005 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4025 spin_unlock_irq(&dev_priv->irq_lock);
4006 4026
4007 return 0; 4027 return 0;
4008} 4028}
@@ -4047,7 +4067,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4047 struct drm_i915_private *dev_priv = dev->dev_private; 4067 struct drm_i915_private *dev_priv = dev->dev_private;
4048 u16 iir, new_iir; 4068 u16 iir, new_iir;
4049 u32 pipe_stats[2]; 4069 u32 pipe_stats[2];
4050 unsigned long irqflags;
4051 int pipe; 4070 int pipe;
4052 u16 flip_mask = 4071 u16 flip_mask =
4053 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4072 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,7 +4082,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4063 * It doesn't set the bit in iir again, but it still produces 4082 * It doesn't set the bit in iir again, but it still produces
4064 * interrupts (for non-MSI). 4083 * interrupts (for non-MSI).
4065 */ 4084 */
4066 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4085 spin_lock(&dev_priv->irq_lock);
4067 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4086 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4068 i915_handle_error(dev, false, 4087 i915_handle_error(dev, false,
4069 "Command parser error, iir 0x%08x", 4088 "Command parser error, iir 0x%08x",
@@ -4079,7 +4098,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4079 if (pipe_stats[pipe] & 0x8000ffff) 4098 if (pipe_stats[pipe] & 0x8000ffff)
4080 I915_WRITE(reg, pipe_stats[pipe]); 4099 I915_WRITE(reg, pipe_stats[pipe]);
4081 } 4100 }
4082 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4101 spin_unlock(&dev_priv->irq_lock);
4083 4102
4084 I915_WRITE16(IIR, iir & ~flip_mask); 4103 I915_WRITE16(IIR, iir & ~flip_mask);
4085 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4104 new_iir = I915_READ16(IIR); /* Flush posted writes */
@@ -4149,7 +4168,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
4149{ 4168{
4150 struct drm_i915_private *dev_priv = dev->dev_private; 4169 struct drm_i915_private *dev_priv = dev->dev_private;
4151 u32 enable_mask; 4170 u32 enable_mask;
4152 unsigned long irqflags;
4153 4171
4154 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4172 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4155 4173
@@ -4187,10 +4205,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
4187 4205
4188 /* Interrupt setup is already guaranteed to be single-threaded, this is 4206 /* Interrupt setup is already guaranteed to be single-threaded, this is
4189 * just to make the assert_spin_locked check happy. */ 4207 * just to make the assert_spin_locked check happy. */
4190 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4208 spin_lock_irq(&dev_priv->irq_lock);
4191 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4209 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4192 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4210 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4193 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4211 spin_unlock_irq(&dev_priv->irq_lock);
4194 4212
4195 return 0; 4213 return 0;
4196} 4214}
@@ -4234,7 +4252,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4234 struct drm_device *dev = arg; 4252 struct drm_device *dev = arg;
4235 struct drm_i915_private *dev_priv = dev->dev_private; 4253 struct drm_i915_private *dev_priv = dev->dev_private;
4236 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4254 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4237 unsigned long irqflags;
4238 u32 flip_mask = 4255 u32 flip_mask =
4239 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4256 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4240 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4257 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,7 +4267,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4250 * It doesn't set the bit in iir again, but it still produces 4267 * It doesn't set the bit in iir again, but it still produces
4251 * interrupts (for non-MSI). 4268 * interrupts (for non-MSI).
4252 */ 4269 */
4253 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4270 spin_lock(&dev_priv->irq_lock);
4254 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4271 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4255 i915_handle_error(dev, false, 4272 i915_handle_error(dev, false,
4256 "Command parser error, iir 0x%08x", 4273 "Command parser error, iir 0x%08x",
@@ -4266,7 +4283,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4266 irq_received = true; 4283 irq_received = true;
4267 } 4284 }
4268 } 4285 }
4269 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4286 spin_unlock(&dev_priv->irq_lock);
4270 4287
4271 if (!irq_received) 4288 if (!irq_received)
4272 break; 4289 break;
@@ -4372,7 +4389,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
4372 struct drm_i915_private *dev_priv = dev->dev_private; 4389 struct drm_i915_private *dev_priv = dev->dev_private;
4373 u32 enable_mask; 4390 u32 enable_mask;
4374 u32 error_mask; 4391 u32 error_mask;
4375 unsigned long irqflags;
4376 4392
4377 /* Unmask the interrupts that we always want on. */ 4393 /* Unmask the interrupts that we always want on. */
4378 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4394 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4409,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
4393 4409
4394 /* Interrupt setup is already guaranteed to be single-threaded, this is 4410 /* Interrupt setup is already guaranteed to be single-threaded, this is
4395 * just to make the assert_spin_locked check happy. */ 4411 * just to make the assert_spin_locked check happy. */
4396 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4412 spin_lock_irq(&dev_priv->irq_lock);
4397 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4413 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4398 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4414 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4399 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4415 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4400 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4416 spin_unlock_irq(&dev_priv->irq_lock);
4401 4417
4402 /* 4418 /*
4403 * Enable some error detection, note the instruction error mask 4419 * Enable some error detection, note the instruction error mask
@@ -4462,7 +4478,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4462 struct drm_i915_private *dev_priv = dev->dev_private; 4478 struct drm_i915_private *dev_priv = dev->dev_private;
4463 u32 iir, new_iir; 4479 u32 iir, new_iir;
4464 u32 pipe_stats[I915_MAX_PIPES]; 4480 u32 pipe_stats[I915_MAX_PIPES];
4465 unsigned long irqflags;
4466 int ret = IRQ_NONE, pipe; 4481 int ret = IRQ_NONE, pipe;
4467 u32 flip_mask = 4482 u32 flip_mask =
4468 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4483 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,7 +4494,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4479 * It doesn't set the bit in iir again, but it still produces 4494 * It doesn't set the bit in iir again, but it still produces
4480 * interrupts (for non-MSI). 4495 * interrupts (for non-MSI).
4481 */ 4496 */
4482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4497 spin_lock(&dev_priv->irq_lock);
4483 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4498 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4484 i915_handle_error(dev, false, 4499 i915_handle_error(dev, false,
4485 "Command parser error, iir 0x%08x", 4500 "Command parser error, iir 0x%08x",
@@ -4497,7 +4512,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4497 irq_received = true; 4512 irq_received = true;
4498 } 4513 }
4499 } 4514 }
4500 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4515 spin_unlock(&dev_priv->irq_lock);
4501 4516
4502 if (!irq_received) 4517 if (!irq_received)
4503 break; 4518 break;
@@ -4584,19 +4599,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
4584 I915_WRITE(IIR, I915_READ(IIR)); 4599 I915_WRITE(IIR, I915_READ(IIR));
4585} 4600}
4586 4601
4587static void intel_hpd_irq_reenable(struct work_struct *work) 4602static void intel_hpd_irq_reenable_work(struct work_struct *work)
4588{ 4603{
4589 struct drm_i915_private *dev_priv = 4604 struct drm_i915_private *dev_priv =
4590 container_of(work, typeof(*dev_priv), 4605 container_of(work, typeof(*dev_priv),
4591 hotplug_reenable_work.work); 4606 hotplug_reenable_work.work);
4592 struct drm_device *dev = dev_priv->dev; 4607 struct drm_device *dev = dev_priv->dev;
4593 struct drm_mode_config *mode_config = &dev->mode_config; 4608 struct drm_mode_config *mode_config = &dev->mode_config;
4594 unsigned long irqflags;
4595 int i; 4609 int i;
4596 4610
4597 intel_runtime_pm_get(dev_priv); 4611 intel_runtime_pm_get(dev_priv);
4598 4612
4599 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4613 spin_lock_irq(&dev_priv->irq_lock);
4600 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4614 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4601 struct drm_connector *connector; 4615 struct drm_connector *connector;
4602 4616
@@ -4620,14 +4634,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
4620 } 4634 }
4621 if (dev_priv->display.hpd_irq_setup) 4635 if (dev_priv->display.hpd_irq_setup)
4622 dev_priv->display.hpd_irq_setup(dev); 4636 dev_priv->display.hpd_irq_setup(dev);
4623 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4637 spin_unlock_irq(&dev_priv->irq_lock);
4624 4638
4625 intel_runtime_pm_put(dev_priv); 4639 intel_runtime_pm_put(dev_priv);
4626} 4640}
4627 4641
4628void intel_irq_init(struct drm_device *dev) 4642/**
4643 * intel_irq_init - initializes irq support
4644 * @dev_priv: i915 device instance
4645 *
4646 * This function initializes all the irq support including work items, timers
4647 * and all the vtables. It does not setup the interrupt itself though.
4648 */
4649void intel_irq_init(struct drm_i915_private *dev_priv)
4629{ 4650{
4630 struct drm_i915_private *dev_priv = dev->dev_private; 4651 struct drm_device *dev = dev_priv->dev;
4631 4652
4632 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4653 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4633 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4654 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4657,7 @@ void intel_irq_init(struct drm_device *dev)
4636 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4657 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4637 4658
4638 /* Let's track the enabled rps events */ 4659 /* Let's track the enabled rps events */
4639 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) 4660 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4640 /* WaGsvRC0ResidencyMethod:vlv */ 4661 /* WaGsvRC0ResidencyMethod:vlv */
4641 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4662 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4642 else 4663 else
@@ -4646,17 +4667,14 @@ void intel_irq_init(struct drm_device *dev)
4646 i915_hangcheck_elapsed, 4667 i915_hangcheck_elapsed,
4647 (unsigned long) dev); 4668 (unsigned long) dev);
4648 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4669 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4649 intel_hpd_irq_reenable); 4670 intel_hpd_irq_reenable_work);
4650 4671
4651 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4672 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4652 4673
4653 /* Haven't installed the IRQ handler yet */ 4674 if (IS_GEN2(dev_priv)) {
4654 dev_priv->pm._irqs_disabled = true;
4655
4656 if (IS_GEN2(dev)) {
4657 dev->max_vblank_count = 0; 4675 dev->max_vblank_count = 0;
4658 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4676 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4659 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 4677 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4660 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4678 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4661 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4679 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4662 } else { 4680 } else {
@@ -4669,7 +4687,7 @@ void intel_irq_init(struct drm_device *dev)
4669 * Gen2 doesn't have a hardware frame counter and so depends on 4687 * Gen2 doesn't have a hardware frame counter and so depends on
4670 * vblank interrupts to produce sane vblank sequence numbers. 4688
4671 */ 4689 */
4672 if (!IS_GEN2(dev)) 4690 if (!IS_GEN2(dev_priv))
4673 dev->vblank_disable_immediate = true; 4691 dev->vblank_disable_immediate = true;
4674 4692
4675 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4693 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4695,7 @@ void intel_irq_init(struct drm_device *dev)
4677 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4695 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4678 } 4696 }
4679 4697
4680 if (IS_CHERRYVIEW(dev)) { 4698 if (IS_CHERRYVIEW(dev_priv)) {
4681 dev->driver->irq_handler = cherryview_irq_handler; 4699 dev->driver->irq_handler = cherryview_irq_handler;
4682 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4700 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4683 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4701 dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4703,7 @@ void intel_irq_init(struct drm_device *dev)
4685 dev->driver->enable_vblank = valleyview_enable_vblank; 4703 dev->driver->enable_vblank = valleyview_enable_vblank;
4686 dev->driver->disable_vblank = valleyview_disable_vblank; 4704 dev->driver->disable_vblank = valleyview_disable_vblank;
4687 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4705 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4688 } else if (IS_VALLEYVIEW(dev)) { 4706 } else if (IS_VALLEYVIEW(dev_priv)) {
4689 dev->driver->irq_handler = valleyview_irq_handler; 4707 dev->driver->irq_handler = valleyview_irq_handler;
4690 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4708 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4691 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4709 dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4711,7 @@ void intel_irq_init(struct drm_device *dev)
4693 dev->driver->enable_vblank = valleyview_enable_vblank; 4711 dev->driver->enable_vblank = valleyview_enable_vblank;
4694 dev->driver->disable_vblank = valleyview_disable_vblank; 4712 dev->driver->disable_vblank = valleyview_disable_vblank;
4695 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4713 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4696 } else if (IS_GEN8(dev)) { 4714 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4697 dev->driver->irq_handler = gen8_irq_handler; 4715 dev->driver->irq_handler = gen8_irq_handler;
4698 dev->driver->irq_preinstall = gen8_irq_reset; 4716 dev->driver->irq_preinstall = gen8_irq_reset;
4699 dev->driver->irq_postinstall = gen8_irq_postinstall; 4717 dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4728,12 @@ void intel_irq_init(struct drm_device *dev)
4710 dev->driver->disable_vblank = ironlake_disable_vblank; 4728 dev->driver->disable_vblank = ironlake_disable_vblank;
4711 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4729 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4712 } else { 4730 } else {
4713 if (INTEL_INFO(dev)->gen == 2) { 4731 if (INTEL_INFO(dev_priv)->gen == 2) {
4714 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4732 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4715 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4733 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4716 dev->driver->irq_handler = i8xx_irq_handler; 4734 dev->driver->irq_handler = i8xx_irq_handler;
4717 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4735 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4718 } else if (INTEL_INFO(dev)->gen == 3) { 4736 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4719 dev->driver->irq_preinstall = i915_irq_preinstall; 4737 dev->driver->irq_preinstall = i915_irq_preinstall;
4720 dev->driver->irq_postinstall = i915_irq_postinstall; 4738 dev->driver->irq_postinstall = i915_irq_postinstall;
4721 dev->driver->irq_uninstall = i915_irq_uninstall; 4739 dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4751,23 @@ void intel_irq_init(struct drm_device *dev)
4733 } 4751 }
4734} 4752}
4735 4753
4736void intel_hpd_init(struct drm_device *dev) 4754/**
4755 * intel_hpd_init - initializes and enables hpd support
4756 * @dev_priv: i915 device instance
4757 *
4758 * This function enables the hotplug support. It requires that interrupts have
4759 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
4760 * poll request can run concurrently to other code, so locking rules must be
4761 * obeyed.
4762 *
4763 * This is a separate step from interrupt enabling to simplify the locking rules
4764 * in the driver load and resume code.
4765 */
4766void intel_hpd_init(struct drm_i915_private *dev_priv)
4737{ 4767{
4738 struct drm_i915_private *dev_priv = dev->dev_private; 4768 struct drm_device *dev = dev_priv->dev;
4739 struct drm_mode_config *mode_config = &dev->mode_config; 4769 struct drm_mode_config *mode_config = &dev->mode_config;
4740 struct drm_connector *connector; 4770 struct drm_connector *connector;
4741 unsigned long irqflags;
4742 int i; 4771 int i;
4743 4772
4744 for (i = 1; i < HPD_NUM_PINS; i++) { 4773 for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4785,72 @@ void intel_hpd_init(struct drm_device *dev)
4756 4785
4757 /* Interrupt setup is already guaranteed to be single-threaded, this is 4786 /* Interrupt setup is already guaranteed to be single-threaded, this is
4758 * just to make the assert_spin_locked checks happy. */ 4787 * just to make the assert_spin_locked checks happy. */
4759 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4788 spin_lock_irq(&dev_priv->irq_lock);
4760 if (dev_priv->display.hpd_irq_setup) 4789 if (dev_priv->display.hpd_irq_setup)
4761 dev_priv->display.hpd_irq_setup(dev); 4790 dev_priv->display.hpd_irq_setup(dev);
4762 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4791 spin_unlock_irq(&dev_priv->irq_lock);
4763} 4792}
4764 4793
4765/* Disable interrupts so we can allow runtime PM. */ 4794/**
4766void intel_runtime_pm_disable_interrupts(struct drm_device *dev) 4795 * intel_irq_install - enables the hardware interrupt
4796 * @dev_priv: i915 device instance
4797 *
4798 * This function enables the hardware interrupt handling, but leaves hotplug
4799 * handling disabled. It is called after intel_irq_init().
4800 *
4801 * In the driver load and resume code we need working interrupts in a few places
4802 * but don't want to deal with the hassle of concurrent probe and hotplug
4803 * workers. Hence the split into this two-stage approach.
4804 */
4805int intel_irq_install(struct drm_i915_private *dev_priv)
4767{ 4806{
4768 struct drm_i915_private *dev_priv = dev->dev_private; 4807 /*
4808 * We enable some interrupt sources in our postinstall hooks, so mark
4809 * interrupts as enabled _before_ actually enabling them to avoid
4810 * special cases in our ordering checks.
4811 */
4812 dev_priv->pm.irqs_enabled = true;
4769 4813
4770 dev->driver->irq_uninstall(dev); 4814 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4771 dev_priv->pm._irqs_disabled = true;
4772} 4815}
4773 4816
4774/* Restore interrupts so we can recover from runtime PM. */ 4817/**
4775void intel_runtime_pm_restore_interrupts(struct drm_device *dev) 4818 * intel_irq_uninstall - finalizes all irq handling
4819 * @dev_priv: i915 device instance
4820 *
4821 * This stops interrupt and hotplug handling and unregisters and frees all
4822 * resources acquired in the init functions.
4823 */
4824void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4776{ 4825{
4777 struct drm_i915_private *dev_priv = dev->dev_private; 4826 drm_irq_uninstall(dev_priv->dev);
4827 intel_hpd_cancel_work(dev_priv);
4828 dev_priv->pm.irqs_enabled = false;
4829}
4830
4831/**
4832 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4833 * @dev_priv: i915 device instance
4834 *
4835 * This function is used to disable interrupts at runtime, both in the runtime
4836 * pm and the system suspend/resume code.
4837 */
4838void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4839{
4840 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4841 dev_priv->pm.irqs_enabled = false;
4842}
4778 4843
4779 dev_priv->pm._irqs_disabled = false; 4844/**
4780 dev->driver->irq_preinstall(dev); 4845 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4781 dev->driver->irq_postinstall(dev); 4846 * @dev_priv: i915 device instance
4847 *
4848 * This function is used to enable interrupts at runtime, both in the runtime
4849 * pm and the system suspend/resume code.
4850 */
4851void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4852{
4853 dev_priv->pm.irqs_enabled = true;
4854 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4855 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4782} 4856}
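
Taken together, the kerneldoc added above defines a small lifecycle for interrupt handling. A sketch of the intended call ordering; error handling is elided, and the resume-side intel_hpd_init() call is an assumption drawn from the two-stage rationale rather than a quote from this patch:

/* driver load */
intel_irq_init(dev_priv);	/* work items, timers, vtables */
intel_irq_install(dev_priv);	/* hardware irqs on, hotplug still off */
/* ... init steps that need interrupts but not hotplug ... */
intel_hpd_init(dev_priv);	/* hotplug and polling last */

/* runtime or system suspend */
intel_runtime_pm_disable_interrupts(dev_priv);

/* resume */
intel_runtime_pm_enable_interrupts(dev_priv);
intel_hpd_init(dev_priv);

/* driver unload */
intel_irq_uninstall(dev_priv);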
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430e..a56d9a7e7e0e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,8 +26,8 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PLANE(plane, a, b) _PIPE(plane, a, b)
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ 32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c)) 33 (pipe) == PIPE_B ? (b) : (c))
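
The new _PLANE() alias reuses _PIPE()'s linear interpolation: given the instance-0 address a and instance-1 address b, instance n lives at a + n*(b-a), which works only because the register blocks are evenly spaced. A standalone check, using the _PLANE_CTL base addresses added later in this patch:

#include <stdio.h>

#define _PIPE(pipe, a, b)   ((a) + (pipe)*((b)-(a)))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)

int main(void)
{
	printf("%#x\n", _PIPE(0, 0x70180, 0x71180)); /* 0x70180, pipe A */
	printf("%#x\n", _PIPE(1, 0x70180, 0x71180)); /* 0x71180, pipe B */
	printf("%#x\n", _PIPE(2, 0x70180, 0x71180)); /* 0x72180, pipe C */
	return 0;
}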
@@ -796,6 +796,8 @@ enum punit_power_well {
796#define _VLV_PCS_DW0_CH1 0x8400 796#define _VLV_PCS_DW0_CH1 0x8400
797#define DPIO_PCS_TX_LANE2_RESET (1<<16) 797#define DPIO_PCS_TX_LANE2_RESET (1<<16)
798#define DPIO_PCS_TX_LANE1_RESET (1<<7) 798#define DPIO_PCS_TX_LANE1_RESET (1<<7)
799#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
800#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
799#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) 801#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
800 802
801#define _VLV_PCS01_DW0_CH0 0x200 803#define _VLV_PCS01_DW0_CH0 0x200
@@ -836,12 +838,31 @@ enum punit_power_well {
836 838
837#define _VLV_PCS_DW9_CH0 0x8224 839#define _VLV_PCS_DW9_CH0 0x8224
838#define _VLV_PCS_DW9_CH1 0x8424 840#define _VLV_PCS_DW9_CH1 0x8424
841#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
842#define DPIO_PCS_TX2MARGIN_000 (0<<13)
843#define DPIO_PCS_TX2MARGIN_101 (1<<13)
844#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
845#define DPIO_PCS_TX1MARGIN_000 (0<<10)
846#define DPIO_PCS_TX1MARGIN_101 (1<<10)
839#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) 847#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
840 848
849#define _VLV_PCS01_DW9_CH0 0x224
850#define _VLV_PCS23_DW9_CH0 0x424
851#define _VLV_PCS01_DW9_CH1 0x2624
852#define _VLV_PCS23_DW9_CH1 0x2824
853#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
854#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
855
841#define _CHV_PCS_DW10_CH0 0x8228 856#define _CHV_PCS_DW10_CH0 0x8228
842#define _CHV_PCS_DW10_CH1 0x8428 857#define _CHV_PCS_DW10_CH1 0x8428
843#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30) 858#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
844#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31) 859#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
860#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
861#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
862#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
863#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
864#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
865#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
845#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1) 866#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
846 867
847#define _VLV_PCS01_DW10_CH0 0x0228 868#define _VLV_PCS01_DW10_CH0 0x0228
@@ -853,8 +874,18 @@ enum punit_power_well {
853 874
854#define _VLV_PCS_DW11_CH0 0x822c 875#define _VLV_PCS_DW11_CH0 0x822c
855#define _VLV_PCS_DW11_CH1 0x842c 876#define _VLV_PCS_DW11_CH1 0x842c
877#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
878#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
879#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
856#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) 880#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
857 881
882#define _VLV_PCS01_DW11_CH0 0x022c
883#define _VLV_PCS23_DW11_CH0 0x042c
884#define _VLV_PCS01_DW11_CH1 0x262c
885#define _VLV_PCS23_DW11_CH1 0x282c
886#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
887#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
888
858#define _VLV_PCS_DW12_CH0 0x8230 889#define _VLV_PCS_DW12_CH0 0x8230
859#define _VLV_PCS_DW12_CH1 0x8430 890#define _VLV_PCS_DW12_CH1 0x8430
860#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) 891#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -2506,9 +2537,7 @@ enum punit_power_well {
2506 2537
2507#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) 2538#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
2508#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) 2539#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
2509#define EDP_PSR_DPCD_COMMAND 0x80060000
2510#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) 2540#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
2511#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
2512#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) 2541#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
2513#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) 2542#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
2514#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) 2543#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3674,7 @@ enum punit_power_well {
3645#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) 3674#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
3646#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 3675#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
3647#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 3676#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
3677#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
3648 3678
3649/* 3679/*
3650 * Computing GMCH M and N values for the Display Port link 3680 * Computing GMCH M and N values for the Display Port link
@@ -4510,6 +4540,143 @@ enum punit_power_well {
4510#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) 4540#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
4511#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) 4541#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
4512 4542
4543/* Skylake plane registers */
4544
4545#define _PLANE_CTL_1_A 0x70180
4546#define _PLANE_CTL_2_A 0x70280
4547#define _PLANE_CTL_3_A 0x70380
4548#define PLANE_CTL_ENABLE (1 << 31)
4549#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
4550#define PLANE_CTL_FORMAT_MASK (0xf << 24)
4551#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
4552#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
4553#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
4554#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
4555#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
4556#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
4557#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
4558#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
4559#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
4560#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
4561#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
4562#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
4563#define PLANE_CTL_ORDER_BGRX (0 << 20)
4564#define PLANE_CTL_ORDER_RGBX (1 << 20)
4565#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
4566#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
4567#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
4568#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
4569#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
4570#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
4571#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
4572#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
4573#define PLANE_CTL_TILED_MASK (0x7 << 10)
4574#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
4575#define PLANE_CTL_TILED_X ( 1 << 10)
4576#define PLANE_CTL_TILED_Y ( 4 << 10)
4577#define PLANE_CTL_TILED_YF ( 5 << 10)
4578#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
4579#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
4580#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
4581#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
4582#define _PLANE_STRIDE_1_A 0x70188
4583#define _PLANE_STRIDE_2_A 0x70288
4584#define _PLANE_STRIDE_3_A 0x70388
4585#define _PLANE_POS_1_A 0x7018c
4586#define _PLANE_POS_2_A 0x7028c
4587#define _PLANE_POS_3_A 0x7038c
4588#define _PLANE_SIZE_1_A 0x70190
4589#define _PLANE_SIZE_2_A 0x70290
4590#define _PLANE_SIZE_3_A 0x70390
4591#define _PLANE_SURF_1_A 0x7019c
4592#define _PLANE_SURF_2_A 0x7029c
4593#define _PLANE_SURF_3_A 0x7039c
4594#define _PLANE_OFFSET_1_A 0x701a4
4595#define _PLANE_OFFSET_2_A 0x702a4
4596#define _PLANE_OFFSET_3_A 0x703a4
4597#define _PLANE_KEYVAL_1_A 0x70194
4598#define _PLANE_KEYVAL_2_A 0x70294
4599#define _PLANE_KEYMSK_1_A 0x70198
4600#define _PLANE_KEYMSK_2_A 0x70298
4601#define _PLANE_KEYMAX_1_A 0x701a0
4602#define _PLANE_KEYMAX_2_A 0x702a0
4603
4604#define _PLANE_CTL_1_B 0x71180
4605#define _PLANE_CTL_2_B 0x71280
4606#define _PLANE_CTL_3_B 0x71380
4607#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
4608#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
4609#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
4610#define PLANE_CTL(pipe, plane) \
4611 _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
4612
4613#define _PLANE_STRIDE_1_B 0x71188
4614#define _PLANE_STRIDE_2_B 0x71288
4615#define _PLANE_STRIDE_3_B 0x71388
4616#define _PLANE_STRIDE_1(pipe) \
4617 _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
4618#define _PLANE_STRIDE_2(pipe) \
4619 _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
4620#define _PLANE_STRIDE_3(pipe) \
4621 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
4622#define PLANE_STRIDE(pipe, plane) \
4623 _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
4624
4625#define _PLANE_POS_1_B 0x7118c
4626#define _PLANE_POS_2_B 0x7128c
4627#define _PLANE_POS_3_B 0x7138c
4628#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
4629#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
4630#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
4631#define PLANE_POS(pipe, plane) \
4632 _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
4633
4634#define _PLANE_SIZE_1_B 0x71190
4635#define _PLANE_SIZE_2_B 0x71290
4636#define _PLANE_SIZE_3_B 0x71390
4637#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
4638#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
4639#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
4640#define PLANE_SIZE(pipe, plane) \
4641 _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
4642
4643#define _PLANE_SURF_1_B 0x7119c
4644#define _PLANE_SURF_2_B 0x7129c
4645#define _PLANE_SURF_3_B 0x7139c
4646#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
4647#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
4648#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
4649#define PLANE_SURF(pipe, plane) \
4650 _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
4651
4652#define _PLANE_OFFSET_1_B 0x711a4
4653#define _PLANE_OFFSET_2_B 0x712a4
4654#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
4655#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
4656#define PLANE_OFFSET(pipe, plane) \
4657 _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
4658
4659#define _PLANE_KEYVAL_1_B 0x71194
4660#define _PLANE_KEYVAL_2_B 0x71294
4661#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
4662#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
4663#define PLANE_KEYVAL(pipe, plane) \
4664 _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
4665
4666#define _PLANE_KEYMSK_1_B 0x71198
4667#define _PLANE_KEYMSK_2_B 0x71298
4668#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
4669#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
4670#define PLANE_KEYMSK(pipe, plane) \
4671 _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
4672
4673#define _PLANE_KEYMAX_1_B 0x711a0
4674#define _PLANE_KEYMAX_2_B 0x712a0
4675#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
4676#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
4677#define PLANE_KEYMAX(pipe, plane) \
4678 _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
4679
4513/* VBIOS regs */ 4680/* VBIOS regs */
4514#define VGACNTRL 0x71400 4681#define VGACNTRL 0x71400
4515# define VGA_DISP_DISABLE (1 << 31) 4682# define VGA_DISP_DISABLE (1 << 31)
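
The Skylake plane macros compose the two-dimensional lookup in two steps: _PLANE_CTL_n(pipe) interpolates across pipes, then PLANE_CTL(pipe, plane) interpolates across planes. A standalone check with the addresses from the block above:

#include <stdio.h>

#define _PIPE(pipe, a, b)   ((a) + (pipe)*((b)-(a)))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)

#define _PLANE_CTL_1_A 0x70180
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
#define PLANE_CTL(pipe, plane) \
	_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))

int main(void)
{
	printf("%#x\n", PLANE_CTL(0, 0)); /* 0x70180: pipe A, plane 1 */
	printf("%#x\n", PLANE_CTL(0, 1)); /* 0x70280: pipe A, plane 2 */
	printf("%#x\n", PLANE_CTL(1, 0)); /* 0x71180: pipe B, plane 1 */
	return 0;
}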
@@ -4746,10 +4913,23 @@ enum punit_power_well {
4746#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) 4913#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4747#define GEN8_PIPE_VSYNC (1 << 1) 4914#define GEN8_PIPE_VSYNC (1 << 1)
4748#define GEN8_PIPE_VBLANK (1 << 0) 4915#define GEN8_PIPE_VBLANK (1 << 0)
4916#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
4917#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
4918#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
4919#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
4920#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
4921#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
4922#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
4923#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
4749#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ 4924#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
4750 (GEN8_PIPE_CURSOR_FAULT | \ 4925 (GEN8_PIPE_CURSOR_FAULT | \
4751 GEN8_PIPE_SPRITE_FAULT | \ 4926 GEN8_PIPE_SPRITE_FAULT | \
4752 GEN8_PIPE_PRIMARY_FAULT) 4927 GEN8_PIPE_PRIMARY_FAULT)
4928#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
4929 (GEN9_PIPE_CURSOR_FAULT | \
4930 GEN9_PIPE_PLANE3_FAULT | \
4931 GEN9_PIPE_PLANE2_FAULT | \
4932 GEN9_PIPE_PLANE1_FAULT)
4753 4933
4754#define GEN8_DE_PORT_ISR 0x44440 4934#define GEN8_DE_PORT_ISR 0x44440
4755#define GEN8_DE_PORT_IMR 0x44444 4935#define GEN8_DE_PORT_IMR 0x44444
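
GEN9_PIPE_PLANE_FLIP_DONE(p) takes a zero-based plane index, so it lines up with the PLANE1..PLANE3 defines above; one nit, p is not parenthesised, so it is only expression-safe for plain identifiers. A quick standalone check of the correspondence:

#include <assert.h>

#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))

int main(void)
{
	assert(GEN9_PIPE_PLANE_FLIP_DONE(0) == GEN9_PIPE_PLANE1_FLIP_DONE);
	assert(GEN9_PIPE_PLANE_FLIP_DONE(1) == GEN9_PIPE_PLANE2_FLIP_DONE);
	assert(GEN9_PIPE_PLANE_FLIP_DONE(2) == GEN9_PIPE_PLANE3_FLIP_DONE);
	return 0;
}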
@@ -4839,6 +5019,7 @@ enum punit_power_well {
4839/* GEN8 chicken */ 5019/* GEN8 chicken */
4840#define HDC_CHICKEN0 0x7300 5020#define HDC_CHICKEN0 0x7300
4841#define HDC_FORCE_NON_COHERENT (1<<4) 5021#define HDC_FORCE_NON_COHERENT (1<<4)
5022#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
4842 5023
4843/* WaCatErrorRejectionIssue */ 5024/* WaCatErrorRejectionIssue */
4844#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 5025#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5751,6 +5932,9 @@ enum punit_power_well {
5751#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) 5932#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
5752#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 5933#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
5753 5934
5935#define GEN9_HALF_SLICE_CHICKEN5 0xe188
5936#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
5937
5754#define GEN8_ROW_CHICKEN 0xe4f0 5938#define GEN8_ROW_CHICKEN 0xe4f0
5755#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) 5939#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
5756#define STALL_DOP_GATING_DISABLE (1<<5) 5940#define STALL_DOP_GATING_DISABLE (1<<5)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 905999bee2ac..7603765c91fc 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,7 +46,7 @@ struct bdb_header {
46 u16 version; /**< decimal */ 46 u16 version; /**< decimal */
47 u16 header_size; /**< in bytes */ 47 u16 header_size; /**< in bytes */
48 u16 bdb_size; /**< in bytes */ 48 u16 bdb_size; /**< in bytes */
49}; 49} __packed;
50 50
51/* strictly speaking, this is a "skip" block, but it has interesting info */ 51/* strictly speaking, this is a "skip" block, but it has interesting info */
52struct vbios_data { 52struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
252 /* This one should also be safe to use anywhere, even without version 252 /* This one should also be safe to use anywhere, even without version
253 * checks. */ 253 * checks. */
254 struct common_child_dev_config common; 254 struct common_child_dev_config common;
255}; 255} __packed;
256 256
257struct bdb_general_definitions { 257struct bdb_general_definitions {
258 /* DDC GPIO */ 258 /* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
888 u16 bl_disable_delay; 888 u16 bl_disable_delay;
889 u16 panel_off_delay; 889 u16 panel_off_delay;
890 u16 panel_power_cycle_delay; 890 u16 panel_power_cycle_delay;
891}; 891} __packed;
892 892
893struct bdb_mipi_config { 893struct bdb_mipi_config {
894 struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; 894 struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
895 struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; 895 struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
896}; 896} __packed;
897 897
898/* Block 53 contains MIPI sequences as needed by the panel 898/* Block 53 contains MIPI sequences as needed by the panel
899 * for enabling it. This block can be variable in size and 899 * for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
902struct bdb_mipi_sequence { 902struct bdb_mipi_sequence {
903 u8 version; 903 u8 version;
904 u8 data[0]; 904 u8 data[0];
905}; 905} __packed;
906 906
907/* MIPI Sequence Block definitions */ 907/* MIPI Sequence Block definitions */
908enum mipi_seq { 908enum mipi_seq {
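
The VBT blocks are parsed straight out of the BIOS image, where fields are byte-packed; without __packed the compiler may insert alignment padding and the struct offsets would drift from the blob. A generic illustration of the effect (the demo structs are hypothetical, not VBT layouts):

#include <stdio.h>
#include <stdint.h>

struct demo_plain  { uint8_t tag; uint16_t len; };
struct demo_packed { uint8_t tag; uint16_t len; } __attribute__((packed));

int main(void)
{
	printf("plain:  %zu\n", sizeof(struct demo_plain));  /* typically 4 */
	printf("packed: %zu\n", sizeof(struct demo_packed)); /* 3 */
	return 0;
}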
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e6504e0f..dacaad5f4e34 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
72 u32 tmp; 72 u32 tmp;
73 73
74 power_domain = intel_display_port_power_domain(encoder); 74 power_domain = intel_display_port_power_domain(encoder);
75 if (!intel_display_power_enabled(dev_priv, power_domain)) 75 if (!intel_display_power_is_enabled(dev_priv, power_domain))
76 return false; 76 return false;
77 77
78 tmp = I915_READ(crt->adpa_reg); 78 tmp = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa204a3..a151de7d13cd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
95 { 0x00BEFFFF, 0x00140006 }, 95 { 0x00BEFFFF, 0x00140006 },
96 { 0x80B2CFFF, 0x001B0002 }, 96 { 0x80B2CFFF, 0x001B0002 },
97 { 0x00FFFFFF, 0x000E000A }, 97 { 0x00FFFFFF, 0x000E000A },
98 { 0x00D75FFF, 0x00180004 }, 98 { 0x00DB6FFF, 0x00160005 },
99 { 0x80CB2FFF, 0x001B0002 }, 99 { 0x80C71FFF, 0x001A0002 },
100 { 0x00F7DFFF, 0x00180004 }, 100 { 0x00F7DFFF, 0x00180004 },
101 { 0x80D75FFF, 0x001B0002 }, 101 { 0x80D75FFF, 0x001B0002 },
102}; 102};
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */ 127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
128}; 128};
129 129
130static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
131 { 0x00000018, 0x000000a0 },
132 { 0x00004014, 0x00000098 },
133 { 0x00006012, 0x00000088 },
134 { 0x00008010, 0x00000080 },
135 { 0x00000018, 0x00000098 },
136 { 0x00004014, 0x00000088 },
137 { 0x00006012, 0x00000080 },
138 { 0x00000018, 0x00000088 },
139 { 0x00004014, 0x00000080 },
140};
141
142static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
143 /* Idx NT mV T mV db */
144 { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
145 { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
146 { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
147 { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
148 { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
149 { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
150 { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
151 { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
152 { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
153 { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
154};
155
130enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 156enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
131{ 157{
132 struct drm_encoder *encoder = &intel_encoder->base; 158 struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
169 const struct ddi_buf_trans *ddi_translations_hdmi; 195 const struct ddi_buf_trans *ddi_translations_hdmi;
170 const struct ddi_buf_trans *ddi_translations; 196 const struct ddi_buf_trans *ddi_translations;
171 197
172 if (IS_BROADWELL(dev)) { 198 if (IS_SKYLAKE(dev)) {
199 ddi_translations_fdi = NULL;
200 ddi_translations_dp = skl_ddi_translations_dp;
201 ddi_translations_edp = skl_ddi_translations_dp;
202 ddi_translations_hdmi = skl_ddi_translations_hdmi;
203 n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
204 hdmi_800mV_0dB = 7;
205 } else if (IS_BROADWELL(dev)) {
173 ddi_translations_fdi = bdw_ddi_translations_fdi; 206 ddi_translations_fdi = bdw_ddi_translations_fdi;
174 ddi_translations_dp = bdw_ddi_translations_dp; 207 ddi_translations_dp = bdw_ddi_translations_dp;
175 ddi_translations_edp = bdw_ddi_translations_edp; 208 ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
208 ddi_translations = ddi_translations_dp; 241 ddi_translations = ddi_translations_dp;
209 break; 242 break;
210 case PORT_E: 243 case PORT_E:
211 ddi_translations = ddi_translations_fdi; 244 if (ddi_translations_fdi)
245 ddi_translations = ddi_translations_fdi;
246 else
247 ddi_translations = ddi_translations_dp;
212 break; 248 break;
213 default: 249 default:
214 BUG(); 250 BUG();
@@ -962,7 +998,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
962 uint32_t tmp; 998 uint32_t tmp;
963 999
964 power_domain = intel_display_port_power_domain(intel_encoder); 1000 power_domain = intel_display_port_power_domain(intel_encoder);
965 if (!intel_display_power_enabled(dev_priv, power_domain)) 1001 if (!intel_display_power_is_enabled(dev_priv, power_domain))
966 return false; 1002 return false;
967 1003
968 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1004 if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1044,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1008 int i; 1044 int i;
1009 1045
1010 power_domain = intel_display_port_power_domain(encoder); 1046 power_domain = intel_display_port_power_domain(encoder);
1011 if (!intel_display_power_enabled(dev_priv, power_domain)) 1047 if (!intel_display_power_is_enabled(dev_priv, power_domain))
1012 return false; 1048 return false;
1013 1049
1014 tmp = I915_READ(DDI_BUF_CTL(port)); 1050 tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1296,7 +1332,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1296{ 1332{
1297 uint32_t val; 1333 uint32_t val;
1298 1334
1299 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) 1335 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
1300 return false; 1336 return false;
1301 1337
1302 val = I915_READ(WRPLL_CTL(pll->id)); 1338 val = I915_READ(WRPLL_CTL(pll->id));
@@ -1486,7 +1522,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1486 break; 1522 break;
1487 } 1523 }
1488 1524
1489 if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 1525 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
1490 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1526 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1491 if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4))) 1527 if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
1492 pipe_config->has_audio = true; 1528 pipe_config->has_audio = true;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c9e220963a78..1fc05ffc4695 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
73 DRM_FORMAT_ARGB8888, 73 DRM_FORMAT_ARGB8888,
74}; 74};
75 75
76static void intel_increase_pllclock(struct drm_device *dev,
77 enum pipe pipe);
78static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 76static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
79 77
80static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 78static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -889,60 +887,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
889 return intel_crtc->config.cpu_transcoder; 887 return intel_crtc->config.cpu_transcoder;
890} 888}
891 889
892static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
893{
894 struct drm_i915_private *dev_priv = dev->dev_private;
895 u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
896
897 frame = I915_READ(frame_reg);
898
899 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
900 WARN(1, "vblank wait on pipe %c timed out\n",
901 pipe_name(pipe));
902}
903
904/**
905 * intel_wait_for_vblank - wait for vblank on a given pipe
906 * @dev: drm device
907 * @pipe: pipe to wait for
908 *
909 * Wait for vblank to occur on a given pipe. Needed for various bits of
910 * mode setting code.
911 */
912void intel_wait_for_vblank(struct drm_device *dev, int pipe)
913{
914 struct drm_i915_private *dev_priv = dev->dev_private;
915 int pipestat_reg = PIPESTAT(pipe);
916
917 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
918 g4x_wait_for_vblank(dev, pipe);
919 return;
920 }
921
922 /* Clear existing vblank status. Note this will clear any other
923 * sticky status fields as well.
924 *
925 * This races with i915_driver_irq_handler() with the result
926 * that either function could miss a vblank event. Here it is not
927 * fatal, as we will either wait upon the next vblank interrupt or
928 * timeout. Generally speaking intel_wait_for_vblank() is only
929 * called during modeset at which time the GPU should be idle and
930 * should *not* be performing page flips and thus not waiting on
931 * vblanks...
932 * Currently, the result of us stealing a vblank from the irq
933 * handler is that a single frame will be skipped during swapbuffers.
934 */
935 I915_WRITE(pipestat_reg,
936 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
937
938 /* Wait for vblank interrupt bit to set */
939 if (wait_for(I915_READ(pipestat_reg) &
940 PIPE_VBLANK_INTERRUPT_STATUS,
941 50))
942 DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
943 pipe_name(pipe));
944}
945
946static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 890static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
947{ 891{
948 struct drm_i915_private *dev_priv = dev->dev_private; 892 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1133,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1189 state_string(state), state_string(cur_state)); 1133 state_string(state), state_string(cur_state));
1190} 1134}
1191 1135
1192static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1136void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1193 enum pipe pipe) 1137 enum pipe pipe)
1194{ 1138{
1195 struct drm_device *dev = dev_priv->dev; 1139 struct drm_device *dev = dev_priv->dev;
1196 int pp_reg; 1140 int pp_reg;
@@ -1263,7 +1207,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1263 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1207 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1264 state = true; 1208 state = true;
1265 1209
1266 if (!intel_display_power_enabled(dev_priv, 1210 if (!intel_display_power_is_enabled(dev_priv,
1267 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1211 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1268 cur_state = false; 1212 cur_state = false;
1269 } else { 1213 } else {
@@ -1332,7 +1276,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1332 int reg, sprite; 1276 int reg, sprite;
1333 u32 val; 1277 u32 val;
1334 1278
1335 if (IS_VALLEYVIEW(dev)) { 1279 if (INTEL_INFO(dev)->gen >= 9) {
1280 for_each_sprite(pipe, sprite) {
1281 val = I915_READ(PLANE_CTL(pipe, sprite));
1282 WARN(val & PLANE_CTL_ENABLE,
1283 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1284 sprite, pipe_name(pipe));
1285 }
1286 } else if (IS_VALLEYVIEW(dev)) {
1336 for_each_sprite(pipe, sprite) { 1287 for_each_sprite(pipe, sprite) {
1337 reg = SPCNTR(pipe, sprite); 1288 reg = SPCNTR(pipe, sprite);
1338 val = I915_READ(reg); 1289 val = I915_READ(reg);
@@ -2233,7 +2184,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2233 2184
2234 switch (obj->tiling_mode) { 2185 switch (obj->tiling_mode) {
2235 case I915_TILING_NONE: 2186 case I915_TILING_NONE:
2236 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 2187 if (INTEL_INFO(dev)->gen >= 9)
2188 alignment = 256 * 1024;
2189 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2237 alignment = 128 * 1024; 2190 alignment = 128 * 1024;
2238 else if (INTEL_INFO(dev)->gen >= 4) 2191 else if (INTEL_INFO(dev)->gen >= 4)
2239 alignment = 4 * 1024; 2192 alignment = 4 * 1024;
@@ -2241,8 +2194,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2241 alignment = 64 * 1024; 2194 alignment = 64 * 1024;
2242 break; 2195 break;
2243 case I915_TILING_X: 2196 case I915_TILING_X:
2244 /* pin() will align the object as required by fence */ 2197 if (INTEL_INFO(dev)->gen >= 9)
2245 alignment = 0; 2198 alignment = 256 * 1024;
2199 else {
2200 /* pin() will align the object as required by fence */
2201 alignment = 0;
2202 }
2246 break; 2203 break;
2247 case I915_TILING_Y: 2204 case I915_TILING_Y:
2248 WARN(1, "Y tiled bo slipped through, driver bug!\n"); 2205 WARN(1, "Y tiled bo slipped through, driver bug!\n");
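The hunk above adds a gen9 branch to the scanout alignment choice: 256KiB for both linear and X-tiled buffers, where older hardware used 128KiB (Broadwater/Crestline), 4KiB (gen4+) or 64KiB for linear and left X-tiled alignment to the fence code. A compilable restatement of that selection, with the platform checks reduced to plain parameters:

    #include <stdio.h>

    enum tiling { TILING_NONE, TILING_X };

    /* Same selection logic as the switch above, as a pure function: returns
     * the GGTT alignment in bytes, 0 meaning "let the fence code decide".
     * The gen/platform flags are simplified stand-ins for the real macros. */
    static unsigned long fb_alignment(int gen, int broadwater_or_crestline,
                                      enum tiling tiling)
    {
            switch (tiling) {
            case TILING_NONE:
                    if (gen >= 9)
                            return 256 * 1024;
                    if (broadwater_or_crestline)
                            return 128 * 1024;
                    if (gen >= 4)
                            return 4 * 1024;
                    return 64 * 1024;
            case TILING_X:
                    /* gen9 needs an explicit value; earlier parts rely on
                     * the fence register constraints instead. */
                    return gen >= 9 ? 256 * 1024 : 0;
            }
            return 0;
    }

    int main(void)
    {
            printf("gen9 linear : %lu\n", fb_alignment(9, 0, TILING_NONE));
            printf("gen4 linear : %lu\n", fb_alignment(4, 0, TILING_NONE));
            printf("gen7 X-tiled: %lu\n", fb_alignment(7, 0, TILING_X));
            return 0;
    }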
@@ -2672,6 +2629,90 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2672 POSTING_READ(reg); 2629 POSTING_READ(reg);
2673} 2630}
2674 2631
2632static void skylake_update_primary_plane(struct drm_crtc *crtc,
2633 struct drm_framebuffer *fb,
2634 int x, int y)
2635{
2636 struct drm_device *dev = crtc->dev;
2637 struct drm_i915_private *dev_priv = dev->dev_private;
2638 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2639 struct intel_framebuffer *intel_fb;
2640 struct drm_i915_gem_object *obj;
2641 int pipe = intel_crtc->pipe;
2642 u32 plane_ctl, stride;
2643
2644 if (!intel_crtc->primary_enabled) {
2645 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2646 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2647 POSTING_READ(PLANE_CTL(pipe, 0));
2648 return;
2649 }
2650
2651 plane_ctl = PLANE_CTL_ENABLE |
2652 PLANE_CTL_PIPE_GAMMA_ENABLE |
2653 PLANE_CTL_PIPE_CSC_ENABLE;
2654
2655 switch (fb->pixel_format) {
2656 case DRM_FORMAT_RGB565:
2657 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2658 break;
2659 case DRM_FORMAT_XRGB8888:
2660 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2661 break;
2662 case DRM_FORMAT_XBGR8888:
2663 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2664 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2665 break;
2666 case DRM_FORMAT_XRGB2101010:
2667 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2668 break;
2669 case DRM_FORMAT_XBGR2101010:
2670 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2671 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2672 break;
2673 default:
2674 BUG();
2675 }
2676
2677 intel_fb = to_intel_framebuffer(fb);
2678 obj = intel_fb->obj;
2679
2680 /*
 2681	 * The stride is either expressed as a multiple of 64-byte chunks for
 2682	 * linear buffers, or as a number of tiles for tiled buffers.
2683 */
2684 switch (obj->tiling_mode) {
2685 case I915_TILING_NONE:
2686 stride = fb->pitches[0] >> 6;
2687 break;
2688 case I915_TILING_X:
2689 plane_ctl |= PLANE_CTL_TILED_X;
2690 stride = fb->pitches[0] >> 9;
2691 break;
2692 default:
2693 BUG();
2694 }
2695
2696 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2697
2698 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2699
2700 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2701 i915_gem_obj_ggtt_offset(obj),
2702 x, y, fb->width, fb->height,
2703 fb->pitches[0]);
2704
2705 I915_WRITE(PLANE_POS(pipe, 0), 0);
2706 I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2707 I915_WRITE(PLANE_SIZE(pipe, 0),
2708 (intel_crtc->config.pipe_src_h - 1) << 16 |
2709 (intel_crtc->config.pipe_src_w - 1));
2710 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2711 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2712
2713 POSTING_READ(PLANE_SURF(pipe, 0));
2714}
2715
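The stride conversion above is worth calling out: PLANE_STRIDE is programmed in units, not bytes — 64-byte chunks for linear surfaces (hence pitch >> 6) and whole tiles for X-tiled ones (an X tile is 512 bytes wide, hence pitch >> 9). A standalone sketch of the conversion:

    #include <stdio.h>

    enum tiling { TILING_NONE, TILING_X };

    /* Byte pitch -> PLANE_STRIDE value: linear surfaces count 64-byte
     * chunks (>> 6), X-tiled surfaces count 512-byte-wide tiles (>> 9). */
    static unsigned skl_plane_stride(unsigned pitch_bytes, enum tiling tiling)
    {
            return tiling == TILING_X ? pitch_bytes >> 9 : pitch_bytes >> 6;
    }

    int main(void)
    {
            /* 1920 XRGB8888 pixels: 1920 * 4 = 7680 bytes per row. */
            printf("linear : %u\n", skl_plane_stride(7680, TILING_NONE)); /* 120 */
            printf("X-tiled: %u\n", skl_plane_stride(7680, TILING_X));    /* 15 */
            return 0;
    }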
2675/* Assume fb object is pinned & idle & fenced and just update base pointers */ 2716/* Assume fb object is pinned & idle & fenced and just update base pointers */
2676static int 2717static int
2677intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2718intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,7 +2723,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2682 2723
2683 if (dev_priv->display.disable_fbc) 2724 if (dev_priv->display.disable_fbc)
2684 dev_priv->display.disable_fbc(dev); 2725 dev_priv->display.disable_fbc(dev);
2685 intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
2686 2726
2687 dev_priv->display.update_primary_plane(crtc, fb, x, y); 2727 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2688 2728
@@ -2762,20 +2802,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2762 struct drm_device *dev = crtc->dev; 2802 struct drm_device *dev = crtc->dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private; 2803 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2804 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2765 unsigned long flags;
2766 bool pending; 2805 bool pending;
2767 2806
2768 if (i915_reset_in_progress(&dev_priv->gpu_error) || 2807 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2769 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 2808 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2770 return false; 2809 return false;
2771 2810
2772 spin_lock_irqsave(&dev->event_lock, flags); 2811 spin_lock_irq(&dev->event_lock);
2773 pending = to_intel_crtc(crtc)->unpin_work != NULL; 2812 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2774 spin_unlock_irqrestore(&dev->event_lock, flags); 2813 spin_unlock_irq(&dev->event_lock);
2775 2814
2776 return pending; 2815 return pending;
2777} 2816}
2778 2817
2818static void intel_update_pipe_size(struct intel_crtc *crtc)
2819{
2820 struct drm_device *dev = crtc->base.dev;
2821 struct drm_i915_private *dev_priv = dev->dev_private;
2822 const struct drm_display_mode *adjusted_mode;
2823
2824 if (!i915.fastboot)
2825 return;
2826
2827 /*
2828 * Update pipe size and adjust fitter if needed: the reason for this is
2829 * that in compute_mode_changes we check the native mode (not the pfit
2830 * mode) to see if we can flip rather than do a full mode set. In the
2831 * fastboot case, we'll flip, but if we don't update the pipesrc and
2832 * pfit state, we'll end up with a big fb scanned out into the wrong
2833 * sized surface.
2834 *
2835 * To fix this properly, we need to hoist the checks up into
2836 * compute_mode_changes (or above), check the actual pfit state and
2837 * whether the platform allows pfit disable with pipe active, and only
2838 * then update the pipesrc and pfit state, even on the flip path.
2839 */
2840
2841 adjusted_mode = &crtc->config.adjusted_mode;
2842
2843 I915_WRITE(PIPESRC(crtc->pipe),
2844 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2845 (adjusted_mode->crtc_vdisplay - 1));
2846 if (!crtc->config.pch_pfit.enabled &&
2847 (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) ||
2848 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) {
2849 I915_WRITE(PF_CTL(crtc->pipe), 0);
2850 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2851 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2852 }
2853 crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2854 crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2855}
2856
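PIPESRC, written just above, packs both dimensions minus one into a single register, width in the high half (note that PLANE_SIZE in the skl function earlier packs it the other way round). A tiny encode/decode sketch — pure arithmetic, no driver code:

    #include <stdio.h>

    /* PIPESRC stores "size minus one":
     * bits 31:16 = hdisplay - 1, bits 15:0 = vdisplay - 1. */
    static unsigned pipesrc_pack(unsigned w, unsigned h)
    {
            return ((w - 1) << 16) | (h - 1);
    }

    static void pipesrc_unpack(unsigned v, unsigned *w, unsigned *h)
    {
            *w = (v >> 16) + 1;
            *h = (v & 0xffff) + 1;
    }

    int main(void)
    {
            unsigned w, h, v = pipesrc_pack(1920, 1080);

            pipesrc_unpack(v, &w, &h);
            printf("%#010x -> %ux%u\n", v, w, h); /* 0x077f0437 -> 1920x1080 */
            return 0;
    }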
2779static int 2857static int
2780intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2858intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2781 struct drm_framebuffer *fb) 2859 struct drm_framebuffer *fb)
@@ -2818,36 +2896,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2818 return ret; 2896 return ret;
2819 } 2897 }
2820 2898
2821 /* 2899 intel_update_pipe_size(intel_crtc);
2822 * Update pipe size and adjust fitter if needed: the reason for this is
2823 * that in compute_mode_changes we check the native mode (not the pfit
2824 * mode) to see if we can flip rather than do a full mode set. In the
2825 * fastboot case, we'll flip, but if we don't update the pipesrc and
2826 * pfit state, we'll end up with a big fb scanned out into the wrong
2827 * sized surface.
2828 *
2829 * To fix this properly, we need to hoist the checks up into
2830 * compute_mode_changes (or above), check the actual pfit state and
2831 * whether the platform allows pfit disable with pipe active, and only
2832 * then update the pipesrc and pfit state, even on the flip path.
2833 */
2834 if (i915.fastboot) {
2835 const struct drm_display_mode *adjusted_mode =
2836 &intel_crtc->config.adjusted_mode;
2837
2838 I915_WRITE(PIPESRC(intel_crtc->pipe),
2839 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2840 (adjusted_mode->crtc_vdisplay - 1));
2841 if (!intel_crtc->config.pch_pfit.enabled &&
2842 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2843 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2844 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2845 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2846 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2847 }
2848 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2849 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2850 }
2851 2900
2852 dev_priv->display.update_primary_plane(crtc, fb, x, y); 2901 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2853 2902
@@ -3472,14 +3521,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3472 !intel_crtc_has_pending_flip(crtc), 3521 !intel_crtc_has_pending_flip(crtc),
3473 60*HZ) == 0)) { 3522 60*HZ) == 0)) {
3474 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3523 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3475 unsigned long flags;
3476 3524
3477 spin_lock_irqsave(&dev->event_lock, flags); 3525 spin_lock_irq(&dev->event_lock);
3478 if (intel_crtc->unpin_work) { 3526 if (intel_crtc->unpin_work) {
3479 WARN_ONCE(1, "Removing stuck page flip\n"); 3527 WARN_ONCE(1, "Removing stuck page flip\n");
3480 page_flip_completed(intel_crtc); 3528 page_flip_completed(intel_crtc);
3481 } 3529 }
3482 spin_unlock_irqrestore(&dev->event_lock, flags); 3530 spin_unlock_irq(&dev->event_lock);
3483 } 3531 }
3484 3532
3485 if (crtc->primary->fb) { 3533 if (crtc->primary->fb) {
@@ -4038,10 +4086,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4038 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4039 int pipe = intel_crtc->pipe; 4087 int pipe = intel_crtc->pipe;
4040 4088
4041 assert_vblank_disabled(crtc);
4042
4043 drm_vblank_on(dev, pipe);
4044
4045 intel_enable_primary_hw_plane(crtc->primary, crtc); 4089 intel_enable_primary_hw_plane(crtc->primary, crtc);
4046 intel_enable_planes(crtc); 4090 intel_enable_planes(crtc);
4047 intel_crtc_update_cursor(crtc, true); 4091 intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4131,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4087 * consider this a flip to a NULL plane. 4131 * consider this a flip to a NULL plane.
4088 */ 4132 */
4089 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4133 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4090
4091 drm_vblank_off(dev, pipe);
4092
4093 assert_vblank_disabled(crtc);
4094} 4134}
4095 4135
4096static void ironlake_crtc_enable(struct drm_crtc *crtc) 4136static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4160,6 +4200,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4160 if (HAS_PCH_CPT(dev)) 4200 if (HAS_PCH_CPT(dev))
4161 cpt_verify_modeset(dev, intel_crtc->pipe); 4201 cpt_verify_modeset(dev, intel_crtc->pipe);
4162 4202
4203 assert_vblank_disabled(crtc);
4204 drm_crtc_vblank_on(crtc);
4205
4163 intel_crtc_enable_planes(crtc); 4206 intel_crtc_enable_planes(crtc);
4164} 4207}
4165 4208
@@ -4272,6 +4315,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4272 intel_opregion_notify_encoder(encoder, true); 4315 intel_opregion_notify_encoder(encoder, true);
4273 } 4316 }
4274 4317
4318 assert_vblank_disabled(crtc);
4319 drm_crtc_vblank_on(crtc);
4320
4275 /* If we change the relative order between pipe/planes enabling, we need 4321 /* If we change the relative order between pipe/planes enabling, we need
4276 * to change the workaround. */ 4322 * to change the workaround. */
4277 haswell_mode_set_planes_workaround(intel_crtc); 4323 haswell_mode_set_planes_workaround(intel_crtc);
@@ -4307,6 +4353,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4307 4353
4308 intel_crtc_disable_planes(crtc); 4354 intel_crtc_disable_planes(crtc);
4309 4355
4356 drm_crtc_vblank_off(crtc);
4357 assert_vblank_disabled(crtc);
4358
4310 for_each_encoder_on_crtc(dev, crtc, encoder) 4359 for_each_encoder_on_crtc(dev, crtc, encoder)
4311 encoder->disable(encoder); 4360 encoder->disable(encoder);
4312 4361
@@ -4369,6 +4418,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4369 4418
4370 intel_crtc_disable_planes(crtc); 4419 intel_crtc_disable_planes(crtc);
4371 4420
4421 drm_crtc_vblank_off(crtc);
4422 assert_vblank_disabled(crtc);
4423
4372 for_each_encoder_on_crtc(dev, crtc, encoder) { 4424 for_each_encoder_on_crtc(dev, crtc, encoder) {
4373 intel_opregion_notify_encoder(encoder, false); 4425 intel_opregion_notify_encoder(encoder, false);
4374 encoder->disable(encoder); 4426 encoder->disable(encoder);
@@ -4510,20 +4562,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4510 return mask; 4562 return mask;
4511} 4563}
4512 4564
4513void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4514 bool enable)
4515{
4516 if (dev_priv->power_domains.init_power_on == enable)
4517 return;
4518
4519 if (enable)
4520 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4521 else
4522 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4523
4524 dev_priv->power_domains.init_power_on = enable;
4525}
4526
4527static void modeset_update_crtc_power_domains(struct drm_device *dev) 4565static void modeset_update_crtc_power_domains(struct drm_device *dev)
4528{ 4566{
4529 struct drm_i915_private *dev_priv = dev->dev_private; 4567 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4835,6 +4873,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4835 for_each_encoder_on_crtc(dev, crtc, encoder) 4873 for_each_encoder_on_crtc(dev, crtc, encoder)
4836 encoder->enable(encoder); 4874 encoder->enable(encoder);
4837 4875
4876 assert_vblank_disabled(crtc);
4877 drm_crtc_vblank_on(crtc);
4878
4838 intel_crtc_enable_planes(crtc); 4879 intel_crtc_enable_planes(crtc);
4839 4880
4840 /* Underruns don't raise interrupts, so check manually. */ 4881 /* Underruns don't raise interrupts, so check manually. */
@@ -4892,6 +4933,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4892 for_each_encoder_on_crtc(dev, crtc, encoder) 4933 for_each_encoder_on_crtc(dev, crtc, encoder)
4893 encoder->enable(encoder); 4934 encoder->enable(encoder);
4894 4935
4936 assert_vblank_disabled(crtc);
4937 drm_crtc_vblank_on(crtc);
4938
4895 intel_crtc_enable_planes(crtc); 4939 intel_crtc_enable_planes(crtc);
4896 4940
4897 /* 4941 /*
@@ -4955,9 +4999,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4955 intel_set_memory_cxsr(dev_priv, false); 4999 intel_set_memory_cxsr(dev_priv, false);
4956 intel_crtc_disable_planes(crtc); 5000 intel_crtc_disable_planes(crtc);
4957 5001
4958 for_each_encoder_on_crtc(dev, crtc, encoder)
4959 encoder->disable(encoder);
4960
4961 /* 5002 /*
4962 * On gen2 planes are double buffered but the pipe isn't, so we must 5003 * On gen2 planes are double buffered but the pipe isn't, so we must
4963 * wait for planes to fully turn off before disabling the pipe. 5004 * wait for planes to fully turn off before disabling the pipe.
@@ -4966,6 +5007,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4966 */ 5007 */
4967 intel_wait_for_vblank(dev, pipe); 5008 intel_wait_for_vblank(dev, pipe);
4968 5009
5010 drm_crtc_vblank_off(crtc);
5011 assert_vblank_disabled(crtc);
5012
5013 for_each_encoder_on_crtc(dev, crtc, encoder)
5014 encoder->disable(encoder);
5015
4969 intel_disable_pipe(intel_crtc); 5016 intel_disable_pipe(intel_crtc);
4970 5017
4971 i9xx_pfit_disable(intel_crtc); 5018 i9xx_pfit_disable(intel_crtc);
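Taken together, the enable/disable hunks establish one invariant: every ->crtc_enable hook now runs assert_vblank_disabled() + drm_crtc_vblank_on() right before enabling planes, and the disable hooks mirror that with drm_crtc_vblank_off() + assert_vblank_disabled() after the planes are gone, so vblanks are only claimed while the pipe can deliver them. A toy model of the ordering, with assert()s standing in for the DRM helpers:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the invariant: vblank handling is switched on strictly
     * inside the pipe-on window, symmetrically on both paths. */
    struct crtc {
            bool pipe_on;
            bool vblank_on;
    };

    static void crtc_enable(struct crtc *c)
    {
            c->pipe_on = true;              /* pipe and encoders come up first */
            assert(!c->vblank_on);          /* assert_vblank_disabled() */
            c->vblank_on = true;            /* drm_crtc_vblank_on() */
            /* planes are enabled only after this point */
    }

    static void crtc_disable(struct crtc *c)
    {
            /* planes were disabled before this point */
            c->vblank_on = false;           /* drm_crtc_vblank_off() */
            assert(!c->vblank_on);          /* assert_vblank_disabled() */
            c->pipe_on = false;
    }

    int main(void)
    {
            struct crtc c = { false, false };

            crtc_enable(&c);
            crtc_disable(&c);
            puts("enable/disable ordering held");
            return 0;
    }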
@@ -6434,8 +6481,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6434 struct drm_i915_private *dev_priv = dev->dev_private; 6481 struct drm_i915_private *dev_priv = dev->dev_private;
6435 uint32_t tmp; 6482 uint32_t tmp;
6436 6483
6437 if (!intel_display_power_enabled(dev_priv, 6484 if (!intel_display_power_is_enabled(dev_priv,
6438 POWER_DOMAIN_PIPE(crtc->pipe))) 6485 POWER_DOMAIN_PIPE(crtc->pipe)))
6439 return false; 6486 return false;
6440 6487
6441 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 6488 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7021,7 +7068,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
7021 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 7068 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7022 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 7069 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
7023 7070
7024 if (IS_BROADWELL(dev)) { 7071 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
7025 val = 0; 7072 val = 0;
7026 7073
7027 switch (intel_crtc->config.pipe_bpp) { 7074 switch (intel_crtc->config.pipe_bpp) {
@@ -7444,8 +7491,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7444 struct drm_i915_private *dev_priv = dev->dev_private; 7491 struct drm_i915_private *dev_priv = dev->dev_private;
7445 uint32_t tmp; 7492 uint32_t tmp;
7446 7493
7447 if (!intel_display_power_enabled(dev_priv, 7494 if (!intel_display_power_is_enabled(dev_priv,
7448 POWER_DOMAIN_PIPE(crtc->pipe))) 7495 POWER_DOMAIN_PIPE(crtc->pipe)))
7449 return false; 7496 return false;
7450 7497
7451 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7498 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7638,7 +7685,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7638static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 7685static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7639{ 7686{
7640 uint32_t val; 7687 uint32_t val;
7641 unsigned long irqflags;
7642 7688
7643 val = I915_READ(LCPLL_CTL); 7689 val = I915_READ(LCPLL_CTL);
7644 7690
@@ -7658,10 +7704,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7658 * to call special forcewake code that doesn't touch runtime PM and 7704 * to call special forcewake code that doesn't touch runtime PM and
7659 * doesn't enable the forcewake delayed work. 7705 * doesn't enable the forcewake delayed work.
7660 */ 7706 */
7661 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 7707 spin_lock_irq(&dev_priv->uncore.lock);
7662 if (dev_priv->uncore.forcewake_count++ == 0) 7708 if (dev_priv->uncore.forcewake_count++ == 0)
7663 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); 7709 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7664 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 7710 spin_unlock_irq(&dev_priv->uncore.lock);
7665 7711
7666 if (val & LCPLL_POWER_DOWN_ALLOW) { 7712 if (val & LCPLL_POWER_DOWN_ALLOW) {
7667 val &= ~LCPLL_POWER_DOWN_ALLOW; 7713 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7692,10 +7738,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7692 } 7738 }
7693 7739
7694 /* See the big comment above. */ 7740 /* See the big comment above. */
7695 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 7741 spin_lock_irq(&dev_priv->uncore.lock);
7696 if (--dev_priv->uncore.forcewake_count == 0) 7742 if (--dev_priv->uncore.forcewake_count == 0)
7697 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); 7743 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7698 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 7744 spin_unlock_irq(&dev_priv->uncore.lock);
7699} 7745}
7700 7746
7701/* 7747/*
@@ -7824,7 +7870,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7824 * DDI E. So just check whether this pipe is wired to DDI E and whether 7870 * DDI E. So just check whether this pipe is wired to DDI E and whether
7825 * the PCH transcoder is on. 7871 * the PCH transcoder is on.
7826 */ 7872 */
7827 if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 7873 if (INTEL_INFO(dev)->gen < 9 &&
7874 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7828 pipe_config->has_pch_encoder = true; 7875 pipe_config->has_pch_encoder = true;
7829 7876
7830 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 7877 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7843,7 +7890,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7843 enum intel_display_power_domain pfit_domain; 7890 enum intel_display_power_domain pfit_domain;
7844 uint32_t tmp; 7891 uint32_t tmp;
7845 7892
7846 if (!intel_display_power_enabled(dev_priv, 7893 if (!intel_display_power_is_enabled(dev_priv,
7847 POWER_DOMAIN_PIPE(crtc->pipe))) 7894 POWER_DOMAIN_PIPE(crtc->pipe)))
7848 return false; 7895 return false;
7849 7896
@@ -7872,7 +7919,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7872 pipe_config->cpu_transcoder = TRANSCODER_EDP; 7919 pipe_config->cpu_transcoder = TRANSCODER_EDP;
7873 } 7920 }
7874 7921
7875 if (!intel_display_power_enabled(dev_priv, 7922 if (!intel_display_power_is_enabled(dev_priv,
7876 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 7923 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7877 return false; 7924 return false;
7878 7925
@@ -7885,7 +7932,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7885 intel_get_pipe_timings(crtc, pipe_config); 7932 intel_get_pipe_timings(crtc, pipe_config);
7886 7933
7887 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 7934 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7888 if (intel_display_power_enabled(dev_priv, pfit_domain)) 7935 if (intel_display_power_is_enabled(dev_priv, pfit_domain))
7889 ironlake_get_pfit_config(crtc, pipe_config); 7936 ironlake_get_pfit_config(crtc, pipe_config);
7890 7937
7891 if (IS_HASWELL(dev)) 7938 if (IS_HASWELL(dev))
@@ -8255,8 +8302,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8255 intel_crtc->cursor_cntl = 0; 8302 intel_crtc->cursor_cntl = 0;
8256 } 8303 }
8257 8304
8258 if (intel_crtc->cursor_base != base) 8305 if (intel_crtc->cursor_base != base) {
8259 I915_WRITE(_CURABASE, base); 8306 I915_WRITE(_CURABASE, base);
8307 intel_crtc->cursor_base = base;
8308 }
8260 8309
8261 if (intel_crtc->cursor_size != size) { 8310 if (intel_crtc->cursor_size != size) {
8262 I915_WRITE(CURSIZE, size); 8311 I915_WRITE(CURSIZE, size);
@@ -8296,9 +8345,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8296 return; 8345 return;
8297 } 8346 }
8298 cntl |= pipe << 28; /* Connect to correct pipe */ 8347 cntl |= pipe << 28; /* Connect to correct pipe */
8348
8349 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8350 cntl |= CURSOR_PIPE_CSC_ENABLE;
8299 } 8351 }
8300 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8301 cntl |= CURSOR_PIPE_CSC_ENABLE;
8302 8352
8303 if (intel_crtc->cursor_cntl != cntl) { 8353 if (intel_crtc->cursor_cntl != cntl) {
8304 I915_WRITE(CURCNTR(pipe), cntl); 8354 I915_WRITE(CURCNTR(pipe), cntl);
@@ -8309,6 +8359,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8309 /* and commit changes on next vblank */ 8359 /* and commit changes on next vblank */
8310 I915_WRITE(CURBASE(pipe), base); 8360 I915_WRITE(CURBASE(pipe), base);
8311 POSTING_READ(CURBASE(pipe)); 8361 POSTING_READ(CURBASE(pipe));
8362
8363 intel_crtc->cursor_base = base;
8312} 8364}
8313 8365
 8314/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ 8366/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8359,7 +8411,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8359 i845_update_cursor(crtc, base); 8411 i845_update_cursor(crtc, base);
8360 else 8412 else
8361 i9xx_update_cursor(crtc, base); 8413 i9xx_update_cursor(crtc, base);
8362 intel_crtc->cursor_base = base;
8363} 8414}
8364 8415
8365static bool cursor_size_ok(struct drm_device *dev, 8416static bool cursor_size_ok(struct drm_device *dev,
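The cursor hunks move the intel_crtc->cursor_base bookkeeping from the common intel_crtc_update_cursor() wrapper into the i845/i9xx register writers, so the shadow value lives next to the code that checks it before issuing MMIO. The pattern in isolation, as a runnable sketch:

    #include <stdio.h>

    /* Shadow-register pattern from the hunks above: remember the last value
     * written and only touch the register when it actually changes. */
    struct cursor {
            unsigned base_shadow;
    };

    static void mmio_write(unsigned v)
    {
            printf("MMIO write %#x\n", v);
    }

    static void update_cursor_base(struct cursor *c, unsigned base)
    {
            if (c->base_shadow == base)
                    return;                 /* unchanged: skip the bus traffic */
            mmio_write(base);
            c->base_shadow = base;          /* record it where we wrote it */
    }

    int main(void)
    {
            struct cursor c = { 0 };

            update_cursor_base(&c, 0x1000); /* writes */
            update_cursor_base(&c, 0x1000); /* skipped */
            update_cursor_base(&c, 0x2000); /* writes */
            return 0;
    }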
@@ -9023,35 +9074,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
9023 return mode; 9074 return mode;
9024} 9075}
9025 9076
9026static void intel_increase_pllclock(struct drm_device *dev,
9027 enum pipe pipe)
9028{
9029 struct drm_i915_private *dev_priv = dev->dev_private;
9030 int dpll_reg = DPLL(pipe);
9031 int dpll;
9032
9033 if (!HAS_GMCH_DISPLAY(dev))
9034 return;
9035
9036 if (!dev_priv->lvds_downclock_avail)
9037 return;
9038
9039 dpll = I915_READ(dpll_reg);
9040 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
9041 DRM_DEBUG_DRIVER("upclocking LVDS\n");
9042
9043 assert_panel_unlocked(dev_priv, pipe);
9044
9045 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
9046 I915_WRITE(dpll_reg, dpll);
9047 intel_wait_for_vblank(dev, pipe);
9048
9049 dpll = I915_READ(dpll_reg);
9050 if (dpll & DISPLAY_RATE_SELECT_FPA1)
9051 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
9052 }
9053}
9054
9055static void intel_decrease_pllclock(struct drm_crtc *crtc) 9077static void intel_decrease_pllclock(struct drm_crtc *crtc)
9056{ 9078{
9057 struct drm_device *dev = crtc->dev; 9079 struct drm_device *dev = crtc->dev;
@@ -9127,199 +9149,16 @@ out:
9127 intel_runtime_pm_put(dev_priv); 9149 intel_runtime_pm_put(dev_priv);
9128} 9150}
9129 9151
9130
9131/**
9132 * intel_mark_fb_busy - mark given planes as busy
9133 * @dev: DRM device
9134 * @frontbuffer_bits: bits for the affected planes
9135 * @ring: optional ring for asynchronous commands
9136 *
9137 * This function gets called every time the screen contents change. It can be
9138 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
9139 */
9140static void intel_mark_fb_busy(struct drm_device *dev,
9141 unsigned frontbuffer_bits,
9142 struct intel_engine_cs *ring)
9143{
9144 struct drm_i915_private *dev_priv = dev->dev_private;
9145 enum pipe pipe;
9146
9147 if (!i915.powersave)
9148 return;
9149
9150 for_each_pipe(dev_priv, pipe) {
9151 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
9152 continue;
9153
9154 intel_increase_pllclock(dev, pipe);
9155 if (ring && intel_fbc_enabled(dev))
9156 ring->fbc_dirty = true;
9157 }
9158}
9159
9160/**
9161 * intel_fb_obj_invalidate - invalidate frontbuffer object
9162 * @obj: GEM object to invalidate
9163 * @ring: set for asynchronous rendering
9164 *
9165 * This function gets called every time rendering on the given object starts and
9166 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
9167 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
9168 * until the rendering completes or a flip on this frontbuffer plane is
9169 * scheduled.
9170 */
9171void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
9172 struct intel_engine_cs *ring)
9173{
9174 struct drm_device *dev = obj->base.dev;
9175 struct drm_i915_private *dev_priv = dev->dev_private;
9176
9177 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9178
9179 if (!obj->frontbuffer_bits)
9180 return;
9181
9182 if (ring) {
9183 mutex_lock(&dev_priv->fb_tracking.lock);
9184 dev_priv->fb_tracking.busy_bits
9185 |= obj->frontbuffer_bits;
9186 dev_priv->fb_tracking.flip_bits
9187 &= ~obj->frontbuffer_bits;
9188 mutex_unlock(&dev_priv->fb_tracking.lock);
9189 }
9190
9191 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
9192
9193 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
9194}
9195
9196/**
9197 * intel_frontbuffer_flush - flush frontbuffer
9198 * @dev: DRM device
9199 * @frontbuffer_bits: frontbuffer plane tracking bits
9200 *
9201 * This function gets called every time rendering on the given planes has
9202 * completed and frontbuffer caching can be started again. Flushes will get
 9203	 * delayed if they're blocked by some outstanding asynchronous rendering.
9204 *
9205 * Can be called without any locks held.
9206 */
9207void intel_frontbuffer_flush(struct drm_device *dev,
9208 unsigned frontbuffer_bits)
9209{
9210 struct drm_i915_private *dev_priv = dev->dev_private;
9211
 9212	 /* Delay flushing when rings are still busy. */
9213 mutex_lock(&dev_priv->fb_tracking.lock);
9214 frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
9215 mutex_unlock(&dev_priv->fb_tracking.lock);
9216
9217 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9218
9219 intel_edp_psr_flush(dev, frontbuffer_bits);
9220
9221 /*
9222 * FIXME: Unconditional fbc flushing here is a rather gross hack and
9223 * needs to be reworked into a proper frontbuffer tracking scheme like
9224 * psr employs.
9225 */
9226 if (IS_BROADWELL(dev))
9227 gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
9228}
9229
9230/**
9231 * intel_fb_obj_flush - flush frontbuffer object
9232 * @obj: GEM object to flush
9233 * @retire: set when retiring asynchronous rendering
9234 *
9235 * This function gets called every time rendering on the given object has
9236 * completed and frontbuffer caching can be started again. If @retire is true
9237 * then any delayed flushes will be unblocked.
9238 */
9239void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
9240 bool retire)
9241{
9242 struct drm_device *dev = obj->base.dev;
9243 struct drm_i915_private *dev_priv = dev->dev_private;
9244 unsigned frontbuffer_bits;
9245
9246 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9247
9248 if (!obj->frontbuffer_bits)
9249 return;
9250
9251 frontbuffer_bits = obj->frontbuffer_bits;
9252
9253 if (retire) {
9254 mutex_lock(&dev_priv->fb_tracking.lock);
9255 /* Filter out new bits since rendering started. */
9256 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
9257
9258 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
9259 mutex_unlock(&dev_priv->fb_tracking.lock);
9260 }
9261
9262 intel_frontbuffer_flush(dev, frontbuffer_bits);
9263}
9264
9265/**
 9266	 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
9267 * @dev: DRM device
9268 * @frontbuffer_bits: frontbuffer plane tracking bits
9269 *
9270 * This function gets called after scheduling a flip on @obj. The actual
9271 * frontbuffer flushing will be delayed until completion is signalled with
 9272	 * intel_frontbuffer_flip_complete. If an invalidate happens in between, this
9273 * flush will be cancelled.
9274 *
9275 * Can be called without any locks held.
9276 */
9277void intel_frontbuffer_flip_prepare(struct drm_device *dev,
9278 unsigned frontbuffer_bits)
9279{
9280 struct drm_i915_private *dev_priv = dev->dev_private;
9281
9282 mutex_lock(&dev_priv->fb_tracking.lock);
9283 dev_priv->fb_tracking.flip_bits
9284 |= frontbuffer_bits;
9285 mutex_unlock(&dev_priv->fb_tracking.lock);
9286}
9287
9288/**
9289 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
9290 * @dev: DRM device
9291 * @frontbuffer_bits: frontbuffer plane tracking bits
9292 *
9293 * This function gets called after the flip has been latched and will complete
 9294	 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
9295 *
9296 * Can be called without any locks held.
9297 */
9298void intel_frontbuffer_flip_complete(struct drm_device *dev,
9299 unsigned frontbuffer_bits)
9300{
9301 struct drm_i915_private *dev_priv = dev->dev_private;
9302
9303 mutex_lock(&dev_priv->fb_tracking.lock);
9304 /* Mask any cancelled flips. */
9305 frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
9306 dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
9307 mutex_unlock(&dev_priv->fb_tracking.lock);
9308
9309 intel_frontbuffer_flush(dev, frontbuffer_bits);
9310}
9311
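The block removed above is the whole frontbuffer tracking core; it appears to be extracted out of intel_display.c by this series rather than dropped (with the kerneldoc spelling fixes folded in along the way), so the protocol it documents still holds. A stub sketch of one flip through that protocol — the function names and bit value are illustrative, only the call order comes from the kerneldoc:

    #include <stdio.h>

    #define FRONTBUFFER_PRIMARY 0x1   /* illustrative bit, not the real macro */

    /* Stubs: only the call order is taken from the kerneldoc above. */
    static void fb_obj_invalidate(unsigned bits)         { printf("invalidate %#x\n", bits); }
    static void frontbuffer_flip_prepare(unsigned bits)  { printf("flip prepare %#x\n", bits); }
    static void frontbuffer_flip_complete(unsigned bits) { printf("flip complete %#x\n", bits); }

    int main(void)
    {
            /* 1. Rendering starts: frontbuffer caches (FBC/PSR/DRRS) must stop. */
            fb_obj_invalidate(FRONTBUFFER_PRIMARY);
            /* 2. A flip is scheduled: the eventual flush is parked... */
            frontbuffer_flip_prepare(FRONTBUFFER_PRIMARY);
            /* 3. ...until the flip latches on vblank, which runs the delayed
             *    flush unless another invalidate cancelled it in between. */
            frontbuffer_flip_complete(FRONTBUFFER_PRIMARY);
            return 0;
    }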
9312static void intel_crtc_destroy(struct drm_crtc *crtc) 9152static void intel_crtc_destroy(struct drm_crtc *crtc)
9313{ 9153{
9314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9154 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9315 struct drm_device *dev = crtc->dev; 9155 struct drm_device *dev = crtc->dev;
9316 struct intel_unpin_work *work; 9156 struct intel_unpin_work *work;
9317 unsigned long flags;
9318 9157
9319 spin_lock_irqsave(&dev->event_lock, flags); 9158 spin_lock_irq(&dev->event_lock);
9320 work = intel_crtc->unpin_work; 9159 work = intel_crtc->unpin_work;
9321 intel_crtc->unpin_work = NULL; 9160 intel_crtc->unpin_work = NULL;
9322 spin_unlock_irqrestore(&dev->event_lock, flags); 9161 spin_unlock_irq(&dev->event_lock);
9323 9162
9324 if (work) { 9163 if (work) {
9325 cancel_work_sync(&work->work); 9164 cancel_work_sync(&work->work);
@@ -9365,6 +9204,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
9365 if (intel_crtc == NULL) 9204 if (intel_crtc == NULL)
9366 return; 9205 return;
9367 9206
9207 /*
9208 * This is called both by irq handlers and the reset code (to complete
9209 * lost pageflips) so needs the full irqsave spinlocks.
9210 */
9368 spin_lock_irqsave(&dev->event_lock, flags); 9211 spin_lock_irqsave(&dev->event_lock, flags);
9369 work = intel_crtc->unpin_work; 9212 work = intel_crtc->unpin_work;
9370 9213
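The spin_lock_irqsave() to spin_lock_irq() conversions in this file encode a context claim: the plain _irq variant unconditionally re-enables interrupts on unlock, so it is only safe from process context, while paths reachable from interrupt handlers — like the flip-completion functions here, which keep irqsave and gain comments saying why — must restore the interrupt state they found. A userspace model of the difference, with a boolean standing in for the real irq state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the distinction, not the kernel primitives. */
    static bool irqs_enabled = true;

    static void lock_irq(void)            { irqs_enabled = false; }
    static void unlock_irq(void)          { irqs_enabled = true; } /* unconditional! */
    static bool lock_irqsave(void)        { bool f = irqs_enabled; irqs_enabled = false; return f; }
    static void unlock_irqrestore(bool f) { irqs_enabled = f; }

    static void finish_page_flip(void)
    {
            bool flags = lock_irqsave();    /* safe from any context */
            /* ... complete the flip under the lock ... */
            unlock_irqrestore(flags);       /* puts irq state back as found */
    }

    int main(void)
    {
            irqs_enabled = false;           /* pretend we're inside an irq handler */
            finish_page_flip();
            printf("irqsave path leaves irqs %s (correct)\n",
                   irqs_enabled ? "on" : "off");

            lock_irq();                     /* the process-context-only variant */
            unlock_irq();
            printf("_irq path leaves irqs %s (a bug from irq context)\n",
                   irqs_enabled ? "on" : "off");
            return 0;
    }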
@@ -9446,7 +9289,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
9446 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 9289 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9447 unsigned long flags; 9290 unsigned long flags;
9448 9291
9449 /* NB: An MMIO update of the plane base pointer will also 9292
9293 /*
9294 * This is called both by irq handlers and the reset code (to complete
9295 * lost pageflips) so needs the full irqsave spinlocks.
9296 *
9297 * NB: An MMIO update of the plane base pointer will also
9450 * generate a page-flip completion irq, i.e. every modeset 9298 * generate a page-flip completion irq, i.e. every modeset
9451 * is also accompanied by a spurious intel_prepare_page_flip(). 9299 * is also accompanied by a spurious intel_prepare_page_flip().
9452 */ 9300 */
@@ -9821,7 +9669,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
9821{ 9669{
9822 struct drm_i915_private *dev_priv = dev->dev_private; 9670 struct drm_i915_private *dev_priv = dev->dev_private;
9823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9671 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9824 unsigned long irq_flags;
9825 int ret; 9672 int ret;
9826 9673
9827 if (WARN_ON(intel_crtc->mmio_flip.seqno)) 9674 if (WARN_ON(intel_crtc->mmio_flip.seqno))
@@ -9835,10 +9682,10 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
9835 return 0; 9682 return 0;
9836 } 9683 }
9837 9684
9838 spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); 9685 spin_lock_irq(&dev_priv->mmio_flip_lock);
9839 intel_crtc->mmio_flip.seqno = obj->last_write_seqno; 9686 intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
9840 intel_crtc->mmio_flip.ring_id = obj->ring->id; 9687 intel_crtc->mmio_flip.ring_id = obj->ring->id;
9841 spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); 9688 spin_unlock_irq(&dev_priv->mmio_flip_lock);
9842 9689
9843 /* 9690 /*
9844 * Double check to catch cases where irq fired before 9691 * Double check to catch cases where irq fired before
@@ -9903,18 +9750,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9903 struct drm_i915_private *dev_priv = dev->dev_private; 9750 struct drm_i915_private *dev_priv = dev->dev_private;
9904 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9751 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9905 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9752 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9906 unsigned long flags; 9753
9754 WARN_ON(!in_irq());
9907 9755
9908 if (crtc == NULL) 9756 if (crtc == NULL)
9909 return; 9757 return;
9910 9758
9911 spin_lock_irqsave(&dev->event_lock, flags); 9759 spin_lock(&dev->event_lock);
9912 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { 9760 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
9913 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 9761 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
9914 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 9762 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
9915 page_flip_completed(intel_crtc); 9763 page_flip_completed(intel_crtc);
9916 } 9764 }
9917 spin_unlock_irqrestore(&dev->event_lock, flags); 9765 spin_unlock(&dev->event_lock);
9918} 9766}
9919 9767
9920static int intel_crtc_page_flip(struct drm_crtc *crtc, 9768static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9930,7 +9778,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9930 enum pipe pipe = intel_crtc->pipe; 9778 enum pipe pipe = intel_crtc->pipe;
9931 struct intel_unpin_work *work; 9779 struct intel_unpin_work *work;
9932 struct intel_engine_cs *ring; 9780 struct intel_engine_cs *ring;
9933 unsigned long flags;
9934 int ret; 9781 int ret;
9935 9782
9936 /* 9783 /*
@@ -9971,7 +9818,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9971 goto free_work; 9818 goto free_work;
9972 9819
9973 /* We borrow the event spin lock for protecting unpin_work */ 9820 /* We borrow the event spin lock for protecting unpin_work */
9974 spin_lock_irqsave(&dev->event_lock, flags); 9821 spin_lock_irq(&dev->event_lock);
9975 if (intel_crtc->unpin_work) { 9822 if (intel_crtc->unpin_work) {
9976 /* Before declaring the flip queue wedged, check if 9823 /* Before declaring the flip queue wedged, check if
9977 * the hardware completed the operation behind our backs. 9824 * the hardware completed the operation behind our backs.
@@ -9981,7 +9828,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9981 page_flip_completed(intel_crtc); 9828 page_flip_completed(intel_crtc);
9982 } else { 9829 } else {
9983 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 9830 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9984 spin_unlock_irqrestore(&dev->event_lock, flags); 9831 spin_unlock_irq(&dev->event_lock);
9985 9832
9986 drm_crtc_vblank_put(crtc); 9833 drm_crtc_vblank_put(crtc);
9987 kfree(work); 9834 kfree(work);
@@ -9989,7 +9836,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9989 } 9836 }
9990 } 9837 }
9991 intel_crtc->unpin_work = work; 9838 intel_crtc->unpin_work = work;
9992 spin_unlock_irqrestore(&dev->event_lock, flags); 9839 spin_unlock_irq(&dev->event_lock);
9993 9840
9994 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 9841 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9995 flush_workqueue(dev_priv->wq); 9842 flush_workqueue(dev_priv->wq);
@@ -10076,9 +9923,9 @@ cleanup_pending:
10076 mutex_unlock(&dev->struct_mutex); 9923 mutex_unlock(&dev->struct_mutex);
10077 9924
10078cleanup: 9925cleanup:
10079 spin_lock_irqsave(&dev->event_lock, flags); 9926 spin_lock_irq(&dev->event_lock);
10080 intel_crtc->unpin_work = NULL; 9927 intel_crtc->unpin_work = NULL;
10081 spin_unlock_irqrestore(&dev->event_lock, flags); 9928 spin_unlock_irq(&dev->event_lock);
10082 9929
10083 drm_crtc_vblank_put(crtc); 9930 drm_crtc_vblank_put(crtc);
10084free_work: 9931free_work:
@@ -10089,9 +9936,9 @@ out_hang:
10089 intel_crtc_wait_for_pending_flips(crtc); 9936 intel_crtc_wait_for_pending_flips(crtc);
10090 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); 9937 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
10091 if (ret == 0 && event) { 9938 if (ret == 0 && event) {
10092 spin_lock_irqsave(&dev->event_lock, flags); 9939 spin_lock_irq(&dev->event_lock);
10093 drm_send_vblank_event(dev, pipe, event); 9940 drm_send_vblank_event(dev, pipe, event);
10094 spin_unlock_irqrestore(&dev->event_lock, flags); 9941 spin_unlock_irq(&dev->event_lock);
10095 } 9942 }
10096 } 9943 }
10097 return ret; 9944 return ret;
@@ -11677,7 +11524,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11677{ 11524{
11678 uint32_t val; 11525 uint32_t val;
11679 11526
11680 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) 11527 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
11681 return false; 11528 return false;
11682 11529
11683 val = I915_READ(PCH_DPLL(pll->id)); 11530 val = I915_READ(PCH_DPLL(pll->id));
@@ -11811,89 +11658,37 @@ disable_unpin:
11811} 11658}
11812 11659
11813static int 11660static int
11814intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, 11661intel_check_primary_plane(struct drm_plane *plane,
11815 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 11662 struct intel_plane_state *state)
11816 unsigned int crtc_w, unsigned int crtc_h, 11663{
11817 uint32_t src_x, uint32_t src_y, 11664 struct drm_crtc *crtc = state->crtc;
11818 uint32_t src_w, uint32_t src_h) 11665 struct drm_framebuffer *fb = state->fb;
11666 struct drm_rect *dest = &state->dst;
11667 struct drm_rect *src = &state->src;
11668 const struct drm_rect *clip = &state->clip;
11669
11670 return drm_plane_helper_check_update(plane, crtc, fb,
11671 src, dest, clip,
11672 DRM_PLANE_HELPER_NO_SCALING,
11673 DRM_PLANE_HELPER_NO_SCALING,
11674 false, true, &state->visible);
11675}
11676
11677static int
11678intel_commit_primary_plane(struct drm_plane *plane,
11679 struct intel_plane_state *state)
11819{ 11680{
11681 struct drm_crtc *crtc = state->crtc;
11682 struct drm_framebuffer *fb = state->fb;
11820 struct drm_device *dev = crtc->dev; 11683 struct drm_device *dev = crtc->dev;
11821 struct drm_i915_private *dev_priv = dev->dev_private; 11684 struct drm_i915_private *dev_priv = dev->dev_private;
11822 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11823 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11686 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11824 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11687 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11825 struct drm_rect dest = {
11826 /* integer pixels */
11827 .x1 = crtc_x,
11828 .y1 = crtc_y,
11829 .x2 = crtc_x + crtc_w,
11830 .y2 = crtc_y + crtc_h,
11831 };
11832 struct drm_rect src = {
11833 /* 16.16 fixed point */
11834 .x1 = src_x,
11835 .y1 = src_y,
11836 .x2 = src_x + src_w,
11837 .y2 = src_y + src_h,
11838 };
11839 const struct drm_rect clip = {
11840 /* integer pixels */
11841 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11842 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11843 };
11844 const struct {
11845 int crtc_x, crtc_y;
11846 unsigned int crtc_w, crtc_h;
11847 uint32_t src_x, src_y, src_w, src_h;
11848 } orig = {
11849 .crtc_x = crtc_x,
11850 .crtc_y = crtc_y,
11851 .crtc_w = crtc_w,
11852 .crtc_h = crtc_h,
11853 .src_x = src_x,
11854 .src_y = src_y,
11855 .src_w = src_w,
11856 .src_h = src_h,
11857 };
11858 struct intel_plane *intel_plane = to_intel_plane(plane); 11688 struct intel_plane *intel_plane = to_intel_plane(plane);
11859 bool visible; 11689 struct drm_rect *src = &state->src;
11860 int ret; 11690 int ret;
11861 11691
11862 ret = drm_plane_helper_check_update(plane, crtc, fb,
11863 &src, &dest, &clip,
11864 DRM_PLANE_HELPER_NO_SCALING,
11865 DRM_PLANE_HELPER_NO_SCALING,
11866 false, true, &visible);
11867
11868 if (ret)
11869 return ret;
11870
11871 /*
11872 * If the CRTC isn't enabled, we're just pinning the framebuffer,
11873 * updating the fb pointer, and returning without touching the
11874 * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
11875 * turn on the display with all planes setup as desired.
11876 */
11877 if (!crtc->enabled) {
11878 mutex_lock(&dev->struct_mutex);
11879
11880 /*
11881 * If we already called setplane while the crtc was disabled,
11882 * we may have an fb pinned; unpin it.
11883 */
11884 if (plane->fb)
11885 intel_unpin_fb_obj(old_obj);
11886
11887 i915_gem_track_fb(old_obj, obj,
11888 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11889
11890 /* Pin and return without programming hardware */
11891 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11892 mutex_unlock(&dev->struct_mutex);
11893
11894 return ret;
11895 }
11896
11897 intel_crtc_wait_for_pending_flips(crtc); 11692 intel_crtc_wait_for_pending_flips(crtc);
11898 11693
11899 /* 11694 /*
@@ -11902,7 +11697,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11902 * happens if userspace explicitly disables the plane by passing fb=0 11697 * happens if userspace explicitly disables the plane by passing fb=0
11903 * because plane->fb still gets set and pinned. 11698 * because plane->fb still gets set and pinned.
11904 */ 11699 */
11905 if (!visible) { 11700 if (!state->visible) {
11906 mutex_lock(&dev->struct_mutex); 11701 mutex_lock(&dev->struct_mutex);
11907 11702
11908 /* 11703 /*
@@ -11949,7 +11744,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11949 intel_disable_fbc(dev); 11744 intel_disable_fbc(dev);
11950 } 11745 }
11951 } 11746 }
11952 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); 11747 ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb);
11953 if (ret) 11748 if (ret)
11954 return ret; 11749 return ret;
11955 11750
@@ -11957,19 +11752,62 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11957 intel_enable_primary_hw_plane(plane, crtc); 11752 intel_enable_primary_hw_plane(plane, crtc);
11958 } 11753 }
11959 11754
11960 intel_plane->crtc_x = orig.crtc_x; 11755 intel_plane->crtc_x = state->orig_dst.x1;
11961 intel_plane->crtc_y = orig.crtc_y; 11756 intel_plane->crtc_y = state->orig_dst.y1;
11962 intel_plane->crtc_w = orig.crtc_w; 11757 intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11963 intel_plane->crtc_h = orig.crtc_h; 11758 intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11964 intel_plane->src_x = orig.src_x; 11759 intel_plane->src_x = state->orig_src.x1;
11965 intel_plane->src_y = orig.src_y; 11760 intel_plane->src_y = state->orig_src.y1;
11966 intel_plane->src_w = orig.src_w; 11761 intel_plane->src_w = drm_rect_width(&state->orig_src);
11967 intel_plane->src_h = orig.src_h; 11762 intel_plane->src_h = drm_rect_height(&state->orig_src);
11968 intel_plane->obj = obj; 11763 intel_plane->obj = obj;
11969 11764
11970 return 0; 11765 return 0;
11971} 11766}
11972 11767
11768static int
11769intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11770 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11771 unsigned int crtc_w, unsigned int crtc_h,
11772 uint32_t src_x, uint32_t src_y,
11773 uint32_t src_w, uint32_t src_h)
11774{
11775 struct intel_plane_state state;
11776 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11777 int ret;
11778
11779 state.crtc = crtc;
11780 state.fb = fb;
11781
11782 /* sample coordinates in 16.16 fixed point */
11783 state.src.x1 = src_x;
11784 state.src.x2 = src_x + src_w;
11785 state.src.y1 = src_y;
11786 state.src.y2 = src_y + src_h;
11787
11788 /* integer pixels */
11789 state.dst.x1 = crtc_x;
11790 state.dst.x2 = crtc_x + crtc_w;
11791 state.dst.y1 = crtc_y;
11792 state.dst.y2 = crtc_y + crtc_h;
11793
11794 state.clip.x1 = 0;
11795 state.clip.y1 = 0;
11796 state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11797 state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
11798
11799 state.orig_src = state.src;
11800 state.orig_dst = state.dst;
11801
11802 ret = intel_check_primary_plane(plane, &state);
11803 if (ret)
11804 return ret;
11805
11806 intel_commit_primary_plane(plane, &state);
11807
11808 return 0;
11809}
11810
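The setplane rework above splits the old monolithic update into the check/commit pair that atomic modesetting is built around: check() validates a free-standing plane state and may fail, commit() then applies a state already known to be valid. A minimal shape of that split, with toy types standing in for the real ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy plane state; the real intel_plane_state carries drm_rects. */
    struct plane_state {
            int x, y, w, h;
            bool visible;
    };

    /* check: validation only, no hardware side effects, may fail. */
    static int plane_check(struct plane_state *s, int pipe_w, int pipe_h)
    {
            if (s->w <= 0 || s->h <= 0)
                    return -1;
            /* crude clip test standing in for drm_plane_helper_check_update() */
            s->visible = s->x < pipe_w && s->y < pipe_h;
            return 0;
    }

    /* commit: applies an already-validated state and must not fail. */
    static void plane_commit(const struct plane_state *s)
    {
            printf("program plane at %d,%d (%s)\n",
                   s->x, s->y, s->visible ? "visible" : "hidden");
    }

    int main(void)
    {
            struct plane_state s = { 100, 100, 640, 480, false };

            if (plane_check(&s, 1920, 1080) == 0)
                    plane_commit(&s);
            return 0;
    }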
11973/* Common destruction function for both primary and cursor planes */ 11811/* Common destruction function for both primary and cursor planes */
11974static void intel_plane_destroy(struct drm_plane *plane) 11812static void intel_plane_destroy(struct drm_plane *plane)
11975{ 11813{
@@ -12044,51 +11882,41 @@ intel_cursor_plane_disable(struct drm_plane *plane)
12044} 11882}
12045 11883
12046static int 11884static int
12047intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, 11885intel_check_cursor_plane(struct drm_plane *plane,
12048 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 11886 struct intel_plane_state *state)
12049 unsigned int crtc_w, unsigned int crtc_h,
12050 uint32_t src_x, uint32_t src_y,
12051 uint32_t src_w, uint32_t src_h)
12052{ 11887{
12053 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11888 struct drm_crtc *crtc = state->crtc;
12054 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 11889 struct drm_framebuffer *fb = state->fb;
12055 struct drm_i915_gem_object *obj = intel_fb->obj; 11890 struct drm_rect *dest = &state->dst;
12056 struct drm_rect dest = { 11891 struct drm_rect *src = &state->src;
12057 /* integer pixels */ 11892 const struct drm_rect *clip = &state->clip;
12058 .x1 = crtc_x,
12059 .y1 = crtc_y,
12060 .x2 = crtc_x + crtc_w,
12061 .y2 = crtc_y + crtc_h,
12062 };
12063 struct drm_rect src = {
12064 /* 16.16 fixed point */
12065 .x1 = src_x,
12066 .y1 = src_y,
12067 .x2 = src_x + src_w,
12068 .y2 = src_y + src_h,
12069 };
12070 const struct drm_rect clip = {
12071 /* integer pixels */
12072 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
12073 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
12074 };
12075 bool visible;
12076 int ret;
12077 11893
12078 ret = drm_plane_helper_check_update(plane, crtc, fb, 11894 return drm_plane_helper_check_update(plane, crtc, fb,
12079 &src, &dest, &clip, 11895 src, dest, clip,
12080 DRM_PLANE_HELPER_NO_SCALING, 11896 DRM_PLANE_HELPER_NO_SCALING,
12081 DRM_PLANE_HELPER_NO_SCALING, 11897 DRM_PLANE_HELPER_NO_SCALING,
12082 true, true, &visible); 11898 true, true, &state->visible);
12083 if (ret) 11899}
12084 return ret;
12085 11900
12086 crtc->cursor_x = crtc_x; 11901static int
12087 crtc->cursor_y = crtc_y; 11902intel_commit_cursor_plane(struct drm_plane *plane,
11903 struct intel_plane_state *state)
11904{
11905 struct drm_crtc *crtc = state->crtc;
11906 struct drm_framebuffer *fb = state->fb;
11907 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11908 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11909 struct drm_i915_gem_object *obj = intel_fb->obj;
11910 int crtc_w, crtc_h;
11911
11912 crtc->cursor_x = state->orig_dst.x1;
11913 crtc->cursor_y = state->orig_dst.y1;
12088 if (fb != crtc->cursor->fb) { 11914 if (fb != crtc->cursor->fb) {
11915 crtc_w = drm_rect_width(&state->orig_dst);
11916 crtc_h = drm_rect_height(&state->orig_dst);
12089 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); 11917 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
12090 } else { 11918 } else {
12091 intel_crtc_update_cursor(crtc, visible); 11919 intel_crtc_update_cursor(crtc, state->visible);
12092 11920
12093 intel_frontbuffer_flip(crtc->dev, 11921 intel_frontbuffer_flip(crtc->dev,
12094 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe)); 11922 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12096,6 +11924,48 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
12096 return 0; 11924 return 0;
12097 } 11925 }
12098} 11926}
11927
11928static int
11929intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11930 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11931 unsigned int crtc_w, unsigned int crtc_h,
11932 uint32_t src_x, uint32_t src_y,
11933 uint32_t src_w, uint32_t src_h)
11934{
11935 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11936 struct intel_plane_state state;
11937 int ret;
11938
11939 state.crtc = crtc;
11940 state.fb = fb;
11941
11942 /* sample coordinates in 16.16 fixed point */
11943 state.src.x1 = src_x;
11944 state.src.x2 = src_x + src_w;
11945 state.src.y1 = src_y;
11946 state.src.y2 = src_y + src_h;
11947
11948 /* integer pixels */
11949 state.dst.x1 = crtc_x;
11950 state.dst.x2 = crtc_x + crtc_w;
11951 state.dst.y1 = crtc_y;
11952 state.dst.y2 = crtc_y + crtc_h;
11953
11954 state.clip.x1 = 0;
11955 state.clip.y1 = 0;
11956 state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11957 state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
11958
11959 state.orig_src = state.src;
11960 state.orig_dst = state.dst;
11961
11962 ret = intel_check_cursor_plane(plane, &state);
11963 if (ret)
11964 return ret;
11965
11966 return intel_commit_cursor_plane(plane, &state);
11967}
11968
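Both wrappers build their source rectangle in 16.16 fixed point — integer pixel coordinates in the top 16 bits, a fractional part below — which is what lets scaled planes address sub-pixel source offsets. The conversion, in isolation:

    #include <stdio.h>

    /* 16.16 fixed point: integer pixels in the top 16 bits, fraction below. */
    static unsigned to_fixed(unsigned pixels) { return pixels << 16; }
    static unsigned int_part(unsigned fixed)  { return fixed >> 16; }

    int main(void)
    {
            unsigned src_x = to_fixed(100) | 0x8000;  /* 100.5 pixels */

            printf("%#x -> integer pixel %u\n", src_x, int_part(src_x));
            return 0;
    }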
12099static const struct drm_plane_funcs intel_cursor_plane_funcs = { 11969static const struct drm_plane_funcs intel_cursor_plane_funcs = {
12100 .update_plane = intel_cursor_plane_update, 11970 .update_plane = intel_cursor_plane_update,
12101 .disable_plane = intel_cursor_plane_disable, 11971 .disable_plane = intel_cursor_plane_disable,
@@ -12284,6 +12154,9 @@ static bool intel_crt_present(struct drm_device *dev)
12284{ 12154{
12285 struct drm_i915_private *dev_priv = dev->dev_private; 12155 struct drm_i915_private *dev_priv = dev->dev_private;
12286 12156
12157 if (INTEL_INFO(dev)->gen >= 9)
12158 return false;
12159
12287 if (IS_ULT(dev)) 12160 if (IS_ULT(dev))
12288 return false; 12161 return false;
12289 12162
@@ -12636,8 +12509,12 @@ static void intel_init_display(struct drm_device *dev)
12636	dev_priv->display.crtc_enable = haswell_crtc_enable;
12637	dev_priv->display.crtc_disable = haswell_crtc_disable;
12638	dev_priv->display.off = ironlake_crtc_off;
12639	dev_priv->display.update_primary_plane =
12640	ironlake_update_primary_plane;
12512	if (INTEL_INFO(dev)->gen >= 9)
12513	dev_priv->display.update_primary_plane =
12514	skylake_update_primary_plane;
12515	else
12516	dev_priv->display.update_primary_plane =
12517	ironlake_update_primary_plane;
12641	} else if (HAS_PCH_SPLIT(dev)) {
12642	dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12643	dev_priv->display.get_plane_config = ironlake_get_plane_config;
@@ -12721,6 +12598,10 @@ static void intel_init_display(struct drm_device *dev)
12721	dev_priv->display.modeset_global_resources =
12722	valleyview_modeset_global_resources;
12723	dev_priv->display.write_eld = ironlake_write_eld;
12601 } else if (INTEL_INFO(dev)->gen >= 9) {
12602 dev_priv->display.write_eld = haswell_write_eld;
12603 dev_priv->display.modeset_global_resources =
12604 haswell_modeset_global_resources;
12724	}
12725
12726	/* Default just returns -ENODEV to indicate unsupported */
@@ -12948,11 +12829,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
12948	intel_enable_gt_powersave(dev);
12949}
12950
12951void intel_modeset_suspend_hw(struct drm_device *dev)
12952{
12953 intel_suspend_hw(dev);
12954}
12955
12956void intel_modeset_init(struct drm_device *dev)
12957{
12958	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13288,7 +13164,7 @@ void i915_redisable_vga(struct drm_device *dev)
13288	* level, just check if the power well is enabled instead of trying to
13289	* follow the "don't touch the power well if we don't need it" policy
13290	* the rest of the driver uses. */
13291	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
13167	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
13292	return;
13293
13294	i915_redisable_vga_power_on(dev);
@@ -13509,9 +13385,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
13509	* Too much stuff here (turning of rps, connectors, ...) would
13510	* experience fancy races otherwise.
13511	*/
13512	drm_irq_uninstall(dev);
13513	intel_hpd_cancel_work(dev_priv);
13514	dev_priv->pm._irqs_disabled = true;
13388	intel_irq_uninstall(dev_priv);
13515
13516	/*
13517	* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13666,8 +13540,8 @@ intel_display_capture_error_state(struct drm_device *dev)
13666
13667	for_each_pipe(dev_priv, i) {
13668	error->pipe[i].power_domain_on =
13669	intel_display_power_enabled_unlocked(dev_priv,
13543	__intel_display_power_is_enabled(dev_priv,
13670	POWER_DOMAIN_PIPE(i));
13671	if (!error->pipe[i].power_domain_on)
13672	continue;
13673
@@ -13702,7 +13576,7 @@ intel_display_capture_error_state(struct drm_device *dev)
13702	enum transcoder cpu_transcoder = transcoders[i];
13703
13704	error->transcoder[i].power_domain_on =
13705	intel_display_power_enabled_unlocked(dev_priv,
13579	__intel_display_power_is_enabled(dev_priv,
13706	POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13707	if (!error->transcoder[i].power_domain_on)
13708	continue;
@@ -13786,9 +13660,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13786
13787	for_each_intel_crtc(dev, crtc) {
13788	struct intel_unpin_work *work;
13789	unsigned long irqflags;
13790
13791	spin_lock_irqsave(&dev->event_lock, irqflags);
13664	spin_lock_irq(&dev->event_lock);
13792
13793	work = crtc->unpin_work;
13794
@@ -13798,6 +13671,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13798	work->event = NULL;
13799	}
13800
13801	spin_unlock_irqrestore(&dev->event_lock, irqflags);
13674	spin_unlock_irq(&dev->event_lock);
13802	}
13803}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f6a3fdd5589e..64c8e047891d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -225,7 +225,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
225}
226
227static uint32_t
228pack_aux(uint8_t *src, int src_bytes)
228pack_aux(const uint8_t *src, int src_bytes)
229{
230	int i;
231	uint32_t v = 0;
@@ -661,6 +661,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
661	return index ? 0 : 100;
662}
663
664static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
665{
666 /*
667 * SKL doesn't need us to program the AUX clock divider (Hardware will
668 * derive the clock from CDCLK automatically). We still implement the
669 * get_aux_clock_divider vfunc to plug-in into the existing code.
670 */
671 return index ? 0 : 1;
672}
673
664static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
665	bool has_aux_irq,
666	int send_bytes,
@@ -691,9 +701,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
691	(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
692}
693 703
704static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
705 bool has_aux_irq,
706 int send_bytes,
707 uint32_t unused)
708{
709 return DP_AUX_CH_CTL_SEND_BUSY |
710 DP_AUX_CH_CTL_DONE |
711 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
712 DP_AUX_CH_CTL_TIME_OUT_ERROR |
713 DP_AUX_CH_CTL_TIME_OUT_1600us |
714 DP_AUX_CH_CTL_RECEIVE_ERROR |
715 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
716 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
717}
718
694static int
695intel_dp_aux_ch(struct intel_dp *intel_dp,
696	uint8_t *send, int send_bytes,
721	const uint8_t *send, int send_bytes,
697	uint8_t *recv, int recv_size)
698{
699	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -925,7 +950,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
925	BUG();
926	}
927
928 if (!HAS_DDI(dev)) 953 /*
954 * The AUX_CTL register is usually DP_CTL + 0x10.
955 *
956 * On Haswell and Broadwell though:
957 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
958 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
959 *
960 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
961 */
962 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
929	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
930
931	intel_dp->aux.name = name;
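
The comment added in this hunk encodes a simple layout rule: except on Haswell and Broadwell, AUX_CTL sits at a fixed 0x10 offset from the port's DP control register, so the driver can derive one from the other. In isolation (the base offset below is a made-up example, not a real register address):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t output_reg = 0x64000;               /* hypothetical DP_CTL */
	uint32_t aux_ch_ctl_reg = output_reg + 0x10; /* the non-HSW/BDW rule */

	printf("DP_CTL=0x%05x AUX_CTL=0x%05x\n", output_reg, aux_ch_ctl_reg);
	return 0;
}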
@@ -1819,7 +1853,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1819	u32 tmp;
1820
1821	power_domain = intel_display_port_power_domain(encoder);
1822	if (!intel_display_power_enabled(dev_priv, power_domain))
1856	if (!intel_display_power_is_enabled(dev_priv, power_domain))
1823	return false;
1824
1825	tmp = I915_READ(intel_dp->output_reg);
@@ -1995,10 +2029,8 @@ static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1995	POSTING_READ(ctl_reg);
1996}
1997
1998static void intel_edp_psr_setup(struct intel_dp *intel_dp)
2032static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
1999{
2000	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2001	struct drm_i915_private *dev_priv = dev->dev_private;
2002	struct edp_vsc_psr psr_vsc;
2003
2004	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
@@ -2008,10 +2040,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
2008	psr_vsc.sdp_header.HB2 = 0x2;
2009	psr_vsc.sdp_header.HB3 = 0x8;
2010	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
2011
2012	/* Avoid continuous PSR exit by masking memup and hpd */
2013	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
2014	EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
2015}
2016
2017static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
@@ -2021,8 +2049,17 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
2021	struct drm_i915_private *dev_priv = dev->dev_private;
2022	uint32_t aux_clock_divider;
2023	int precharge = 0x3;
2024	int msg_size = 5; /* Header(4) + Message(1) */
2025	bool only_standby = false;
2053 static const uint8_t aux_msg[] = {
2054 [0] = DP_AUX_NATIVE_WRITE << 4,
2055 [1] = DP_SET_POWER >> 8,
2056 [2] = DP_SET_POWER & 0xff,
2057 [3] = 1 - 1,
2058 [4] = DP_SET_POWER_D0,
2059 };
2060 int i;
2061
2062 BUILD_BUG_ON(sizeof(aux_msg) > 20);
2026
2027	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
2028
@@ -2038,11 +2075,13 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
2038	DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
2039
2040	/* Setup AUX registers */
2041	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
2042	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
2078	for (i = 0; i < sizeof(aux_msg); i += 4)
2079	I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
2080	pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
2081
2043	I915_WRITE(EDP_PSR_AUX_CTL(dev),
2044	DP_AUX_CH_CTL_TIME_OUT_400us |
2045	(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
2084	(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
2046	(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
2047	(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
2048}
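
The new loop above writes the 5-byte native AUX write (DP_SET_POWER set to D0, built in the aux_msg array earlier in this hunk) into the EDP_PSR_AUX_DATA registers four bytes at a time via pack_aux(), which packs bytes MSB-first into a 32-bit word. A self-contained sketch of that packing, mirroring pack_aux() as shown earlier in this diff; the DPCD constants are written out as literals:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's pack_aux(): up to 4 bytes, MSB first. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	/* Native AUX write of DP_SET_POWER (0x600) = D0, as in aux_msg[]. */
	const uint8_t aux_msg[] = { 0x80, 0x06, 0x00, 0x00, 0x01 };
	int i;

	for (i = 0; i < (int)sizeof(aux_msg); i += 4)
		printf("AUX_DATA%d = 0x%08x\n", i / 4 + 1,
		       pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
	return 0;
}

This prints 0x80060000 and 0x01000000, the two words the hardware replays on the AUX channel when it self-refreshes.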
@@ -2131,10 +2170,7 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
2131	WARN_ON(dev_priv->psr.active);
2132	lockdep_assert_held(&dev_priv->psr.lock);
2133
2134	/* Enable PSR on the panel */
2135	intel_edp_psr_enable_sink(intel_dp);
2136
2137	/* Enable PSR on the host */
2173	/* Enable/Re-enable PSR on the host */
2138	intel_edp_psr_enable_source(intel_dp);
2139
2140	dev_priv->psr.active = true;
@@ -2158,17 +2194,25 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
2158	mutex_lock(&dev_priv->psr.lock);
2159	if (dev_priv->psr.enabled) {
2160	DRM_DEBUG_KMS("PSR already in use\n");
2161	mutex_unlock(&dev_priv->psr.lock);
2162	return;
2197	goto unlock;
2163	}
2164
2200 if (!intel_edp_psr_match_conditions(intel_dp))
2201 goto unlock;
2202
2165	dev_priv->psr.busy_frontbuffer_bits = 0;
2166
2167	/* Setup PSR once */
2168	intel_edp_psr_setup(intel_dp);
2205	intel_edp_psr_setup_vsc(intel_dp);
2169
2170	if (intel_edp_psr_match_conditions(intel_dp))
2171	dev_priv->psr.enabled = intel_dp;
2207	/* Avoid continuous PSR exit by masking memup and hpd */
2208	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
2209 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
2210
2211 /* Enable PSR on the panel */
2212 intel_edp_psr_enable_sink(intel_dp);
2213
2214 dev_priv->psr.enabled = intel_dp;
2215unlock:
2172	mutex_unlock(&dev_priv->psr.lock);
2173}
2174
@@ -2209,6 +2253,17 @@ static void intel_edp_psr_work(struct work_struct *work)
2209	container_of(work, typeof(*dev_priv), psr.work.work);
2210	struct intel_dp *intel_dp = dev_priv->psr.enabled;
2211
2256 /* We have to make sure PSR is ready for re-enable
2257 * otherwise it keeps disabled until next full enable/disable cycle.
2258 * PSR might take some time to get fully disabled
2259 * and be ready for re-enable.
2260 */
2261 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
2262 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
2263 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
2264 return;
2265 }
2266
2212	mutex_lock(&dev_priv->psr.lock);
2213	intel_dp = dev_priv->psr.enabled;
2214
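
The comment in the added block explains why the re-enable path waits for EDP_PSR_STATUS_CTL to report idle: PSR can take a while to fully disable, and re-enabling before then leaves it stuck until a full enable/disable cycle. The driver's wait_for() is essentially a bounded poll; a rough userspace equivalent, ours and deliberately simplified (no sleep between polls):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll cond() until it is true or timeout_ms elapses. */
static bool wait_for_cond(bool (*cond)(void), int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);
	return false;
}

static int polls;
static bool idle_after_three_polls(void) { return ++polls >= 3; }

int main(void)
{
	/* 50 ms budget, matching the wait_for(..., 50) in the hunk above. */
	printf("went idle: %d\n", wait_for_cond(idle_after_three_polls, 50));
	return 0;
}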
@@ -2680,6 +2735,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2680
2681	mutex_lock(&dev_priv->dpio_lock);
2682
2738 /* allow hardware to manage TX FIFO reset source */
2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2740 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2742
2743 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2744 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2745 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2746
2683	/* Deassert soft data lane reset*/
2684	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2685	val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2836,7 +2900,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
2836	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2837	enum port port = dp_to_dig_port(intel_dp)->port;
2838
2839	if (IS_VALLEYVIEW(dev))
2903	if (INTEL_INFO(dev)->gen >= 9)
2904	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2905	else if (IS_VALLEYVIEW(dev))
2840	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2841	else if (IS_GEN7(dev) && port == PORT_A)
2842	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2852,7 +2918,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2852	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2853	enum port port = dp_to_dig_port(intel_dp)->port;
2854
2855	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2921	if (INTEL_INFO(dev)->gen >= 9) {
2922 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2924 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2926 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2928 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2929 default:
2930 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2931 }
2932 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2856	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2857	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2858	return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3088,12 +3165,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3088	/* Clear calc init */
3089	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3090	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3168	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3169	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3091	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3092
3093	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3094	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3174	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3175	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3095	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3096
3178 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3179 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3180 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3181 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3182
3183 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3184 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3185 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3186 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3187
3097	/* Program swing deemph */
3098	for (i = 0; i < 4; i++) {
3099	val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3334,7 +3425,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3334	uint32_t signal_levels, mask;
3335	uint8_t train_set = intel_dp->train_set[0];
3336
3337	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3428	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3338	signal_levels = intel_hsw_signal_levels(train_set);
3339	mask = DDI_BUF_EMP_MASK;
3340	} else if (IS_CHERRYVIEW(dev)) {
@@ -3801,26 +3892,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3801	struct drm_device *dev = intel_dig_port->base.base.dev;
3802	struct intel_crtc *intel_crtc =
3803	to_intel_crtc(intel_dig_port->base.base.crtc);
3804	u8 buf[1];
3895	u8 buf;
3896	int test_crc_count;
3897	int attempts = 6;
3805
3806	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
3899	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3807	return -EIO;
3808
3809	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
3902	if (!(buf & DP_TEST_CRC_SUPPORTED))
3810	return -ENOTTY;
3811
3905 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3906 return -EIO;
3907
3812	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3813	DP_TEST_SINK_START) < 0)
3909	buf | DP_TEST_SINK_START) < 0)
3910	return -EIO;
3911
3912	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3814	return -EIO;
3914	test_crc_count = buf & DP_TEST_COUNT_MASK;
3815
3816	/* Wait 2 vblanks to be sure we will have the correct CRC value */
3817	intel_wait_for_vblank(dev, intel_crtc->pipe);
3818	intel_wait_for_vblank(dev, intel_crtc->pipe);
3916	do {
3917	if (drm_dp_dpcd_readb(&intel_dp->aux,
3918	DP_TEST_SINK_MISC, &buf) < 0)
3919 return -EIO;
3920 intel_wait_for_vblank(dev, intel_crtc->pipe);
3921 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3922
3923 if (attempts == 0) {
3924 DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n");
3925 return -EIO;
3926 }
3819
3820	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3821	return -EIO;
3822
3823	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
3931	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3932 return -EIO;
3933 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3934 buf & ~DP_TEST_SINK_START) < 0)
3935 return -EIO;
3936
3824	return 0;
3825}
3826
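
The rewritten sink-CRC path above no longer waits a fixed two vblanks; it records the sink's DP_TEST_COUNT_MASK value, then polls DP_TEST_SINK_MISC once per vblank until the count advances, giving up after six tries. The retry shape, reduced to a self-contained sketch where read_count() stands in for the DPCD read:

#include <stdio.h>

static int calls;
static int read_count(void) { return calls++ >= 2; } /* advances on 3rd read */

/* Wait for the counter to move past its starting value, max 6 attempts. */
static int wait_count_advance(void)
{
	int attempts = 6;
	int base = read_count();

	do {
		/* the driver waits one vblank here */
	} while (--attempts && read_count() == base);

	return attempts ? 0 : -1; /* -EIO in the driver */
}

int main(void)
{
	printf("result: %d\n", wait_count_advance());
	return 0;
}

Polling on the sink's own counter is what lets the function return -EIO when the panel genuinely cannot produce a CRC, instead of reading stale data.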
@@ -5057,7 +5170,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5057	intel_dp->pps_pipe = INVALID_PIPE;
5058
5059	/* intel_dp vfuncs */
5060	if (IS_VALLEYVIEW(dev))
5173	if (INTEL_INFO(dev)->gen >= 9)
5174	intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5175	else if (IS_VALLEYVIEW(dev))
5061	intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5062	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5063	intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5066,7 +5181,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5066	else
5067	intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5068
5069	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5184	if (INTEL_INFO(dev)->gen >= 9)
5185	intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5186	else
5187	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5070
5071	/* Preserve the current hw state. */
5072	intel_dp->DP = I915_READ(intel_dp->output_reg);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba715229a540..94993d23e547 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,6 +34,7 @@
34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_fb_helper.h>
36#include <drm/drm_dp_mst_helper.h>
37#include <drm/drm_rect.h>
37
38#define DIV_ROUND_CLOSEST_ULL(ll, d) \
39({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -240,6 +241,17 @@ typedef struct dpll {
240	int p;
241} intel_clock_t;
242
244struct intel_plane_state {
245 struct drm_crtc *crtc;
246 struct drm_framebuffer *fb;
247 struct drm_rect src;
248 struct drm_rect dst;
249 struct drm_rect clip;
250 struct drm_rect orig_src;
251 struct drm_rect orig_dst;
252 bool visible;
253};
254
243struct intel_plane_config {
244	bool tiled;
245	int size;
@@ -734,6 +746,14 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
734	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
735}
736
749/*
750 * Returns the number of planes for this pipe, ie the number of sprites + 1
751 * (primary plane). This doesn't count the cursor plane then.
752 */
753static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
754{
755 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
756}
737
738/* i915_irq.c */
739bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
@@ -747,15 +767,15 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
747void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
748void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
749void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
750void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
770void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
751void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
771void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
752static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
753{
754	/*
755	* We only use drm_irq_uninstall() at unload and VT switch, so
756	* this is the only thing we need to check.
757	*/
758	return !dev_priv->pm._irqs_disabled;
778	return dev_priv->pm.irqs_enabled;
759}
760
761int intel_get_crtc_scanline(struct intel_crtc *crtc);
@@ -792,11 +812,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
792	struct intel_crtc_config *pipe_config);
793void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
794
795/* intel_display.c */
815/* intel_frontbuffer.c */
796const char *intel_output_name(int output);
797bool intel_has_pending_fb_unpin(struct drm_device *dev);
798int intel_pch_rawclk(struct drm_device *dev);
799void intel_mark_busy(struct drm_device *dev);
800void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
801	struct intel_engine_cs *ring);
802void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +822,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
806void intel_frontbuffer_flush(struct drm_device *dev,
807	unsigned frontbuffer_bits);
808/**
809 * intel_frontbuffer_flip - prepare frontbuffer flip
825 * intel_frontbuffer_flip - synchronous frontbuffer flip
810 * @dev: DRM device
811 * @frontbuffer_bits: frontbuffer plane tracking bits
812 *
@@ -824,6 +840,13 @@ void intel_frontbuffer_flip(struct drm_device *dev,
824}
825
826void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
843
844
845/* intel_display.c */
846const char *intel_output_name(int output);
847bool intel_has_pending_fb_unpin(struct drm_device *dev);
848int intel_pch_rawclk(struct drm_device *dev);
849void intel_mark_busy(struct drm_device *dev);
827void intel_mark_idle(struct drm_device *dev);
828void intel_crtc_restore_mode(struct drm_crtc *crtc);
829void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +867,11 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
844	struct drm_file *file_priv);
845enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
846	enum pipe pipe);
847void intel_wait_for_vblank(struct drm_device *dev, int pipe);
870static inline void
871intel_wait_for_vblank(struct drm_device *dev, int pipe)
872{
873 drm_wait_one_vblank(dev, pipe);
874}
848int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
849void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
850	struct intel_digital_port *dport);
@@ -878,6 +905,8 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
878void intel_put_shared_dpll(struct intel_crtc *crtc);
879
880/* modesetting asserts */
908void assert_panel_unlocked(struct drm_i915_private *dev_priv,
909	enum pipe pipe);
881void assert_pll(struct drm_i915_private *dev_priv,
882	enum pipe pipe, bool state);
883#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -908,7 +937,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
908bool intel_crtc_active(struct drm_crtc *crtc);
909void hsw_enable_ips(struct intel_crtc *crtc);
910void hsw_disable_ips(struct intel_crtc *crtc);
911void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
912enum intel_display_power_domain
913intel_display_port_power_domain(struct intel_encoder *intel_encoder);
914void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -1055,6 +1083,28 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1055	struct drm_display_mode *fixed_mode,
1056	struct drm_connector *connector);
1057
1086/* intel_runtime_pm.c */
1087int intel_power_domains_init(struct drm_i915_private *);
1088void intel_power_domains_fini(struct drm_i915_private *);
1089void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1090void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
1091
1092bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1093 enum intel_display_power_domain domain);
1094bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1095 enum intel_display_power_domain domain);
1096void intel_display_power_get(struct drm_i915_private *dev_priv,
1097 enum intel_display_power_domain domain);
1098void intel_display_power_put(struct drm_i915_private *dev_priv,
1099 enum intel_display_power_domain domain);
1100void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1101void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1102void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1103void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1104void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1105
1106void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
1107
1058/* intel_pm.c */
1059void intel_init_clock_gating(struct drm_device *dev);
1060void intel_suspend_hw(struct drm_device *dev);
@@ -1072,17 +1122,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
1072void intel_update_fbc(struct drm_device *dev);
1073void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1074void intel_gpu_ips_teardown(void);
1075int intel_power_domains_init(struct drm_i915_private *);
1076void intel_power_domains_remove(struct drm_i915_private *);
1077bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
1078 enum intel_display_power_domain domain);
1079bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
1080 enum intel_display_power_domain domain);
1081void intel_display_power_get(struct drm_i915_private *dev_priv,
1082 enum intel_display_power_domain domain);
1083void intel_display_power_put(struct drm_i915_private *dev_priv,
1084 enum intel_display_power_domain domain);
1085void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1086void intel_init_gt_powersave(struct drm_device *dev);
1087void intel_cleanup_gt_powersave(struct drm_device *dev);
1088void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,13 +1132,6 @@ void ironlake_teardown_rc6(struct drm_device *dev);
1093void gen6_update_ring_freq(struct drm_device *dev);
1094void gen6_rps_idle(struct drm_i915_private *dev_priv);
1095void gen6_rps_boost(struct drm_i915_private *dev_priv);
1096void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1097void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1098void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1099void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1100void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1101void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
1102void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
1103void ilk_wm_get_hw_state(struct drm_device *dev);
1104
1105
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5bd9e09ad3c5..0b184079de14 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
344	DRM_DEBUG_KMS("\n");
345
346	power_domain = intel_display_port_power_domain(encoder);
347	if (!intel_display_power_enabled(dev_priv, power_domain))
347	if (!intel_display_power_is_enabled(dev_priv, power_domain))
348	return false;
349
350	/* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 000000000000..58cf2e6b78f4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,279 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 */
26
27/**
28 * DOC: frontbuffer tracking
29 *
30 * Many features require us to track changes to the currently active
31 * frontbuffer, especially rendering targeted at the frontbuffer.
32 *
33 * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
34 * frontbuffer slots through i915_gem_track_fb(). The functions in this file
35 * are then called when the contents of the frontbuffer are invalidated, when
36 * frontbuffer rendering has stopped again to flush out all the changes and when
37 * the frontbuffer is exchanged with a flip. Subsystems interested in
38 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
39 * into the relevant places and filter for the frontbuffer slots that they are
40 * interested in.
41 *
42 * On a high level there are two types of powersaving features. The first ones
43 * work like a special cache (FBC and PSR) and are interested in when they should
44 * stop caching and when to restart caching. This is done by placing callbacks
45 * into the invalidate and the flush functions: At invalidate the caching must
46 * be stopped and at flush time it can be restarted. And maybe they need to know
47 * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
48 * and flush on its own) which can be achieved with placing callbacks into the
49 * flip functions.
50 *
51 * The other type of display power saving feature only cares about busyness
52 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
53 * busyness. There is no direct way to detect idleness. Instead an idle timer
54 * delayed work should be started from the flush and flip functions and
55 * cancelled as soon as busyness is detected.
56 *
57 * Note that there's also an older frontbuffer activity tracking scheme which
58 * just tracks general activity. This is done by the various mark_busy and
59 * mark_idle functions. For display power management features using these
60 * functions is deprecated and should be avoided.
61 */
62
63#include <drm/drmP.h>
64
65#include "intel_drv.h"
66#include "i915_drv.h"
67
68static void intel_increase_pllclock(struct drm_device *dev,
69 enum pipe pipe)
70{
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 int dpll_reg = DPLL(pipe);
73 int dpll;
74
75 if (!HAS_GMCH_DISPLAY(dev))
76 return;
77
78 if (!dev_priv->lvds_downclock_avail)
79 return;
80
81 dpll = I915_READ(dpll_reg);
82 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
83 DRM_DEBUG_DRIVER("upclocking LVDS\n");
84
85 assert_panel_unlocked(dev_priv, pipe);
86
87 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
88 I915_WRITE(dpll_reg, dpll);
89 intel_wait_for_vblank(dev, pipe);
90
91 dpll = I915_READ(dpll_reg);
92 if (dpll & DISPLAY_RATE_SELECT_FPA1)
93 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
94 }
95}
96
97/**
98 * intel_mark_fb_busy - mark given planes as busy
99 * @dev: DRM device
100 * @frontbuffer_bits: bits for the affected planes
101 * @ring: optional ring for asynchronous commands
102 *
103 * This function gets called every time the screen contents change. It can be
104 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
105 */
106static void intel_mark_fb_busy(struct drm_device *dev,
107 unsigned frontbuffer_bits,
108 struct intel_engine_cs *ring)
109{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 enum pipe pipe;
112
113 if (!i915.powersave)
114 return;
115
116 for_each_pipe(dev_priv, pipe) {
117 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
118 continue;
119
120 intel_increase_pllclock(dev, pipe);
121 if (ring && intel_fbc_enabled(dev))
122 ring->fbc_dirty = true;
123 }
124}
125
126/**
127 * intel_fb_obj_invalidate - invalidate frontbuffer object
128 * @obj: GEM object to invalidate
129 * @ring: set for asynchronous rendering
130 *
131 * This function gets called every time rendering on the given object starts and
132 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
133 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
134 * until the rendering completes or a flip on this frontbuffer plane is
135 * scheduled.
136 */
137void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
138 struct intel_engine_cs *ring)
139{
140 struct drm_device *dev = obj->base.dev;
141 struct drm_i915_private *dev_priv = dev->dev_private;
142
143 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
144
145 if (!obj->frontbuffer_bits)
146 return;
147
148 if (ring) {
149 mutex_lock(&dev_priv->fb_tracking.lock);
150 dev_priv->fb_tracking.busy_bits
151 |= obj->frontbuffer_bits;
152 dev_priv->fb_tracking.flip_bits
153 &= ~obj->frontbuffer_bits;
154 mutex_unlock(&dev_priv->fb_tracking.lock);
155 }
156
157 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
158
159 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
160}
161
162/**
163 * intel_frontbuffer_flush - flush frontbuffer
164 * @dev: DRM device
165 * @frontbuffer_bits: frontbuffer plane tracking bits
166 *
167 * This function gets called every time rendering on the given planes has
168 * completed and frontbuffer caching can be started again. Flushes will get
169 * delayed if they're blocked by some outstanding asynchronous rendering.
170 *
171 * Can be called without any locks held.
172 */
173void intel_frontbuffer_flush(struct drm_device *dev,
174 unsigned frontbuffer_bits)
175{
176 struct drm_i915_private *dev_priv = dev->dev_private;
177
178 /* Delay flushing when rings are still busy.*/
179 mutex_lock(&dev_priv->fb_tracking.lock);
180 frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
181 mutex_unlock(&dev_priv->fb_tracking.lock);
182
183 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
184
185 intel_edp_psr_flush(dev, frontbuffer_bits);
186
187 /*
188 * FIXME: Unconditional fbc flushing here is a rather gross hack and
189 * needs to be reworked into a proper frontbuffer tracking scheme like
190 * psr employs.
191 */
192 if (dev_priv->fbc.need_sw_cache_clean) {
193 dev_priv->fbc.need_sw_cache_clean = false;
194 bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
195 }
196}
197
198/**
199 * intel_fb_obj_flush - flush frontbuffer object
200 * @obj: GEM object to flush
201 * @retire: set when retiring asynchronous rendering
202 *
203 * This function gets called every time rendering on the given object has
204 * completed and frontbuffer caching can be started again. If @retire is true
205 * then any delayed flushes will be unblocked.
206 */
207void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
208 bool retire)
209{
210 struct drm_device *dev = obj->base.dev;
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 unsigned frontbuffer_bits;
213
214 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
215
216 if (!obj->frontbuffer_bits)
217 return;
218
219 frontbuffer_bits = obj->frontbuffer_bits;
220
221 if (retire) {
222 mutex_lock(&dev_priv->fb_tracking.lock);
223 /* Filter out new bits since rendering started. */
224 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
225
226 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
227 mutex_unlock(&dev_priv->fb_tracking.lock);
228 }
229
230 intel_frontbuffer_flush(dev, frontbuffer_bits);
231}
232
233/**
234 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
235 * @dev: DRM device
236 * @frontbuffer_bits: frontbuffer plane tracking bits
237 *
238 * This function gets called after scheduling a flip on @obj. The actual
239 * frontbuffer flushing will be delayed until completion is signalled with
240 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
241 * flush will be cancelled.
242 *
243 * Can be called without any locks held.
244 */
245void intel_frontbuffer_flip_prepare(struct drm_device *dev,
246 unsigned frontbuffer_bits)
247{
248 struct drm_i915_private *dev_priv = dev->dev_private;
249
250 mutex_lock(&dev_priv->fb_tracking.lock);
251 dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
252 /* Remove stale busy bits due to the old buffer. */
253 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
254 mutex_unlock(&dev_priv->fb_tracking.lock);
255}
256
257/**
258 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
259 * @dev: DRM device
260 * @frontbuffer_bits: frontbuffer plane tracking bits
261 *
262 * This function gets called after the flip has been latched and will complete
263 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
264 *
265 * Can be called without any locks held.
266 */
267void intel_frontbuffer_flip_complete(struct drm_device *dev,
268 unsigned frontbuffer_bits)
269{
270 struct drm_i915_private *dev_priv = dev->dev_private;
271
272 mutex_lock(&dev_priv->fb_tracking.lock);
273 /* Mask any cancelled flips. */
274 frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
275 dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
276 mutex_unlock(&dev_priv->fb_tracking.lock);
277
278 intel_frontbuffer_flush(dev, frontbuffer_bits);
279}
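
To make the DOC comment at the top of this new file concrete: a cache-style consumer (in the mold of PSR or FBC) only needs to stop caching when its slots are invalidated and resume when they are flushed. A hypothetical, userspace-only sketch of that pattern; none of these names exist in the driver:

#include <stdbool.h>
#include <stdio.h>

/* Pretend cache-style consumer, per the "special cache" case above. */
struct fb_cache {
	unsigned watched_bits; /* frontbuffer slots this feature cares about */
	bool caching;
};

static void cache_invalidate(struct fb_cache *c, unsigned frontbuffer_bits)
{
	if (frontbuffer_bits & c->watched_bits)
		c->caching = false; /* frontbuffer is being rendered to */
}

static void cache_flush(struct fb_cache *c, unsigned frontbuffer_bits)
{
	if (frontbuffer_bits & c->watched_bits)
		c->caching = true; /* contents are stable again, resume */
}

int main(void)
{
	struct fb_cache c = { .watched_bits = 0x1, .caching = true };

	cache_invalidate(&c, 0x1);
	printf("after invalidate: caching=%d\n", c.caching);
	cache_flush(&c, 0x1);
	printf("after flush: caching=%d\n", c.caching);
	return 0;
}

The busyness-style consumer (DRRS) would instead treat invalidate, flush, and flip all as activity and rely on a delayed-work idle timer, as the DOC comment describes.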
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29ec1535992d..8b5f3aa027f3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -690,7 +690,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
690	u32 tmp;
691
692	power_domain = intel_display_port_power_domain(encoder);
693	if (!intel_display_power_enabled(dev_priv, power_domain))
693	if (!intel_display_power_is_enabled(dev_priv, power_domain))
694	return false;
695
696	tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1405,6 +1405,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1405
1406	mutex_lock(&dev_priv->dpio_lock);
1407
1408 /* allow hardware to manage TX FIFO reset source */
1409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1410 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1411 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1412
1413 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1414 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1415 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1416
1408	/* Deassert soft data lane reset*/
1409	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1410	val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1450,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1441	/* Clear calc init */
1442	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1443	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1453 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
1454 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1444	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
1445
1446	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
1447	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1459 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
1460 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1448	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
1449
1463 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
1464 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
1465 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
1466 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
1467
1468 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
1469 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
1470 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
1471 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
1472
1450	/* FIXME: Program the support xxx V-dB */
1451	/* Use 800mV-0dB */
1452	for (i = 0; i < 4; i++) {
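
The DPIO writes added in this hunk (and the matching ones in intel_dp.c earlier in this patch) all follow the same read-modify-write discipline: clear the field's full mask before OR-ing in the new value, so stale bits from a previous setting cannot survive. The pattern in isolation, with illustrative values rather than real DPIO fields:

#include <stdint.h>
#include <stdio.h>

/* Clear a whole register field, then program its new value. */
static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;
	reg |= val;
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* pretend bits 23:16 are a two-lane deemphasis field */
	reg = rmw_field(reg, 0x00ff0000, 0x00aa0000);
	printf("0x%08x\n", reg); /* prints 0xffaaffff */
	return 0;
}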
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bafd38b5703e..803fc38664c4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1063,7 +1063,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1063	struct drm_i915_private *dev_priv = dev->dev_private;
1064	unsigned long flags;
1065
1066	if (!dev->irq_enabled)
1066	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1067	return false;
1068
1069	spin_lock_irqsave(&dev_priv->irq_lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a6bd1422e38f..2b50c98dd6b0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
76	u32 tmp;
77
78	power_domain = intel_display_port_power_domain(encoder);
79	if (!intel_display_power_enabled(dev_priv, power_domain))
79	if (!intel_display_power_is_enabled(dev_priv, power_domain))
80	return false;
81
82	tmp = I915_READ(lvds_encoder->reg);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0e018cb49147..e3def5ad4a77 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -537,14 +537,13 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
537	struct drm_device *dev = connector->base.dev;
538	struct drm_i915_private *dev_priv = dev->dev_private;
539	u32 val;
540	unsigned long flags;
541
542	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
541	mutex_lock(&dev_priv->backlight_lock);
543
544	val = dev_priv->display.get_backlight(connector);
545	val = intel_panel_compute_brightness(connector, val);
546
547	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
546	mutex_unlock(&dev_priv->backlight_lock);
548
549	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
550	return val;
@@ -628,12 +627,11 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
628	struct intel_panel *panel = &connector->panel;
629	enum pipe pipe = intel_get_pipe_from_connector(connector);
630	u32 hw_level;
631	unsigned long flags;
632
633	if (!panel->backlight.present || pipe == INVALID_PIPE)
634	return;
635
636	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
634	mutex_lock(&dev_priv->backlight_lock);
637
638	WARN_ON(panel->backlight.max == 0);
639
@@ -643,7 +641,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
643	if (panel->backlight.enabled)
644	intel_panel_actually_set_backlight(connector, hw_level);
645
646	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
644	mutex_unlock(&dev_priv->backlight_lock);
647}
648
649/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +655,11 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
657	struct intel_panel *panel = &connector->panel;
658	enum pipe pipe = intel_get_pipe_from_connector(connector);
659	u32 hw_level;
660	unsigned long flags;
661
662	if (!panel->backlight.present || pipe == INVALID_PIPE)
663	return;
664
665	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
662	mutex_lock(&dev_priv->backlight_lock);
666
667	WARN_ON(panel->backlight.max == 0);
668
@@ -678,7 +675,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
678	if (panel->backlight.enabled)
679	intel_panel_actually_set_backlight(connector, hw_level);
680
681	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
678	mutex_unlock(&dev_priv->backlight_lock);
682}
683
684static void pch_disable_backlight(struct intel_connector *connector)
@@ -732,7 +729,6 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
732	struct drm_i915_private *dev_priv = dev->dev_private;
733	struct intel_panel *panel = &connector->panel;
734	enum pipe pipe = intel_get_pipe_from_connector(connector);
735	unsigned long flags;
736
737	if (!panel->backlight.present || pipe == INVALID_PIPE)
738	return;
@@ -748,14 +744,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
748	return;
749	}
750
751	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
747	mutex_lock(&dev_priv->backlight_lock);
752
753	if (panel->backlight.device)
754	panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
755	panel->backlight.enabled = false;
756	dev_priv->display.disable_backlight(connector);
757
758	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
754	mutex_unlock(&dev_priv->backlight_lock);
759}
760
761static void bdw_enable_backlight(struct intel_connector *connector)
@@ -936,14 +932,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
936 struct drm_i915_private *dev_priv = dev->dev_private; 932 struct drm_i915_private *dev_priv = dev->dev_private;
937 struct intel_panel *panel = &connector->panel; 933 struct intel_panel *panel = &connector->panel;
938 enum pipe pipe = intel_get_pipe_from_connector(connector); 934 enum pipe pipe = intel_get_pipe_from_connector(connector);
939 unsigned long flags;
940 935
941 if (!panel->backlight.present || pipe == INVALID_PIPE) 936 if (!panel->backlight.present || pipe == INVALID_PIPE)
942 return; 937 return;
943 938
944 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); 939 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
945 940
946 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 941 mutex_lock(&dev_priv->backlight_lock);
947 942
948 WARN_ON(panel->backlight.max == 0); 943 WARN_ON(panel->backlight.max == 0);
949 944
@@ -961,7 +956,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
961 if (panel->backlight.device) 956 if (panel->backlight.device)
962 panel->backlight.device->props.power = FB_BLANK_UNBLANK; 957 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
963 958
964 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 959 mutex_unlock(&dev_priv->backlight_lock);
965} 960}
966 961
967#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 962#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1266,7 +1261,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1266 struct drm_i915_private *dev_priv = dev->dev_private; 1261 struct drm_i915_private *dev_priv = dev->dev_private;
1267 struct intel_connector *intel_connector = to_intel_connector(connector); 1262 struct intel_connector *intel_connector = to_intel_connector(connector);
1268 struct intel_panel *panel = &intel_connector->panel; 1263 struct intel_panel *panel = &intel_connector->panel;
1269 unsigned long flags;
1270 int ret; 1264 int ret;
1271 1265
1272 if (!dev_priv->vbt.backlight.present) { 1266 if (!dev_priv->vbt.backlight.present) {
@@ -1279,9 +1273,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1279 } 1273 }
1280 1274
1281 /* set level and max in panel struct */ 1275 /* set level and max in panel struct */
1282 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 1276 mutex_lock(&dev_priv->backlight_lock);
1283 ret = dev_priv->display.setup_backlight(intel_connector); 1277 ret = dev_priv->display.setup_backlight(intel_connector);
1284 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 1278 mutex_unlock(&dev_priv->backlight_lock);
1285 1279
1286 if (ret) { 1280 if (ret) {
1287 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", 1281 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1316,7 +1310,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
1316{ 1310{
1317 struct drm_i915_private *dev_priv = dev->dev_private; 1311 struct drm_i915_private *dev_priv = dev->dev_private;
1318 1312
1319 if (IS_BROADWELL(dev)) { 1313 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
1320 dev_priv->display.setup_backlight = bdw_setup_backlight; 1314 dev_priv->display.setup_backlight = bdw_setup_backlight;
1321 dev_priv->display.enable_backlight = bdw_enable_backlight; 1315 dev_priv->display.enable_backlight = bdw_enable_backlight;
1322 dev_priv->display.disable_backlight = pch_disable_backlight; 1316 dev_priv->display.disable_backlight = pch_disable_backlight;
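The backlight hunks above swap the spinlock (taken with spin_lock_irqsave()) for a sleepable mutex and drop the now-unused flags locals; a mutex is the right primitive once the locked section may block, e.g. on a slow hardware transaction. A minimal sketch of the conversion, with hypothetical names (backlight_ctx, backlight_set) standing in for dev_priv->backlight_lock and the real callbacks:

#include <linux/mutex.h>
#include <linux/types.h>

struct backlight_ctx {
	struct mutex lock;	/* was: spinlock_t + unsigned long flags */
	u32 level;
};

static void backlight_set(struct backlight_ctx *ctx, u32 level)
{
	mutex_lock(&ctx->lock);		/* may sleep; no IRQ state to save */
	ctx->level = level;
	/* ... invoke a possibly-blocking hardware hook here ... */
	mutex_unlock(&ctx->lock);
}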
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c27b6140bfd1..a14be5d56c6b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vgaarb.h>
34#include <drm/i915_powerwell.h>
35#include <linux/pm_runtime.h>
36 33
37/** 34/**
38 * RC6 is a special power stage which allows the GPU to enter a very 35 * RC6 is a special power stage which allows the GPU to enter a very
@@ -66,11 +63,37 @@
66 * i915.i915_enable_fbc parameter 63 * i915.i915_enable_fbc parameter
67 */ 64 */
68 65
66static void gen9_init_clock_gating(struct drm_device *dev)
67{
68 struct drm_i915_private *dev_priv = dev->dev_private;
69
70 /*
71 * WaDisableSDEUnitClockGating:skl
72 * This seems to be a pre-production w/a.
73 */
74 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
75 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
76
77 /*
78 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
79 * This is a pre-production w/a.
80 */
81 I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
82 I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
83 ~GEN9_DG_MIRROR_FIX_ENABLE);
84
85 /* Wa4x4STCOptimizationDisable:skl */
86 I915_WRITE(CACHE_MODE_1,
87 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
88}
89
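The gen9 workarounds above mix two register styles: plain read-modify-write (GEN8_UCGCTL6, GEN9_HALF_SLICE_CHICKEN5) and "masked" registers (CACHE_MODE_1), where the upper 16 bits of the written value select which of the lower 16 bits take effect. A sketch of the conventional i915 encoding, shown here for illustration:

/* Masked-register encoding: high word = per-bit write enable. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

/*
 * So the Wa4x4STCOptimizationDisable write above sets exactly one bit
 * without a read-modify-write cycle:
 *
 *	I915_WRITE(CACHE_MODE_1,
 *		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
 */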
69static void i8xx_disable_fbc(struct drm_device *dev) 90static void i8xx_disable_fbc(struct drm_device *dev)
70{ 91{
71 struct drm_i915_private *dev_priv = dev->dev_private; 92 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 fbc_ctl; 93 u32 fbc_ctl;
73 94
95 dev_priv->fbc.enabled = false;
96
74 /* Disable compression */ 97 /* Disable compression */
75 fbc_ctl = I915_READ(FBC_CONTROL); 98 fbc_ctl = I915_READ(FBC_CONTROL);
76 if ((fbc_ctl & FBC_CTL_EN) == 0) 99 if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
99 int i; 122 int i;
100 u32 fbc_ctl; 123 u32 fbc_ctl;
101 124
125 dev_priv->fbc.enabled = true;
126
102 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 127 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
103 if (fb->pitches[0] < cfb_pitch) 128 if (fb->pitches[0] < cfb_pitch)
104 cfb_pitch = fb->pitches[0]; 129 cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
154 u32 dpfc_ctl; 179 u32 dpfc_ctl;
155 180
181 dev_priv->fbc.enabled = true;
182
156 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; 183 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
157 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 184 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
158 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 185 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
173 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
174 u32 dpfc_ctl; 201 u32 dpfc_ctl;
175 202
203 dev_priv->fbc.enabled = false;
204
176 /* Disable compression */ 205 /* Disable compression */
177 dpfc_ctl = I915_READ(DPFC_CONTROL); 206 dpfc_ctl = I915_READ(DPFC_CONTROL);
178 if (dpfc_ctl & DPFC_CTL_EN) { 207 if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 253 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
225 u32 dpfc_ctl; 254 u32 dpfc_ctl;
226 255
256 dev_priv->fbc.enabled = true;
257
227 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); 258 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
228 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 259 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
229 dev_priv->fbc.threshold++; 260 dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
264 struct drm_i915_private *dev_priv = dev->dev_private; 295 struct drm_i915_private *dev_priv = dev->dev_private;
265 u32 dpfc_ctl; 296 u32 dpfc_ctl;
266 297
298 dev_priv->fbc.enabled = false;
299
267 /* Disable compression */ 300 /* Disable compression */
268 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 301 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
269 if (dpfc_ctl & DPFC_CTL_EN) { 302 if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291 u32 dpfc_ctl; 324 u32 dpfc_ctl;
292 325
326 dev_priv->fbc.enabled = true;
327
293 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); 328 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
294 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 329 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
295 dev_priv->fbc.threshold++; 330 dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
339{ 374{
340 struct drm_i915_private *dev_priv = dev->dev_private; 375 struct drm_i915_private *dev_priv = dev->dev_private;
341 376
342 if (!dev_priv->display.fbc_enabled) 377 return dev_priv->fbc.enabled;
343 return false;
344
345 return dev_priv->display.fbc_enabled(dev);
346} 378}
347 379
348void gen8_fbc_sw_flush(struct drm_device *dev, u32 value) 380void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
349{ 381{
350 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
351 383
352 if (!IS_GEN8(dev)) 384 if (!IS_GEN8(dev))
353 return; 385 return;
354 386
387 if (!intel_fbc_enabled(dev))
388 return;
389
355 I915_WRITE(MSG_FBC_REND_STATE, value); 390 I915_WRITE(MSG_FBC_REND_STATE, value);
356} 391}
357 392
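The FBC hunks above stop querying hardware on every intel_fbc_enabled() call: each enable/disable path now records its action in dev_priv->fbc.enabled, the query simply returns the cached flag, and bdw_fbc_sw_flush() can bail out early when FBC is off. The cached-state pattern, reduced to a sketch (names illustrative):

struct fbc_state {
	bool enabled;			/* mirrors the last enable/disable */
};

static void fbc_enable(struct fbc_state *fbc)
{
	fbc->enabled = true;
	/* ... program the FBC control register ... */
}

static void fbc_disable(struct fbc_state *fbc)
{
	fbc->enabled = false;
	/* ... clear the enable bit, wait for idle ... */
}

static bool fbc_enabled(const struct fbc_state *fbc)
{
	return fbc->enabled;		/* no MMIO read on the query path */
}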
@@ -6041,1161 +6076,35 @@ void intel_suspend_hw(struct drm_device *dev)
6041 lpt_suspend_hw(dev); 6076 lpt_suspend_hw(dev);
6042} 6077}
6043 6078
6044#define for_each_power_well(i, power_well, domain_mask, power_domains) \ 6079static void intel_init_fbc(struct drm_i915_private *dev_priv)
6045 for (i = 0; \
6046 i < (power_domains)->power_well_count && \
6047 ((power_well) = &(power_domains)->power_wells[i]); \
6048 i++) \
6049 if ((power_well)->domains & (domain_mask))
6050
6051#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6052 for (i = (power_domains)->power_well_count - 1; \
6053 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6054 i--) \
6055 if ((power_well)->domains & (domain_mask))
6056
6057/**
6058 * We should only use the power well if we explicitly asked the hardware to
6059 * enable it, so check if it's enabled and also check if we've requested it to
6060 * be enabled.
6061 */
6062static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6063 struct i915_power_well *power_well)
6064{
6065 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6066 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
6067}
6068
6069bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
6070 enum intel_display_power_domain domain)
6071{
6072 struct i915_power_domains *power_domains;
6073 struct i915_power_well *power_well;
6074 bool is_enabled;
6075 int i;
6076
6077 if (dev_priv->pm.suspended)
6078 return false;
6079
6080 power_domains = &dev_priv->power_domains;
6081
6082 is_enabled = true;
6083
6084 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6085 if (power_well->always_on)
6086 continue;
6087
6088 if (!power_well->hw_enabled) {
6089 is_enabled = false;
6090 break;
6091 }
6092 }
6093
6094 return is_enabled;
6095}
6096
6097bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
6098 enum intel_display_power_domain domain)
6099{
6100 struct i915_power_domains *power_domains;
6101 bool ret;
6102
6103 power_domains = &dev_priv->power_domains;
6104
6105 mutex_lock(&power_domains->lock);
6106 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
6107 mutex_unlock(&power_domains->lock);
6108
6109 return ret;
6110}
6111
6112/*
6113 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6114 * when not needed anymore. We have 4 registers that can request the power well
6115 * to be enabled, and it will only be disabled if none of the registers is
6116 * requesting it to be enabled.
6117 */
6118static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6119{
6120 struct drm_device *dev = dev_priv->dev;
6121
6122 /*
6123 * After we re-enable the power well, if we touch VGA register 0x3d5
6124 * we'll get unclaimed register interrupts. This stops after we write
6125 * anything to the VGA MSR register. The vgacon module uses this
6126 * register all the time, so if we unbind our driver and, as a
6127 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6128 * console_unlock(). So here we touch the VGA MSR register, making
6129 * sure vgacon can keep working normally without triggering interrupts
6130 * and error messages.
6131 */
6132 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6133 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6134 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6135
6136 if (IS_BROADWELL(dev))
6137 gen8_irq_power_well_post_enable(dev_priv);
6138}
6139
6140static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6141 struct i915_power_well *power_well, bool enable)
6142{ 6080{
6143 bool is_enabled, enable_requested; 6081 if (!HAS_FBC(dev_priv)) {
6144 uint32_t tmp; 6082 dev_priv->fbc.enabled = false;
6145
6146 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6147 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6148 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6149
6150 if (enable) {
6151 if (!enable_requested)
6152 I915_WRITE(HSW_PWR_WELL_DRIVER,
6153 HSW_PWR_WELL_ENABLE_REQUEST);
6154
6155 if (!is_enabled) {
6156 DRM_DEBUG_KMS("Enabling power well\n");
6157 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6158 HSW_PWR_WELL_STATE_ENABLED), 20))
6159 DRM_ERROR("Timeout enabling power well\n");
6160 }
6161
6162 hsw_power_well_post_enable(dev_priv);
6163 } else {
6164 if (enable_requested) {
6165 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6166 POSTING_READ(HSW_PWR_WELL_DRIVER);
6167 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6168 }
6169 }
6170}
6171
6172static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6173 struct i915_power_well *power_well)
6174{
6175 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6176
6177 /*
6178 * We're taking over the BIOS, so clear any requests made by it since
6179 * the driver is in charge now.
6180 */
6181 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6182 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6183}
6184
6185static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6186 struct i915_power_well *power_well)
6187{
6188 hsw_set_power_well(dev_priv, power_well, true);
6189}
6190
6191static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6192 struct i915_power_well *power_well)
6193{
6194 hsw_set_power_well(dev_priv, power_well, false);
6195}
6196
6197static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6198 struct i915_power_well *power_well)
6199{
6200}
6201
6202static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6203 struct i915_power_well *power_well)
6204{
6205 return true;
6206}
6207
6208static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6209 struct i915_power_well *power_well, bool enable)
6210{
6211 enum punit_power_well power_well_id = power_well->data;
6212 u32 mask;
6213 u32 state;
6214 u32 ctrl;
6215
6216 mask = PUNIT_PWRGT_MASK(power_well_id);
6217 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6218 PUNIT_PWRGT_PWR_GATE(power_well_id);
6219
6220 mutex_lock(&dev_priv->rps.hw_lock);
6221
6222#define COND \
6223 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6224
6225 if (COND)
6226 goto out;
6227
6228 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6229 ctrl &= ~mask;
6230 ctrl |= state;
6231 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6232
6233 if (wait_for(COND, 100))
6234 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6235 state,
6236 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6237
6238#undef COND
6239
6240out:
6241 mutex_unlock(&dev_priv->rps.hw_lock);
6242}
6243
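vlv_set_power_well() above follows a request-then-poll protocol: write the desired state into the Punit control register, then poll the status register until it matches or a 100 ms timeout fires. A generic sketch of that loop (helper name and msleep() granularity are illustrative, not the driver's wait_for() macro; assumes <linux/delay.h> and <linux/errno.h>):

static int poll_until(bool (*done)(void *ctx), void *ctx,
		      unsigned int timeout_ms)
{
	while (!done(ctx)) {
		if (timeout_ms-- == 0)
			return -ETIMEDOUT;	/* caller logs DRM_ERROR */
		msleep(1);
	}
	return 0;
}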
6244static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6245 struct i915_power_well *power_well)
6246{
6247 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6248}
6249
6250static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6251 struct i915_power_well *power_well)
6252{
6253 vlv_set_power_well(dev_priv, power_well, true);
6254}
6255
6256static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6257 struct i915_power_well *power_well)
6258{
6259 vlv_set_power_well(dev_priv, power_well, false);
6260}
6261
6262static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6263 struct i915_power_well *power_well)
6264{
6265 int power_well_id = power_well->data;
6266 bool enabled = false;
6267 u32 mask;
6268 u32 state;
6269 u32 ctrl;
6270
6271 mask = PUNIT_PWRGT_MASK(power_well_id);
6272 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6273
6274 mutex_lock(&dev_priv->rps.hw_lock);
6275
6276 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6277 /*
6278 * We only ever set the power-on and power-gate states, anything
6279 * else is unexpected.
6280 */
6281 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6282 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6283 if (state == ctrl)
6284 enabled = true;
6285
6286 /*
6287 * A transient state at this point would mean some unexpected party
6288 * is poking at the power controls too.
6289 */
6290 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6291 WARN_ON(ctrl != state);
6292
6293 mutex_unlock(&dev_priv->rps.hw_lock);
6294
6295 return enabled;
6296}
6297
6298static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6299 struct i915_power_well *power_well)
6300{
6301 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6302
6303 vlv_set_power_well(dev_priv, power_well, true);
6304
6305 spin_lock_irq(&dev_priv->irq_lock);
6306 valleyview_enable_display_irqs(dev_priv);
6307 spin_unlock_irq(&dev_priv->irq_lock);
6308
6309 /*
6310 * During driver initialization/resume we can avoid restoring the
6311 * part of the HW/SW state that will be inited anyway explicitly.
6312 */
6313 if (dev_priv->power_domains.initializing)
6314 return; 6083 return;
6315
6316 intel_hpd_init(dev_priv->dev);
6317
6318 i915_redisable_vga_power_on(dev_priv->dev);
6319}
6320
6321static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6322 struct i915_power_well *power_well)
6323{
6324 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6325
6326 spin_lock_irq(&dev_priv->irq_lock);
6327 valleyview_disable_display_irqs(dev_priv);
6328 spin_unlock_irq(&dev_priv->irq_lock);
6329
6330 vlv_set_power_well(dev_priv, power_well, false);
6331
6332 vlv_power_sequencer_reset(dev_priv);
6333}
6334
6335static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6336 struct i915_power_well *power_well)
6337{
6338 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6339
6340 /*
6341 * Enable the CRI clock source so we can get at the
6342 * display and the reference clock for VGA
6343 * hotplug / manual detection.
6344 */
6345 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6346 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6347 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6348
6349 vlv_set_power_well(dev_priv, power_well, true);
6350
6351 /*
6352 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6353 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6354 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6355 * b. The other bits such as sfr settings / modesel may all
6356 * be set to 0.
6357 *
6358 * This should only be done on init and resume from S3 with
6359 * both PLLs disabled, or we risk losing DPIO and PLL
6360 * synchronization.
6361 */
6362 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6363}
6364
6365static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6366 struct i915_power_well *power_well)
6367{
6368 enum pipe pipe;
6369
6370 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6371
6372 for_each_pipe(dev_priv, pipe)
6373 assert_pll_disabled(dev_priv, pipe);
6374
6375 /* Assert common reset */
6376 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6377
6378 vlv_set_power_well(dev_priv, power_well, false);
6379}
6380
6381static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6382 struct i915_power_well *power_well)
6383{
6384 enum dpio_phy phy;
6385
6386 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6387 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6388
6389 /*
6390 * Enable the CRI clock source so we can get at the
6391 * display and the reference clock for VGA
6392 * hotplug / manual detection.
6393 */
6394 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6395 phy = DPIO_PHY0;
6396 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6397 DPLL_REFA_CLK_ENABLE_VLV);
6398 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6399 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6400 } else {
6401 phy = DPIO_PHY1;
6402 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6403 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6404 } 6084 }
6405 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6406 vlv_set_power_well(dev_priv, power_well, true);
6407
6408 /* Poll for phypwrgood signal */
6409 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
6410 DRM_ERROR("Display PHY %d is not power up\n", phy);
6411
6412 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
6413 PHY_COM_LANE_RESET_DEASSERT(phy));
6414}
6415
6416static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6417 struct i915_power_well *power_well)
6418{
6419 enum dpio_phy phy;
6420
6421 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6422 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6423 6085
6424 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { 6086 if (INTEL_INFO(dev_priv)->gen >= 7) {
6425 phy = DPIO_PHY0; 6087 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6426 assert_pll_disabled(dev_priv, PIPE_A); 6088 dev_priv->display.enable_fbc = gen7_enable_fbc;
6427 assert_pll_disabled(dev_priv, PIPE_B); 6089 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6090 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
6091 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6092 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6093 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6094 } else if (IS_GM45(dev_priv)) {
6095 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
6096 dev_priv->display.enable_fbc = g4x_enable_fbc;
6097 dev_priv->display.disable_fbc = g4x_disable_fbc;
6428 } else { 6098 } else {
6429 phy = DPIO_PHY1; 6099 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6430 assert_pll_disabled(dev_priv, PIPE_C); 6100 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6431 } 6101 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6432
6433 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
6434 ~PHY_COM_LANE_RESET_DEASSERT(phy));
6435
6436 vlv_set_power_well(dev_priv, power_well, false);
6437}
6438
6439static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6440 struct i915_power_well *power_well)
6441{
6442 enum pipe pipe = power_well->data;
6443 bool enabled;
6444 u32 state, ctrl;
6445
6446 mutex_lock(&dev_priv->rps.hw_lock);
6447
6448 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6449 /*
6450 * We only ever set the power-on and power-gate states, anything
6451 * else is unexpected.
6452 */
6453 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6454 enabled = state == DP_SSS_PWR_ON(pipe);
6455
6456 /*
6457 * A transient state at this point would mean some unexpected party
6458 * is poking at the power controls too.
6459 */
6460 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6461 WARN_ON(ctrl << 16 != state);
6462
6463 mutex_unlock(&dev_priv->rps.hw_lock);
6464
6465 return enabled;
6466}
6467
6468static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6469 struct i915_power_well *power_well,
6470 bool enable)
6471{
6472 enum pipe pipe = power_well->data;
6473 u32 state;
6474 u32 ctrl;
6475
6476 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6477
6478 mutex_lock(&dev_priv->rps.hw_lock);
6479 6102
6480#define COND \ 6103 /* This value was pulled out of someone's hat */
6481 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) 6104 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6482
6483 if (COND)
6484 goto out;
6485
6486 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6487 ctrl &= ~DP_SSC_MASK(pipe);
6488 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6489 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6490
6491 if (wait_for(COND, 100))
6492 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6493 state,
6494 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6495
6496#undef COND
6497
6498out:
6499 mutex_unlock(&dev_priv->rps.hw_lock);
6500}
6501
6502static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6503 struct i915_power_well *power_well)
6504{
6505 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6506}
6507
6508static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6509 struct i915_power_well *power_well)
6510{
6511 WARN_ON_ONCE(power_well->data != PIPE_A &&
6512 power_well->data != PIPE_B &&
6513 power_well->data != PIPE_C);
6514
6515 chv_set_pipe_power_well(dev_priv, power_well, true);
6516}
6517
6518static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6519 struct i915_power_well *power_well)
6520{
6521 WARN_ON_ONCE(power_well->data != PIPE_A &&
6522 power_well->data != PIPE_B &&
6523 power_well->data != PIPE_C);
6524
6525 chv_set_pipe_power_well(dev_priv, power_well, false);
6526}
6527
6528static void check_power_well_state(struct drm_i915_private *dev_priv,
6529 struct i915_power_well *power_well)
6530{
6531 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6532
6533 if (power_well->always_on || !i915.disable_power_well) {
6534 if (!enabled)
6535 goto mismatch;
6536
6537 return;
6538 }
6539
6540 if (enabled != (power_well->count > 0))
6541 goto mismatch;
6542
6543 return;
6544
6545mismatch:
6546 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
6547 power_well->name, power_well->always_on, enabled,
6548 power_well->count, i915.disable_power_well);
6549}
6550
6551void intel_display_power_get(struct drm_i915_private *dev_priv,
6552 enum intel_display_power_domain domain)
6553{
6554 struct i915_power_domains *power_domains;
6555 struct i915_power_well *power_well;
6556 int i;
6557
6558 intel_runtime_pm_get(dev_priv);
6559
6560 power_domains = &dev_priv->power_domains;
6561
6562 mutex_lock(&power_domains->lock);
6563
6564 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6565 if (!power_well->count++) {
6566 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6567 power_well->ops->enable(dev_priv, power_well);
6568 power_well->hw_enabled = true;
6569 }
6570
6571 check_power_well_state(dev_priv, power_well);
6572 }
6573
6574 power_domains->domain_use_count[domain]++;
6575
6576 mutex_unlock(&power_domains->lock);
6577}
6578
6579void intel_display_power_put(struct drm_i915_private *dev_priv,
6580 enum intel_display_power_domain domain)
6581{
6582 struct i915_power_domains *power_domains;
6583 struct i915_power_well *power_well;
6584 int i;
6585
6586 power_domains = &dev_priv->power_domains;
6587
6588 mutex_lock(&power_domains->lock);
6589
6590 WARN_ON(!power_domains->domain_use_count[domain]);
6591 power_domains->domain_use_count[domain]--;
6592
6593 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6594 WARN_ON(!power_well->count);
6595
6596 if (!--power_well->count && i915.disable_power_well) {
6597 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6598 power_well->hw_enabled = false;
6599 power_well->ops->disable(dev_priv, power_well);
6600 }
6601
6602 check_power_well_state(dev_priv, power_well);
6603 }
6604
6605 mutex_unlock(&power_domains->lock);
6606
6607 intel_runtime_pm_put(dev_priv);
6608}
6609
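intel_display_power_get()/put() above implement classic reference counting: the first get on a well powers it up, the last put (when i915.disable_power_well allows it) powers it down, and unbalanced puts trip the WARN_ONs. The core pairing, as a sketch:

struct well { int count; bool hw_enabled; };

static void well_get(struct well *w)
{
	if (w->count++ == 0) {		/* first user: power up */
		w->hw_enabled = true;
		/* ops->enable(...) */
	}
}

static void well_put(struct well *w)
{
	WARN_ON(!w->count);
	if (--w->count == 0) {		/* last user: may power down */
		w->hw_enabled = false;
		/* ops->disable(...) */
	}
}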
6610static struct i915_power_domains *hsw_pwr;
6611
6612/* Display audio driver power well request */
6613int i915_request_power_well(void)
6614{
6615 struct drm_i915_private *dev_priv;
6616
6617 if (!hsw_pwr)
6618 return -ENODEV;
6619
6620 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6621 power_domains);
6622 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6623 return 0;
6624}
6625EXPORT_SYMBOL_GPL(i915_request_power_well);
6626
6627/* Display audio driver power well release */
6628int i915_release_power_well(void)
6629{
6630 struct drm_i915_private *dev_priv;
6631
6632 if (!hsw_pwr)
6633 return -ENODEV;
6634
6635 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6636 power_domains);
6637 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6638 return 0;
6639}
6640EXPORT_SYMBOL_GPL(i915_release_power_well);
6641
6642/*
6643 * Private interface for the audio driver to get CDCLK in kHz.
6644 *
6645 * Caller must request power well using i915_request_power_well() prior to
6646 * making the call.
6647 */
6648int i915_get_cdclk_freq(void)
6649{
6650 struct drm_i915_private *dev_priv;
6651
6652 if (!hsw_pwr)
6653 return -ENODEV;
6654
6655 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6656 power_domains);
6657
6658 return intel_ddi_get_cdclk_freq(dev_priv);
6659}
6660EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6661
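The three exported hooks above form a small contract with the display audio driver: request the well, query CDCLK only while it is held, then release it. The expected calling sequence on the audio side, sketched:

if (i915_request_power_well() == 0) {		/* grabs POWER_DOMAIN_AUDIO */
	int cdclk_khz = i915_get_cdclk_freq();	/* valid only while held */

	/* ... program the display audio hardware using cdclk_khz ... */

	i915_release_power_well();
}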
6662
6663#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6664
6665#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6666 BIT(POWER_DOMAIN_PIPE_A) | \
6667 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6668 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6669 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6670 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6671 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6672 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6673 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6674 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6675 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6676 BIT(POWER_DOMAIN_PORT_CRT) | \
6677 BIT(POWER_DOMAIN_PLLS) | \
6678 BIT(POWER_DOMAIN_INIT))
6679#define HSW_DISPLAY_POWER_DOMAINS ( \
6680 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6681 BIT(POWER_DOMAIN_INIT))
6682
6683#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6684 HSW_ALWAYS_ON_POWER_DOMAINS | \
6685 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6686#define BDW_DISPLAY_POWER_DOMAINS ( \
6687 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6688 BIT(POWER_DOMAIN_INIT))
6689
6690#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6691#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6692
6693#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6694 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6695 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6696 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6697 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6698 BIT(POWER_DOMAIN_PORT_CRT) | \
6699 BIT(POWER_DOMAIN_INIT))
6700
6701#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6702 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6703 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6704 BIT(POWER_DOMAIN_INIT))
6705
6706#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6707 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6708 BIT(POWER_DOMAIN_INIT))
6709
6710#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6711 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6712 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6713 BIT(POWER_DOMAIN_INIT))
6714
6715#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6716 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6717 BIT(POWER_DOMAIN_INIT))
6718
6719#define CHV_PIPE_A_POWER_DOMAINS ( \
6720 BIT(POWER_DOMAIN_PIPE_A) | \
6721 BIT(POWER_DOMAIN_INIT))
6722
6723#define CHV_PIPE_B_POWER_DOMAINS ( \
6724 BIT(POWER_DOMAIN_PIPE_B) | \
6725 BIT(POWER_DOMAIN_INIT))
6726
6727#define CHV_PIPE_C_POWER_DOMAINS ( \
6728 BIT(POWER_DOMAIN_PIPE_C) | \
6729 BIT(POWER_DOMAIN_INIT))
6730
6731#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6732 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6733 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6734 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6735 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6736 BIT(POWER_DOMAIN_INIT))
6737
6738#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6739 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6740 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6741 BIT(POWER_DOMAIN_INIT))
6742
6743#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6744 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6745 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6746 BIT(POWER_DOMAIN_INIT))
6747
6748#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6749 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6750 BIT(POWER_DOMAIN_INIT))
6751
6752static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6753 .sync_hw = i9xx_always_on_power_well_noop,
6754 .enable = i9xx_always_on_power_well_noop,
6755 .disable = i9xx_always_on_power_well_noop,
6756 .is_enabled = i9xx_always_on_power_well_enabled,
6757};
6758
6759static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6760 .sync_hw = chv_pipe_power_well_sync_hw,
6761 .enable = chv_pipe_power_well_enable,
6762 .disable = chv_pipe_power_well_disable,
6763 .is_enabled = chv_pipe_power_well_enabled,
6764};
6765
6766static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6767 .sync_hw = vlv_power_well_sync_hw,
6768 .enable = chv_dpio_cmn_power_well_enable,
6769 .disable = chv_dpio_cmn_power_well_disable,
6770 .is_enabled = vlv_power_well_enabled,
6771};
6772
6773static struct i915_power_well i9xx_always_on_power_well[] = {
6774 {
6775 .name = "always-on",
6776 .always_on = 1,
6777 .domains = POWER_DOMAIN_MASK,
6778 .ops = &i9xx_always_on_power_well_ops,
6779 },
6780};
6781
6782static const struct i915_power_well_ops hsw_power_well_ops = {
6783 .sync_hw = hsw_power_well_sync_hw,
6784 .enable = hsw_power_well_enable,
6785 .disable = hsw_power_well_disable,
6786 .is_enabled = hsw_power_well_enabled,
6787};
6788
6789static struct i915_power_well hsw_power_wells[] = {
6790 {
6791 .name = "always-on",
6792 .always_on = 1,
6793 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6794 .ops = &i9xx_always_on_power_well_ops,
6795 },
6796 {
6797 .name = "display",
6798 .domains = HSW_DISPLAY_POWER_DOMAINS,
6799 .ops = &hsw_power_well_ops,
6800 },
6801};
6802
6803static struct i915_power_well bdw_power_wells[] = {
6804 {
6805 .name = "always-on",
6806 .always_on = 1,
6807 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6808 .ops = &i9xx_always_on_power_well_ops,
6809 },
6810 {
6811 .name = "display",
6812 .domains = BDW_DISPLAY_POWER_DOMAINS,
6813 .ops = &hsw_power_well_ops,
6814 },
6815};
6816
6817static const struct i915_power_well_ops vlv_display_power_well_ops = {
6818 .sync_hw = vlv_power_well_sync_hw,
6819 .enable = vlv_display_power_well_enable,
6820 .disable = vlv_display_power_well_disable,
6821 .is_enabled = vlv_power_well_enabled,
6822};
6823
6824static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6825 .sync_hw = vlv_power_well_sync_hw,
6826 .enable = vlv_dpio_cmn_power_well_enable,
6827 .disable = vlv_dpio_cmn_power_well_disable,
6828 .is_enabled = vlv_power_well_enabled,
6829};
6830
6831static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6832 .sync_hw = vlv_power_well_sync_hw,
6833 .enable = vlv_power_well_enable,
6834 .disable = vlv_power_well_disable,
6835 .is_enabled = vlv_power_well_enabled,
6836};
6837
6838static struct i915_power_well vlv_power_wells[] = {
6839 {
6840 .name = "always-on",
6841 .always_on = 1,
6842 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6843 .ops = &i9xx_always_on_power_well_ops,
6844 },
6845 {
6846 .name = "display",
6847 .domains = VLV_DISPLAY_POWER_DOMAINS,
6848 .data = PUNIT_POWER_WELL_DISP2D,
6849 .ops = &vlv_display_power_well_ops,
6850 },
6851 {
6852 .name = "dpio-tx-b-01",
6853 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6854 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6855 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6856 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6857 .ops = &vlv_dpio_power_well_ops,
6858 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6859 },
6860 {
6861 .name = "dpio-tx-b-23",
6862 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6863 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6864 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6865 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6866 .ops = &vlv_dpio_power_well_ops,
6867 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6868 },
6869 {
6870 .name = "dpio-tx-c-01",
6871 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6872 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6873 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6874 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6875 .ops = &vlv_dpio_power_well_ops,
6876 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6877 },
6878 {
6879 .name = "dpio-tx-c-23",
6880 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6881 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6882 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6883 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6884 .ops = &vlv_dpio_power_well_ops,
6885 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6886 },
6887 {
6888 .name = "dpio-common",
6889 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6890 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6891 .ops = &vlv_dpio_cmn_power_well_ops,
6892 },
6893};
6894
6895static struct i915_power_well chv_power_wells[] = {
6896 {
6897 .name = "always-on",
6898 .always_on = 1,
6899 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6900 .ops = &i9xx_always_on_power_well_ops,
6901 },
6902#if 0
6903 {
6904 .name = "display",
6905 .domains = VLV_DISPLAY_POWER_DOMAINS,
6906 .data = PUNIT_POWER_WELL_DISP2D,
6907 .ops = &vlv_display_power_well_ops,
6908 },
6909 {
6910 .name = "pipe-a",
6911 .domains = CHV_PIPE_A_POWER_DOMAINS,
6912 .data = PIPE_A,
6913 .ops = &chv_pipe_power_well_ops,
6914 },
6915 {
6916 .name = "pipe-b",
6917 .domains = CHV_PIPE_B_POWER_DOMAINS,
6918 .data = PIPE_B,
6919 .ops = &chv_pipe_power_well_ops,
6920 },
6921 {
6922 .name = "pipe-c",
6923 .domains = CHV_PIPE_C_POWER_DOMAINS,
6924 .data = PIPE_C,
6925 .ops = &chv_pipe_power_well_ops,
6926 },
6927#endif
6928 {
6929 .name = "dpio-common-bc",
6930 /*
6931 * XXX: cmnreset for one PHY seems to disturb the other.
6932 * As a workaround keep both powered on at the same
6933 * time for now.
6934 */
6935 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6936 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6937 .ops = &chv_dpio_cmn_power_well_ops,
6938 },
6939 {
6940 .name = "dpio-common-d",
6941 /*
6942 * XXX: cmnreset for one PHY seems to disturb the other.
6943 * As a workaround keep both powered on at the same
6944 * time for now.
6945 */
6946 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6947 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
6948 .ops = &chv_dpio_cmn_power_well_ops,
6949 },
6950#if 0
6951 {
6952 .name = "dpio-tx-b-01",
6953 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6954 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6955 .ops = &vlv_dpio_power_well_ops,
6956 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6957 },
6958 {
6959 .name = "dpio-tx-b-23",
6960 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6961 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6962 .ops = &vlv_dpio_power_well_ops,
6963 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6964 },
6965 {
6966 .name = "dpio-tx-c-01",
6967 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6968 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6969 .ops = &vlv_dpio_power_well_ops,
6970 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6971 },
6972 {
6973 .name = "dpio-tx-c-23",
6974 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6975 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6976 .ops = &vlv_dpio_power_well_ops,
6977 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6978 },
6979 {
6980 .name = "dpio-tx-d-01",
6981 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6982 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6983 .ops = &vlv_dpio_power_well_ops,
6984 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
6985 },
6986 {
6987 .name = "dpio-tx-d-23",
6988 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6989 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6990 .ops = &vlv_dpio_power_well_ops,
6991 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
6992 },
6993#endif
6994};
6995
6996static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6997 enum punit_power_well power_well_id)
6998{
6999 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7000 struct i915_power_well *power_well;
7001 int i;
7002
7003 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
7004 if (power_well->data == power_well_id)
7005 return power_well;
7006 } 6105 }
7007 6106
7008 return NULL; 6107 dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
7009}
7010
7011#define set_power_wells(power_domains, __power_wells) ({ \
7012 (power_domains)->power_wells = (__power_wells); \
7013 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
7014})
7015
7016int intel_power_domains_init(struct drm_i915_private *dev_priv)
7017{
7018 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7019
7020 mutex_init(&power_domains->lock);
7021
7022 /*
7023 * The enabling order will be from lower to higher indexed wells,
7024 * the disabling order is reversed.
7025 */
7026 if (IS_HASWELL(dev_priv->dev)) {
7027 set_power_wells(power_domains, hsw_power_wells);
7028 hsw_pwr = power_domains;
7029 } else if (IS_BROADWELL(dev_priv->dev)) {
7030 set_power_wells(power_domains, bdw_power_wells);
7031 hsw_pwr = power_domains;
7032 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
7033 set_power_wells(power_domains, chv_power_wells);
7034 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
7035 set_power_wells(power_domains, vlv_power_wells);
7036 } else {
7037 set_power_wells(power_domains, i9xx_always_on_power_well);
7038 }
7039
7040 return 0;
7041}
7042
7043void intel_power_domains_remove(struct drm_i915_private *dev_priv)
7044{
7045 hsw_pwr = NULL;
7046}
7047
7048static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
7049{
7050 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7051 struct i915_power_well *power_well;
7052 int i;
7053
7054 mutex_lock(&power_domains->lock);
7055 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
7056 power_well->ops->sync_hw(dev_priv, power_well);
7057 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
7058 power_well);
7059 }
7060 mutex_unlock(&power_domains->lock);
7061}
7062
7063static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
7064{
7065 struct i915_power_well *cmn =
7066 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
7067 struct i915_power_well *disp2d =
7068 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
7069
7070 /* nothing to do if common lane is already off */
7071 if (!cmn->ops->is_enabled(dev_priv, cmn))
7072 return;
7073
7074 /* If the display might be already active skip this */
7075 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
7076 I915_READ(DPIO_CTL) & DPIO_CMNRST)
7077 return;
7078
7079 DRM_DEBUG_KMS("toggling display PHY side reset\n");
7080
7081 /* cmnlane needs DPLL registers */
7082 disp2d->ops->enable(dev_priv, disp2d);
7083
7084 /*
7085 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
7086 * Need to assert and de-assert PHY SB reset by gating the
7087 * common lane power, then un-gating it.
7088 * Simply ungating isn't enough to reset the PHY sufficiently to get
7089 * ports and lanes running.
7090 */
7091 cmn->ops->disable(dev_priv, cmn);
7092}
7093
7094void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
7095{
7096 struct drm_device *dev = dev_priv->dev;
7097 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7098
7099 power_domains->initializing = true;
7100
7101 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
7102 mutex_lock(&power_domains->lock);
7103 vlv_cmnlane_wa(dev_priv);
7104 mutex_unlock(&power_domains->lock);
7105 }
7106
7107 /* For now, we need the power well to be always enabled. */
7108 intel_display_set_init_power(dev_priv, true);
7109 intel_power_domains_resume(dev_priv);
7110 power_domains->initializing = false;
7111}
7112
7113void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
7114{
7115 intel_runtime_pm_get(dev_priv);
7116}
7117
7118void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
7119{
7120 intel_runtime_pm_put(dev_priv);
7121}
7122
7123void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
7124{
7125 struct drm_device *dev = dev_priv->dev;
7126 struct device *device = &dev->pdev->dev;
7127
7128 if (!HAS_RUNTIME_PM(dev))
7129 return;
7130
7131 pm_runtime_get_sync(device);
7132 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
7133}
7134
7135void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
7136{
7137 struct drm_device *dev = dev_priv->dev;
7138 struct device *device = &dev->pdev->dev;
7139
7140 if (!HAS_RUNTIME_PM(dev))
7141 return;
7142
7143 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
7144 pm_runtime_get_noresume(device);
7145}
7146
7147void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
7148{
7149 struct drm_device *dev = dev_priv->dev;
7150 struct device *device = &dev->pdev->dev;
7151
7152 if (!HAS_RUNTIME_PM(dev))
7153 return;
7154
7155 pm_runtime_mark_last_busy(device);
7156 pm_runtime_put_autosuspend(device);
7157}
7158
7159void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
7160{
7161 struct drm_device *dev = dev_priv->dev;
7162 struct device *device = &dev->pdev->dev;
7163
7164 if (!HAS_RUNTIME_PM(dev))
7165 return;
7166
7167 pm_runtime_set_active(device);
7168
7169 /*
7170 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
7171 * requirement.
7172 */
7173 if (!intel_enable_rc6(dev)) {
7174 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7175 return;
7176 }
7177
7178 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
7179 pm_runtime_mark_last_busy(device);
7180 pm_runtime_use_autosuspend(device);
7181
7182 pm_runtime_put_autosuspend(device);
7183}
7184
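intel_init_runtime_pm() above arms a 10 s autosuspend delay and drops the initial reference, so from then on the device suspends whenever the refcount falls idle. All hardware access therefore has to be bracketed, as sketched here:

intel_runtime_pm_get(dev_priv);		/* resumes the device if needed */
/* ... touch registers safely ... */
intel_runtime_pm_put(dev_priv);		/* re-arms the autosuspend timer */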
7185void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
7186{
7187 struct drm_device *dev = dev_priv->dev;
7188 struct device *device = &dev->pdev->dev;
7189
7190 if (!HAS_RUNTIME_PM(dev))
7191 return;
7192
7193 if (!intel_enable_rc6(dev))
7194 return;
7195
7196 /* Make sure we're not suspended first. */
7197 pm_runtime_get_sync(device);
7198 pm_runtime_disable(device);
7199} 6108}
7200 6109
7201/* Set up chip specific power management-related functions */ 6110/* Set up chip specific power management-related functions */
@@ -7203,28 +6112,7 @@ void intel_init_pm(struct drm_device *dev)
7203{ 6112{
7204 struct drm_i915_private *dev_priv = dev->dev_private; 6113 struct drm_i915_private *dev_priv = dev->dev_private;
7205 6114
7206 if (HAS_FBC(dev)) { 6115 intel_init_fbc(dev_priv);
7207 if (INTEL_INFO(dev)->gen >= 7) {
7208 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7209 dev_priv->display.enable_fbc = gen7_enable_fbc;
7210 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7211 } else if (INTEL_INFO(dev)->gen >= 5) {
7212 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7213 dev_priv->display.enable_fbc = ironlake_enable_fbc;
7214 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7215 } else if (IS_GM45(dev)) {
7216 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
7217 dev_priv->display.enable_fbc = g4x_enable_fbc;
7218 dev_priv->display.disable_fbc = g4x_disable_fbc;
7219 } else {
7220 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
7221 dev_priv->display.enable_fbc = i8xx_enable_fbc;
7222 dev_priv->display.disable_fbc = i8xx_disable_fbc;
7223
7224 /* This value was pulled out of someone's hat */
7225 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
7226 }
7227 }
7228 6116
7229 /* For cxsr */ 6117 /* For cxsr */
7230 if (IS_PINEVIEW(dev)) 6118 if (IS_PINEVIEW(dev))
@@ -7233,7 +6121,9 @@ void intel_init_pm(struct drm_device *dev)
7233 i915_ironlake_get_mem_freq(dev); 6121 i915_ironlake_get_mem_freq(dev);
7234 6122
7235 /* For FIFO watermark updates */ 6123 /* For FIFO watermark updates */
7236 if (HAS_PCH_SPLIT(dev)) { 6124 if (IS_GEN9(dev)) {
6125 dev_priv->display.init_clock_gating = gen9_init_clock_gating;
6126 } else if (HAS_PCH_SPLIT(dev)) {
7237 ilk_setup_wm_latency(dev); 6127 ilk_setup_wm_latency(dev);
7238 6128
7239 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 6129 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7490,5 +6380,4 @@ void intel_pm_setup(struct drm_device *dev)
7490 intel_gen6_powersave_work); 6380 intel_gen6_powersave_work);
7491 6381
7492 dev_priv->pm.suspended = false; 6382 dev_priv->pm.suspended = false;
7493 dev_priv->pm._irqs_disabled = false;
7494} 6383}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e419b589..816a6926df28 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -729,8 +729,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
729 * workaround for a possible hang in the unlikely event a TLB 729 * workaround for a possible hang in the unlikely event a TLB
730 * invalidation occurs during a PSD flush. 730 * invalidation occurs during a PSD flush.
731 */ 731 */
732 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
732 intel_ring_emit_wa(ring, HDC_CHICKEN0, 733 intel_ring_emit_wa(ring, HDC_CHICKEN0,
733 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); 734 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT |
735 (IS_BDW_GT3(dev) ?
736 HDC_FENCE_DEST_SLM_DISABLE : 0)
737 ));
734 738
735 /* Wa4x4STCOptimizationDisable:bdw */ 739 /* Wa4x4STCOptimizationDisable:bdw */
736 intel_ring_emit_wa(ring, CACHE_MODE_1, 740 intel_ring_emit_wa(ring, CACHE_MODE_1,
@@ -812,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
812 * 816 *
813 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 817 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
814 */ 818 */
815 if (INTEL_INFO(dev)->gen >= 6) 819 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
816 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 820 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
817 821
818 /* Required for the hardware to program scanline values for waiting */ 822 /* Required for the hardware to program scanline values for waiting */
@@ -1186,7 +1190,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
1186 struct drm_i915_private *dev_priv = dev->dev_private; 1190 struct drm_i915_private *dev_priv = dev->dev_private;
1187 unsigned long flags; 1191 unsigned long flags;
1188 1192
1189 if (!dev->irq_enabled) 1193 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1190 return false; 1194 return false;
1191 1195
1192 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1196 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1221,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
1217 struct drm_i915_private *dev_priv = dev->dev_private; 1221 struct drm_i915_private *dev_priv = dev->dev_private;
1218 unsigned long flags; 1222 unsigned long flags;
1219 1223
1220 if (!dev->irq_enabled) 1224 if (!intel_irqs_enabled(dev_priv))
1221 return false; 1225 return false;
1222 1226
1223 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1227 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1258,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
1254 struct drm_i915_private *dev_priv = dev->dev_private; 1258 struct drm_i915_private *dev_priv = dev->dev_private;
1255 unsigned long flags; 1259 unsigned long flags;
1256 1260
1257 if (!dev->irq_enabled) 1261 if (!intel_irqs_enabled(dev_priv))
1258 return false; 1262 return false;
1259 1263
1260 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1264 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1392,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1388 struct drm_i915_private *dev_priv = dev->dev_private; 1392 struct drm_i915_private *dev_priv = dev->dev_private;
1389 unsigned long flags; 1393 unsigned long flags;
1390 1394
1391 if (!dev->irq_enabled) 1395 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1392 return false; 1396 return false;
1393 1397
1394 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1398 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1395 if (ring->irq_refcount++ == 0) { 1399 if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1435,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1431 struct drm_i915_private *dev_priv = dev->dev_private; 1435 struct drm_i915_private *dev_priv = dev->dev_private;
1432 unsigned long flags; 1436 unsigned long flags;
1433 1437
1434 if (!dev->irq_enabled) 1438 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1435 return false; 1439 return false;
1436 1440
1437 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1441 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1455,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
1451 struct drm_i915_private *dev_priv = dev->dev_private; 1455 struct drm_i915_private *dev_priv = dev->dev_private;
1452 unsigned long flags; 1456 unsigned long flags;
1453 1457
1454 if (!dev->irq_enabled)
1455 return;
1456
1457 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1458 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1458 if (--ring->irq_refcount == 0) { 1459 if (--ring->irq_refcount == 0) {
1459 I915_WRITE_IMR(ring, ~0); 1460 I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1470,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
1469 struct drm_i915_private *dev_priv = dev->dev_private; 1470 struct drm_i915_private *dev_priv = dev->dev_private;
1470 unsigned long flags; 1471 unsigned long flags;
1471 1472
1472 if (!dev->irq_enabled) 1473 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1473 return false; 1474 return false;
1474 1475
1475 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1476 spin_lock_irqsave(&dev_priv->irq_lock, flags);
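The conversions above replace the core dev->irq_enabled flag with the driver-tracked intel_irqs_enabled(), wrapped in WARN_ON on the gen5+/vebox/gen8 paths so that requesting a ring interrupt while interrupts are disabled fails loudly instead of silently. The resulting guard shape, sketched:

static bool ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;		/* refuse; don't touch IMR */

	/* ... take irq_lock and unmask on the first reference ... */
	return true;
}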
@@ -2229,6 +2230,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2229 u32 invalidate, u32 flush) 2230 u32 invalidate, u32 flush)
2230{ 2231{
2231 struct drm_device *dev = ring->dev; 2232 struct drm_device *dev = ring->dev;
2233 struct drm_i915_private *dev_priv = dev->dev_private;
2232 uint32_t cmd; 2234 uint32_t cmd;
2233 int ret; 2235 int ret;
2234 2236
@@ -2259,8 +2261,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2259 } 2261 }
2260 intel_ring_advance(ring); 2262 intel_ring_advance(ring);
2261 2263
2262 if (IS_GEN7(dev) && !invalidate && flush) 2264 if (!invalidate && flush) {
2263 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); 2265 if (IS_GEN7(dev))
2266 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2267 else if (IS_BROADWELL(dev))
2268 dev_priv->fbc.need_sw_cache_clean = true;
2269 }
2264 2270
2265 return 0; 2271 return 0;
2266} 2272}
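On gen7 the FBC render cache can be cleaned from the ring via gen7_ring_fbc_flush(); Broadwell instead records fbc.need_sw_cache_clean so the driver can issue the write through bdw_fbc_sw_flush() from process context later. A sketch of how the flag is expected to be consumed (the surrounding call site is illustrative, not shown in this diff):

if (dev_priv->fbc.need_sw_cache_clean) {
	dev_priv->fbc.need_sw_cache_clean = false;
	bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
}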
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 000000000000..36749b91d28e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1375 @@
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include <drm/i915_powerwell.h>
35
36/**
37 * DOC: runtime pm
38 *
39 * The i915 driver supports dynamic enabling and disabling of entire hardware
40 * blocks at runtime. This is especially important on the display side where
41 * software is supposed to control many power gates manually on recent hardware,
42 * since on the GT side a lot of the power management is done by the hardware.
43 * But even there some manual control at the device level is required.
44 *
45 * Since i915 supports a diverse set of platforms with a unified codebase and
46 * hardware engineers just love to shuffle functionality around between power
47 * domains there's a sizeable amount of indirection required. This file provides
48 * generic functions to the driver for grabbing and releasing references for
49 * abstract power domains. It then maps those to the actual power wells
50 * present for a given platform.
51 */
52
53static struct i915_power_domains *hsw_pwr;
54
55#define for_each_power_well(i, power_well, domain_mask, power_domains) \
56 for (i = 0; \
57 i < (power_domains)->power_well_count && \
58 ((power_well) = &(power_domains)->power_wells[i]); \
59 i++) \
60 if ((power_well)->domains & (domain_mask))
61
62#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
63 for (i = (power_domains)->power_well_count - 1; \
64 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
65 i--) \
66 if ((power_well)->domains & (domain_mask))
67
68/*
69 * We should only use the power well if we explicitly asked the hardware to
70 * enable it, so check if it's enabled and also check if we've requested it to
71 * be enabled.
72 */
73static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
74 struct i915_power_well *power_well)
75{
76 return I915_READ(HSW_PWR_WELL_DRIVER) ==
77 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
78}
79
80/**
81 * __intel_display_power_is_enabled - unlocked check for a power domain
82 * @dev_priv: i915 device instance
83 * @domain: power domain to check
84 *
85 * This is the unlocked version of intel_display_power_is_enabled() and should
86 * only be used from error capture and recovery code where deadlocks are
87 * possible.
88 *
89 * Returns:
90 * True when the power domain is enabled, false otherwise.
91 */
92bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
93 enum intel_display_power_domain domain)
94{
95 struct i915_power_domains *power_domains;
96 struct i915_power_well *power_well;
97 bool is_enabled;
98 int i;
99
100 if (dev_priv->pm.suspended)
101 return false;
102
103 power_domains = &dev_priv->power_domains;
104
105 is_enabled = true;
106
107 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
108 if (power_well->always_on)
109 continue;
110
111 if (!power_well->hw_enabled) {
112 is_enabled = false;
113 break;
114 }
115 }
116
117 return is_enabled;
118}
119
120/**
121 * intel_display_power_is_enabled - check for a power domain
122 * @dev_priv: i915 device instance
123 * @domain: power domain to check
124 *
125 * This function can be used to check the hw power domain state. It is mostly
126 * used in hardware state readout functions. Everywhere else code should rely
127 * upon explicit power domain reference counting to ensure that the hardware
128 * block is powered up before accessing it.
129 *
130 * Callers must hold the relevant modesetting locks to ensure that concurrent
131 * threads can't disable the power well while the caller tries to read a few
132 * registers.
133 *
134 * Returns:
135 * True when the power domain is enabled, false otherwise.
136 */
137bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
138 enum intel_display_power_domain domain)
139{
140 struct i915_power_domains *power_domains;
141 bool ret;
142
143 power_domains = &dev_priv->power_domains;
144
145 mutex_lock(&power_domains->lock);
146 ret = __intel_display_power_is_enabled(dev_priv, domain);
147 mutex_unlock(&power_domains->lock);
148
149 return ret;
150}
151
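/*
 * A hedged sketch of the readout-style usage described above; the pipe
 * domain and the PIPECONF read are illustrative stand-ins, only
 * intel_display_power_is_enabled() itself comes from this file. The
 * caller is assumed to hold the relevant modeset locks.
 */
static bool example_readout_pipe_a(struct drm_i915_private *dev_priv)
{
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return false;	/* pipe powered down, nothing to read out */

	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE;
}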
152/**
153 * intel_display_set_init_power - set the initial power domain state
154 * @dev_priv: i915 device instance
155 * @enable: whether to enable or disable the initial power domain state
156 *
157 * For simplicity our driver load/unload and system suspend/resume code assumes
158 * that all power domains are always enabled. This function controls the state
159 * of this little hack. While the initial power domain state is enabled, runtime
160 * pm is effectively disabled.
161 */
162void intel_display_set_init_power(struct drm_i915_private *dev_priv,
163 bool enable)
164{
165 if (dev_priv->power_domains.init_power_on == enable)
166 return;
167
168 if (enable)
169 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
170 else
171 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
172
173 dev_priv->power_domains.init_power_on = enable;
174}
175
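/*
 * Minimal usage sketch, assuming the convention documented above: the
 * load/resume paths force everything on, and the first modeset drops
 * the init reference again so runtime pm can actually kick in. The
 * surrounding setup code is elided.
 */
intel_display_set_init_power(dev_priv, true);	/* driver load / resume */
/* ... hw state takeover and readout ... */
intel_display_set_init_power(dev_priv, false);	/* first modeset */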
176/*
177 * Starting with Haswell, we have a "Power Down Well" that can be turned off
178 * when not needed anymore. We have 4 registers that can request the power well
179 * to be enabled, and it will only be disabled if none of the registers is
180 * requesting it to be enabled.
181 */
182static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
183{
184 struct drm_device *dev = dev_priv->dev;
185
186 /*
187 * After we re-enable the power well, if we touch VGA register 0x3d5
188 * we'll get unclaimed register interrupts. This stops after we write
189 * anything to the VGA MSR register. The vgacon module uses this
190 * register all the time, so if we unbind our driver and, as a
191 * consequence, bind vgacon, we'll get stuck in an infinite loop at
192 * console_unlock(). So here we touch the VGA MSR register, making
193 * sure vgacon can keep working normally without triggering interrupts
194 * and error messages.
195 */
196 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
197 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
198 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
199
200 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
201 gen8_irq_power_well_post_enable(dev_priv);
202}
203
204static void hsw_set_power_well(struct drm_i915_private *dev_priv,
205 struct i915_power_well *power_well, bool enable)
206{
207 bool is_enabled, enable_requested;
208 uint32_t tmp;
209
210 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
211 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
212 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
213
214 if (enable) {
215 if (!enable_requested)
216 I915_WRITE(HSW_PWR_WELL_DRIVER,
217 HSW_PWR_WELL_ENABLE_REQUEST);
218
219 if (!is_enabled) {
220 DRM_DEBUG_KMS("Enabling power well\n");
221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
222 HSW_PWR_WELL_STATE_ENABLED), 20))
223 DRM_ERROR("Timeout enabling power well\n");
224 }
225
226 hsw_power_well_post_enable(dev_priv);
227 } else {
228 if (enable_requested) {
229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
230 POSTING_READ(HSW_PWR_WELL_DRIVER);
231 DRM_DEBUG_KMS("Requesting to disable the power well\n");
232 }
233 }
234}
235
236static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
237 struct i915_power_well *power_well)
238{
239 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
240
241 /*
242 * We're taking over the BIOS, so clear any requests made by it since
243 * the driver is in charge now.
244 */
245 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
246 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
247}
248
249static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
250 struct i915_power_well *power_well)
251{
252 hsw_set_power_well(dev_priv, power_well, true);
253}
254
255static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
256 struct i915_power_well *power_well)
257{
258 hsw_set_power_well(dev_priv, power_well, false);
259}
260
261static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
262 struct i915_power_well *power_well)
263{
264}
265
266static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
267 struct i915_power_well *power_well)
268{
269 return true;
270}
271
272static void vlv_set_power_well(struct drm_i915_private *dev_priv,
273 struct i915_power_well *power_well, bool enable)
274{
275 enum punit_power_well power_well_id = power_well->data;
276 u32 mask;
277 u32 state;
278 u32 ctrl;
279
280 mask = PUNIT_PWRGT_MASK(power_well_id);
281 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
282 PUNIT_PWRGT_PWR_GATE(power_well_id);
283
284 mutex_lock(&dev_priv->rps.hw_lock);
285
286#define COND \
287 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
288
289 if (COND)
290 goto out;
291
292 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
293 ctrl &= ~mask;
294 ctrl |= state;
295 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
296
297 if (wait_for(COND, 100))
298 DRM_ERROR("timout setting power well state %08x (%08x)\n",
299 state,
300 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
301
302#undef COND
303
304out:
305 mutex_unlock(&dev_priv->rps.hw_lock);
306}
307
308static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
309 struct i915_power_well *power_well)
310{
311 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
312}
313
314static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
315 struct i915_power_well *power_well)
316{
317 vlv_set_power_well(dev_priv, power_well, true);
318}
319
320static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
321 struct i915_power_well *power_well)
322{
323 vlv_set_power_well(dev_priv, power_well, false);
324}
325
326static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
327 struct i915_power_well *power_well)
328{
329 int power_well_id = power_well->data;
330 bool enabled = false;
331 u32 mask;
332 u32 state;
333 u32 ctrl;
334
335 mask = PUNIT_PWRGT_MASK(power_well_id);
336 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
337
338 mutex_lock(&dev_priv->rps.hw_lock);
339
340 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
341 /*
342 * We only ever set the power-on and power-gate states, anything
343 * else is unexpected.
344 */
345 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
346 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
347 if (state == ctrl)
348 enabled = true;
349
350 /*
351 * A transient state at this point would mean some unexpected party
352 * is poking at the power controls too.
353 */
354 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
355 WARN_ON(ctrl != state);
356
357 mutex_unlock(&dev_priv->rps.hw_lock);
358
359 return enabled;
360}
361
362static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
363 struct i915_power_well *power_well)
364{
365 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
366
367 vlv_set_power_well(dev_priv, power_well, true);
368
369 spin_lock_irq(&dev_priv->irq_lock);
370 valleyview_enable_display_irqs(dev_priv);
371 spin_unlock_irq(&dev_priv->irq_lock);
372
373 /*
374 * During driver initialization/resume we can avoid restoring the
375 * part of the HW/SW state that will be initialized explicitly anyway.
376 */
377 if (dev_priv->power_domains.initializing)
378 return;
379
380 intel_hpd_init(dev_priv);
381
382 i915_redisable_vga_power_on(dev_priv->dev);
383}
384
385static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
386 struct i915_power_well *power_well)
387{
388 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
389
390 spin_lock_irq(&dev_priv->irq_lock);
391 valleyview_disable_display_irqs(dev_priv);
392 spin_unlock_irq(&dev_priv->irq_lock);
393
394 vlv_set_power_well(dev_priv, power_well, false);
395
396 vlv_power_sequencer_reset(dev_priv);
397}
398
399static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
400 struct i915_power_well *power_well)
401{
402 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
403
404 /*
405 * Enable the CRI clock source so we can get at the
406 * display and the reference clock for VGA
407 * hotplug / manual detection.
408 */
409 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
410 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
411 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
412
413 vlv_set_power_well(dev_priv, power_well, true);
414
415 /*
416 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
417 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
418 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
419 * b. The other bits such as sfr settings / modesel may all
420 * be set to 0.
421 *
422 * This should only be done on init and resume from S3 with
423 * both PLLs disabled, or we risk losing DPIO and PLL
424 * synchronization.
425 */
426 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
427}
428
429static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
430 struct i915_power_well *power_well)
431{
432 enum pipe pipe;
433
434 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
435
436 for_each_pipe(dev_priv, pipe)
437 assert_pll_disabled(dev_priv, pipe);
438
439 /* Assert common reset */
440 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
441
442 vlv_set_power_well(dev_priv, power_well, false);
443}
444
445static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
446 struct i915_power_well *power_well)
447{
448 enum dpio_phy phy;
449
450 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
451 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
452
453 /*
454 * Enable the CRI clock source so we can get at the
455 * display and the reference clock for VGA
456 * hotplug / manual detection.
457 */
458 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
459 phy = DPIO_PHY0;
460 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
461 DPLL_REFA_CLK_ENABLE_VLV);
462 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
463 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
464 } else {
465 phy = DPIO_PHY1;
466 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
467 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
468 }
469 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
470 vlv_set_power_well(dev_priv, power_well, true);
471
472 /* Poll for phypwrgood signal */
473 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
474 DRM_ERROR("Display PHY %d is not power up\n", phy);
475
476 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
477 PHY_COM_LANE_RESET_DEASSERT(phy));
478}
479
480static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
481 struct i915_power_well *power_well)
482{
483 enum dpio_phy phy;
484
485 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
486 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
487
488 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
489 phy = DPIO_PHY0;
490 assert_pll_disabled(dev_priv, PIPE_A);
491 assert_pll_disabled(dev_priv, PIPE_B);
492 } else {
493 phy = DPIO_PHY1;
494 assert_pll_disabled(dev_priv, PIPE_C);
495 }
496
497 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
498 ~PHY_COM_LANE_RESET_DEASSERT(phy));
499
500 vlv_set_power_well(dev_priv, power_well, false);
501}
502
503static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
504 struct i915_power_well *power_well)
505{
506 enum pipe pipe = power_well->data;
507 bool enabled;
508 u32 state, ctrl;
509
510 mutex_lock(&dev_priv->rps.hw_lock);
511
512 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
513 /*
514 * We only ever set the power-on and power-gate states, anything
515 * else is unexpected.
516 */
517 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
518 enabled = state == DP_SSS_PWR_ON(pipe);
519
520 /*
521 * A transient state at this point would mean some unexpected party
522 * is poking at the power controls too.
523 */
524 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
525 WARN_ON(ctrl << 16 != state);
526
527 mutex_unlock(&dev_priv->rps.hw_lock);
528
529 return enabled;
530}
531
532static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
533 struct i915_power_well *power_well,
534 bool enable)
535{
536 enum pipe pipe = power_well->data;
537 u32 state;
538 u32 ctrl;
539
540 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
541
542 mutex_lock(&dev_priv->rps.hw_lock);
543
544#define COND \
545 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
546
547 if (COND)
548 goto out;
549
550 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
551 ctrl &= ~DP_SSC_MASK(pipe);
552 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
553 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
554
555 if (wait_for(COND, 100))
556 DRM_ERROR("timout setting power well state %08x (%08x)\n",
557 state,
558 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
559
560#undef COND
561
562out:
563 mutex_unlock(&dev_priv->rps.hw_lock);
564}
565
566static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
567 struct i915_power_well *power_well)
568{
569 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
570}
571
572static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
573 struct i915_power_well *power_well)
574{
575 WARN_ON_ONCE(power_well->data != PIPE_A &&
576 power_well->data != PIPE_B &&
577 power_well->data != PIPE_C);
578
579 chv_set_pipe_power_well(dev_priv, power_well, true);
580}
581
582static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
583 struct i915_power_well *power_well)
584{
585 WARN_ON_ONCE(power_well->data != PIPE_A &&
586 power_well->data != PIPE_B &&
587 power_well->data != PIPE_C);
588
589 chv_set_pipe_power_well(dev_priv, power_well, false);
590}
591
592static void check_power_well_state(struct drm_i915_private *dev_priv,
593 struct i915_power_well *power_well)
594{
595 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
596
597 if (power_well->always_on || !i915.disable_power_well) {
598 if (!enabled)
599 goto mismatch;
600
601 return;
602 }
603
604 if (enabled != (power_well->count > 0))
605 goto mismatch;
606
607 return;
608
609mismatch:
610 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
611 power_well->name, power_well->always_on, enabled,
612 power_well->count, i915.disable_power_well);
613}
614
615/**
616 * intel_display_power_get - grab a power domain reference
617 * @dev_priv: i915 device instance
618 * @domain: power domain to reference
619 *
620 * This function grabs a power domain reference for @domain and ensures that the
621 * power domain and all its parents are powered up. Therefore users should only
622 * grab a reference to the innermost power domain they need.
623 *
624 * Any power domain reference obtained by this function must have a symmetric
625 * call to intel_display_power_put() to release the reference again.
626 */
627void intel_display_power_get(struct drm_i915_private *dev_priv,
628 enum intel_display_power_domain domain)
629{
630 struct i915_power_domains *power_domains;
631 struct i915_power_well *power_well;
632 int i;
633
634 intel_runtime_pm_get(dev_priv);
635
636 power_domains = &dev_priv->power_domains;
637
638 mutex_lock(&power_domains->lock);
639
640 for_each_power_well(i, power_well, BIT(domain), power_domains) {
641 if (!power_well->count++) {
642 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
643 power_well->ops->enable(dev_priv, power_well);
644 power_well->hw_enabled = true;
645 }
646
647 check_power_well_state(dev_priv, power_well);
648 }
649
650 power_domains->domain_use_count[domain]++;
651
652 mutex_unlock(&power_domains->lock);
653}
654
655/**
656 * intel_display_power_put - release a power domain reference
657 * @dev_priv: i915 device instance
658 * @domain: power domain to reference
659 *
660 * This function drops the power domain reference obtained by
661 * intel_display_power_get() and might power down the corresponding hardware
662 * block right away if this is the last reference.
663 */
664void intel_display_power_put(struct drm_i915_private *dev_priv,
665 enum intel_display_power_domain domain)
666{
667 struct i915_power_domains *power_domains;
668 struct i915_power_well *power_well;
669 int i;
670
671 power_domains = &dev_priv->power_domains;
672
673 mutex_lock(&power_domains->lock);
674
675 WARN_ON(!power_domains->domain_use_count[domain]);
676 power_domains->domain_use_count[domain]--;
677
678 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
679 WARN_ON(!power_well->count);
680
681 if (!--power_well->count && i915.disable_power_well) {
682 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
683 power_well->hw_enabled = false;
684 power_well->ops->disable(dev_priv, power_well);
685 }
686
687 check_power_well_state(dev_priv, power_well);
688 }
689
690 mutex_unlock(&power_domains->lock);
691
692 intel_runtime_pm_put(dev_priv);
693}
694
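/*
 * Minimal sketch of the symmetric get/put pattern the two functions
 * above document; the audio domain and the access in between are
 * illustrative only.
 */
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
/* ... touch hardware that lives behind the audio power domain ... */
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);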
695#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
696
697#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
698 BIT(POWER_DOMAIN_PIPE_A) | \
699 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
700 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
701 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
702 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
703 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
704 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
705 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
706 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
707 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
708 BIT(POWER_DOMAIN_PORT_CRT) | \
709 BIT(POWER_DOMAIN_PLLS) | \
710 BIT(POWER_DOMAIN_INIT))
711#define HSW_DISPLAY_POWER_DOMAINS ( \
712 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
713 BIT(POWER_DOMAIN_INIT))
714
715#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
716 HSW_ALWAYS_ON_POWER_DOMAINS | \
717 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
718#define BDW_DISPLAY_POWER_DOMAINS ( \
719 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
720 BIT(POWER_DOMAIN_INIT))
721
722#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
723#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
724
725#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
726 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
727 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
728 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
729 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
730 BIT(POWER_DOMAIN_PORT_CRT) | \
731 BIT(POWER_DOMAIN_INIT))
732
733#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
734 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
735 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
736 BIT(POWER_DOMAIN_INIT))
737
738#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
739 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
740 BIT(POWER_DOMAIN_INIT))
741
742#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
743 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
744 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
745 BIT(POWER_DOMAIN_INIT))
746
747#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
748 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
749 BIT(POWER_DOMAIN_INIT))
750
751#define CHV_PIPE_A_POWER_DOMAINS ( \
752 BIT(POWER_DOMAIN_PIPE_A) | \
753 BIT(POWER_DOMAIN_INIT))
754
755#define CHV_PIPE_B_POWER_DOMAINS ( \
756 BIT(POWER_DOMAIN_PIPE_B) | \
757 BIT(POWER_DOMAIN_INIT))
758
759#define CHV_PIPE_C_POWER_DOMAINS ( \
760 BIT(POWER_DOMAIN_PIPE_C) | \
761 BIT(POWER_DOMAIN_INIT))
762
763#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
764 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
765 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
766 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
767 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
768 BIT(POWER_DOMAIN_INIT))
769
770#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
771 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
772 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
773 BIT(POWER_DOMAIN_INIT))
774
775#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
776 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
777 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
778 BIT(POWER_DOMAIN_INIT))
779
780#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
781 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
782 BIT(POWER_DOMAIN_INIT))
783
784static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
785 .sync_hw = i9xx_always_on_power_well_noop,
786 .enable = i9xx_always_on_power_well_noop,
787 .disable = i9xx_always_on_power_well_noop,
788 .is_enabled = i9xx_always_on_power_well_enabled,
789};
790
791static const struct i915_power_well_ops chv_pipe_power_well_ops = {
792 .sync_hw = chv_pipe_power_well_sync_hw,
793 .enable = chv_pipe_power_well_enable,
794 .disable = chv_pipe_power_well_disable,
795 .is_enabled = chv_pipe_power_well_enabled,
796};
797
798static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
799 .sync_hw = vlv_power_well_sync_hw,
800 .enable = chv_dpio_cmn_power_well_enable,
801 .disable = chv_dpio_cmn_power_well_disable,
802 .is_enabled = vlv_power_well_enabled,
803};
804
805static struct i915_power_well i9xx_always_on_power_well[] = {
806 {
807 .name = "always-on",
808 .always_on = 1,
809 .domains = POWER_DOMAIN_MASK,
810 .ops = &i9xx_always_on_power_well_ops,
811 },
812};
813
814static const struct i915_power_well_ops hsw_power_well_ops = {
815 .sync_hw = hsw_power_well_sync_hw,
816 .enable = hsw_power_well_enable,
817 .disable = hsw_power_well_disable,
818 .is_enabled = hsw_power_well_enabled,
819};
820
821static struct i915_power_well hsw_power_wells[] = {
822 {
823 .name = "always-on",
824 .always_on = 1,
825 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
826 .ops = &i9xx_always_on_power_well_ops,
827 },
828 {
829 .name = "display",
830 .domains = HSW_DISPLAY_POWER_DOMAINS,
831 .ops = &hsw_power_well_ops,
832 },
833};
834
835static struct i915_power_well bdw_power_wells[] = {
836 {
837 .name = "always-on",
838 .always_on = 1,
839 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
840 .ops = &i9xx_always_on_power_well_ops,
841 },
842 {
843 .name = "display",
844 .domains = BDW_DISPLAY_POWER_DOMAINS,
845 .ops = &hsw_power_well_ops,
846 },
847};
848
849static const struct i915_power_well_ops vlv_display_power_well_ops = {
850 .sync_hw = vlv_power_well_sync_hw,
851 .enable = vlv_display_power_well_enable,
852 .disable = vlv_display_power_well_disable,
853 .is_enabled = vlv_power_well_enabled,
854};
855
856static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
857 .sync_hw = vlv_power_well_sync_hw,
858 .enable = vlv_dpio_cmn_power_well_enable,
859 .disable = vlv_dpio_cmn_power_well_disable,
860 .is_enabled = vlv_power_well_enabled,
861};
862
863static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
864 .sync_hw = vlv_power_well_sync_hw,
865 .enable = vlv_power_well_enable,
866 .disable = vlv_power_well_disable,
867 .is_enabled = vlv_power_well_enabled,
868};
869
870static struct i915_power_well vlv_power_wells[] = {
871 {
872 .name = "always-on",
873 .always_on = 1,
874 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
875 .ops = &i9xx_always_on_power_well_ops,
876 },
877 {
878 .name = "display",
879 .domains = VLV_DISPLAY_POWER_DOMAINS,
880 .data = PUNIT_POWER_WELL_DISP2D,
881 .ops = &vlv_display_power_well_ops,
882 },
883 {
884 .name = "dpio-tx-b-01",
885 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
886 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
887 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
888 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
889 .ops = &vlv_dpio_power_well_ops,
890 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
891 },
892 {
893 .name = "dpio-tx-b-23",
894 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
895 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
896 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
897 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
898 .ops = &vlv_dpio_power_well_ops,
899 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
900 },
901 {
902 .name = "dpio-tx-c-01",
903 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
904 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
905 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
906 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
907 .ops = &vlv_dpio_power_well_ops,
908 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
909 },
910 {
911 .name = "dpio-tx-c-23",
912 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
913 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
914 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
915 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
916 .ops = &vlv_dpio_power_well_ops,
917 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
918 },
919 {
920 .name = "dpio-common",
921 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
922 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
923 .ops = &vlv_dpio_cmn_power_well_ops,
924 },
925};
926
927static struct i915_power_well chv_power_wells[] = {
928 {
929 .name = "always-on",
930 .always_on = 1,
931 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
932 .ops = &i9xx_always_on_power_well_ops,
933 },
934#if 0
935 {
936 .name = "display",
937 .domains = VLV_DISPLAY_POWER_DOMAINS,
938 .data = PUNIT_POWER_WELL_DISP2D,
939 .ops = &vlv_display_power_well_ops,
940 },
941 {
942 .name = "pipe-a",
943 .domains = CHV_PIPE_A_POWER_DOMAINS,
944 .data = PIPE_A,
945 .ops = &chv_pipe_power_well_ops,
946 },
947 {
948 .name = "pipe-b",
949 .domains = CHV_PIPE_B_POWER_DOMAINS,
950 .data = PIPE_B,
951 .ops = &chv_pipe_power_well_ops,
952 },
953 {
954 .name = "pipe-c",
955 .domains = CHV_PIPE_C_POWER_DOMAINS,
956 .data = PIPE_C,
957 .ops = &chv_pipe_power_well_ops,
958 },
959#endif
960 {
961 .name = "dpio-common-bc",
962 /*
963 * XXX: cmnreset for one PHY seems to disturb the other.
964 * As a workaround keep both powered on at the same
965 * time for now.
966 */
967 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
968 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
969 .ops = &chv_dpio_cmn_power_well_ops,
970 },
971 {
972 .name = "dpio-common-d",
973 /*
974 * XXX: cmnreset for one PHY seems to disturb the other.
975 * As a workaround keep both powered on at the same
976 * time for now.
977 */
978 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
979 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
980 .ops = &chv_dpio_cmn_power_well_ops,
981 },
982#if 0
983 {
984 .name = "dpio-tx-b-01",
985 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
986 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
987 .ops = &vlv_dpio_power_well_ops,
988 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
989 },
990 {
991 .name = "dpio-tx-b-23",
992 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
993 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
994 .ops = &vlv_dpio_power_well_ops,
995 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
996 },
997 {
998 .name = "dpio-tx-c-01",
999 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1000 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1001 .ops = &vlv_dpio_power_well_ops,
1002 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1003 },
1004 {
1005 .name = "dpio-tx-c-23",
1006 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1007 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1008 .ops = &vlv_dpio_power_well_ops,
1009 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1010 },
1011 {
1012 .name = "dpio-tx-d-01",
1013 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1014 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1015 .ops = &vlv_dpio_power_well_ops,
1016 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
1017 },
1018 {
1019 .name = "dpio-tx-d-23",
1020 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1021 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1022 .ops = &vlv_dpio_power_well_ops,
1023 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
1024 },
1025#endif
1026};
1027
1028static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1029 enum punit_power_well power_well_id)
1030{
1031 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1032 struct i915_power_well *power_well;
1033 int i;
1034
1035 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1036 if (power_well->data == power_well_id)
1037 return power_well;
1038 }
1039
1040 return NULL;
1041}
1042
1043#define set_power_wells(power_domains, __power_wells) ({ \
1044 (power_domains)->power_wells = (__power_wells); \
1045 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
1046})
1047
1048/**
1049 * intel_power_domains_init - initializes the power domain structures
1050 * @dev_priv: i915 device instance
1051 *
1052 * Initializes the power domain structures for @dev_priv depending upon the
1053 * supported platform.
1054 */
1055int intel_power_domains_init(struct drm_i915_private *dev_priv)
1056{
1057 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1058
1059 mutex_init(&power_domains->lock);
1060
1061 /*
1062 * The enabling order will be from lower to higher indexed wells;
1063 * the disabling order is reversed.
1064 */
1065 if (IS_HASWELL(dev_priv->dev)) {
1066 set_power_wells(power_domains, hsw_power_wells);
1067 hsw_pwr = power_domains;
1068 } else if (IS_BROADWELL(dev_priv->dev)) {
1069 set_power_wells(power_domains, bdw_power_wells);
1070 hsw_pwr = power_domains;
1071 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1072 set_power_wells(power_domains, chv_power_wells);
1073 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
1074 set_power_wells(power_domains, vlv_power_wells);
1075 } else {
1076 set_power_wells(power_domains, i9xx_always_on_power_well);
1077 }
1078
1079 return 0;
1080}
1081
1082static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1083{
1084 struct drm_device *dev = dev_priv->dev;
1085 struct device *device = &dev->pdev->dev;
1086
1087 if (!HAS_RUNTIME_PM(dev))
1088 return;
1089
1090 if (!intel_enable_rc6(dev))
1091 return;
1092
1093 /* Make sure we're not suspended first. */
1094 pm_runtime_get_sync(device);
1095 pm_runtime_disable(device);
1096}
1097
1098/**
1099 * intel_power_domains_fini - finalizes the power domain structures
1100 * @dev_priv: i915 device instance
1101 *
1102 * Finalizes the power domain structures for @dev_priv depending upon the
1103 * supported platform. This function also disables runtime pm and ensures that
1104 * the device stays powered up so that the driver can be reloaded.
1105 */
1106void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1107{
1108 intel_runtime_pm_disable(dev_priv);
1109
1110 /* The i915.ko module is still not prepared to be loaded when
1111 * the power well is not enabled, so just enable it in case
1112 * we're going to unload/reload. */
1113 intel_display_set_init_power(dev_priv, true);
1114
1115 hsw_pwr = NULL;
1116}
1117
1118static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1119{
1120 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1121 struct i915_power_well *power_well;
1122 int i;
1123
1124 mutex_lock(&power_domains->lock);
1125 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1126 power_well->ops->sync_hw(dev_priv, power_well);
1127 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1128 power_well);
1129 }
1130 mutex_unlock(&power_domains->lock);
1131}
1132
1133static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1134{
1135 struct i915_power_well *cmn =
1136 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1137 struct i915_power_well *disp2d =
1138 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
1139
1140 /* nothing to do if common lane is already off */
1141 if (!cmn->ops->is_enabled(dev_priv, cmn))
1142 return;
1143
1144 /* If the display might be already active skip this */
1145 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
1146 I915_READ(DPIO_CTL) & DPIO_CMNRST)
1147 return;
1148
1149 DRM_DEBUG_KMS("toggling display PHY side reset\n");
1150
1151 /* cmnlane needs DPLL registers */
1152 disp2d->ops->enable(dev_priv, disp2d);
1153
1154 /*
1155 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1156 * Need to assert and de-assert PHY SB reset by gating the
1157 * common lane power, then un-gating it.
1158 * Simply ungating isn't sufficient to reset the PHY enough to get
1159 * ports and lanes running.
1160 */
1161 cmn->ops->disable(dev_priv, cmn);
1162}
1163
1164/**
1165 * intel_power_domains_init_hw - initialize hardware power domain state
1166 * @dev_priv: i915 device instance
1167 *
1168 * This function initializes the hardware power domain state and enables all
1169 * power domains using intel_display_set_init_power().
1170 */
1171void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1172{
1173 struct drm_device *dev = dev_priv->dev;
1174 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1175
1176 power_domains->initializing = true;
1177
1178 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1179 mutex_lock(&power_domains->lock);
1180 vlv_cmnlane_wa(dev_priv);
1181 mutex_unlock(&power_domains->lock);
1182 }
1183
1184 /* For now, we need the power well to be always enabled. */
1185 intel_display_set_init_power(dev_priv, true);
1186 intel_power_domains_resume(dev_priv);
1187 power_domains->initializing = false;
1188}
1189
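/*
 * Hedged sketch of the driver-load ordering implied by the kerneldoc
 * in this file; error handling and the unrelated load steps are
 * elided, and this is not a verbatim quote of the actual call sites.
 */
intel_power_domains_init(dev_priv);	/* set up the sw structures */
intel_power_domains_init_hw(dev_priv);	/* sync hw, force everything on */
/* ... rest of driver load ... */
intel_runtime_pm_enable(dev_priv);	/* end of the load sequence */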
1190/**
1191 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
1192 * @dev_priv: i915 device instance
1193 *
1194 * This function grabs a power domain reference for the auxiliary power domain
1195 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
1196 * parents are powered up. Therefore users should only grab a reference to the
1197 * innermost power domain they need.
1198 *
1199 * Any power domain reference obtained by this function must have a symmetric
1200 * call to intel_aux_display_runtime_put() to release the reference again.
1201 */
1202void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
1203{
1204 intel_runtime_pm_get(dev_priv);
1205}
1206
1207/**
1208 * intel_aux_display_runtime_put - release an auxiliary power domain reference
1209 * @dev_priv: i915 device instance
1210 *
1211 * This function drops the auxiliary power domain reference obtained by
1212 * intel_aux_display_runtime_get() and might power down the corresponding
1213 * hardware block right away if this is the last reference.
1214 */
1215void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
1216{
1217 intel_runtime_pm_put(dev_priv);
1218}
1219
1220/**
1221 * intel_runtime_pm_get - grab a runtime pm reference
1222 * @dev_priv: i915 device instance
1223 *
1224 * This function grabs a device-level runtime pm reference (mostly used for GEM
1225 * code to ensure the GTT or GT is on) and ensures that it is powered up.
1226 *
1227 * Any runtime pm reference obtained by this function must have a symmetric
1228 * call to intel_runtime_pm_put() to release the reference again.
1229 */
1230void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1231{
1232 struct drm_device *dev = dev_priv->dev;
1233 struct device *device = &dev->pdev->dev;
1234
1235 if (!HAS_RUNTIME_PM(dev))
1236 return;
1237
1238 pm_runtime_get_sync(device);
1239 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1240}
1241
1242/**
1243 * intel_runtime_pm_get_noresume - grab a runtime pm reference
1244 * @dev_priv: i915 device instance
1245 *
1246 * This function grabs a device-level runtime pm reference (mostly used for GEM
1247 * code to ensure the GTT or GT is on).
1248 *
1249 * It will _not_ power up the device but instead only check that it's powered
1250 * on. Therefore it is only valid to call this function from contexts where
1251 * the device is known to be powered up and where trying to power it up would
1252 * result in hilarity and deadlocks. That pretty much means only the system
1253 * suspend/resume code where this is used to grab runtime pm references for
1254 * delayed setup down in work items.
1255 *
1256 * Any runtime pm reference obtained by this function must have a symmetric
1257 * call to intel_runtime_pm_put() to release the reference again.
1258 */
1259void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1260{
1261 struct drm_device *dev = dev_priv->dev;
1262 struct device *device = &dev->pdev->dev;
1263
1264 if (!HAS_RUNTIME_PM(dev))
1265 return;
1266
1267 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1268 pm_runtime_get_noresume(device);
1269}
1270
1271/**
1272 * intel_runtime_pm_put - release a runtime pm reference
1273 * @dev_priv: i915 device instance
1274 *
1275 * This function drops the device-level runtime pm reference obtained by
1276 * intel_runtime_pm_get() and might power down the corresponding
1277 * hardware block right away if this is the last reference.
1278 */
1279void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1280{
1281 struct drm_device *dev = dev_priv->dev;
1282 struct device *device = &dev->pdev->dev;
1283
1284 if (!HAS_RUNTIME_PM(dev))
1285 return;
1286
1287 pm_runtime_mark_last_busy(device);
1288 pm_runtime_put_autosuspend(device);
1289}
1290
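/*
 * Minimal sketch of the device-level reference pattern, e.g. from GEM
 * code before touching the GTT; the body is illustrative.
 */
intel_runtime_pm_get(dev_priv);
/* ... access the GTT or other device resources ... */
intel_runtime_pm_put(dev_priv);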
1291/**
1292 * intel_runtime_pm_enable - enable runtime pm
1293 * @dev_priv: i915 device instance
1294 *
1295 * This function enables runtime pm at the end of the driver load sequence.
1296 *
1297 * Note that this function does not currently enable runtime pm for the
1298 * subordinate display power domains. That is only done on the first modeset
1299 * using intel_display_set_init_power().
1300 */
1301void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1302{
1303 struct drm_device *dev = dev_priv->dev;
1304 struct device *device = &dev->pdev->dev;
1305
1306 if (!HAS_RUNTIME_PM(dev))
1307 return;
1308
1309 pm_runtime_set_active(device);
1310
1311 /*
1312 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
1313 * requirement.
1314 */
1315 if (!intel_enable_rc6(dev)) {
1316 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1317 return;
1318 }
1319
1320 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1321 pm_runtime_mark_last_busy(device);
1322 pm_runtime_use_autosuspend(device);
1323
1324 pm_runtime_put_autosuspend(device);
1325}
1326
1327/* Display audio driver power well request */
1328int i915_request_power_well(void)
1329{
1330 struct drm_i915_private *dev_priv;
1331
1332 if (!hsw_pwr)
1333 return -ENODEV;
1334
1335 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1336 power_domains);
1337 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
1338 return 0;
1339}
1340EXPORT_SYMBOL_GPL(i915_request_power_well);
1341
1342/* Display audio driver power well release */
1343int i915_release_power_well(void)
1344{
1345 struct drm_i915_private *dev_priv;
1346
1347 if (!hsw_pwr)
1348 return -ENODEV;
1349
1350 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1351 power_domains);
1352 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
1353 return 0;
1354}
1355EXPORT_SYMBOL_GPL(i915_release_power_well);
1356
1357/*
1358 * Private interface for the audio driver to get CDCLK in kHz.
1359 *
1360 * Caller must request power well using i915_request_power_well() prior to
1361 * making the call.
1362 */
1363int i915_get_cdclk_freq(void)
1364{
1365 struct drm_i915_private *dev_priv;
1366
1367 if (!hsw_pwr)
1368 return -ENODEV;
1369
1370 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1371 power_domains);
1372
1373 return intel_ddi_get_cdclk_freq(dev_priv);
1374}
1375EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
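/*
 * Hedged sketch of how the audio driver side is expected to use the
 * three exports above; the function name here is illustrative, only
 * the i915_* calls come from this file.
 */
static int example_audio_query_cdclk(void)
{
	int ret;

	ret = i915_request_power_well();
	if (ret < 0)
		return ret;	/* i915 not loaded or no power well */

	ret = i915_get_cdclk_freq();

	i915_release_power_well();
	return ret;	/* CDCLK in kHz, or -ENODEV */
}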
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 07a74ef589bd..750b634d45ec 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -139,6 +139,184 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
139} 139}
140 140
141static void 141static void
142skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
143 struct drm_framebuffer *fb,
144 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
145 unsigned int crtc_w, unsigned int crtc_h,
146 uint32_t x, uint32_t y,
147 uint32_t src_w, uint32_t src_h)
148{
149 struct drm_device *dev = drm_plane->dev;
150 struct drm_i915_private *dev_priv = dev->dev_private;
151 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
152 const int pipe = intel_plane->pipe;
153 const int plane = intel_plane->plane + 1;
154 u32 plane_ctl, stride;
155 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
156
157 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
158
159 /* Mask out pixel format bits in case we change it */
160 plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
161 plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
162 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
163 plane_ctl &= ~PLANE_CTL_TILED_MASK;
164 plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
165
166 /* Trickle feed has to be enabled */
167 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
168
169 switch (fb->pixel_format) {
170 case DRM_FORMAT_RGB565:
171 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
172 break;
173 case DRM_FORMAT_XBGR8888:
174 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
175 break;
176 case DRM_FORMAT_XRGB8888:
177 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
178 break;
179 /*
180 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
181 * to be already pre-multiplied. We need to add a knob (or a different
182 * DRM_FORMAT) for user-space to configure that.
183 */
184 case DRM_FORMAT_ABGR8888:
185 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
186 PLANE_CTL_ORDER_RGBX |
187 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
188 break;
189 case DRM_FORMAT_ARGB8888:
190 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
191 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
192 break;
193 case DRM_FORMAT_YUYV:
194 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
195 break;
196 case DRM_FORMAT_YVYU:
197 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
198 break;
199 case DRM_FORMAT_UYVY:
200 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
201 break;
202 case DRM_FORMAT_VYUY:
203 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
204 break;
205 default:
206 BUG();
207 }
208
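	/*
	 * Judging by the shifts below, PLANE_STRIDE appears to be
	 * programmed in tile-width units: 64 bytes for linear and 512
	 * bytes for X-tiled scanout (an assumption read off the code,
	 * not spelled out in this patch).
	 */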
209 switch (obj->tiling_mode) {
210 case I915_TILING_NONE:
211 stride = fb->pitches[0] >> 6;
212 break;
213 case I915_TILING_X:
214 plane_ctl |= PLANE_CTL_TILED_X;
215 stride = fb->pitches[0] >> 9;
216 break;
217 default:
218 BUG();
219 }
220
221 plane_ctl |= PLANE_CTL_ENABLE;
222 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
223
224 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
225 pixel_size, true,
226 src_w != crtc_w || src_h != crtc_h);
227
228 /* Sizes are 0 based */
229 src_w--;
230 src_h--;
231 crtc_w--;
232 crtc_h--;
233
234 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
235 I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
236 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
237 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
238 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
239 I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
240 POSTING_READ(PLANE_SURF(pipe, plane));
241}
242
243static void
244skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
245{
246 struct drm_device *dev = drm_plane->dev;
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
249 const int pipe = intel_plane->pipe;
250 const int plane = intel_plane->plane + 1;
251
252 I915_WRITE(PLANE_CTL(pipe, plane),
253 I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
254
255 /* Activate double buffered register update */
256 I915_WRITE(PLANE_CTL(pipe, plane), 0);
257 POSTING_READ(PLANE_CTL(pipe, plane));
258
259 intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
260}
261
262static int
263skl_update_colorkey(struct drm_plane *drm_plane,
264 struct drm_intel_sprite_colorkey *key)
265{
266 struct drm_device *dev = drm_plane->dev;
267 struct drm_i915_private *dev_priv = dev->dev_private;
268 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
269 const int pipe = intel_plane->pipe;
270 const int plane = intel_plane->plane;
271 u32 plane_ctl;
272
273 I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
274 I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
275 I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
276
277 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
278 plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
279 if (key->flags & I915_SET_COLORKEY_DESTINATION)
280 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
281 else if (key->flags & I915_SET_COLORKEY_SOURCE)
282 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
283 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
284
285 POSTING_READ(PLANE_CTL(pipe, plane));
286
287 return 0;
288}
289
290static void
291skl_get_colorkey(struct drm_plane *drm_plane,
292 struct drm_intel_sprite_colorkey *key)
293{
294 struct drm_device *dev = drm_plane->dev;
295 struct drm_i915_private *dev_priv = dev->dev_private;
296 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
297 const int pipe = intel_plane->pipe;
298 const int plane = intel_plane->plane;
299 u32 plane_ctl;
300
301 key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
302 key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
303 key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
304
305 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
306
307 switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
308 case PLANE_CTL_KEY_ENABLE_DESTINATION:
309 key->flags = I915_SET_COLORKEY_DESTINATION;
310 break;
311 case PLANE_CTL_KEY_ENABLE_SOURCE:
312 key->flags = I915_SET_COLORKEY_SOURCE;
313 break;
314 default:
315 key->flags = I915_SET_COLORKEY_NONE;
316 }
317}
318
319static void
142vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, 320vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
143 struct drm_framebuffer *fb, 321 struct drm_framebuffer *fb,
144 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 322 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -845,57 +1023,24 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
845} 1023}
846 1024
847static int 1025static int
848intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 1026intel_check_sprite_plane(struct drm_plane *plane,
849 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 1027 struct intel_plane_state *state)
850 unsigned int crtc_w, unsigned int crtc_h,
851 uint32_t src_x, uint32_t src_y,
852 uint32_t src_w, uint32_t src_h)
853{ 1028{
854 struct drm_device *dev = plane->dev; 1029 struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
855 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
856 struct intel_plane *intel_plane = to_intel_plane(plane); 1030 struct intel_plane *intel_plane = to_intel_plane(plane);
857 enum pipe pipe = intel_crtc->pipe; 1031 struct drm_framebuffer *fb = state->fb;
858 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1032 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
859 struct drm_i915_gem_object *obj = intel_fb->obj; 1033 struct drm_i915_gem_object *obj = intel_fb->obj;
860 struct drm_i915_gem_object *old_obj = intel_plane->obj; 1034 int crtc_x, crtc_y;
861 int ret; 1035 unsigned int crtc_w, crtc_h;
862 bool primary_enabled; 1036 uint32_t src_x, src_y, src_w, src_h;
863 bool visible; 1037 struct drm_rect *src = &state->src;
1038 struct drm_rect *dst = &state->dst;
1039 struct drm_rect *orig_src = &state->orig_src;
1040 const struct drm_rect *clip = &state->clip;
864 int hscale, vscale; 1041 int hscale, vscale;
865 int max_scale, min_scale; 1042 int max_scale, min_scale;
866 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 1043 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
867 struct drm_rect src = {
868 /* sample coordinates in 16.16 fixed point */
869 .x1 = src_x,
870 .x2 = src_x + src_w,
871 .y1 = src_y,
872 .y2 = src_y + src_h,
873 };
874 struct drm_rect dst = {
875 /* integer pixels */
876 .x1 = crtc_x,
877 .x2 = crtc_x + crtc_w,
878 .y1 = crtc_y,
879 .y2 = crtc_y + crtc_h,
880 };
881 const struct drm_rect clip = {
882 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
883 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
884 };
885 const struct {
886 int crtc_x, crtc_y;
887 unsigned int crtc_w, crtc_h;
888 uint32_t src_x, src_y, src_w, src_h;
889 } orig = {
890 .crtc_x = crtc_x,
891 .crtc_y = crtc_y,
892 .crtc_w = crtc_w,
893 .crtc_h = crtc_h,
894 .src_x = src_x,
895 .src_y = src_y,
896 .src_w = src_w,
897 .src_h = src_h,
898 };
899 1044
900 /* Don't modify another pipe's plane */ 1045 /* Don't modify another pipe's plane */
901 if (intel_plane->pipe != intel_crtc->pipe) { 1046 if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1072,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
927 max_scale = intel_plane->max_downscale << 16; 1072 max_scale = intel_plane->max_downscale << 16;
928 min_scale = intel_plane->can_scale ? 1 : (1 << 16); 1073 min_scale = intel_plane->can_scale ? 1 : (1 << 16);
929 1074
930 drm_rect_rotate(&src, fb->width << 16, fb->height << 16, 1075 drm_rect_rotate(src, fb->width << 16, fb->height << 16,
931 intel_plane->rotation); 1076 intel_plane->rotation);
932 1077
933 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); 1078 hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
934 BUG_ON(hscale < 0); 1079 BUG_ON(hscale < 0);
935 1080
936 vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale); 1081 vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
937 BUG_ON(vscale < 0); 1082 BUG_ON(vscale < 0);
938 1083
939 visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale); 1084 state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
940 1085
941 crtc_x = dst.x1; 1086 crtc_x = dst->x1;
942 crtc_y = dst.y1; 1087 crtc_y = dst->y1;
943 crtc_w = drm_rect_width(&dst); 1088 crtc_w = drm_rect_width(dst);
944 crtc_h = drm_rect_height(&dst); 1089 crtc_h = drm_rect_height(dst);
945 1090
946 if (visible) { 1091 if (state->visible) {
947 /* check again in case clipping clamped the results */ 1092 /* check again in case clipping clamped the results */
948 hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale); 1093 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
949 if (hscale < 0) { 1094 if (hscale < 0) {
950 DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); 1095 DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
951 drm_rect_debug_print(&src, true); 1096 drm_rect_debug_print(src, true);
952 drm_rect_debug_print(&dst, false); 1097 drm_rect_debug_print(dst, false);
953 1098
954 return hscale; 1099 return hscale;
955 } 1100 }
956 1101
957 vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale); 1102 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
958 if (vscale < 0) { 1103 if (vscale < 0) {
959 DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); 1104 DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
960 drm_rect_debug_print(&src, true); 1105 drm_rect_debug_print(src, true);
961 drm_rect_debug_print(&dst, false); 1106 drm_rect_debug_print(dst, false);
962 1107
963 return vscale; 1108 return vscale;
964 } 1109 }
965 1110
966 /* Make the source viewport size an exact multiple of the scaling factors. */ 1111 /* Make the source viewport size an exact multiple of the scaling factors. */
967 drm_rect_adjust_size(&src, 1112 drm_rect_adjust_size(src,
968 drm_rect_width(&dst) * hscale - drm_rect_width(&src), 1113 drm_rect_width(dst) * hscale - drm_rect_width(src),
969 drm_rect_height(&dst) * vscale - drm_rect_height(&src)); 1114 drm_rect_height(dst) * vscale - drm_rect_height(src));
970 1115
971 drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16, 1116 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
972 intel_plane->rotation); 1117 intel_plane->rotation);
973 1118
974 /* sanity check to make sure the src viewport wasn't enlarged */ 1119 /* sanity check to make sure the src viewport wasn't enlarged */
975 WARN_ON(src.x1 < (int) src_x || 1120 WARN_ON(src->x1 < (int) orig_src->x1 ||
976 src.y1 < (int) src_y || 1121 src->y1 < (int) orig_src->y1 ||
977 src.x2 > (int) (src_x + src_w) || 1122 src->x2 > (int) orig_src->x2 ||
978 src.y2 > (int) (src_y + src_h)); 1123 src->y2 > (int) orig_src->y2);
979 1124
980 /* 1125 /*
981 * Hardware doesn't handle subpixel coordinates. 1126 * Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1128,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
983 * increase the source viewport size, because that could 1128 * increase the source viewport size, because that could
984 * push the downscaling factor out of bounds. 1129 * push the downscaling factor out of bounds.
985 */ 1130 */
986 src_x = src.x1 >> 16; 1131 src_x = src->x1 >> 16;
987 src_w = drm_rect_width(&src) >> 16; 1132 src_w = drm_rect_width(src) >> 16;
988 src_y = src.y1 >> 16; 1133 src_y = src->y1 >> 16;
989 src_h = drm_rect_height(&src) >> 16; 1134 src_h = drm_rect_height(src) >> 16;
990 1135
991 if (format_is_yuv(fb->pixel_format)) { 1136 if (format_is_yuv(fb->pixel_format)) {
992 src_x &= ~1; 1137 src_x &= ~1;
@@ -1000,12 +1145,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 			crtc_w &= ~1;
 
 			if (crtc_w == 0)
-				visible = false;
+				state->visible = false;
 		}
 	}
 
 	/* Check size restrictions when scaling */
-	if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
 		unsigned int width_bytes;
 
 		WARN_ON(!intel_plane->can_scale);
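Packed YUV 4:2:2 formats (YUYV, UYVY and friends) store one chroma pair per two horizontal pixels, which is why the hunk above rounds the source x/width, and the CRTC width, down to even values with &= ~1. A toy illustration of the rounding; the helper name is invented, not driver code:

#include <stdio.h>

/* round a position and size down to even, as 4:2:2 chroma siting requires */
static void align_yuv422(unsigned int *x, unsigned int *w)
{
	*x &= ~1u;
	*w &= ~1u;
}

int main(void)
{
	unsigned int x = 101, w = 33;

	align_yuv422(&x, &w);
	printf("x=%u w=%u\n", x, w);	/* x=100 w=32 */
	return 0;
}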
@@ -1013,12 +1158,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		/* FIXME interlacing min height is 6 */
 
 		if (crtc_w < 3 || crtc_h < 3)
-			visible = false;
+			state->visible = false;
 
 		if (src_w < 3 || src_h < 3)
-			visible = false;
+			state->visible = false;
 
-		width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+		width_bytes = ((src_x * pixel_size) & 63) +
+			      src_w * pixel_size;
 
 		if (src_w > 2048 || src_h > 2048 ||
 		    width_bytes > 4096 || fb->pitches[0] > 4096) {
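width_bytes bounds how many bytes one source line makes the sprite engine fetch when scaling, folding in the first pixel's offset within its 64-byte span. A hedged standalone sketch of the same check; the 4096-byte limit is copied from the hunk, everything else is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool sprite_line_fits(unsigned int src_x, unsigned int src_w,
			     unsigned int cpp /* bytes per pixel */)
{
	unsigned int width_bytes = ((src_x * cpp) & 63) + src_w * cpp;

	return width_bytes <= 4096;
}

int main(void)
{
	printf("1024 px at 4 cpp: %s\n",
	       sprite_line_fits(0, 1024, 4) ? "fits" : "too wide");
	printf("1025 px at 4 cpp: %s\n",
	       sprite_line_fits(0, 1025, 4) ? "fits" : "too wide");
	return 0;
}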
@@ -1027,42 +1173,76 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		}
 	}
 
-	dst.x1 = crtc_x;
-	dst.x2 = crtc_x + crtc_w;
-	dst.y1 = crtc_y;
-	dst.y2 = crtc_y + crtc_h;
+	if (state->visible) {
+		src->x1 = src_x;
+		src->x2 = src_x + src_w;
+		src->y1 = src_y;
+		src->y2 = src_y + src_h;
+	}
+
+	dst->x1 = crtc_x;
+	dst->x2 = crtc_x + crtc_w;
+	dst->y1 = crtc_y;
+	dst->y2 = crtc_y + crtc_h;
+
+	return 0;
+}
+
+static int
+intel_commit_sprite_plane(struct drm_plane *plane,
+			  struct intel_plane_state *state)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	enum pipe pipe = intel_crtc->pipe;
+	struct drm_framebuffer *fb = state->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *old_obj = intel_plane->obj;
+	int crtc_x, crtc_y;
+	unsigned int crtc_w, crtc_h;
+	uint32_t src_x, src_y, src_w, src_h;
+	struct drm_rect *dst = &state->dst;
+	const struct drm_rect *clip = &state->clip;
+	bool primary_enabled;
+	int ret;
 
 	/*
 	 * If the sprite is completely covering the primary plane,
 	 * we can disable the primary and save power.
 	 */
-	primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
-	WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+	primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+	WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
 
-	mutex_lock(&dev->struct_mutex);
 
-	/* Note that this will apply the VT-d workaround for scanouts,
-	 * which is more restrictive than required for sprites. (The
-	 * primary plane requires 256KiB alignment with 64 PTE padding,
-	 * the sprite planes only require 128KiB alignment and 32 PTE padding.
-	 */
-	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-
-	i915_gem_track_fb(old_obj, obj,
-			  INTEL_FRONTBUFFER_SPRITE(pipe));
-	mutex_unlock(&dev->struct_mutex);
+	if (old_obj != obj) {
+		mutex_lock(&dev->struct_mutex);
 
-	if (ret)
-		return ret;
+		/* Note that this will apply the VT-d workaround for scanouts,
+		 * which is more restrictive than required for sprites. (The
+		 * primary plane requires 256KiB alignment with 64 PTE padding,
+		 * the sprite planes only require 128KiB alignment and 32 PTE
+		 * padding.
+		 */
+		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+		if (ret == 0)
+			i915_gem_track_fb(old_obj, obj,
+					  INTEL_FRONTBUFFER_SPRITE(pipe));
+		mutex_unlock(&dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
 
-	intel_plane->crtc_x = orig.crtc_x;
-	intel_plane->crtc_y = orig.crtc_y;
-	intel_plane->crtc_w = orig.crtc_w;
-	intel_plane->crtc_h = orig.crtc_h;
-	intel_plane->src_x = orig.src_x;
-	intel_plane->src_y = orig.src_y;
-	intel_plane->src_w = orig.src_w;
-	intel_plane->src_h = orig.src_h;
+	intel_plane->crtc_x = state->orig_dst.x1;
+	intel_plane->crtc_y = state->orig_dst.y1;
+	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+	intel_plane->src_x = state->orig_src.x1;
+	intel_plane->src_y = state->orig_src.y1;
+	intel_plane->src_w = drm_rect_width(&state->orig_src);
+	intel_plane->src_h = drm_rect_height(&state->orig_src);
 	intel_plane->obj = obj;
 
 	if (intel_crtc->active) {
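Everything in intel_commit_sprite_plane() is side-effecting (GEM pinning, frontbuffer tracking, plane bookkeeping), while all clipping and validation already happened in intel_check_sprite_plane(). That check/commit split is the shape atomic modesetting later formalizes. A toy sketch of the pattern, with invented names and no DRM types:

#include <stdbool.h>
#include <stdio.h>

struct toy_state {
	int x, y, w, h;
	bool visible;
};

/* check phase: validate and clip, no side effects */
static int toy_check(struct toy_state *s, int screen_w, int screen_h)
{
	if (s->x >= screen_w || s->y >= screen_h) {
		s->visible = false;
		return 0;
	}
	if (s->x + s->w > screen_w)
		s->w = screen_w - s->x;
	if (s->y + s->h > screen_h)
		s->h = screen_h - s->y;
	s->visible = (s->w > 0 && s->h > 0);
	return 0;
}

/* commit phase: apply the already-validated state */
static void toy_commit(const struct toy_state *s)
{
	if (s->visible)
		printf("program plane %dx%d+%d+%d\n", s->w, s->h, s->x, s->y);
	else
		printf("disable plane\n");
}

int main(void)
{
	struct toy_state s = { .x = 1800, .y = 100, .w = 400, .h = 300 };

	if (toy_check(&s, 1920, 1080) == 0)
		toy_commit(&s);	/* clips to 120x300+1800+100 */
	return 0;
}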
@@ -1076,12 +1256,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		if (primary_was_enabled && !primary_enabled)
 			intel_pre_disable_primary(crtc);
 
-		if (visible)
+		if (state->visible) {
+			crtc_x = state->dst.x1;
+			crtc_y = state->dst.y1;
+			crtc_w = drm_rect_width(&state->dst);
+			crtc_h = drm_rect_height(&state->dst);
+			src_x = state->src.x1;
+			src_y = state->src.y1;
+			src_w = drm_rect_width(&state->src);
+			src_h = drm_rect_height(&state->src);
 			intel_plane->update_plane(plane, crtc, fb, obj,
 						  crtc_x, crtc_y, crtc_w, crtc_h,
 						  src_x, src_y, src_w, src_h);
-		else
+		} else {
 			intel_plane->disable_plane(plane, crtc);
+		}
+
 
 		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
@@ -1090,14 +1280,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	}
 
 	/* Unpin old obj after new one is active to avoid ugliness */
-	if (old_obj) {
+	if (old_obj && old_obj != obj) {
+
 		/*
 		 * It's fairly common to simply update the position of
 		 * an existing object. In that case, we don't need to
 		 * wait for vblank to avoid ugliness, we only need to
 		 * do the pin & ref bookkeeping.
 		 */
-		if (old_obj != obj && intel_crtc->active)
+		if (intel_crtc->active)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 		mutex_lock(&dev->struct_mutex);
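The vblank wait above is what makes the unpin safe: the display engine keeps scanning the old buffer until the new configuration latches at vblank, so releasing it earlier could hand reused memory to an active scanout. A toy model of that ordering; none of these types or helpers are DRM's:

#include <stdio.h>

struct buf {
	const char *name;
	int pinned;
};

static void wait_for_vblank(void)
{
	puts("...vblank...");	/* stand-in for the real wait */
}

static void flip(struct buf **scanout, struct buf *new_buf, struct buf *old)
{
	*scanout = new_buf;	/* hardware latches this at the next vblank */
	wait_for_vblank();	/* until here, old may still be scanned out */
	if (old && old != new_buf) {
		old->pinned = 0;	/* now safe to unpin */
		printf("unpinned %s\n", old->name);
	}
}

int main(void)
{
	struct buf a = { "old", 1 }, b = { "new", 1 };
	struct buf *scanout = &a;

	flip(&scanout, &b, &a);
	printf("scanout=%s\n", scanout->name);
	return 0;
}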
@@ -1109,6 +1300,46 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 }
 
 static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		   unsigned int crtc_w, unsigned int crtc_h,
+		   uint32_t src_x, uint32_t src_y,
+		   uint32_t src_w, uint32_t src_h)
+{
+	struct intel_plane_state state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int ret;
+
+	state.crtc = crtc;
+	state.fb = fb;
+
+	/* sample coordinates in 16.16 fixed point */
+	state.src.x1 = src_x;
+	state.src.x2 = src_x + src_w;
+	state.src.y1 = src_y;
+	state.src.y2 = src_y + src_h;
+
+	/* integer pixels */
+	state.dst.x1 = crtc_x;
+	state.dst.x2 = crtc_x + crtc_w;
+	state.dst.y1 = crtc_y;
+	state.dst.y2 = crtc_y + crtc_h;
+
+	state.clip.x1 = 0;
+	state.clip.y1 = 0;
+	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+	state.orig_src = state.src;
+	state.orig_dst = state.dst;
+
+	ret = intel_check_sprite_plane(plane, &state);
+	if (ret)
+		return ret;
+
+	return intel_commit_sprite_plane(plane, &state);
+}
+
+static int
 intel_disable_plane(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
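The new intel_update_plane() wrapper above builds the clip rectangle from the pipe source size, or from 0x0 when the CRTC is inactive, so clipping against an inactive pipe simply leaves the plane invisible instead of touching hardware. A standalone sketch of that behavior with a toy rect type:

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* clamp r to clip; returns whether anything is left visible */
static bool clip_rect(struct rect *r, const struct rect *clip)
{
	if (r->x1 < clip->x1) r->x1 = clip->x1;
	if (r->y1 < clip->y1) r->y1 = clip->y1;
	if (r->x2 > clip->x2) r->x2 = clip->x2;
	if (r->y2 > clip->y2) r->y2 = clip->y2;
	return r->x1 < r->x2 && r->y1 < r->y2;
}

int main(void)
{
	struct rect dst = { 100, 100, 500, 400 };
	struct rect active = { 0, 0, 1920, 1080 };	/* pipe_src_w x pipe_src_h */
	struct rect inactive = { 0, 0, 0, 0 };		/* CRTC off */
	struct rect d;

	d = dst;
	printf("active pipe:   visible=%d\n", clip_rect(&d, &active));
	d = dst;
	printf("inactive pipe: visible=%d\n", clip_rect(&d, &inactive));
	return 0;
}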
@@ -1305,6 +1536,18 @@ static uint32_t vlv_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
+static uint32_t skl_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
@@ -1368,7 +1611,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
 		}
 		break;
-
+	case 9:
+		/*
+		 * FIXME: Skylake planes can be scaled (with some restrictions),
+		 * but this is for another time.
+		 */
+		intel_plane->can_scale = false;
+		intel_plane->max_downscale = 1;
+		intel_plane->update_plane = skl_update_plane;
+		intel_plane->disable_plane = skl_disable_plane;
+		intel_plane->update_colorkey = skl_update_colorkey;
+		intel_plane->get_colorkey = skl_get_colorkey;
+
+		plane_formats = skl_plane_formats;
+		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+		break;
 	default:
 		kfree(intel_plane);
 		return -ENODEV;
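intel_plane_init() dispatches per hardware generation through function pointers on struct intel_plane, and the gen-9 case above wires in the skl_* hooks with scaling deliberately left off for now. A reduced sketch of that hook-table pattern; the types and callbacks here are toys (the real switch also distinguishes platform variants within a generation):

#include <stdio.h>

struct toy_plane_funcs {
	void (*update)(void);
	void (*disable)(void);
};

static void skl_update(void)  { puts("skl update"); }
static void skl_disable(void) { puts("skl disable"); }
static void vlv_update(void)  { puts("vlv update"); }
static void vlv_disable(void) { puts("vlv disable"); }

static const struct toy_plane_funcs *funcs_for_gen(int gen)
{
	static const struct toy_plane_funcs skl = { skl_update, skl_disable };
	static const struct toy_plane_funcs vlv = { vlv_update, vlv_disable };

	switch (gen) {
	case 7:
	case 8:
		return &vlv;	/* stand-in only */
	case 9:
		return &skl;
	default:
		return NULL;	/* the driver returns -ENODEV here */
	}
}

int main(void)
{
	const struct toy_plane_funcs *f = funcs_for_gen(9);

	if (f)
		f->update();
	return 0;
}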
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341ca3ef9..6f5f59b880f5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		spin_lock_irq(&dev_priv->irq_lock);
 		i915_disable_pipestat(dev_priv, 0,
 				      PIPE_HOTPLUG_INTERRUPT_STATUS |
 				      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		spin_lock_irq(&dev_priv->irq_lock);
 		i915_enable_pipestat(dev_priv, 0,
 				     PIPE_HOTPLUG_INTERRUPT_STATUS |
 				     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 
 	return type;
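The intel_tv.c change swaps spin_lock_irqsave() for spin_lock_irq(): load detection runs in process context with interrupts enabled, so there is no prior interrupt state worth saving and the irqflags local can go. Schematically (a fragment for comparison, not a buildable module):

/* before: safe in any context, but carries an irqflags local */
unsigned long irqflags;

spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
/* ... critical section ... */
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

/* after: shorter, and documents that interrupts are known to be on */
spin_lock_irq(&dev_priv->irq_lock);
/* ... critical section ... */
spin_unlock_irq(&dev_priv->irq_lock);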
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b76163965..0b0f4f85c4f2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -194,13 +194,15 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
 				 int fw_engine)
 {
+	/*
+	 * WaRsDontPollForAckOnClearingFWBits:vlv
+	 * Hardware clears ack bits lazily (only when all ack
+	 * bits become 0) so don't poll for individual ack
+	 * bits to be clear here like on other platforms.
+	 */
+
 	/* Check for Render Engine */
 	if (FORCEWAKE_RENDER & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						       FORCEWAKE_ACK_VLV) &
-				     FORCEWAKE_KERNEL) == 0,
-				    FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
 		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
 				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@ -214,11 +216,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
 
 	/* Check for Media Engine */
 	if (FORCEWAKE_MEDIA & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						       FORCEWAKE_ACK_MEDIA_VLV) &
-				     FORCEWAKE_KERNEL) == 0,
-				    FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
 		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
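The surviving writes go through i915's masked-bit register convention: the high 16 bits of the written value select which low bits take effect, so _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL) flips one bit without disturbing its neighbors. A standalone illustration; the macro bodies below mirror my reading of i915_reg.h:

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	((uint32_t)(((a) << 16) | (a)))
#define MASKED_BIT_DISABLE(a)	((uint32_t)((a) << 16))

#define FORCEWAKE_KERNEL	0x1

int main(void)
{
	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));	/* 0x00010001 */
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));	/* 0x00010000 */
	return 0;
}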
@@ -968,7 +965,7 @@ static const struct register_whitelist {
 	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
 	uint32_t gen_bitmask;
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
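Going by the whitelist comment ("0x10 for 4, 0x30 for 4 and 5"), generation N owns bit N of gen_bitmask, so widening GEN_RANGE(4, 8) to GEN_RANGE(4, 9) should take the mask from 0x1f0 to 0x3f0. A quick standalone check of that reading; gen_range() is a stand-in, not the driver's macro:

#include <stdint.h>
#include <stdio.h>

/* build a mask with bits lo..hi set, one bit per hardware generation */
static uint32_t gen_range(unsigned int lo, unsigned int hi)
{
	uint32_t mask = 0;

	for (unsigned int g = lo; g <= hi; g++)
		mask |= 1u << g;
	return mask;
}

int main(void)
{
	printf("GEN_RANGE(4, 8) -> 0x%03x\n", gen_range(4, 8));	/* 0x1f0 */
	printf("GEN_RANGE(4, 9) -> 0x%03x\n", gen_range(4, 9));	/* 0x3f0 */
	return 0;
}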