author	Dave Airlie <airlied@redhat.com>	2014-11-03 16:36:06 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-11-03 16:36:06 -0500
commit	041df3573d0ce74b7f2f505c4224c8ee9be14a7c (patch)
tree	bd70ff8a5613cd1f4bd0e316874aa90d00a0c5ff
parent	bbf0ef0334f2267687a92ec6d8114fd67b8157a3 (diff)
parent	3eebaec630c2413a5e67bb7f49f0c6a53069a399 (diff)
Merge tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next
- suspend/resume/freeze/thaw unification from Imre
- wa list improvements from Mika&Arun
- display pll precomputation from Ander Conselvan, this removed the last
  ->mode_set callbacks, a big step towards implementing atomic modesets
- more kerneldoc for the interrupt code
- 180 rotation for cursors (Ville&Sonika)
- ULT/ULX feature check macros cleaned up thanks to Damien
- piles and piles of fixes all over, bug team seems to work!

* tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel: (61 commits)
  drm/i915: Update DRIVER_DATE to 20141024
  drm/i915: add comments on what stage a given PM handler is called
  drm/i915: unify switcheroo and legacy suspend/resume handlers
  drm/i915: add poweroff_late handler
  drm/i915: sanitize suspend/resume helper function names
  drm/i915: unify S3 and S4 suspend/resume handlers
  drm/i915: disable/re-enable PCI device around S4 freeze/thaw
  drm/i915: enable output polling during S4 thaw
  drm/i915: check for GT faults in all resume handlers and driver load time
  drm/i915: remove unused restore_gtt_mappings optimization during suspend
  drm/i915: fix S4 suspend while switcheroo state is off
  drm/i915: vlv: fix switcheroo/legacy suspend/resume
  drm/i915: propagate error from legacy resume handler
  drm/i915: unify legacy S3 suspend and S4 freeze handlers
  drm/i915: factor out i915_drm_suspend_late
  drm/i915: Emit even number of dwords when emitting LRIs
  drm/i915: Add rotation support for cursor plane (v5)
  drm/i915: Correctly reject invalid flags for wait_ioctl
  drm/i915: use macros to assign mmio access functions
  drm/i915: only run hsw_power_well_post_enable when really needed
  ...
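The suspend/resume unification is easiest to see in the resulting dev_pm_ops table. The sketch below is condensed from the i915_drv.c hunk further down in this diff (the struct name is invented for illustration; the handler names are the real ones):

/*
 * After the unification, one pair of suspend/resume handlers backs every
 * system-wide PM transition: the S4 freeze/thaw/poweroff/restore phases
 * simply reuse the S3 suspend/resume callbacks.
 */
static const struct dev_pm_ops i915_pm_ops_sketch = {
	/* S3: suspend to RAM */
	.suspend	= i915_pm_suspend,
	.suspend_late	= i915_pm_suspend_late,
	.resume_early	= i915_pm_resume_early,
	.resume		= i915_pm_resume,
	/* S4: hibernation phases reuse the same handlers */
	.freeze		= i915_pm_suspend,
	.freeze_late	= i915_pm_suspend_late,
	.thaw_early	= i915_pm_resume_early,
	.thaw		= i915_pm_resume,
	.poweroff	= i915_pm_suspend,
	.poweroff_late	= i915_pm_suspend_late,
	.restore_early	= i915_pm_resume_early,
	.restore	= i915_pm_resume,
};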
-rw-r--r--  Documentation/DocBook/drm.tmpl             |   5
-rw-r--r--  drivers/gpu/drm/i915/Makefile              |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |  39
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            | 210
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  60
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c     |  41
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c          |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 513
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c        |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 381
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            |  65
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    | 190
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c        |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        |  91
24 files changed, 1145 insertions, 936 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index d7cfc98be159..f6a9d7b21380 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3831,6 +3831,11 @@ int num_ioctls;</synopsis>
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
     </sect2>
     <sect2>
+      <title>Display FIFO Underrun Reporting</title>
+!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
+!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
+    </sect2>
+    <sect2>
       <title>Plane Configuration</title>
       <para>
         This section covers plane configuration and composition with the
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3a6bce047f6f..75fd7de9bf4b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -45,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
 # modesetting core code
 i915-y += intel_bios.o \
 	  intel_display.o \
+	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
 	  intel_modes.o \
 	  intel_overlay.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index da4036d0bab9..e60d5c2f4a35 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1848,6 +1848,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
+	intel_runtime_pm_get(dev_priv);
+
 	for_each_ring(ring, dev_priv, ring_id) {
 		struct intel_ctx_submit_request *head_req = NULL;
 		int count = 0;
@@ -1899,6 +1901,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_putc(m, '\n');
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -2655,18 +2658,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
-	for (i = 0; i < dev_priv->num_wa_regs; ++i) {
-		u32 addr, mask;
-
-		addr = dev_priv->intel_wa_regs[i].addr;
-		mask = dev_priv->intel_wa_regs[i].mask;
-		dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
-		if (dev_priv->intel_wa_regs[i].addr)
-			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-				   dev_priv->intel_wa_regs[i].addr,
-				   dev_priv->intel_wa_regs[i].value,
-				   dev_priv->intel_wa_regs[i].mask);
+	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+		u32 addr, mask, value, read;
+		bool ok;
+
+		addr = dev_priv->workarounds.reg[i].addr;
+		mask = dev_priv->workarounds.reg[i].mask;
+		value = dev_priv->workarounds.reg[i].value;
+		read = I915_READ(addr);
+		ok = (value & mask) == (read & mask);
+		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+			   addr, value, mask, read, ok ? "OK" : "FAIL");
 	}
 
 	intel_runtime_pm_put(dev_priv);
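The rewritten debugfs loop verifies rather than mutates: a workaround register passes only if the bits selected by its mask read back as expected. A minimal standalone restatement of that check (the helper name is invented for illustration):

/* Sketch of the masked compare used above: only the bits covered by
 * the workaround's mask participate in the pass/fail decision. */
static bool wa_reg_check(u32 expected, u32 mask, u32 read_back)
{
	return (expected & mask) == (read_back & mask);
}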
@@ -3255,6 +3258,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+									pipe));
 	u32 val = 0; /* shut up gcc */
 	int ret;
 
@@ -3290,6 +3295,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	if (!pipe_crc->entries)
 		return -ENOMEM;
 
+	/*
+	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+	 * enabled and disabled dynamically based on package C states,
+	 * user space can't make reliable use of the CRCs, so let's just
+	 * completely disable it.
+	 */
+	hsw_disable_ips(crtc);
+
 	spin_lock_irq(&pipe_crc->lock);
 	pipe_crc->head = 0;
 	pipe_crc->tail = 0;
@@ -3328,6 +3341,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
 			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+		hsw_enable_ips(crtc);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 85d14e169409..9a7353302b3f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1275,12 +1275,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
-		i915_resume(dev);
+		i915_resume_legacy(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		pr_err("switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		i915_suspend(dev, pmm);
+		i915_suspend_legacy(dev, pmm);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1853,8 +1853,12 @@ int i915_driver_unload(struct drm_device *dev)
 
 	acpi_video_unregister();
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		intel_fbdev_fini(dev);
+
+	drm_vblank_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
 
 		/*
@@ -1895,8 +1899,6 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_free_hws(dev);
 	}
 
-	drm_vblank_cleanup(dev);
-
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bd7978cb094f..035ec94ca3c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -463,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(IS_ULT(dev));
+				WARN_ON(IS_HSW_ULT(dev));
 			} else if (IS_BROADWELL(dev)) {
 				dev_priv->pch_type = PCH_LPT;
 				dev_priv->pch_id =
@@ -474,17 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(!IS_ULT(dev));
+				WARN_ON(!IS_HSW_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
-				WARN_ON(IS_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
-				WARN_ON(!IS_ULT(dev));
 			} else
 				continue;
 
@@ -556,7 +554,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int intel_resume_prepare(struct drm_i915_private *dev_priv,
 				bool rpm_resume);
 
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -632,7 +630,26 @@ static int i915_drm_freeze(struct drm_device *dev)
 	return 0;
 }
 
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	int ret;
+
+	ret = intel_suspend_complete(dev_priv);
+
+	if (ret) {
+		DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+		return ret;
+	}
+
+	pci_disable_device(drm_dev->pdev);
+	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+	return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
 {
 	int error;
 
@@ -642,48 +659,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 		return -ENODEV;
 	}
 
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
+	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+			 state.event != PM_EVENT_FREEZE))
+		return -EINVAL;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	error = i915_drm_freeze(dev);
+	error = i915_drm_suspend(dev);
 	if (error)
 		return error;
 
-	if (state.event == PM_EVENT_SUSPEND) {
-		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return i915_drm_suspend_late(dev);
 }
 
-static int i915_drm_thaw_early(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = intel_resume_prepare(dev_priv, false);
-	if (ret)
-		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
-
-	intel_uncore_early_sanitize(dev, true);
-	intel_uncore_sanitize(dev);
-	intel_power_domains_init_hw(dev_priv);
 
-	return ret;
-}
-
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    restore_gtt_mappings) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_restore_gtt_mappings(dev);
 		mutex_unlock(&dev->struct_mutex);
@@ -742,21 +736,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 
 	intel_opregion_notify_adapter(dev, PCI_D0);
 
-	return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_check_and_clear_faults(dev);
+	drm_kms_helper_poll_enable(dev);
 
-	return __i915_drm_thaw(dev, true);
-}
+	return 0;
+}
 
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
 {
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-		return 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	/*
 	 * We have a resume ordering issue with the snd-hda driver also
@@ -772,33 +760,29 @@ static int i915_resume_early(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	return i915_drm_thaw_early(dev);
+	ret = intel_resume_prepare(dev_priv, false);
+	if (ret)
+		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+
+	intel_uncore_early_sanitize(dev, true);
+	intel_uncore_sanitize(dev);
+	intel_power_domains_init_hw(dev_priv);
+
+	return ret;
 }
 
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	/*
-	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need to restore the GTT mappings since the BIOS might clear
-	 * all our scratch PTEs.
-	 */
-	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	ret = i915_drm_resume_early(dev);
 	if (ret)
 		return ret;
 
-	drm_kms_helper_poll_enable(dev);
-	return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
-	i915_resume_early(dev);
-	i915_resume(dev);
-
-	return 0;
+	return i915_drm_resume(dev);
 }
 
 /**
@@ -950,15 +934,13 @@ static int i915_pm_suspend(struct device *dev)
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_freeze(drm_dev);
+	return i915_drm_suspend(drm_dev);
 }
 
 static int i915_pm_suspend_late(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	int ret;
 
 	/*
 	 * We have a suspedn ordering issue with the snd-hda driver also
@@ -972,16 +954,7 @@ static int i915_pm_suspend_late(struct device *dev)
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	ret = intel_suspend_complete(dev_priv);
-
-	if (ret)
-		DRM_ERROR("Suspend complete failed: %d\n", ret);
-	else {
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return ret;
+	return i915_drm_suspend_late(drm_dev);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -989,52 +962,21 @@ static int i915_pm_resume_early(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	if (!drm_dev || !drm_dev->dev_private) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
-	return i915_drm_thaw_early(drm_dev);
+	return i915_drm_resume_early(drm_dev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
-	return i915_drm_freeze(drm_dev);
+	return i915_drm_resume(drm_dev);
 }
 
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1592,16 +1534,40 @@ static int intel_resume_prepare(struct drm_i915_private *dev_priv,
 }
 
 static const struct dev_pm_ops i915_pm_ops = {
+	/*
+	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+	 * PMSG_RESUME]
+	 */
 	.suspend = i915_pm_suspend,
 	.suspend_late = i915_pm_suspend_late,
 	.resume_early = i915_pm_resume_early,
 	.resume = i915_pm_resume,
-	.freeze = i915_pm_freeze,
-	.thaw_early = i915_pm_thaw_early,
-	.thaw = i915_pm_thaw,
-	.poweroff = i915_pm_poweroff,
+
+	/*
+	 * S4 event handlers
+	 * @freeze, @freeze_late    : called (1) before creating the
+	 *                            hibernation image [PMSG_FREEZE] and
+	 *                            (2) after rebooting, before restoring
+	 *                            the image [PMSG_QUIESCE]
+	 * @thaw, @thaw_early       : called (1) after creating the hibernation
+	 *                            image, before writing it [PMSG_THAW]
+	 *                            and (2) after failing to create or
+	 *                            restore the image [PMSG_RECOVER]
+	 * @poweroff, @poweroff_late: called after writing the hibernation
+	 *                            image, before rebooting [PMSG_HIBERNATE]
+	 * @restore, @restore_early : called after rebooting and restoring the
+	 *                            hibernation image [PMSG_RESTORE]
+	 */
+	.freeze = i915_pm_suspend,
+	.freeze_late = i915_pm_suspend_late,
+	.thaw_early = i915_pm_resume_early,
+	.thaw = i915_pm_resume,
+	.poweroff = i915_pm_suspend,
+	.poweroff_late = i915_pm_suspend_late,
 	.restore_early = i915_pm_resume_early,
 	.restore = i915_pm_resume,
+
+	/* S0ix (via runtime suspend) event handlers */
 	.runtime_suspend = intel_runtime_suspend,
 	.runtime_resume = intel_runtime_resume,
 };
@@ -1643,7 +1609,7 @@ static struct drm_driver driver = {
 	.set_busid = drm_pci_set_busid,
 
 	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-	.suspend = i915_suspend,
+	.suspend = i915_suspend_legacy,
 	.resume = i915_resume_legacy,
 
 	.device_is_agp = i915_driver_device_is_agp,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9962da202456..583c97debeb7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20141003"
+#define DRIVER_DATE		"20141024"
 
 enum pipe {
 	INVALID_PIPE = -1,
@@ -460,7 +460,7 @@ struct drm_i915_display_funcs {
 	 * Returns true on success, false on failure.
 	 */
 	bool (*find_dpll)(const struct intel_limit *limit,
-			  struct drm_crtc *crtc,
+			  struct intel_crtc *crtc,
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
@@ -476,7 +476,7 @@ struct drm_i915_display_funcs {
 			  struct intel_crtc_config *);
 	void (*get_plane_config)(struct intel_crtc *,
 				 struct intel_plane_config *);
-	int (*crtc_mode_set)(struct drm_crtc *crtc,
+	int (*crtc_mode_set)(struct intel_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
 	void (*crtc_enable)(struct drm_crtc *crtc);
@@ -1448,6 +1448,20 @@ struct i915_frontbuffer_tracking {
 	unsigned flip_bits;
 };
 
+struct i915_wa_reg {
+	u32 addr;
+	u32 value;
+	/* bitmask representing WA bits */
+	u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+	struct i915_wa_reg reg[I915_MAX_WA_REGS];
+	u32 count;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
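With the workaround table now a named type, recording an entry is a bounded array append. A hypothetical helper, not part of this merge, might look like the following sketch:

/* Hypothetical helper, for illustration only: append one workaround
 * register to the fixed-size table, refusing to overflow it. */
static int wa_add(struct i915_workarounds *wa, u32 addr, u32 value, u32 mask)
{
	if (wa->count >= I915_MAX_WA_REGS)
		return -ENOSPC;

	wa->reg[wa->count].addr = addr;
	wa->reg[wa->count].value = value;
	wa->reg[wa->count].mask = mask;
	wa->count++;

	return 0;
}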
@@ -1527,6 +1541,8 @@ struct drm_i915_private {
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
 
+	bool preserve_bios_swizzle;
+
 	/* overlay */
 	struct intel_overlay *overlay;
 
@@ -1590,19 +1606,7 @@ struct drm_i915_private {
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-	/*
-	 * workarounds are currently applied at different places and
-	 * changes are being done to consolidate them so exact count is
-	 * not clear at this point, use a max value for now.
-	 */
-#define I915_MAX_WA_REGS	16
-	struct {
-		u32 addr;
-		u32 value;
-		/* bitmask representing WA bits */
-		u32 mask;
-	} intel_wa_regs[I915_MAX_WA_REGS];
-	u32 num_wa_regs;
+	struct i915_workarounds workarounds;
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -2107,7 +2111,6 @@ struct drm_i915_cmd_table {
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
@@ -2141,7 +2144,7 @@ struct drm_i915_cmd_table {
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 to_i915(dev)->ellc_size)
+				 __I915__(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -2178,13 +2181,15 @@ struct drm_i915_cmd_table {
 #define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
 #define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
 				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK	0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
@@ -2195,7 +2200,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -2216,8 +2221,8 @@ struct drm_i915_cmd_table {
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -2312,6 +2317,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+				  uint32_t interrupt_mask,
+				  uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+	ibx_display_interrupt_update((dev_priv), (bits), 0)
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2719c25588cb..827edb589883 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1466,6 +1466,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look a this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on, hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
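From the userspace side, the comment's recommended pattern is roughly the following. This is a sketch using the existing i915 GTT mmap-offset ioctl, with error handling trimmed; the function name is invented for illustration:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Sketch: map a GEM object by asking the kernel for a fake mmap offset,
 * then calling mmap(2) on the DRM fd itself. Tools like valgrind can see
 * this mapping, unlike one hidden inside a driver-private ioctl. */
static void *map_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}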
@@ -2800,6 +2810,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	u32 seqno = 0;
 	int ret = 0;
 
+	if (args->flags != 0)
+		return -EINVAL;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
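Rejecting non-zero flags up front is what keeps the field usable for future extensions: kernels with this fix return -EINVAL instead of silently ignoring unknown bits. From userspace the contract looks like this sketch (helper name invented, error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: wait up to 1 ms for a buffer object to become idle. The flags
 * field must be zero today; any future meaning can be probed for by
 * checking for -EINVAL. */
static int wait_bo_idle(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.flags = 0,		/* non-zero is now rejected */
		.timeout_ns = 1000000,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}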
@@ -5259,7 +5272,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	unsigned long timeout = msecs_to_jiffies(5000) + 1;
-	unsigned long pinned, bound, unbound, freed;
+	unsigned long pinned, bound, unbound, freed_pages;
 	bool was_interruptible;
 	bool unlock;
 
@@ -5276,7 +5289,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	freed = i915_gem_shrink_all(dev_priv);
+	freed_pages = i915_gem_shrink_all(dev_priv);
 
 	dev_priv->mm.interruptible = was_interruptible;
 
@@ -5307,14 +5320,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
-	pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-		freed, pinned);
+	if (freed_pages || unbound || bound)
+		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+			freed_pages << PAGE_SHIFT, pinned);
 	if (unbound || bound)
 		pr_err("%lu and %lu bytes still available in the "
 		       "bound and unbound GPU page lists.\n",
 		       bound, unbound);
 
-	*(unsigned long *)ptr += freed;
+	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2cefb597df6d..d1e7a3e088aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 	} else if (INTEL_INFO(dev)->gen >= 6) {
-		uint32_t dimm_c0, dimm_c1;
-		dimm_c0 = I915_READ(MAD_DIMM_C0);
-		dimm_c1 = I915_READ(MAD_DIMM_C1);
-		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-		/* Enable swizzling when the channels are populated with
-		 * identically sized dimms. We don't need to check the 3rd
-		 * channel because no cpu with gpu attached ships in that
-		 * configuration. Also, swizzling only makes sense for 2
-		 * channels anyway. */
-		if (dimm_c0 == dimm_c1) {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		} else {
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized dimms. We don't need to check
+			 * the 3rd channel because no cpu with gpu attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
 		}
 	} else if (IS_GEN5(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
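The restructured gen6+ branch now has two sources of truth for swizzling. A condensed restatement of that decision, with the register reads passed in for clarity (function name invented for illustration):

/* Sketch of the gen6+ decision above: either trust the swizzle state the
 * BIOS programmed (read back from DISP_ARB_CTL), or derive it from the
 * DIMM configuration as before. */
static bool bit6_swizzle_enabled(bool preserve_bios_swizzle,
				 bool bios_enabled_swizzling,
				 u32 dimm_c0, u32 dimm_c1)
{
	if (preserve_bios_swizzle)
		return bios_enabled_swizzling;

	/* Swizzle only when both channels carry identically sized DIMMs. */
	return dimm_c0 == dimm_c1;
}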
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e26251..176de6322e4d 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
-#ifdef CONFIG_COMPAT
 /**
  * Called whenever a 32-bit process running under a 64-bit kernel
  * performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	return ret;
 }
-#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f17bbf3ac136..a2b013d97fb6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -139,7 +139,7 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
 } while (0)
 
 /* For display hotplug interrupt */
-static void
+void
 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
@@ -154,7 +154,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 	}
 }
 
-static void
+void
 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
@@ -238,24 +238,6 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 	snb_update_pm_irq(dev_priv, mask, 0);
 }
 
-static bool ivb_can_enable_err_int(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
-	enum pipe pipe;
-
-	assert_spin_locked(&dev_priv->irq_lock);
-
-	for_each_pipe(dev_priv, pipe) {
-		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-		if (crtc->cpu_fifo_underrun_disabled)
-			return false;
-	}
-
-	return true;
-}
-
 /**
  * bdw_update_pm_irq - update GT interrupt 2
  * @dev_priv: driver private
@@ -296,130 +278,15 @@ void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 	bdw_update_pm_irq(dev_priv, mask, 0);
 }
 
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-	struct intel_crtc *crtc;
-
-	assert_spin_locked(&dev_priv->irq_lock);
-
-	for_each_pipe(dev_priv, pipe) {
-		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-		if (crtc->pch_fifo_underrun_disabled)
-			return false;
-	}
-
-	return true;
-}
-
-void i9xx_check_fifo_underruns(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	for_each_intel_crtc(dev, crtc) {
-		u32 reg = PIPESTAT(crtc->pipe);
-		u32 pipestat;
-
-		if (crtc->cpu_fifo_underrun_disabled)
-			continue;
-
-		pipestat = I915_READ(reg) & 0xffff0000;
-		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
-			continue;
-
-		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-		POSTING_READ(reg);
-
-		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
-	}
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
-					     enum pipe pipe,
-					     bool enable, bool old)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg = PIPESTAT(pipe);
-	u32 pipestat = I915_READ(reg) & 0xffff0000;
-
-	assert_spin_locked(&dev_priv->irq_lock);
-
-	if (enable) {
-		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-		POSTING_READ(reg);
-	} else {
-		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
-			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
-	}
-}
-
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-						 enum pipe pipe, bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
-					  DE_PIPEB_FIFO_UNDERRUN;
-
-	if (enable)
-		ironlake_enable_display_irq(dev_priv, bit);
-	else
-		ironlake_disable_display_irq(dev_priv, bit);
-}
-
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe,
-						  bool enable, bool old)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (enable) {
-		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
-
-		if (!ivb_can_enable_err_int(dev))
-			return;
-
-		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-	} else {
-		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
-		if (old &&
-		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
-			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
-				  pipe_name(pipe));
-		}
-	}
-}
-
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe, bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	assert_spin_locked(&dev_priv->irq_lock);
-
-	if (enable)
-		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
-	else
-		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
-	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
-}
-
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
  * @interrupt_mask: mask of interrupt bits to update
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
-static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-					 uint32_t interrupt_mask,
-					 uint32_t enabled_irq_mask)
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+				  uint32_t interrupt_mask,
+				  uint32_t enabled_irq_mask)
 {
 	uint32_t sdeimr = I915_READ(SDEIMR);
 	sdeimr &= ~interrupt_mask;
@@ -433,160 +300,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 	I915_WRITE(SDEIMR, sdeimr);
 	POSTING_READ(SDEIMR);
 }
-#define ibx_enable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), 0)
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
-					    enum transcoder pch_transcoder,
-					    bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
-		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
-	if (enable)
-		ibx_enable_display_interrupt(dev_priv, bit);
-	else
-		ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
-					    enum transcoder pch_transcoder,
-					    bool enable, bool old)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (enable) {
-		I915_WRITE(SERR_INT,
-			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
-		if (!cpt_can_enable_serr_int(dev))
-			return;
-
-		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-	} else {
-		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
-		if (old && I915_READ(SERR_INT) &
-		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
-				  transcoder_name(pch_transcoder));
-		}
-	}
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pipe: pipe
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable CPU fifo underruns for a specific
- * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
- * reporting for one pipe may also disable all the other CPU error interruts for
- * the other pipes, due to the fact that there's just one interrupt mask/enable
- * bit for all the pipes.
- *
- * Returns the previous state of underrun reporting.
- */
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-						    enum pipe pipe, bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	bool old;
-
-	assert_spin_locked(&dev_priv->irq_lock);
-
-	old = !intel_crtc->cpu_fifo_underrun_disabled;
-	intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
-	if (HAS_GMCH_DISPLAY(dev))
-		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (IS_GEN5(dev) || IS_GEN6(dev))
-		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-	else if (IS_GEN7(dev))
-		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (IS_GEN8(dev) || IS_GEN9(dev))
-		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
-	return old;
-}
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-					   enum pipe pipe, bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-	bool ret;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return ret;
-}
-
-static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
-						  enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable PCH fifo underruns for a specific
- * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interruts for the other transcoders, due to the fact that there's just
- * one interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-					   enum transcoder pch_transcoder,
-					   bool enable)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long flags;
-	bool old;
-
-	/*
-	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
-	 * has only one pch transcoder A that all pipes can use. To avoid racy
-	 * pch transcoder -> pipe lookups from interrupt code simply store the
-	 * underrun statistics in crtc A. Since we never expose this anywhere
-	 * nor use it outside of the fifo underrun code here using the "wrong"
-	 * crtc on LPT won't cause issues.
-	 */
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
-	old = !intel_crtc->pch_fifo_underrun_disabled;
-	intel_crtc->pch_fifo_underrun_disabled = !enable;
-
-	if (HAS_PCH_IBX(dev))
-		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
-	else
-		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-	return old;
-}
-
 
 static void
 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2036,9 +1749,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 	 * we need to be careful that we only handle what we want to
 	 * handle.
 	 */
-	mask = 0;
-	if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
-		mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+	/* fifo underruns are filterered in the underrun handler. */
+	mask = PIPE_FIFO_UNDERRUN_STATUS;
 
 	switch (pipe) {
 	case PIPE_A:
@@ -2083,9 +1796,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 			i9xx_pipe_crc_irq_handler(dev, pipe);
 
-		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
-		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 	}
 
 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2252,14 +1964,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-							  false))
-			DRM_ERROR("PCH transcoder A FIFO underrun\n");
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-							  false))
-			DRM_ERROR("PCH transcoder B FIFO underrun\n");
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 }
 
 static void ivb_err_int_handler(struct drm_device *dev)
2265static void ivb_err_int_handler(struct drm_device *dev) 1973static void ivb_err_int_handler(struct drm_device *dev)
@@ -2272,12 +1980,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
2272 DRM_ERROR("Poison interrupt\n"); 1980 DRM_ERROR("Poison interrupt\n");
2273 1981
2274 for_each_pipe(dev_priv, pipe) { 1982 for_each_pipe(dev_priv, pipe) {
2275 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1983 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2276 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1984 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2277 false))
2278 DRM_ERROR("Pipe %c FIFO underrun\n",
2279 pipe_name(pipe));
2280 }
2281 1985
2282 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1986 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2283 if (IS_IVYBRIDGE(dev)) 1987 if (IS_IVYBRIDGE(dev))
@@ -2299,19 +2003,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
 		DRM_ERROR("PCH poison interrupt\n");
 
 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
-		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-							  false))
-			DRM_ERROR("PCH transcoder A FIFO underrun\n");
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
-		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-							  false))
-			DRM_ERROR("PCH transcoder B FIFO underrun\n");
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 
 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
-		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
-							  false))
-			DRM_ERROR("PCH transcoder C FIFO underrun\n");
+		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
 
 	I915_WRITE(SERR_INT, serr_int);
 }
@@ -2377,9 +2075,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 			intel_check_page_flip(dev, pipe);
 
 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
-			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-				DRM_ERROR("Pipe %c FIFO underrun\n",
-					  pipe_name(pipe));
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
 			i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2598,12 +2294,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2598 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2294 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2599 hsw_pipe_crc_irq_handler(dev, pipe); 2295 hsw_pipe_crc_irq_handler(dev, pipe);
2600 2296
2601 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2297 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2602 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2298 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2603 false)) 2299 pipe);
2604 DRM_ERROR("Pipe %c FIFO underrun\n",
2605 pipe_name(pipe));
2606 }
2607 2300
2608 2301
2609 if (IS_GEN9(dev)) 2302 if (IS_GEN9(dev))
@@ -4120,9 +3813,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4120 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3813 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4121 i9xx_pipe_crc_irq_handler(dev, pipe); 3814 i9xx_pipe_crc_irq_handler(dev, pipe);
4122 3815
4123 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3816 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4124 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3817 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4125 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3818 pipe);
4126 } 3819 }
4127 3820
4128 iir = new_iir; 3821 iir = new_iir;
@@ -4314,9 +4007,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4314 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4007 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4315 i9xx_pipe_crc_irq_handler(dev, pipe); 4008 i9xx_pipe_crc_irq_handler(dev, pipe);
4316 4009
4317 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4010 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4318 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4011 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4319 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4012 pipe);
4320 } 4013 }
4321 4014
4322 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4015 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4542,9 +4235,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4542 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4235 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4543 i9xx_pipe_crc_irq_handler(dev, pipe); 4236 i9xx_pipe_crc_irq_handler(dev, pipe);
4544 4237
4545 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4238 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4546 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4239 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4547 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4548 } 4240 }
4549 4241
4550 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4242 if (blc_event || (iir & I915_ASLE_INTERRUPT))
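Every i915_irq.c hunk above collapses the same two-step pattern, disable further underrun reporting and then log, into a single call. A minimal sketch of what the new helper plausibly does, reconstructed from the call sites (the real body lives in the new intel_fifo_underrun.c and may differ):

	/* Sketch only: inferred from the call sites above, not copied from
	 * intel_fifo_underrun.c. */
	static void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
							enum pipe pipe)
	{
		/* Disarm reporting first so a stuck underrun cannot flood
		 * the log, then emit one message for this pipe. */
		if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
			DRM_ERROR("CPU pipe %c FIFO underrun\n", pipe_name(pipe));
	}

The PCH variant, intel_pch_fifo_underrun_irq_handler(dev_priv, pch_transcoder), follows the same shape for the SDE/SERR transcoder bits handled above.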
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a56d9a7e7e0e..ea84e1ec5e5f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -883,8 +883,8 @@ enum punit_power_well {
883#define _VLV_PCS23_DW11_CH0 0x042c 883#define _VLV_PCS23_DW11_CH0 0x042c
884#define _VLV_PCS01_DW11_CH1 0x262c 884#define _VLV_PCS01_DW11_CH1 0x262c
885#define _VLV_PCS23_DW11_CH1 0x282c 885#define _VLV_PCS23_DW11_CH1 0x282c
886#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1) 886#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
887#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1) 887#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
888 888
889#define _VLV_PCS_DW12_CH0 0x8230 889#define _VLV_PCS_DW12_CH0 0x8230
890#define _VLV_PCS_DW12_CH1 0x8430 890#define _VLV_PCS_DW12_CH1 0x8430
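The first i915_reg.h hunk is a straight copy-paste fix: the DW11 selector macros were indexing the _DW0 channel offsets. With _PORT assumed to be the usual offset-stride pick (as sketched below), the old definitions made VLV_PCS01_DW11(ch) address DW0 registers on both channels:

	/* Illustrative stand-in for the kernel's _PORT macro (assumption):
	 * pick offset a for channel 0, offset b for channel 1. */
	#define _PORT(ch, a, b)		((a) + (ch) * ((b) - (a)))

	#define VLV_PCS01_DW11(ch)	_PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)	/* fixed */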
@@ -4054,17 +4054,18 @@ enum punit_power_well {
4054#define DSPFW_PLANEA_WM1_HI_MASK (1<<0) 4054#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
4055 4055
4056/* drain latency register values*/ 4056/* drain latency register values*/
4057#define DRAIN_LATENCY_PRECISION_16 16
4057#define DRAIN_LATENCY_PRECISION_32 32 4058#define DRAIN_LATENCY_PRECISION_32 32
4058#define DRAIN_LATENCY_PRECISION_64 64 4059#define DRAIN_LATENCY_PRECISION_64 64
4059#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) 4060#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
4060#define DDL_CURSOR_PRECISION_64 (1<<31) 4061#define DDL_CURSOR_PRECISION_HIGH (1<<31)
4061#define DDL_CURSOR_PRECISION_32 (0<<31) 4062#define DDL_CURSOR_PRECISION_LOW (0<<31)
4062#define DDL_CURSOR_SHIFT 24 4063#define DDL_CURSOR_SHIFT 24
4063#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite))) 4064#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
4064#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite))) 4065#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
4065#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) 4066#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
4066#define DDL_PLANE_PRECISION_64 (1<<7) 4067#define DDL_PLANE_PRECISION_HIGH (1<<7)
4067#define DDL_PLANE_PRECISION_32 (0<<7) 4068#define DDL_PLANE_PRECISION_LOW (0<<7)
4068#define DDL_PLANE_SHIFT 0 4069#define DDL_PLANE_SHIFT 0
4069#define DRAIN_LATENCY_MASK 0x7f 4070#define DRAIN_LATENCY_MASK 0x7f
4070 4071
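Renaming the precision bits from absolute values (_32/_64) to relative _LOW/_HIGH, and adding DRAIN_LATENCY_PRECISION_16, suggests the low/high pair now maps to different step sizes per platform. A hedged sketch of how a plane's drain-latency field packs into VLV_DDL(pipe) with the renamed bits; the selection policy itself is an assumption:

	static u32 vlv_pack_plane_ddl(u32 drain_latency, bool high_precision)
	{
		u32 val = (drain_latency & DRAIN_LATENCY_MASK) << DDL_PLANE_SHIFT;

		val |= high_precision ? DDL_PLANE_PRECISION_HIGH
				      : DDL_PLANE_PRECISION_LOW;
		return val;
	}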
@@ -4207,6 +4208,7 @@ enum punit_power_well {
4207#define MCURSOR_PIPE_A 0x00 4208#define MCURSOR_PIPE_A 0x00
4208#define MCURSOR_PIPE_B (1 << 28) 4209#define MCURSOR_PIPE_B (1 << 28)
4209#define MCURSOR_GAMMA_ENABLE (1 << 26) 4210#define MCURSOR_GAMMA_ENABLE (1 << 26)
4211#define CURSOR_ROTATE_180 (1<<15)
4210#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 4212#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
4211#define _CURABASE 0x70084 4213#define _CURABASE 0x70084
4212#define _CURAPOS 0x70088 4214#define _CURAPOS 0x70088
@@ -4579,6 +4581,9 @@ enum punit_power_well {
4579#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4) 4581#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
4580#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4) 4582#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
4581#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4) 4583#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
4584#define PLANE_CTL_ROTATE_MASK 0x3
4585#define PLANE_CTL_ROTATE_0 0x0
4586#define PLANE_CTL_ROTATE_180 0x2
4582#define _PLANE_STRIDE_1_A 0x70188 4587#define _PLANE_STRIDE_1_A 0x70188
4583#define _PLANE_STRIDE_2_A 0x70288 4588#define _PLANE_STRIDE_2_A 0x70288
4584#define _PLANE_STRIDE_3_A 0x70388 4589#define _PLANE_STRIDE_3_A 0x70388
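Two rotation fields land in this file: a single CURSOR_ROTATE_180 bit in the cursor control register and a two-bit PLANE_CTL rotation field for SKL universal planes. Usage sketch, consistent with the intel_display.c hunks later in this diff; the bare rotation variable and the mask-clearing step are assumptions, since the hunks build their control values from scratch:

	u32 plane_ctl = 0, cursor_cntl = 0;

	if (rotation == BIT(DRM_ROTATE_180)) {
		plane_ctl = (plane_ctl & ~PLANE_CTL_ROTATE_MASK) | PLANE_CTL_ROTATE_180;
		cursor_cntl |= CURSOR_ROTATE_180;
	}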
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 503847f18fdd..4a5af695307e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
139static struct attribute *rc6_attrs[] = { 139static struct attribute *rc6_attrs[] = {
140 &dev_attr_rc6_enable.attr, 140 &dev_attr_rc6_enable.attr,
141 &dev_attr_rc6_residency_ms.attr, 141 &dev_attr_rc6_residency_ms.attr,
142 &dev_attr_rc6p_residency_ms.attr,
143 &dev_attr_rc6pp_residency_ms.attr,
144 NULL 142 NULL
145}; 143};
146 144
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
148 .name = power_group_name, 146 .name = power_group_name,
149 .attrs = rc6_attrs 147 .attrs = rc6_attrs
150}; 148};
149
150static struct attribute *rc6p_attrs[] = {
151 &dev_attr_rc6p_residency_ms.attr,
152 &dev_attr_rc6pp_residency_ms.attr,
153 NULL
154};
155
156static struct attribute_group rc6p_attr_group = {
157 .name = power_group_name,
158 .attrs = rc6p_attrs
159};
151#endif 160#endif
152 161
153static int l3_access_valid(struct drm_device *dev, loff_t offset) 162static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
595 int ret; 604 int ret;
596 605
597#ifdef CONFIG_PM 606#ifdef CONFIG_PM
598 if (INTEL_INFO(dev)->gen >= 6) { 607 if (HAS_RC6(dev)) {
599 ret = sysfs_merge_group(&dev->primary->kdev->kobj, 608 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
600 &rc6_attr_group); 609 &rc6_attr_group);
601 if (ret) 610 if (ret)
602 DRM_ERROR("RC6 residency sysfs setup failed\n"); 611 DRM_ERROR("RC6 residency sysfs setup failed\n");
603 } 612 }
613 if (HAS_RC6p(dev)) {
614 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
615 &rc6p_attr_group);
616 if (ret)
617 DRM_ERROR("RC6p residency sysfs setup failed\n");
618 }
604#endif 619#endif
605 if (HAS_L3_DPF(dev)) { 620 if (HAS_L3_DPF(dev)) {
606 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs); 621 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
640 device_remove_bin_file(dev->primary->kdev, &dpf_attrs); 655 device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
641#ifdef CONFIG_PM 656#ifdef CONFIG_PM
642 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group); 657 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
658 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
643#endif 659#endif
644} 660}
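Splitting rc6p_residency_ms and rc6pp_residency_ms into their own attribute group means they are only merged on hardware where HAS_RC6p() holds, rather than unconditionally for gen >= 6. From userspace nothing moves; the files stay under the device's power group. A minimal read sketch (the card0 path is an assumption about which DRM minor the device got):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/drm/card0/power/rc6_residency_ms", "r");
		long ms;

		if (f && fscanf(f, "%ld", &ms) == 1)
			printf("RC6 residency: %ld ms\n", ms);
		if (f)
			fclose(f);
		return 0;
	}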
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dacaad5f4e34..a9af9a4866db 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
775 I915_WRITE(crt->adpa_reg, adpa); 775 I915_WRITE(crt->adpa_reg, adpa);
776 POSTING_READ(crt->adpa_reg); 776 POSTING_READ(crt->adpa_reg);
777 777
778 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); 778 DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
779 crt->force_hotplug_required = 1; 779 crt->force_hotplug_required = 1;
780 } 780 }
781 781
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a151de7d13cd..cb5367c6f95a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1291,7 +1291,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1291 return 450000; 1291 return 450000;
1292 else if (freq == LCPLL_CLK_FREQ_450) 1292 else if (freq == LCPLL_CLK_FREQ_450)
1293 return 450000; 1293 return 450000;
1294 else if (IS_ULT(dev)) 1294 else if (IS_HSW_ULT(dev))
1295 return 337500; 1295 return 337500;
1296 else 1296 else
1297 return 540000; 1297 return 540000;
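The IS_ULT to IS_HSW_ULT switch is part of the ULT/ULX feature-macro cleanup called out in the merge summary; only the Haswell decode should take the 337.5 MHz branch. The surrounding logic, condensed into a standalone sketch:

	/* CD clock decode per the context above: 450 MHz for the two
	 * LCPLL_CLK_FREQ_450 cases, 337.5 MHz on HSW ULT, else 540 MHz. */
	static int hsw_cdclk_khz(bool freq_450, bool is_hsw_ult)
	{
		if (freq_450)
			return 450000;
		return is_hsw_ult ? 337500 : 540000;
	}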
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1fc05ffc4695..1d2fa7f4523e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -406,22 +406,22 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
406/** 406/**
407 * Returns whether any output on the specified pipe is of the specified type 407 * Returns whether any output on the specified pipe is of the specified type
408 */ 408 */
409static bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 409static bool intel_pipe_has_type(struct intel_crtc *crtc, int type)
410{ 410{
411 struct drm_device *dev = crtc->dev; 411 struct drm_device *dev = crtc->base.dev;
412 struct intel_encoder *encoder; 412 struct intel_encoder *encoder;
413 413
414 for_each_encoder_on_crtc(dev, crtc, encoder) 414 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
415 if (encoder->type == type) 415 if (encoder->type == type)
416 return true; 416 return true;
417 417
418 return false; 418 return false;
419} 419}
420 420
421static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 421static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
422 int refclk) 422 int refclk)
423{ 423{
424 struct drm_device *dev = crtc->dev; 424 struct drm_device *dev = crtc->base.dev;
425 const intel_limit_t *limit; 425 const intel_limit_t *limit;
426 426
427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
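The long run of intel_display.c hunks below applies one mechanical conversion: helpers that took a struct drm_crtc * now take the containing struct intel_crtc *, and reach DRM core objects through ->base. The pattern at a call-site boundary:

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);	/* drm_crtc -> intel_crtc */
	struct drm_device *dev = intel_crtc->base.dev;		/* back out to the core object */

	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_LVDS))
		/* ... */;

This removes a layer of to_intel_crtc() churn inside the helpers and is groundwork for the ->mode_set removal noted in the merge description.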
@@ -442,9 +442,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
442 return limit; 442 return limit;
443} 443}
444 444
445static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 445static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
446{ 446{
447 struct drm_device *dev = crtc->dev; 447 struct drm_device *dev = crtc->base.dev;
448 const intel_limit_t *limit; 448 const intel_limit_t *limit;
449 449
450 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 450 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -463,9 +463,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
463 return limit; 463 return limit;
464} 464}
465 465
466static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 466static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
467{ 467{
468 struct drm_device *dev = crtc->dev; 468 struct drm_device *dev = crtc->base.dev;
469 const intel_limit_t *limit; 469 const intel_limit_t *limit;
470 470
471 if (HAS_PCH_SPLIT(dev)) 471 if (HAS_PCH_SPLIT(dev))
@@ -576,11 +576,11 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
576} 576}
577 577
578static bool 578static bool
579i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 579i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
580 int target, int refclk, intel_clock_t *match_clock, 580 int target, int refclk, intel_clock_t *match_clock,
581 intel_clock_t *best_clock) 581 intel_clock_t *best_clock)
582{ 582{
583 struct drm_device *dev = crtc->dev; 583 struct drm_device *dev = crtc->base.dev;
584 intel_clock_t clock; 584 intel_clock_t clock;
585 int err = target; 585 int err = target;
586 586
@@ -637,11 +637,11 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
637} 637}
638 638
639static bool 639static bool
640pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 640pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
641 int target, int refclk, intel_clock_t *match_clock, 641 int target, int refclk, intel_clock_t *match_clock,
642 intel_clock_t *best_clock) 642 intel_clock_t *best_clock)
643{ 643{
644 struct drm_device *dev = crtc->dev; 644 struct drm_device *dev = crtc->base.dev;
645 intel_clock_t clock; 645 intel_clock_t clock;
646 int err = target; 646 int err = target;
647 647
@@ -696,11 +696,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
696} 696}
697 697
698static bool 698static bool
699g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 699g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
700 int target, int refclk, intel_clock_t *match_clock, 700 int target, int refclk, intel_clock_t *match_clock,
701 intel_clock_t *best_clock) 701 intel_clock_t *best_clock)
702{ 702{
703 struct drm_device *dev = crtc->dev; 703 struct drm_device *dev = crtc->base.dev;
704 intel_clock_t clock; 704 intel_clock_t clock;
705 int max_n; 705 int max_n;
706 bool found; 706 bool found;
@@ -753,11 +753,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
753} 753}
754 754
755static bool 755static bool
756vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 756vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
757 int target, int refclk, intel_clock_t *match_clock, 757 int target, int refclk, intel_clock_t *match_clock,
758 intel_clock_t *best_clock) 758 intel_clock_t *best_clock)
759{ 759{
760 struct drm_device *dev = crtc->dev; 760 struct drm_device *dev = crtc->base.dev;
761 intel_clock_t clock; 761 intel_clock_t clock;
762 unsigned int bestppm = 1000000; 762 unsigned int bestppm = 1000000;
763 /* min update 19.2 MHz */ 763 /* min update 19.2 MHz */
@@ -810,11 +810,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
810} 810}
811 811
812static bool 812static bool
813chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 813chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
814 int target, int refclk, intel_clock_t *match_clock, 814 int target, int refclk, intel_clock_t *match_clock,
815 intel_clock_t *best_clock) 815 intel_clock_t *best_clock)
816{ 816{
817 struct drm_device *dev = crtc->dev; 817 struct drm_device *dev = crtc->base.dev;
818 intel_clock_t clock; 818 intel_clock_t clock;
819 uint64_t m2; 819 uint64_t m2;
820 int found = false; 820 int found = false;
@@ -1567,7 +1567,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1567 1567
1568 for_each_intel_crtc(dev, crtc) 1568 for_each_intel_crtc(dev, crtc)
1569 count += crtc->active && 1569 count += crtc->active &&
1570 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO); 1570 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1571 1571
1572 return count; 1572 return count;
1573} 1573}
@@ -1646,7 +1646,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
1646 1646
1647 /* Disable DVO 2x clock on both PLLs if necessary */ 1647 /* Disable DVO 2x clock on both PLLs if necessary */
1648 if (IS_I830(dev) && 1648 if (IS_I830(dev) &&
1649 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) && 1649 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1650 intel_num_dvo_pipes(dev) == 1) { 1650 intel_num_dvo_pipes(dev) == 1) {
1651 I915_WRITE(DPLL(PIPE_B), 1651 I915_WRITE(DPLL(PIPE_B),
1652 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1652 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1884,7 +1884,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1884 val &= ~TRANS_INTERLACE_MASK; 1884 val &= ~TRANS_INTERLACE_MASK;
1885 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1885 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1886 if (HAS_PCH_IBX(dev_priv->dev) && 1886 if (HAS_PCH_IBX(dev_priv->dev) &&
1887 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1887 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1888 val |= TRANS_LEGACY_INTERLACED_ILK; 1888 val |= TRANS_LEGACY_INTERLACED_ILK;
1889 else 1889 else
1890 val |= TRANS_INTERLACED; 1890 val |= TRANS_INTERLACED;
@@ -2007,7 +2007,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2007 * need the check. 2007 * need the check.
2008 */ 2008 */
2009 if (!HAS_PCH_SPLIT(dev_priv->dev)) 2009 if (!HAS_PCH_SPLIT(dev_priv->dev))
2010 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) 2010 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2011 assert_dsi_pll_enabled(dev_priv); 2011 assert_dsi_pll_enabled(dev_priv);
2012 else 2012 else
2013 assert_pll_enabled(dev_priv, pipe); 2013 assert_pll_enabled(dev_priv, pipe);
@@ -2359,6 +2359,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2359 struct intel_plane_config *plane_config) 2359 struct intel_plane_config *plane_config)
2360{ 2360{
2361 struct drm_device *dev = intel_crtc->base.dev; 2361 struct drm_device *dev = intel_crtc->base.dev;
2362 struct drm_i915_private *dev_priv = dev->dev_private;
2362 struct drm_crtc *c; 2363 struct drm_crtc *c;
2363 struct intel_crtc *i; 2364 struct intel_crtc *i;
2364 struct drm_i915_gem_object *obj; 2365 struct drm_i915_gem_object *obj;
@@ -2390,6 +2391,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2390 continue; 2391 continue;
2391 2392
2392 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2393 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2394 if (obj->tiling_mode != I915_TILING_NONE)
2395 dev_priv->preserve_bios_swizzle = true;
2396
2393 drm_framebuffer_reference(c->primary->fb); 2397 drm_framebuffer_reference(c->primary->fb);
2394 intel_crtc->base.primary->fb = c->primary->fb; 2398 intel_crtc->base.primary->fb = c->primary->fb;
2395 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 2399 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
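The new preserve_bios_swizzle flag records, while matching up the firmware framebuffer, that the inherited fb is tiled and the BIOS swizzle setup therefore has to be kept. Only the producer side appears in this hunk; the consumer below is sketched as an assumption:

	/* Producer (from the hunk above): the inherited BIOS fb is tiled. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	/* Hypothetical consumer: leave swizzle control alone when the BIOS
	 * value must survive. This guard is an illustration, not code from
	 * this series. */
	if (!dev_priv->preserve_bios_swizzle)
		program_default_swizzling(dev_priv);	/* hypothetical helper */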
@@ -2694,6 +2698,8 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2694 } 2698 }
2695 2699
2696 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 2700 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2701 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2702 plane_ctl |= PLANE_CTL_ROTATE_180;
2697 2703
2698 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 2704 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2699 2705
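skylake_update_primary_plane() builds plane_ctl from zero on every call, so setting PLANE_CTL_ROTATE_180 straight from the plane's rotation property is enough; no read-modify-write against PLANE_CTL_ROTATE_MASK is needed here. Condensed from the hunk:

	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
		plane_ctl |= PLANE_CTL_ROTATE_180;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);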
@@ -2844,8 +2850,8 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
2844 ((adjusted_mode->crtc_hdisplay - 1) << 16) | 2850 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2845 (adjusted_mode->crtc_vdisplay - 1)); 2851 (adjusted_mode->crtc_vdisplay - 1));
2846 if (!crtc->config.pch_pfit.enabled && 2852 if (!crtc->config.pch_pfit.enabled &&
2847 (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) || 2853 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2848 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) { 2854 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2849 I915_WRITE(PF_CTL(crtc->pipe), 0); 2855 I915_WRITE(PF_CTL(crtc->pipe), 0);
2850 I915_WRITE(PF_WIN_POS(crtc->pipe), 0); 2856 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2851 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); 2857 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
@@ -3753,8 +3759,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3753 3759
3754 /* For PCH DP, enable TRANS_DP_CTL */ 3760 /* For PCH DP, enable TRANS_DP_CTL */
3755 if (HAS_PCH_CPT(dev) && 3761 if (HAS_PCH_CPT(dev) &&
3756 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3762 (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3757 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3763 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_EDP))) {
3758 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 3764 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3759 reg = TRANS_DP_CTL(pipe); 3765 reg = TRANS_DP_CTL(pipe);
3760 temp = I915_READ(reg); 3766 temp = I915_READ(reg);
@@ -4031,7 +4037,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4031 return; 4037 return;
4032 4038
4033 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4039 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
4034 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 4040 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
4035 assert_dsi_pll_enabled(dev_priv); 4041 assert_dsi_pll_enabled(dev_priv);
4036 else 4042 else
4037 assert_pll_enabled(dev_priv, pipe); 4043 assert_pll_enabled(dev_priv, pipe);
@@ -4163,8 +4169,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4163 4169
4164 intel_crtc->active = true; 4170 intel_crtc->active = true;
4165 4171
4166 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4172 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4167 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4173 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4168 4174
4169 for_each_encoder_on_crtc(dev, crtc, encoder) 4175 for_each_encoder_on_crtc(dev, crtc, encoder)
4170 if (encoder->pre_enable) 4176 if (encoder->pre_enable)
@@ -4278,13 +4284,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4278 4284
4279 intel_crtc->active = true; 4285 intel_crtc->active = true;
4280 4286
4281 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4287 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4282 for_each_encoder_on_crtc(dev, crtc, encoder) 4288 for_each_encoder_on_crtc(dev, crtc, encoder)
4283 if (encoder->pre_enable) 4289 if (encoder->pre_enable)
4284 encoder->pre_enable(encoder); 4290 encoder->pre_enable(encoder);
4285 4291
4286 if (intel_crtc->config.has_pch_encoder) { 4292 if (intel_crtc->config.has_pch_encoder) {
4287 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4293 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4294 true);
4288 dev_priv->display.fdi_link_train(crtc); 4295 dev_priv->display.fdi_link_train(crtc);
4289 } 4296 }
4290 4297
@@ -4360,7 +4367,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4360 encoder->disable(encoder); 4367 encoder->disable(encoder);
4361 4368
4362 if (intel_crtc->config.has_pch_encoder) 4369 if (intel_crtc->config.has_pch_encoder)
4363 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4370 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4364 4371
4365 intel_disable_pipe(intel_crtc); 4372 intel_disable_pipe(intel_crtc);
4366 4373
@@ -4374,7 +4381,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4374 ironlake_fdi_disable(crtc); 4381 ironlake_fdi_disable(crtc);
4375 4382
4376 ironlake_disable_pch_transcoder(dev_priv, pipe); 4383 ironlake_disable_pch_transcoder(dev_priv, pipe);
4377 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4384 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4378 4385
4379 if (HAS_PCH_CPT(dev)) { 4386 if (HAS_PCH_CPT(dev)) {
4380 /* disable TRANS_DP_CTL */ 4387 /* disable TRANS_DP_CTL */
@@ -4427,7 +4434,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4427 } 4434 }
4428 4435
4429 if (intel_crtc->config.has_pch_encoder) 4436 if (intel_crtc->config.has_pch_encoder)
4430 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 4437 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4438 false);
4431 intel_disable_pipe(intel_crtc); 4439 intel_disable_pipe(intel_crtc);
4432 4440
4433 if (intel_crtc->config.dp_encoder_is_mst) 4441 if (intel_crtc->config.dp_encoder_is_mst)
@@ -4441,7 +4449,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4441 4449
4442 if (intel_crtc->config.has_pch_encoder) { 4450 if (intel_crtc->config.has_pch_encoder) {
4443 lpt_disable_pch_transcoder(dev_priv); 4451 lpt_disable_pch_transcoder(dev_priv);
4444 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4452 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4453 true);
4445 intel_ddi_fdi_disable(crtc); 4454 intel_ddi_fdi_disable(crtc);
4446 } 4455 }
4447 4456
@@ -4615,7 +4624,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
4615 struct drm_i915_private *dev_priv = dev->dev_private; 4624 struct drm_i915_private *dev_priv = dev->dev_private;
4616 4625
4617 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 4626 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4618 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", 4627 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
4619 dev_priv->vlv_cdclk_freq); 4628 dev_priv->vlv_cdclk_freq);
4620 4629
4621 /* 4630 /*
@@ -4818,6 +4827,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
4818static void valleyview_crtc_enable(struct drm_crtc *crtc) 4827static void valleyview_crtc_enable(struct drm_crtc *crtc)
4819{ 4828{
4820 struct drm_device *dev = crtc->dev; 4829 struct drm_device *dev = crtc->dev;
4830 struct drm_i915_private *dev_priv = to_i915(dev);
4821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4831 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4822 struct intel_encoder *encoder; 4832 struct intel_encoder *encoder;
4823 int pipe = intel_crtc->pipe; 4833 int pipe = intel_crtc->pipe;
@@ -4828,7 +4838,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4828 if (intel_crtc->active) 4838 if (intel_crtc->active)
4829 return; 4839 return;
4830 4840
4831 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); 4841 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
4832 4842
4833 if (!is_dsi) { 4843 if (!is_dsi) {
4834 if (IS_CHERRYVIEW(dev)) 4844 if (IS_CHERRYVIEW(dev))
@@ -4846,7 +4856,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4846 4856
4847 intel_crtc->active = true; 4857 intel_crtc->active = true;
4848 4858
4849 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4859 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4850 4860
4851 for_each_encoder_on_crtc(dev, crtc, encoder) 4861 for_each_encoder_on_crtc(dev, crtc, encoder)
4852 if (encoder->pre_pll_enable) 4862 if (encoder->pre_pll_enable)
@@ -4879,7 +4889,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4879 intel_crtc_enable_planes(crtc); 4889 intel_crtc_enable_planes(crtc);
4880 4890
4881 /* Underruns don't raise interrupts, so check manually. */ 4891 /* Underruns don't raise interrupts, so check manually. */
4882 i9xx_check_fifo_underruns(dev); 4892 i9xx_check_fifo_underruns(dev_priv);
4883} 4893}
4884 4894
4885static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 4895static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4894,6 +4904,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4894static void i9xx_crtc_enable(struct drm_crtc *crtc) 4904static void i9xx_crtc_enable(struct drm_crtc *crtc)
4895{ 4905{
4896 struct drm_device *dev = crtc->dev; 4906 struct drm_device *dev = crtc->dev;
4907 struct drm_i915_private *dev_priv = to_i915(dev);
4897 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4908 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4898 struct intel_encoder *encoder; 4909 struct intel_encoder *encoder;
4899 int pipe = intel_crtc->pipe; 4910 int pipe = intel_crtc->pipe;
@@ -4915,7 +4926,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4915 intel_crtc->active = true; 4926 intel_crtc->active = true;
4916 4927
4917 if (!IS_GEN2(dev)) 4928 if (!IS_GEN2(dev))
4918 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4929 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4919 4930
4920 for_each_encoder_on_crtc(dev, crtc, encoder) 4931 for_each_encoder_on_crtc(dev, crtc, encoder)
4921 if (encoder->pre_enable) 4932 if (encoder->pre_enable)
@@ -4946,10 +4957,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4946 * but leave the pipe running. 4957 * but leave the pipe running.
4947 */ 4958 */
4948 if (IS_GEN2(dev)) 4959 if (IS_GEN2(dev))
4949 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4960 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4950 4961
4951 /* Underruns don't raise interrupts, so check manually. */ 4962 /* Underruns don't raise interrupts, so check manually. */
4952 i9xx_check_fifo_underruns(dev); 4963 i9xx_check_fifo_underruns(dev_priv);
4953} 4964}
4954 4965
4955static void i9xx_pfit_disable(struct intel_crtc *crtc) 4966static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4985,7 +4996,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4985 * but leave the pipe running. 4996 * but leave the pipe running.
4986 */ 4997 */
4987 if (IS_GEN2(dev)) 4998 if (IS_GEN2(dev))
4988 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 4999 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4989 5000
4990 /* 5001 /*
4991 * Vblank time updates from the shadow to live plane control register 5002 * Vblank time updates from the shadow to live plane control register
@@ -5021,7 +5032,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
5021 if (encoder->post_disable) 5032 if (encoder->post_disable)
5022 encoder->post_disable(encoder); 5033 encoder->post_disable(encoder);
5023 5034
5024 if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) { 5035 if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
5025 if (IS_CHERRYVIEW(dev)) 5036 if (IS_CHERRYVIEW(dev))
5026 chv_disable_pll(dev_priv, pipe); 5037 chv_disable_pll(dev_priv, pipe);
5027 else if (IS_VALLEYVIEW(dev)) 5038 else if (IS_VALLEYVIEW(dev))
@@ -5031,7 +5042,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
5031 } 5042 }
5032 5043
5033 if (!IS_GEN2(dev)) 5044 if (!IS_GEN2(dev))
5034 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 5045 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5035 5046
5036 intel_crtc->active = false; 5047 intel_crtc->active = false;
5037 intel_update_watermarks(crtc); 5048 intel_update_watermarks(crtc);
@@ -5404,7 +5415,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5404 * - LVDS dual channel mode 5415 * - LVDS dual channel mode
5405 * - Double wide pipe 5416 * - Double wide pipe
5406 */ 5417 */
5407 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5418 if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5408 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 5419 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5409 pipe_config->pipe_src_w &= ~1; 5420 pipe_config->pipe_src_w &= ~1;
5410 5421
@@ -5592,9 +5603,9 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5592 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5603 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5593} 5604}
5594 5605
5595static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 5606static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
5596{ 5607{
5597 struct drm_device *dev = crtc->dev; 5608 struct drm_device *dev = crtc->base.dev;
5598 struct drm_i915_private *dev_priv = dev->dev_private; 5609 struct drm_i915_private *dev_priv = dev->dev_private;
5599 int refclk; 5610 int refclk;
5600 5611
@@ -5642,7 +5653,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5642 crtc->config.dpll_hw_state.fp0 = fp; 5653 crtc->config.dpll_hw_state.fp0 = fp;
5643 5654
5644 crtc->lowfreq_avail = false; 5655 crtc->lowfreq_avail = false;
5645 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5656 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5646 reduced_clock && i915.powersave) { 5657 reduced_clock && i915.powersave) {
5647 crtc->config.dpll_hw_state.fp1 = fp2; 5658 crtc->config.dpll_hw_state.fp1 = fp2;
5648 crtc->lowfreq_avail = true; 5659 crtc->lowfreq_avail = true;
@@ -5811,16 +5822,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5811 5822
5812 /* Set HBR and RBR LPF coefficients */ 5823 /* Set HBR and RBR LPF coefficients */
5813 if (crtc->config.port_clock == 162000 || 5824 if (crtc->config.port_clock == 162000 ||
5814 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 5825 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
5815 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 5826 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
5816 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5827 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5817 0x009f0003); 5828 0x009f0003);
5818 else 5829 else
5819 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5830 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5820 0x00d0000f); 5831 0x00d0000f);
5821 5832
5822 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 5833 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP) ||
5823 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 5834 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5824 /* Use SSC source */ 5835 /* Use SSC source */
5825 if (pipe == PIPE_A) 5836 if (pipe == PIPE_A)
5826 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5837 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5840,8 +5851,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5840 5851
5841 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 5852 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5842 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 5853 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5843 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 5854 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
5844 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 5855 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5845 coreclk |= 0x01000000; 5856 coreclk |= 0x01000000;
5846 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 5857 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5847 5858
@@ -5911,7 +5922,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
5911 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); 5922 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5912 5923
5913 /* Loop filter */ 5924 /* Loop filter */
5914 refclk = i9xx_get_refclk(&crtc->base, 0); 5925 refclk = i9xx_get_refclk(crtc, 0);
5915 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | 5926 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5916 2 << DPIO_CHV_GAIN_CTRL_SHIFT; 5927 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5917 if (refclk == 100000) 5928 if (refclk == 100000)
@@ -5943,12 +5954,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5943 5954
5944 i9xx_update_pll_dividers(crtc, reduced_clock); 5955 i9xx_update_pll_dividers(crtc, reduced_clock);
5945 5956
5946 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || 5957 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
5947 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); 5958 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
5948 5959
5949 dpll = DPLL_VGA_MODE_DIS; 5960 dpll = DPLL_VGA_MODE_DIS;
5950 5961
5951 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) 5962 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
5952 dpll |= DPLLB_MODE_LVDS; 5963 dpll |= DPLLB_MODE_LVDS;
5953 else 5964 else
5954 dpll |= DPLLB_MODE_DAC_SERIAL; 5965 dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -5961,7 +5972,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5961 if (is_sdvo) 5972 if (is_sdvo)
5962 dpll |= DPLL_SDVO_HIGH_SPEED; 5973 dpll |= DPLL_SDVO_HIGH_SPEED;
5963 5974
5964 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 5975 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
5965 dpll |= DPLL_SDVO_HIGH_SPEED; 5976 dpll |= DPLL_SDVO_HIGH_SPEED;
5966 5977
5967 /* compute bitmask from p1 value */ 5978 /* compute bitmask from p1 value */
@@ -5991,7 +6002,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5991 6002
5992 if (crtc->config.sdvo_tv_clock) 6003 if (crtc->config.sdvo_tv_clock)
5993 dpll |= PLL_REF_INPUT_TVCLKINBC; 6004 dpll |= PLL_REF_INPUT_TVCLKINBC;
5994 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 6005 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5995 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6006 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5996 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6007 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5997 else 6008 else
@@ -6020,7 +6031,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
6020 6031
6021 dpll = DPLL_VGA_MODE_DIS; 6032 dpll = DPLL_VGA_MODE_DIS;
6022 6033
6023 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { 6034 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
6024 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6035 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6025 } else { 6036 } else {
6026 if (clock->p1 == 2) 6037 if (clock->p1 == 2)
@@ -6031,10 +6042,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
6031 dpll |= PLL_P2_DIVIDE_BY_4; 6042 dpll |= PLL_P2_DIVIDE_BY_4;
6032 } 6043 }
6033 6044
6034 if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) 6045 if (!IS_I830(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
6035 dpll |= DPLL_DVO_2X_MODE; 6046 dpll |= DPLL_DVO_2X_MODE;
6036 6047
6037 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 6048 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
6038 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6049 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6039 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6050 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6040 else 6051 else
@@ -6065,7 +6076,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
6065 crtc_vtotal -= 1; 6076 crtc_vtotal -= 1;
6066 crtc_vblank_end -= 1; 6077 crtc_vblank_end -= 1;
6067 6078
6068 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6079 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6069 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6080 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6070 else 6081 else
6071 vsyncshift = adjusted_mode->crtc_hsync_start - 6082 vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6223,7 +6234,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6223 6234
6224 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 6235 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6225 if (INTEL_INFO(dev)->gen < 4 || 6236 if (INTEL_INFO(dev)->gen < 4 ||
6226 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6237 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6227 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 6238 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6228 else 6239 else
6229 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 6240 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6237,13 +6248,12 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6237 POSTING_READ(PIPECONF(intel_crtc->pipe)); 6248 POSTING_READ(PIPECONF(intel_crtc->pipe));
6238} 6249}
6239 6250
6240static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 6251static int i9xx_crtc_mode_set(struct intel_crtc *crtc,
6241 int x, int y, 6252 int x, int y,
6242 struct drm_framebuffer *fb) 6253 struct drm_framebuffer *fb)
6243{ 6254{
6244 struct drm_device *dev = crtc->dev; 6255 struct drm_device *dev = crtc->base.dev;
6245 struct drm_i915_private *dev_priv = dev->dev_private; 6256 struct drm_i915_private *dev_priv = dev->dev_private;
6246 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6247 int refclk, num_connectors = 0; 6257 int refclk, num_connectors = 0;
6248 intel_clock_t clock, reduced_clock; 6258 intel_clock_t clock, reduced_clock;
6249 bool ok, has_reduced_clock = false; 6259 bool ok, has_reduced_clock = false;
@@ -6251,7 +6261,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6251 struct intel_encoder *encoder; 6261 struct intel_encoder *encoder;
6252 const intel_limit_t *limit; 6262 const intel_limit_t *limit;
6253 6263
6254 for_each_encoder_on_crtc(dev, crtc, encoder) { 6264 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
6255 switch (encoder->type) { 6265 switch (encoder->type) {
6256 case INTEL_OUTPUT_LVDS: 6266 case INTEL_OUTPUT_LVDS:
6257 is_lvds = true; 6267 is_lvds = true;
@@ -6267,7 +6277,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6267 if (is_dsi) 6277 if (is_dsi)
6268 return 0; 6278 return 0;
6269 6279
6270 if (!intel_crtc->config.clock_set) { 6280 if (!crtc->config.clock_set) {
6271 refclk = i9xx_get_refclk(crtc, num_connectors); 6281 refclk = i9xx_get_refclk(crtc, num_connectors);
6272 6282
6273 /* 6283 /*
@@ -6278,7 +6288,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6278 */ 6288 */
6279 limit = intel_limit(crtc, refclk); 6289 limit = intel_limit(crtc, refclk);
6280 ok = dev_priv->display.find_dpll(limit, crtc, 6290 ok = dev_priv->display.find_dpll(limit, crtc,
6281 intel_crtc->config.port_clock, 6291 crtc->config.port_clock,
6282 refclk, NULL, &clock); 6292 refclk, NULL, &clock);
6283 if (!ok) { 6293 if (!ok) {
6284 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 6294 DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6299,23 +6309,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6299 &reduced_clock); 6309 &reduced_clock);
6300 } 6310 }
6301 /* Compat-code for transition, will disappear. */ 6311 /* Compat-code for transition, will disappear. */
6302 intel_crtc->config.dpll.n = clock.n; 6312 crtc->config.dpll.n = clock.n;
6303 intel_crtc->config.dpll.m1 = clock.m1; 6313 crtc->config.dpll.m1 = clock.m1;
6304 intel_crtc->config.dpll.m2 = clock.m2; 6314 crtc->config.dpll.m2 = clock.m2;
6305 intel_crtc->config.dpll.p1 = clock.p1; 6315 crtc->config.dpll.p1 = clock.p1;
6306 intel_crtc->config.dpll.p2 = clock.p2; 6316 crtc->config.dpll.p2 = clock.p2;
6307 } 6317 }
6308 6318
6309 if (IS_GEN2(dev)) { 6319 if (IS_GEN2(dev)) {
6310 i8xx_update_pll(intel_crtc, 6320 i8xx_update_pll(crtc,
6311 has_reduced_clock ? &reduced_clock : NULL, 6321 has_reduced_clock ? &reduced_clock : NULL,
6312 num_connectors); 6322 num_connectors);
6313 } else if (IS_CHERRYVIEW(dev)) { 6323 } else if (IS_CHERRYVIEW(dev)) {
6314 chv_update_pll(intel_crtc); 6324 chv_update_pll(crtc);
6315 } else if (IS_VALLEYVIEW(dev)) { 6325 } else if (IS_VALLEYVIEW(dev)) {
6316 vlv_update_pll(intel_crtc); 6326 vlv_update_pll(crtc);
6317 } else { 6327 } else {
6318 i9xx_update_pll(intel_crtc, 6328 i9xx_update_pll(crtc,
6319 has_reduced_clock ? &reduced_clock : NULL, 6329 has_reduced_clock ? &reduced_clock : NULL,
6320 num_connectors); 6330 num_connectors);
6321 } 6331 }
@@ -7103,18 +7113,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7103{ 7113{
7104 struct drm_device *dev = crtc->dev; 7114 struct drm_device *dev = crtc->dev;
7105 struct drm_i915_private *dev_priv = dev->dev_private; 7115 struct drm_i915_private *dev_priv = dev->dev_private;
7106 struct intel_encoder *intel_encoder; 7116 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7107 int refclk; 7117 int refclk;
7108 const intel_limit_t *limit; 7118 const intel_limit_t *limit;
7109 bool ret, is_lvds = false; 7119 bool ret, is_lvds = false;
7110 7120
7111 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7121 is_lvds = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_LVDS);
7112 switch (intel_encoder->type) {
7113 case INTEL_OUTPUT_LVDS:
7114 is_lvds = true;
7115 break;
7116 }
7117 }
7118 7122
7119 refclk = ironlake_get_refclk(crtc); 7123 refclk = ironlake_get_refclk(crtc);
7120 7124
@@ -7123,9 +7127,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7123 * refclk, or FALSE. The returned values represent the clock equation: 7127 * refclk, or FALSE. The returned values represent the clock equation:
7124 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 7128 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7125 */ 7129 */
7126 limit = intel_limit(crtc, refclk); 7130 limit = intel_limit(intel_crtc, refclk);
7127 ret = dev_priv->display.find_dpll(limit, crtc, 7131 ret = dev_priv->display.find_dpll(limit, intel_crtc,
7128 to_intel_crtc(crtc)->config.port_clock, 7132 intel_crtc->config.port_clock,
7129 refclk, NULL, clock); 7133 refclk, NULL, clock);
7130 if (!ret) 7134 if (!ret)
7131 return false; 7135 return false;
@@ -7138,7 +7142,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7138 * downclock feature. 7142 * downclock feature.
7139 */ 7143 */
7140 *has_reduced_clock = 7144 *has_reduced_clock =
7141 dev_priv->display.find_dpll(limit, crtc, 7145 dev_priv->display.find_dpll(limit, intel_crtc,
7142 dev_priv->lvds_downclock, 7146 dev_priv->lvds_downclock,
7143 refclk, clock, 7147 refclk, clock,
7144 reduced_clock); 7148 reduced_clock);
@@ -7248,78 +7252,67 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7248 return dpll | DPLL_VCO_ENABLE; 7252 return dpll | DPLL_VCO_ENABLE;
7249} 7253}
7250 7254
7251static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 7255static int ironlake_crtc_mode_set(struct intel_crtc *crtc,
7252 int x, int y, 7256 int x, int y,
7253 struct drm_framebuffer *fb) 7257 struct drm_framebuffer *fb)
7254{ 7258{
7255 struct drm_device *dev = crtc->dev; 7259 struct drm_device *dev = crtc->base.dev;
7256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7257 int num_connectors = 0;
7258 intel_clock_t clock, reduced_clock; 7260 intel_clock_t clock, reduced_clock;
7259 u32 dpll = 0, fp = 0, fp2 = 0; 7261 u32 dpll = 0, fp = 0, fp2 = 0;
7260 bool ok, has_reduced_clock = false; 7262 bool ok, has_reduced_clock = false;
7261 bool is_lvds = false; 7263 bool is_lvds = false;
7262 struct intel_encoder *encoder;
7263 struct intel_shared_dpll *pll; 7264 struct intel_shared_dpll *pll;
7264 7265
7265 for_each_encoder_on_crtc(dev, crtc, encoder) { 7266 is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
7266 switch (encoder->type) {
7267 case INTEL_OUTPUT_LVDS:
7268 is_lvds = true;
7269 break;
7270 }
7271
7272 num_connectors++;
7273 }
7274 7267
7275 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 7268 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7276 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 7269 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7277 7270
7278 ok = ironlake_compute_clocks(crtc, &clock, 7271 ok = ironlake_compute_clocks(&crtc->base, &clock,
7279 &has_reduced_clock, &reduced_clock); 7272 &has_reduced_clock, &reduced_clock);
7280 if (!ok && !intel_crtc->config.clock_set) { 7273 if (!ok && !crtc->config.clock_set) {
7281 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7274 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7282 return -EINVAL; 7275 return -EINVAL;
7283 } 7276 }
7284 /* Compat-code for transition, will disappear. */ 7277 /* Compat-code for transition, will disappear. */
7285 if (!intel_crtc->config.clock_set) { 7278 if (!crtc->config.clock_set) {
7286 intel_crtc->config.dpll.n = clock.n; 7279 crtc->config.dpll.n = clock.n;
7287 intel_crtc->config.dpll.m1 = clock.m1; 7280 crtc->config.dpll.m1 = clock.m1;
7288 intel_crtc->config.dpll.m2 = clock.m2; 7281 crtc->config.dpll.m2 = clock.m2;
7289 intel_crtc->config.dpll.p1 = clock.p1; 7282 crtc->config.dpll.p1 = clock.p1;
7290 intel_crtc->config.dpll.p2 = clock.p2; 7283 crtc->config.dpll.p2 = clock.p2;
7291 } 7284 }
7292 7285
7293 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 7286 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7294 if (intel_crtc->config.has_pch_encoder) { 7287 if (crtc->config.has_pch_encoder) {
7295 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 7288 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
7296 if (has_reduced_clock) 7289 if (has_reduced_clock)
7297 fp2 = i9xx_dpll_compute_fp(&reduced_clock); 7290 fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7298 7291
7299 dpll = ironlake_compute_dpll(intel_crtc, 7292 dpll = ironlake_compute_dpll(crtc,
7300 &fp, &reduced_clock, 7293 &fp, &reduced_clock,
7301 has_reduced_clock ? &fp2 : NULL); 7294 has_reduced_clock ? &fp2 : NULL);
7302 7295
7303 intel_crtc->config.dpll_hw_state.dpll = dpll; 7296 crtc->config.dpll_hw_state.dpll = dpll;
7304 intel_crtc->config.dpll_hw_state.fp0 = fp; 7297 crtc->config.dpll_hw_state.fp0 = fp;
7305 if (has_reduced_clock) 7298 if (has_reduced_clock)
7306 intel_crtc->config.dpll_hw_state.fp1 = fp2; 7299 crtc->config.dpll_hw_state.fp1 = fp2;
7307 else 7300 else
7308 intel_crtc->config.dpll_hw_state.fp1 = fp; 7301 crtc->config.dpll_hw_state.fp1 = fp;
7309 7302
7310 pll = intel_get_shared_dpll(intel_crtc); 7303 pll = intel_get_shared_dpll(crtc);
7311 if (pll == NULL) { 7304 if (pll == NULL) {
7312 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 7305 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7313 pipe_name(intel_crtc->pipe)); 7306 pipe_name(crtc->pipe));
7314 return -EINVAL; 7307 return -EINVAL;
7315 } 7308 }
7316 } else 7309 } else
7317 intel_put_shared_dpll(intel_crtc); 7310 intel_put_shared_dpll(crtc);
7318 7311
7319 if (is_lvds && has_reduced_clock && i915.powersave) 7312 if (is_lvds && has_reduced_clock && i915.powersave)
7320 intel_crtc->lowfreq_avail = true; 7313 crtc->lowfreq_avail = true;
7321 else 7314 else
7322 intel_crtc->lowfreq_avail = false; 7315 crtc->lowfreq_avail = false;
7323 7316
7324 return 0; 7317 return 0;
7325} 7318}
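Both mode_set paths lose their per-encoder switch statements: where the loop only existed to learn whether an LVDS encoder sits on the pipe, a single predicate now answers that. Before and after, condensed from the hunks above:

	/* before */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == INTEL_OUTPUT_LVDS)
			is_lvds = true;

	/* after */
	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);

ironlake_crtc_mode_set() also drops num_connectors, which the removed loop counted but nothing left in the body consumed.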
@@ -7813,16 +7806,14 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
7813 modeset_update_crtc_power_domains(dev); 7806 modeset_update_crtc_power_domains(dev);
7814} 7807}
7815 7808
7816static int haswell_crtc_mode_set(struct drm_crtc *crtc, 7809static int haswell_crtc_mode_set(struct intel_crtc *crtc,
7817 int x, int y, 7810 int x, int y,
7818 struct drm_framebuffer *fb) 7811 struct drm_framebuffer *fb)
7819{ 7812{
7820 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7813 if (!intel_ddi_pll_select(crtc))
7821
7822 if (!intel_ddi_pll_select(intel_crtc))
7823 return -EINVAL; 7814 return -EINVAL;
7824 7815
7825 intel_crtc->lowfreq_avail = false; 7816 crtc->lowfreq_avail = false;
7826 7817
7827 return 0; 7818 return 0;
7828} 7819}
@@ -8062,6 +8053,7 @@ static void haswell_write_eld(struct drm_connector *connector,
8062 struct drm_display_mode *mode) 8053 struct drm_display_mode *mode)
8063{ 8054{
8064 struct drm_i915_private *dev_priv = connector->dev->dev_private; 8055 struct drm_i915_private *dev_priv = connector->dev->dev_private;
8056 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8065 uint8_t *eld = connector->eld; 8057 uint8_t *eld = connector->eld;
8066 uint32_t eldv; 8058 uint32_t eldv;
8067 uint32_t i; 8059 uint32_t i;
@@ -8102,7 +8094,7 @@ static void haswell_write_eld(struct drm_connector *connector,
8102 8094
8103 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 8095 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
8104 8096
8105 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8097 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) {
8106 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 8098 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
8107 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 8099 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
8108 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 8100 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
@@ -8145,6 +8137,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
8145 struct drm_display_mode *mode) 8137 struct drm_display_mode *mode)
8146{ 8138{
8147 struct drm_i915_private *dev_priv = connector->dev->dev_private; 8139 struct drm_i915_private *dev_priv = connector->dev->dev_private;
8140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8148 uint8_t *eld = connector->eld; 8141 uint8_t *eld = connector->eld;
8149 uint32_t eldv; 8142 uint32_t eldv;
8150 uint32_t i; 8143 uint32_t i;
@@ -8198,7 +8191,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
8198 eldv = IBX_ELD_VALIDB << ((i - 1) * 4); 8191 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
8199 } 8192 }
8200 8193
8201 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8194 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) {
8202 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 8195 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
8203 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 8196 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
8204 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 8197 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
@@ -8350,6 +8343,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8350 cntl |= CURSOR_PIPE_CSC_ENABLE; 8343 cntl |= CURSOR_PIPE_CSC_ENABLE;
8351 } 8344 }
8352 8345
8346 if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
8347 cntl |= CURSOR_ROTATE_180;
8348
8353 if (intel_crtc->cursor_cntl != cntl) { 8349 if (intel_crtc->cursor_cntl != cntl) {
8354 I915_WRITE(CURCNTR(pipe), cntl); 8350 I915_WRITE(CURCNTR(pipe), cntl);
8355 POSTING_READ(CURCNTR(pipe)); 8351 POSTING_READ(CURCNTR(pipe));
@@ -8407,6 +8403,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8407 8403
8408 I915_WRITE(CURPOS(pipe), pos); 8404 I915_WRITE(CURPOS(pipe), pos);
8409 8405
8406 /* ILK+ do this automagically */
8407 if (HAS_GMCH_DISPLAY(dev) &&
8408 to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
8409 base += (intel_crtc->cursor_height *
8410 intel_crtc->cursor_width - 1) * 4;
8411 }
8412
8410 if (IS_845G(dev) || IS_I865G(dev)) 8413 if (IS_845G(dev) || IS_I865G(dev))
8411 i845_update_cursor(crtc, base); 8414 i845_update_cursor(crtc, base);
8412 else 8415 else
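Paired with the CURSOR_ROTATE_180 bit set in i9xx_update_cursor() above, GMCH platforms also need the base address moved to the last pixel, presumably so the rotated scanout walks the ARGB buffer in reverse; newer hardware ("ILK+ do this automagically") derives the address itself. Worked example of the offset:

	/* 64x64 is an example; the driver uses intel_crtc->cursor_width and
	 * cursor_height here. ARGB is 4 bytes per pixel. */
	unsigned int w = 64, h = 64;
	u32 base = cursor_base;			/* assumed start of the image */

	base += (h * w - 1) * 4;		/* now points at the final pixel */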
@@ -8450,13 +8453,6 @@ static bool cursor_size_ok(struct drm_device *dev,
8450 return true; 8453 return true;
8451} 8454}
8452 8455
8453/*
8454 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8455 *
8456 * Note that the object's reference will be consumed if the update fails. If
8457 * the update succeeds, the reference of the old object (if any) will be
8458 * consumed.
8459 */
8460static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, 8456static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8461 struct drm_i915_gem_object *obj, 8457 struct drm_i915_gem_object *obj,
8462 uint32_t width, uint32_t height) 8458 uint32_t width, uint32_t height)
@@ -8465,7 +8461,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8465 struct drm_i915_private *dev_priv = dev->dev_private; 8461 struct drm_i915_private *dev_priv = dev->dev_private;
8466 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8462 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8467 enum pipe pipe = intel_crtc->pipe; 8463 enum pipe pipe = intel_crtc->pipe;
8468 unsigned old_width, stride; 8464 unsigned old_width;
8469 uint32_t addr; 8465 uint32_t addr;
8470 int ret; 8466 int ret;
8471 8467
@@ -8477,30 +8473,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8477 goto finish; 8473 goto finish;
8478 } 8474 }
8479 8475
8480 /* Check for which cursor types we support */
8481 if (!cursor_size_ok(dev, width, height)) {
8482 DRM_DEBUG("Cursor dimension not supported\n");
8483 return -EINVAL;
8484 }
8485
8486 stride = roundup_pow_of_two(width) * 4;
8487 if (obj->base.size < stride * height) {
8488 DRM_DEBUG_KMS("buffer is too small\n");
8489 ret = -ENOMEM;
8490 goto fail;
8491 }
8492
8493 /* we only need to pin inside GTT if cursor is non-phy */ 8476 /* we only need to pin inside GTT if cursor is non-phy */
8494 mutex_lock(&dev->struct_mutex); 8477 mutex_lock(&dev->struct_mutex);
8495 if (!INTEL_INFO(dev)->cursor_needs_physical) { 8478 if (!INTEL_INFO(dev)->cursor_needs_physical) {
8496 unsigned alignment; 8479 unsigned alignment;
8497 8480
8498 if (obj->tiling_mode) {
8499 DRM_DEBUG_KMS("cursor cannot be tiled\n");
8500 ret = -EINVAL;
8501 goto fail_locked;
8502 }
8503
8504 /* 8481 /*
8505 * Global gtt pte registers are special registers which actually 8482 * Global gtt pte registers are special registers which actually
8506 * forward writes to a chunk of system memory. Which means that 8483 * forward writes to a chunk of system memory. Which means that
@@ -8576,8 +8553,6 @@ fail_unpin:
8576 i915_gem_object_unpin_from_display_plane(obj); 8553 i915_gem_object_unpin_from_display_plane(obj);
8577fail_locked: 8554fail_locked:
8578 mutex_unlock(&dev->struct_mutex); 8555 mutex_unlock(&dev->struct_mutex);
8579fail:
8580 drm_gem_object_unreference_unlocked(&obj->base);
8581 return ret; 8556 return ret;
8582} 8557}
8583 8558
@@ -10921,7 +10896,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
10921 10896
10922 crtc->scanline_offset = vtotal - 1; 10897 crtc->scanline_offset = vtotal - 1;
10923 } else if (HAS_DDI(dev) && 10898 } else if (HAS_DDI(dev) &&
10924 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { 10899 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
10925 crtc->scanline_offset = 2; 10900 crtc->scanline_offset = 2;
10926 } else 10901 } else
10927 crtc->scanline_offset = 1; 10902 crtc->scanline_offset = 1;
@@ -11041,8 +11016,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11041 crtc->x = x; 11016 crtc->x = x;
11042 crtc->y = y; 11017 crtc->y = y;
11043 11018
11044 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base, 11019 ret = dev_priv->display.crtc_mode_set(intel_crtc, x, y, fb);
11045 x, y, fb);
11046 if (ret) 11020 if (ret)
11047 goto done; 11021 goto done;
11048 } 11022 }
@@ -11666,12 +11640,23 @@ intel_check_primary_plane(struct drm_plane *plane,
11666 struct drm_rect *dest = &state->dst; 11640 struct drm_rect *dest = &state->dst;
11667 struct drm_rect *src = &state->src; 11641 struct drm_rect *src = &state->src;
11668 const struct drm_rect *clip = &state->clip; 11642 const struct drm_rect *clip = &state->clip;
11643 int ret;
11669 11644
11670 return drm_plane_helper_check_update(plane, crtc, fb, 11645 ret = drm_plane_helper_check_update(plane, crtc, fb,
11671 src, dest, clip, 11646 src, dest, clip,
11672 DRM_PLANE_HELPER_NO_SCALING, 11647 DRM_PLANE_HELPER_NO_SCALING,
11673 DRM_PLANE_HELPER_NO_SCALING, 11648 DRM_PLANE_HELPER_NO_SCALING,
11674 false, true, &state->visible); 11649 false, true, &state->visible);
11650 if (ret)
11651 return ret;
11652
11653 /* no fb bound */
11654 if (state->visible && !fb) {
11655 DRM_ERROR("No FB bound\n");
11656 return -EINVAL;
11657 }
11658
11659 return 0;
11675} 11660}
11676 11661
11677static int 11662static int
@@ -11683,6 +11668,8 @@ intel_commit_primary_plane(struct drm_plane *plane,
11683 struct drm_device *dev = crtc->dev; 11668 struct drm_device *dev = crtc->dev;
11684 struct drm_i915_private *dev_priv = dev->dev_private; 11669 struct drm_i915_private *dev_priv = dev->dev_private;
11685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11670 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11671 enum pipe pipe = intel_crtc->pipe;
11672 struct drm_framebuffer *old_fb = plane->fb;
11686 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11673 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11687 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11674 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11688 struct intel_plane *intel_plane = to_intel_plane(plane); 11675 struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -11691,76 +11678,100 @@ intel_commit_primary_plane(struct drm_plane *plane,
11691 11678
11692 intel_crtc_wait_for_pending_flips(crtc); 11679 intel_crtc_wait_for_pending_flips(crtc);
11693 11680
11694 /* 11681 if (intel_crtc_has_pending_flip(crtc)) {
11695 * If clipping results in a non-visible primary plane, we'll disable 11682 DRM_ERROR("pipe is still busy with an old pageflip\n");
11696 * the primary plane. Note that this is a bit different than what 11683 return -EBUSY;
11697 * happens if userspace explicitly disables the plane by passing fb=0 11684 }
11698 * because plane->fb still gets set and pinned. 11685
11699 */ 11686 if (plane->fb != fb) {
11700 if (!state->visible) {
11701 mutex_lock(&dev->struct_mutex); 11687 mutex_lock(&dev->struct_mutex);
11688 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11689 if (ret == 0)
11690 i915_gem_track_fb(old_obj, obj,
11691 INTEL_FRONTBUFFER_PRIMARY(pipe));
11692 mutex_unlock(&dev->struct_mutex);
11693 if (ret != 0) {
11694 DRM_DEBUG_KMS("pin & fence failed\n");
11695 return ret;
11696 }
11697 }
11702 11698
11699 crtc->primary->fb = fb;
11700 crtc->x = src->x1;
11701 crtc->y = src->y1;
11702
11703 intel_plane->crtc_x = state->orig_dst.x1;
11704 intel_plane->crtc_y = state->orig_dst.y1;
11705 intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11706 intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11707 intel_plane->src_x = state->orig_src.x1;
11708 intel_plane->src_y = state->orig_src.y1;
11709 intel_plane->src_w = drm_rect_width(&state->orig_src);
11710 intel_plane->src_h = drm_rect_height(&state->orig_src);
11711 intel_plane->obj = obj;
11712
11713 if (intel_crtc->active) {
11703 /* 11714 /*
11704 * Try to pin the new fb first so that we can bail out if we 11715 * FBC does not work on some platforms for rotated
11705 * fail. 11716 * planes, so disable it when rotation is not 0 and
11717 * update it when rotation is set back to 0.
11718 *
11719 * FIXME: This is redundant with the fbc update done in
11720 * the primary plane enable function except that that
11721 * one is done too late. We eventually need to unify
11722 * this.
11706 */ 11723 */
11707 if (plane->fb != fb) { 11724 if (intel_crtc->primary_enabled &&
11708 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11725 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11709 if (ret) { 11726 dev_priv->fbc.plane == intel_crtc->plane &&
11710 mutex_unlock(&dev->struct_mutex); 11727 intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11711 return ret; 11728 intel_disable_fbc(dev);
11712 }
11713 } 11729 }
11714 11730
11715 i915_gem_track_fb(old_obj, obj, 11731 if (state->visible) {
11716 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11732 bool was_enabled = intel_crtc->primary_enabled;
11717
11718 if (intel_crtc->primary_enabled)
11719 intel_disable_primary_hw_plane(plane, crtc);
11720 11733
11734 /* FIXME: kill this fastboot hack */
11735 intel_update_pipe_size(intel_crtc);
11721 11736
11722 if (plane->fb != fb) 11737 intel_crtc->primary_enabled = true;
11723 if (plane->fb)
11724 intel_unpin_fb_obj(old_obj);
11725 11738
11726 mutex_unlock(&dev->struct_mutex); 11739 dev_priv->display.update_primary_plane(crtc, plane->fb,
11740 crtc->x, crtc->y);
11727 11741
11728 } else {
11729 if (intel_crtc && intel_crtc->active &&
11730 intel_crtc->primary_enabled) {
11731 /* 11742 /*
11732 * FBC does not work on some platforms for rotated 11743 * BDW signals flip done immediately if the plane
11733 * planes, so disable it when rotation is not 0 and 11744 * is disabled, even if the plane enable is already
11734 * update it when rotation is set back to 0. 11745 * armed to occur at the next vblank :(
11735 *
11736 * FIXME: This is redundant with the fbc update done in
11737 * the primary plane enable function except that that
11738 * one is done too late. We eventually need to unify
11739 * this.
11740 */ 11746 */
11741 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 11747 if (IS_BROADWELL(dev) && !was_enabled)
11742 dev_priv->fbc.plane == intel_crtc->plane && 11748 intel_wait_for_vblank(dev, intel_crtc->pipe);
11743 intel_plane->rotation != BIT(DRM_ROTATE_0)) { 11749 } else {
11744 intel_disable_fbc(dev); 11750 /*
11745 } 11751 * If clipping results in a non-visible primary plane,
11752 * we'll disable the primary plane. Note that this is
11753 * a bit different than what happens if userspace
11754 * explicitly disables the plane by passing fb=0
11755 * because plane->fb still gets set and pinned.
11756 */
11757 intel_disable_primary_hw_plane(plane, crtc);
11746 } 11758 }
11747 ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb);
11748 if (ret)
11749 return ret;
11750 11759
11751 if (!intel_crtc->primary_enabled) 11760 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
11752 intel_enable_primary_hw_plane(plane, crtc); 11761
11762 mutex_lock(&dev->struct_mutex);
11763 intel_update_fbc(dev);
11764 mutex_unlock(&dev->struct_mutex);
11753 } 11765 }
11754 11766
11755 intel_plane->crtc_x = state->orig_dst.x1; 11767 if (old_fb && old_fb != fb) {
11756 intel_plane->crtc_y = state->orig_dst.y1; 11768 if (intel_crtc->active)
11757 intel_plane->crtc_w = drm_rect_width(&state->orig_dst); 11769 intel_wait_for_vblank(dev, intel_crtc->pipe);
11758 intel_plane->crtc_h = drm_rect_height(&state->orig_dst); 11770
11759 intel_plane->src_x = state->orig_src.x1; 11771 mutex_lock(&dev->struct_mutex);
11760 intel_plane->src_y = state->orig_src.y1; 11772 intel_unpin_fb_obj(old_obj);
11761 intel_plane->src_w = drm_rect_width(&state->orig_src); 11773 mutex_unlock(&dev->struct_mutex);
11762 intel_plane->src_h = drm_rect_height(&state->orig_src); 11774 }
11763 intel_plane->obj = obj;
11764 11775
11765 return 0; 11776 return 0;
11766} 11777}
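The reordering above is easier to audit as a sequence; a condensed sketch of
the new commit path (illustrative comments only, not the driver's code):

    /*
     * pin_and_fence(new_fb)        -- can fail, so done first, before any
     *                                 hardware state is touched
     * i915_gem_track_fb(old, new)  -- hand over frontbuffer tracking
     * update_primary_plane(new_fb) -- point the hardware at the new fb
     * wait_for_vblank(pipe)        -- the old fb may still be scanned out
     * unpin(old_fb)                -- only safe once the flip has completed
     */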
@@ -11886,16 +11897,55 @@ intel_check_cursor_plane(struct drm_plane *plane,
11886 struct intel_plane_state *state) 11897 struct intel_plane_state *state)
11887{ 11898{
11888 struct drm_crtc *crtc = state->crtc; 11899 struct drm_crtc *crtc = state->crtc;
11900 struct drm_device *dev = crtc->dev;
11889 struct drm_framebuffer *fb = state->fb; 11901 struct drm_framebuffer *fb = state->fb;
11890 struct drm_rect *dest = &state->dst; 11902 struct drm_rect *dest = &state->dst;
11891 struct drm_rect *src = &state->src; 11903 struct drm_rect *src = &state->src;
11892 const struct drm_rect *clip = &state->clip; 11904 const struct drm_rect *clip = &state->clip;
11905 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11906 int crtc_w, crtc_h;
11907 unsigned stride;
11908 int ret;
11893 11909
11894 return drm_plane_helper_check_update(plane, crtc, fb, 11910 ret = drm_plane_helper_check_update(plane, crtc, fb,
11895 src, dest, clip, 11911 src, dest, clip,
11896 DRM_PLANE_HELPER_NO_SCALING, 11912 DRM_PLANE_HELPER_NO_SCALING,
11897 DRM_PLANE_HELPER_NO_SCALING, 11913 DRM_PLANE_HELPER_NO_SCALING,
11898 true, true, &state->visible); 11914 true, true, &state->visible);
11915 if (ret)
11916 return ret;
11917
11918
11919 /* if we want to turn off the cursor ignore width and height */
11920 if (!obj)
11921 return 0;
11922
11923 /* Check for which cursor types we support */
11924 crtc_w = drm_rect_width(&state->orig_dst);
11925 crtc_h = drm_rect_height(&state->orig_dst);
11926 if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
11927 DRM_DEBUG("Cursor dimension not supported\n");
11928 return -EINVAL;
11929 }
11930
11931 stride = roundup_pow_of_two(crtc_w) * 4;
11932 if (obj->base.size < stride * crtc_h) {
11933 DRM_DEBUG_KMS("buffer is too small\n");
11934 return -ENOMEM;
11935 }
11936
11937 if (fb == crtc->cursor->fb)
11938 return 0;
11939
11940 /* we only need to pin inside GTT if cursor is non-phy */
11941 mutex_lock(&dev->struct_mutex);
11942 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
11943 DRM_DEBUG_KMS("cursor cannot be tiled\n");
11944 ret = -EINVAL;
11945 }
11946 mutex_unlock(&dev->struct_mutex);
11947
11948 return ret;
11899} 11949}
11900 11950
11901static int 11951static int
@@ -11970,6 +12020,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11970 .update_plane = intel_cursor_plane_update, 12020 .update_plane = intel_cursor_plane_update,
11971 .disable_plane = intel_cursor_plane_disable, 12021 .disable_plane = intel_cursor_plane_disable,
11972 .destroy = intel_plane_destroy, 12022 .destroy = intel_plane_destroy,
12023 .set_property = intel_plane_set_property,
11973}; 12024};
11974 12025
11975static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 12026static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -11985,12 +12036,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11985 cursor->max_downscale = 1; 12036 cursor->max_downscale = 1;
11986 cursor->pipe = pipe; 12037 cursor->pipe = pipe;
11987 cursor->plane = pipe; 12038 cursor->plane = pipe;
12039 cursor->rotation = BIT(DRM_ROTATE_0);
11988 12040
11989 drm_universal_plane_init(dev, &cursor->base, 0, 12041 drm_universal_plane_init(dev, &cursor->base, 0,
11990 &intel_cursor_plane_funcs, 12042 &intel_cursor_plane_funcs,
11991 intel_cursor_formats, 12043 intel_cursor_formats,
11992 ARRAY_SIZE(intel_cursor_formats), 12044 ARRAY_SIZE(intel_cursor_formats),
11993 DRM_PLANE_TYPE_CURSOR); 12045 DRM_PLANE_TYPE_CURSOR);
12046
12047 if (INTEL_INFO(dev)->gen >= 4) {
12048 if (!dev->mode_config.rotation_property)
12049 dev->mode_config.rotation_property =
12050 drm_mode_create_rotation_property(dev,
12051 BIT(DRM_ROTATE_0) |
12052 BIT(DRM_ROTATE_180));
12053 if (dev->mode_config.rotation_property)
12054 drm_object_attach_property(&cursor->base.base,
12055 dev->mode_config.rotation_property,
12056 cursor->rotation);
12057 }
12058
11994 return &cursor->base; 12059 return &cursor->base;
11995} 12060}
11996 12061
@@ -12157,7 +12222,7 @@ static bool intel_crt_present(struct drm_device *dev)
12157 if (INTEL_INFO(dev)->gen >= 9) 12222 if (INTEL_INFO(dev)->gen >= 9)
12158 return false; 12223 return false;
12159 12224
12160 if (IS_ULT(dev)) 12225 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
12161 return false; 12226 return false;
12162 12227
12163 if (IS_CHERRYVIEW(dev)) 12228 if (IS_CHERRYVIEW(dev))
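A note on the GMCH cursor hunk above: ILK+ adjusts the scanout address
automatically (per the "automagically" comment), while on GMCH platforms the
driver has to repoint the base at the last pixel of the packed 32bpp buffer so
the 180 degree scan presumably walks it from the end. A standalone model of
that arithmetic (the function name is illustrative):

    #include <stdint.h>

    /* 4 bytes per ARGB cursor pixel, matching the "* 4" in the hunk. */
    static uint32_t cursor_base_rotated_180(uint32_t base, int width, int height)
    {
            return base + (height * width - 1) * 4;
    }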
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index d9a7a7865f66..b03fa9026a9c 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -278,7 +278,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
278} 278}
279 279
280static enum drm_connector_status 280static enum drm_connector_status
281intel_mst_port_dp_detect(struct drm_connector *connector) 281intel_dp_mst_detect(struct drm_connector *connector, bool force)
282{ 282{
283 struct intel_connector *intel_connector = to_intel_connector(connector); 283 struct intel_connector *intel_connector = to_intel_connector(connector);
284 struct intel_dp *intel_dp = intel_connector->mst_port; 284 struct intel_dp *intel_dp = intel_connector->mst_port;
@@ -286,14 +286,6 @@ intel_mst_port_dp_detect(struct drm_connector *connector)
286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); 286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
287} 287}
288 288
289static enum drm_connector_status
290intel_dp_mst_detect(struct drm_connector *connector, bool force)
291{
292 enum drm_connector_status status;
293 status = intel_mst_port_dp_detect(connector);
294 return status;
295}
296
297static int 289static int
298intel_dp_mst_set_property(struct drm_connector *connector, 290intel_dp_mst_set_property(struct drm_connector *connector,
299 struct drm_property *property, 291 struct drm_property *property,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 94993d23e547..5ab813c6091e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -755,12 +755,19 @@ static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
755 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1; 755 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
756} 756}
757 757
758/* i915_irq.c */ 758/* intel_fifo_underrun.c */
759bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 759bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
760 enum pipe pipe, bool enable); 760 enum pipe pipe, bool enable);
761bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 761bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
762 enum transcoder pch_transcoder, 762 enum transcoder pch_transcoder,
763 bool enable); 763 bool enable);
764void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
765 enum pipe pipe);
766void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
767 enum transcoder pch_transcoder);
768void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
769
770/* i915_irq.c */
764void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 771void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
765void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 772void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
766void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 773void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
@@ -779,7 +786,6 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
779} 786}
780 787
781int intel_get_crtc_scanline(struct intel_crtc *crtc); 788int intel_get_crtc_scanline(struct intel_crtc *crtc);
782void i9xx_check_fifo_underruns(struct drm_device *dev);
783void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); 789void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
784 790
785/* intel_crt.c */ 791/* intel_crt.c */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 000000000000..77af512d2d35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,381 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_drv.h"
30
31/**
32 * DOC: fifo underrun handling
33 *
34 * The i915 driver checks for display fifo underruns using the interrupt signals
35 * provided by the hardware. This is enabled by default and fairly useful for
36 * debugging display issues, especially watermark settings.
37 *
38 * If an underrun is detected, it is logged to dmesg. To avoid flooding the logs
39 * and occupying the cpu, underrun interrupts are disabled after the first
40 * occurrence until the next modeset on a given pipe.
41 *
42 * Note that underrun detection on gmch platforms is a bit uglier since there
43 * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
44 * interrupt register). Also, on some other platforms underrun interrupts are
45 * shared, which means that if we detect an underrun we need to disable underrun
46 * reporting on all pipes.
47 *
48 * The code also supports underrun detection on the PCH transcoder.
49 */
50
51static bool ivb_can_enable_err_int(struct drm_device *dev)
52{
53 struct drm_i915_private *dev_priv = dev->dev_private;
54 struct intel_crtc *crtc;
55 enum pipe pipe;
56
57 assert_spin_locked(&dev_priv->irq_lock);
58
59 for_each_pipe(dev_priv, pipe) {
60 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
61
62 if (crtc->cpu_fifo_underrun_disabled)
63 return false;
64 }
65
66 return true;
67}
68
69static bool cpt_can_enable_serr_int(struct drm_device *dev)
70{
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 enum pipe pipe;
73 struct intel_crtc *crtc;
74
75 assert_spin_locked(&dev_priv->irq_lock);
76
77 for_each_pipe(dev_priv, pipe) {
78 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
79
80 if (crtc->pch_fifo_underrun_disabled)
81 return false;
82 }
83
84 return true;
85}
86
87/**
88 * i9xx_check_fifo_underruns - check for fifo underruns
89 * @dev_priv: i915 device instance
90 *
91 * This function checks for fifo underruns on GMCH platforms. This needs to be
92 * done manually on modeset to make sure that we catch all underruns since they
93 * do not generate an interrupt by themselves on these platforms.
94 */
95void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
96{
97 struct intel_crtc *crtc;
98
99 spin_lock_irq(&dev_priv->irq_lock);
100
101 for_each_intel_crtc(dev_priv->dev, crtc) {
102 u32 reg = PIPESTAT(crtc->pipe);
103 u32 pipestat;
104
105 if (crtc->cpu_fifo_underrun_disabled)
106 continue;
107
108 pipestat = I915_READ(reg) & 0xffff0000;
109 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
110 continue;
111
112 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
113 POSTING_READ(reg);
114
115 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
116 }
117
118 spin_unlock_irq(&dev_priv->irq_lock);
119}
120
121static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
122 enum pipe pipe,
123 bool enable, bool old)
124{
125 struct drm_i915_private *dev_priv = dev->dev_private;
126 u32 reg = PIPESTAT(pipe);
127 u32 pipestat = I915_READ(reg) & 0xffff0000;
128
129 assert_spin_locked(&dev_priv->irq_lock);
130
131 if (enable) {
132 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
133 POSTING_READ(reg);
134 } else {
135 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
136 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
137 }
138}
139
140static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
141 enum pipe pipe, bool enable)
142{
143 struct drm_i915_private *dev_priv = dev->dev_private;
144 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
145 DE_PIPEB_FIFO_UNDERRUN;
146
147 if (enable)
148 ironlake_enable_display_irq(dev_priv, bit);
149 else
150 ironlake_disable_display_irq(dev_priv, bit);
151}
152
153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
154 enum pipe pipe,
155 bool enable, bool old)
156{
157 struct drm_i915_private *dev_priv = dev->dev_private;
158 if (enable) {
159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
160
161 if (!ivb_can_enable_err_int(dev))
162 return;
163
164 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
165 } else {
166 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
167
168 if (old &&
169 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
170 DRM_ERROR("uncleared fifo underrun on pipe %c\n",
171 pipe_name(pipe));
172 }
173 }
174}
175
176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
177 enum pipe pipe, bool enable)
178{
179 struct drm_i915_private *dev_priv = dev->dev_private;
180
181 assert_spin_locked(&dev_priv->irq_lock);
182
183 if (enable)
184 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
185 else
186 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
187 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
188 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
189}
190
191static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
192 enum transcoder pch_transcoder,
193 bool enable)
194{
195 struct drm_i915_private *dev_priv = dev->dev_private;
196 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
197 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
198
199 if (enable)
200 ibx_enable_display_interrupt(dev_priv, bit);
201 else
202 ibx_disable_display_interrupt(dev_priv, bit);
203}
204
205static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
206 enum transcoder pch_transcoder,
207 bool enable, bool old)
208{
209 struct drm_i915_private *dev_priv = dev->dev_private;
210
211 if (enable) {
212 I915_WRITE(SERR_INT,
213 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
214
215 if (!cpt_can_enable_serr_int(dev))
216 return;
217
218 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
219 } else {
220 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
221
222 if (old && I915_READ(SERR_INT) &
223 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
224 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
225 transcoder_name(pch_transcoder));
226 }
227 }
228}
229
230static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
231 enum pipe pipe, bool enable)
232{
233 struct drm_i915_private *dev_priv = dev->dev_private;
234 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
235 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
236 bool old;
237
238 assert_spin_locked(&dev_priv->irq_lock);
239
240 old = !intel_crtc->cpu_fifo_underrun_disabled;
241 intel_crtc->cpu_fifo_underrun_disabled = !enable;
242
243 if (HAS_GMCH_DISPLAY(dev))
244 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
245 else if (IS_GEN5(dev) || IS_GEN6(dev))
246 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
247 else if (IS_GEN7(dev))
248 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
249 else if (IS_GEN8(dev) || IS_GEN9(dev))
250 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
251
252 return old;
253}
254
255/**
256 * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
257 * @dev_priv: i915 device instance
258 * @pipe: (CPU) pipe to set state for
259 * @enable: whether underruns should be reported or not
260 *
261 * This function sets the fifo underrun state for @pipe. It is used in the
262 * modeset code to avoid false positives since on many platforms underruns are
263 * expected when disabling or enabling the pipe.
264 *
265 * Notice that on some platforms disabling underrun reports for one pipe
266 * disables them for all pipes due to shared interrupts. Actual reporting is
267 * still per-pipe though.
268 *
269 * Returns the previous state of underrun reporting.
270 */
271bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
272 enum pipe pipe, bool enable)
273{
274 unsigned long flags;
275 bool ret;
276
277 spin_lock_irqsave(&dev_priv->irq_lock, flags);
278 ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
279 enable);
280 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
281
282 return ret;
283}
284
285static bool
286__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
287 enum pipe pipe)
288{
289 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291
292 return !intel_crtc->cpu_fifo_underrun_disabled;
293}
294
295/**
296 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
297 * @dev_priv: i915 device instance
298 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
299 * @enable: whether underruns should be reported or not
300 *
301 * This function disables or enables PCH fifo underruns for a specific PCH
302 * transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
303 * underrun reporting for one transcoder may also disable all the other PCH
304 * error interrupts for the other transcoders, since there's just one
305 * interrupt mask/enable bit for all the transcoders.
306 *
307 * Returns the previous state of underrun reporting.
308 */
309bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
310 enum transcoder pch_transcoder,
311 bool enable)
312{
313 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
315 unsigned long flags;
316 bool old;
317
318 /*
319 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
320 * has only one pch transcoder A that all pipes can use. To avoid racy
321 * pch transcoder -> pipe lookups from interrupt code simply store the
322 * underrun statistics in crtc A. Since we never expose this anywhere
323 * nor use it outside of the fifo underrun code here, using the "wrong"
324 * crtc on LPT won't cause issues.
325 */
326
327 spin_lock_irqsave(&dev_priv->irq_lock, flags);
328
329 old = !intel_crtc->pch_fifo_underrun_disabled;
330 intel_crtc->pch_fifo_underrun_disabled = !enable;
331
332 if (HAS_PCH_IBX(dev_priv->dev))
333 ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
334 enable);
335 else
336 cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
337 enable, old);
338
339 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
340 return old;
341}
342
343/**
344 * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
345 * @dev_priv: i915 device instance
346 * @pipe: (CPU) pipe to set state for
347 *
348 * This handles a CPU fifo underrun interrupt, generating an underrun warning
349 * into dmesg if underrun reporting is enabled and then disables the underrun
350 * interrupt to avoid an irq storm.
351 */
352void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
353 enum pipe pipe)
354{
355 /* GMCH can't disable fifo underruns, filter them. */
356 if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
357 !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
358 return;
359
360 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
361 DRM_ERROR("CPU pipe %c FIFO underrun\n",
362 pipe_name(pipe));
363}
364
365/**
366 * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
367 * @dev_priv: i915 device instance
368 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
369 *
370 * This handles a PCH fifo underrun interrupt, generating an underrun warning
371 * into dmesg if underrun reporting is enabled and then disables the underrun
372 * interrupt to avoid an irq storm.
373 */
374void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
375 enum transcoder pch_transcoder)
376{
377 if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
378 false))
379 DRM_ERROR("PCH transcoder %c FIFO underrun\n",
380 transcoder_name(pch_transcoder));
381}
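Taken together, the new file gives the rest of the driver a simple bracket
around operations that are expected to underrun. A minimal usage sketch,
assuming a pipe-disable path (my_disable_pipe is hypothetical; the reporting
call is the real entry point declared in intel_drv.h above):

    static void my_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
    {
            /* Underruns are expected while the pipe shuts down: mute them. */
            intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

            /* ... disable planes, the pipe and its PLL here ... */

            /* Hardware is quiet again: re-arm reporting for the next user. */
            intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
    }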
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e3def5ad4a77..e18b3f49074c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -775,8 +775,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
775 if (panel->backlight.active_low_pwm) 775 if (panel->backlight.active_low_pwm)
776 pch_ctl1 |= BLM_PCH_POLARITY; 776 pch_ctl1 |= BLM_PCH_POLARITY;
777 777
778 /* BDW always uses the pch pwm controls. */ 778 /* After LPT, override is the default. */
779 pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; 779 if (HAS_PCH_LPT(dev_priv))
780 pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
780 781
781 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); 782 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
782 POSTING_READ(BLC_PWM_PCH_CTL1); 783 POSTING_READ(BLC_PWM_PCH_CTL1);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a14be5d56c6b..7a69eba533c7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1345,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1345 int *prec_mult, 1345 int *prec_mult,
1346 int *drain_latency) 1346 int *drain_latency)
1347{ 1347{
1348 struct drm_device *dev = crtc->dev;
1348 int entries; 1349 int entries;
1349 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1350 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1350 1351
@@ -1355,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1355 return false; 1356 return false;
1356 1357
1357 entries = DIV_ROUND_UP(clock, 1000) * pixel_size; 1358 entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
1358 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : 1359 if (IS_CHERRYVIEW(dev))
1359 DRAIN_LATENCY_PRECISION_32; 1360 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
1361 DRAIN_LATENCY_PRECISION_16;
1362 else
1363 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
1364 DRAIN_LATENCY_PRECISION_32;
1360 *drain_latency = (64 * (*prec_mult) * 4) / entries; 1365 *drain_latency = (64 * (*prec_mult) * 4) / entries;
1361 1366
1362 if (*drain_latency > DRAIN_LATENCY_MASK) 1367 if (*drain_latency > DRAIN_LATENCY_MASK)
@@ -1375,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1375 1380
1376static void vlv_update_drain_latency(struct drm_crtc *crtc) 1381static void vlv_update_drain_latency(struct drm_crtc *crtc)
1377{ 1382{
1378 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1383 struct drm_device *dev = crtc->dev;
1384 struct drm_i915_private *dev_priv = dev->dev_private;
1379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1380 int pixel_size; 1386 int pixel_size;
1381 int drain_latency; 1387 int drain_latency;
1382 enum pipe pipe = intel_crtc->pipe; 1388 enum pipe pipe = intel_crtc->pipe;
1383 int plane_prec, prec_mult, plane_dl; 1389 int plane_prec, prec_mult, plane_dl;
1390 const int high_precision = IS_CHERRYVIEW(dev) ?
1391 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
1384 1392
1385 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 | 1393 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
1386 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 | 1394 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
1387 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); 1395 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
1388 1396
1389 if (!intel_crtc_active(crtc)) { 1397 if (!intel_crtc_active(crtc)) {
@@ -1394,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
1394 /* Primary plane Drain Latency */ 1402 /* Primary plane Drain Latency */
1395 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ 1403 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
1396 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1404 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1397 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1405 plane_prec = (prec_mult == high_precision) ?
1398 DDL_PLANE_PRECISION_64 : 1406 DDL_PLANE_PRECISION_HIGH :
1399 DDL_PLANE_PRECISION_32; 1407 DDL_PLANE_PRECISION_LOW;
1400 plane_dl |= plane_prec | drain_latency; 1408 plane_dl |= plane_prec | drain_latency;
1401 } 1409 }
1402 1410
@@ -1408,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
1408 /* Program cursor DL only if it is enabled */ 1416 /* Program cursor DL only if it is enabled */
1409 if (intel_crtc->cursor_base && 1417 if (intel_crtc->cursor_base &&
1410 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1418 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1411 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1419 plane_prec = (prec_mult == high_precision) ?
1412 DDL_CURSOR_PRECISION_64 : 1420 DDL_CURSOR_PRECISION_HIGH :
1413 DDL_CURSOR_PRECISION_32; 1421 DDL_CURSOR_PRECISION_LOW;
1414 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); 1422 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
1415 } 1423 }
1416 1424
@@ -1578,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
1578 int plane_prec; 1586 int plane_prec;
1579 int sprite_dl; 1587 int sprite_dl;
1580 int prec_mult; 1588 int prec_mult;
1589 const int high_precision = IS_CHERRYVIEW(dev) ?
1590 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
1581 1591
1582 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) | 1592 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
1583 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); 1593 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
1584 1594
1585 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, 1595 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
1586 &drain_latency)) { 1596 &drain_latency)) {
1587 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1597 plane_prec = (prec_mult == high_precision) ?
1588 DDL_SPRITE_PRECISION_64(sprite) : 1598 DDL_SPRITE_PRECISION_HIGH(sprite) :
1589 DDL_SPRITE_PRECISION_32(sprite); 1599 DDL_SPRITE_PRECISION_LOW(sprite);
1590 sprite_dl |= plane_prec | 1600 sprite_dl |= plane_prec |
1591 (drain_latency << DDL_SPRITE_SHIFT(sprite)); 1601 (drain_latency << DDL_SPRITE_SHIFT(sprite));
1592 } 1602 }
@@ -3629,10 +3639,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3629 else 3639 else
3630 mode = 0; 3640 mode = 0;
3631 } 3641 }
3632 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3642 if (HAS_RC6p(dev))
3633 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3643 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
3634 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3644 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3635 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3645 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3646 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3647
3648 else
3649 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
3650 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3636} 3651}
3637 3652
3638static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 3653static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3649,7 +3664,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3649 if (enable_rc6 >= 0) { 3664 if (enable_rc6 >= 0) {
3650 int mask; 3665 int mask;
3651 3666
3652 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 3667 if (HAS_RC6p(dev))
3653 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 3668 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3654 INTEL_RC6pp_ENABLE; 3669 INTEL_RC6pp_ENABLE;
3655 else 3670 else
@@ -5649,16 +5664,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
5649 I915_WRITE(WM2_LP_ILK, 0); 5664 I915_WRITE(WM2_LP_ILK, 0);
5650 I915_WRITE(WM1_LP_ILK, 0); 5665 I915_WRITE(WM1_LP_ILK, 0);
5651 5666
5652 /* FIXME(BDW): Check all the w/a, some might only apply to
5653 * pre-production hw. */
5654
5655
5656 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5657
5658 I915_WRITE(_3D_CHICKEN3,
5659 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5660
5661
5662 /* WaSwitchSolVfFArbitrationPriority:bdw */ 5667 /* WaSwitchSolVfFArbitrationPriority:bdw */
5663 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5668 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5664 5669
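The Cherryview branch in vlv_compute_drain_latency above only swaps the
precision pair (32/16 instead of 64/32); the formula itself is unchanged. A
standalone model of the computation (DRAIN_LATENCY_MASK is assumed to be 0x7f
here; the real value lives in i915_reg.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static bool compute_drain_latency(bool is_chv, int clock_khz, int pixel_size,
                                      int *prec_mult, int *drain_latency)
    {
            /* same computation as vlv_compute_drain_latency in the hunk */
            int entries = DIV_ROUND_UP(clock_khz, 1000) * pixel_size;

            if (is_chv)
                    *prec_mult = (entries > 128) ? 32 : 16;
            else
                    *prec_mult = (entries > 128) ? 64 : 32;

            *drain_latency = (64 * (*prec_mult) * 4) / entries;
            return *drain_latency <= 0x7f; /* assumed DRAIN_LATENCY_MASK */
    }

    int main(void)
    {
            int prec, dl;

            /* e.g. a 148500 kHz pixel clock at 4 bytes per pixel */
            if (compute_drain_latency(true, 148500, 4, &prec, &dl))
                    printf("precision %d, drain latency %d\n", prec, dl);
            return 0;
    }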
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 816a6926df28..a8f72e8d64e3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -665,80 +665,108 @@ err:
665 return ret; 665 return ret;
666} 666}
667 667
668static inline void intel_ring_emit_wa(struct intel_engine_cs *ring, 668static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
669 u32 addr, u32 value)
670{ 669{
670 int ret, i;
671 struct drm_device *dev = ring->dev; 671 struct drm_device *dev = ring->dev;
672 struct drm_i915_private *dev_priv = dev->dev_private; 672 struct drm_i915_private *dev_priv = dev->dev_private;
673 struct i915_workarounds *w = &dev_priv->workarounds;
673 674
674 if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS)) 675 if (WARN_ON(w->count == 0))
675 return; 676 return 0;
676 677
677 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 678 ring->gpu_caches_dirty = true;
678 intel_ring_emit(ring, addr); 679 ret = intel_ring_flush_all_caches(ring);
679 intel_ring_emit(ring, value); 680 if (ret)
681 return ret;
680 682
681 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr; 683 ret = intel_ring_begin(ring, (w->count * 2 + 2));
682 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF; 684 if (ret)
683 /* value is updated with the status of remaining bits of this 685 return ret;
684 * register when it is read from debugfs file 686
685 */ 687 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
686 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value; 688 for (i = 0; i < w->count; i++) {
687 dev_priv->num_wa_regs++; 689 intel_ring_emit(ring, w->reg[i].addr);
690 intel_ring_emit(ring, w->reg[i].value);
691 }
692 intel_ring_emit(ring, MI_NOOP);
693
694 intel_ring_advance(ring);
695
696 ring->gpu_caches_dirty = true;
697 ret = intel_ring_flush_all_caches(ring);
698 if (ret)
699 return ret;
688 700
689 return; 701 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
702
703 return 0;
690} 704}
691 705
706static int wa_add(struct drm_i915_private *dev_priv,
707 const u32 addr, const u32 val, const u32 mask)
708{
709 const u32 idx = dev_priv->workarounds.count;
710
711 if (WARN_ON(idx >= I915_MAX_WA_REGS))
712 return -ENOSPC;
713
714 dev_priv->workarounds.reg[idx].addr = addr;
715 dev_priv->workarounds.reg[idx].value = val;
716 dev_priv->workarounds.reg[idx].mask = mask;
717
718 dev_priv->workarounds.count++;
719
720 return 0;
721}
722
723#define WA_REG(addr, val, mask) { \
724 const int r = wa_add(dev_priv, (addr), (val), (mask)); \
725 if (r) \
726 return r; \
727 }
728
729#define WA_SET_BIT_MASKED(addr, mask) \
730 WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
731
732#define WA_CLR_BIT_MASKED(addr, mask) \
733 WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
734
735#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
736#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
737
738#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
739
692static int bdw_init_workarounds(struct intel_engine_cs *ring) 740static int bdw_init_workarounds(struct intel_engine_cs *ring)
693{ 741{
694 int ret;
695 struct drm_device *dev = ring->dev; 742 struct drm_device *dev = ring->dev;
696 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_private *dev_priv = dev->dev_private;
697 744
698 /*
699 * workarounds applied in this fn are part of register state context,
700 * they need to be re-initialized followed by gpu reset, suspend/resume,
701 * module reload.
702 */
703 dev_priv->num_wa_regs = 0;
704 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
705
706 /*
707 * update the number of dwords required based on the
708 * actual number of workarounds applied
709 */
710 ret = intel_ring_begin(ring, 18);
711 if (ret)
712 return ret;
713
714 /* WaDisablePartialInstShootdown:bdw */ 745 /* WaDisablePartialInstShootdown:bdw */
715 /* WaDisableThreadStallDopClockGating:bdw */ 746 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
716 /* FIXME: Unclear whether we really need this on production bdw. */ 747 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
717 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 748 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
718 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE 749 STALL_DOP_GATING_DISABLE);
719 | STALL_DOP_GATING_DISABLE));
720 750
721 /* WaDisableDopClockGating:bdw May not be needed for production */ 751 /* WaDisableDopClockGating:bdw */
722 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, 752 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
723 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 753 DOP_CLOCK_GATING_DISABLE);
724 754
725 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 755 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
726 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 756 GEN8_SAMPLER_POWER_BYPASS_DIS);
727 757
728 /* Use Force Non-Coherent whenever executing a 3D context. This is a 758 /* Use Force Non-Coherent whenever executing a 3D context. This is a
729 * workaround for for a possible hang in the unlikely event a TLB 759 * workaround for for a possible hang in the unlikely event a TLB
730 * invalidation occurs during a PSD flush. 760 * invalidation occurs during a PSD flush.
731 */ 761 */
732 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */ 762 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
733 intel_ring_emit_wa(ring, HDC_CHICKEN0, 763 WA_SET_BIT_MASKED(HDC_CHICKEN0,
734 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT | 764 HDC_FORCE_NON_COHERENT |
735 (IS_BDW_GT3(dev) ? 765 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
736 HDC_FENCE_DEST_SLM_DISABLE : 0)
737 ));
738 766
739 /* Wa4x4STCOptimizationDisable:bdw */ 767 /* Wa4x4STCOptimizationDisable:bdw */
740 intel_ring_emit_wa(ring, CACHE_MODE_1, 768 WA_SET_BIT_MASKED(CACHE_MODE_1,
741 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 769 GEN8_4x4_STC_OPTIMIZATION_DISABLE);
742 770
743 /* 771 /*
744 * BSpec recommends 8x4 when MSAA is used, 772 * BSpec recommends 8x4 when MSAA is used,
@@ -748,52 +776,50 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
748 * disable bit, which we don't touch here, but it's good 776 * disable bit, which we don't touch here, but it's good
749 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 777 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
750 */ 778 */
751 intel_ring_emit_wa(ring, GEN7_GT_MODE, 779 WA_SET_BIT_MASKED(GEN7_GT_MODE,
752 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 780 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
753
754 intel_ring_advance(ring);
755
756 DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
757 dev_priv->num_wa_regs);
758 781
759 return 0; 782 return 0;
760} 783}
761 784
762static int chv_init_workarounds(struct intel_engine_cs *ring) 785static int chv_init_workarounds(struct intel_engine_cs *ring)
763{ 786{
764 int ret;
765 struct drm_device *dev = ring->dev; 787 struct drm_device *dev = ring->dev;
766 struct drm_i915_private *dev_priv = dev->dev_private; 788 struct drm_i915_private *dev_priv = dev->dev_private;
767 789
768 /*
769 * workarounds applied in this fn are part of register state context,
770 * they need to be re-initialized followed by gpu reset, suspend/resume,
771 * module reload.
772 */
773 dev_priv->num_wa_regs = 0;
774 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
775
776 ret = intel_ring_begin(ring, 12);
777 if (ret)
778 return ret;
779
780 /* WaDisablePartialInstShootdown:chv */ 790 /* WaDisablePartialInstShootdown:chv */
781 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 791 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
782 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); 792 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
783 793
784 /* WaDisableThreadStallDopClockGating:chv */ 794 /* WaDisableThreadStallDopClockGating:chv */
785 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 795 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
786 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); 796 STALL_DOP_GATING_DISABLE);
787 797
788 /* WaDisableDopClockGating:chv (pre-production hw) */ 798 /* WaDisableDopClockGating:chv (pre-production hw) */
789 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, 799 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
790 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 800 DOP_CLOCK_GATING_DISABLE);
791 801
792 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ 802 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
793 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 803 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
794 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 804 GEN8_SAMPLER_POWER_BYPASS_DIS);
795 805
796 intel_ring_advance(ring); 806 return 0;
807}
808
809static int init_workarounds_ring(struct intel_engine_cs *ring)
810{
811 struct drm_device *dev = ring->dev;
812 struct drm_i915_private *dev_priv = dev->dev_private;
813
814 WARN_ON(ring->id != RCS);
815
816 dev_priv->workarounds.count = 0;
817
818 if (IS_BROADWELL(dev))
819 return bdw_init_workarounds(ring);
820
821 if (IS_CHERRYVIEW(dev))
822 return chv_init_workarounds(ring);
797 823
798 return 0; 824 return 0;
799} 825}
@@ -853,7 +879,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
853 if (HAS_L3_DPF(dev)) 879 if (HAS_L3_DPF(dev))
854 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 880 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
855 881
856 return ret; 882 return init_workarounds_ring(ring);
857} 883}
858 884
859static void render_ring_cleanup(struct intel_engine_cs *ring) 885static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -2299,10 +2325,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2299 dev_priv->semaphore_obj = obj; 2325 dev_priv->semaphore_obj = obj;
2300 } 2326 }
2301 } 2327 }
2302 if (IS_CHERRYVIEW(dev)) 2328
2303 ring->init_context = chv_init_workarounds; 2329 ring->init_context = intel_ring_workarounds_emit;
2304 else
2305 ring->init_context = bdw_init_workarounds;
2306 ring->add_request = gen6_add_request; 2330 ring->add_request = gen6_add_request;
2307 ring->flush = gen8_render_ring_flush; 2331 ring->flush = gen8_render_ring_flush;
2308 ring->irq_get = gen8_ring_get_irq; 2332 ring->irq_get = gen8_ring_get_irq;
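With the workaround table in place, the replay cost becomes explicit:
MI_LOAD_REGISTER_IMM(count) plus count (addr, value) pairs is 2*count + 1
dwords, and the trailing MI_NOOP pads that to the even 2*count + 2 reserved
via intel_ring_begin(). Adding a platform then reduces to declarative
one-liners; a hypothetical hook in the same style (xyz_init_workarounds and
the Wa name are illustrative, the macro and register are the real ones used
above):

    static int xyz_init_workarounds(struct intel_engine_cs *ring)
    {
            struct drm_device *dev = ring->dev;
            struct drm_i915_private *dev_priv = dev->dev_private;

            /* WaSomethingHypothetical:xyz -- recorded once here, replayed
             * by intel_ring_workarounds_emit() on every context init. */
            WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                              PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

            return 0;
    }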
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 36749b91d28e..39c33e0a753c 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,9 +221,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
222 HSW_PWR_WELL_STATE_ENABLED), 20)) 222 HSW_PWR_WELL_STATE_ENABLED), 20))
223 DRM_ERROR("Timeout enabling power well\n"); 223 DRM_ERROR("Timeout enabling power well\n");
224 hsw_power_well_post_enable(dev_priv);
224 } 225 }
225 226
226 hsw_power_well_post_enable(dev_priv);
227 } else { 227 } else {
228 if (enable_requested) { 228 if (enable_requested) {
229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 750b634d45ec..2c060addea29 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -162,6 +162,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
162 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK; 162 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
163 plane_ctl &= ~PLANE_CTL_TILED_MASK; 163 plane_ctl &= ~PLANE_CTL_TILED_MASK;
164 plane_ctl &= ~PLANE_CTL_ALPHA_MASK; 164 plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
165 plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
165 166
166 /* Trickle feed has to be enabled */ 167 /* Trickle feed has to be enabled */
167 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE; 168 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
@@ -217,6 +218,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
217 default: 218 default:
218 BUG(); 219 BUG();
219 } 220 }
221 if (intel_plane->rotation == BIT(DRM_ROTATE_180))
222 plane_ctl |= PLANE_CTL_ROTATE_180;
220 223
221 plane_ctl |= PLANE_CTL_ENABLE; 224 plane_ctl |= PLANE_CTL_ENABLE;
222 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 225 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b0f4f85c4f2..94276419c13f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -360,7 +360,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
360 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 360 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
361} 361}
362 362
363void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 363static void __intel_uncore_early_sanitize(struct drm_device *dev,
364 bool restore_forcewake)
364{ 365{
365 struct drm_i915_private *dev_priv = dev->dev_private; 366 struct drm_i915_private *dev_priv = dev->dev_private;
366 367
@@ -386,6 +387,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
386 intel_uncore_forcewake_reset(dev, restore_forcewake); 387 intel_uncore_forcewake_reset(dev, restore_forcewake);
387} 388}
388 389
390void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
391{
392 __intel_uncore_early_sanitize(dev, restore_forcewake);
393 i915_check_and_clear_faults(dev);
394}
395
389void intel_uncore_sanitize(struct drm_device *dev) 396void intel_uncore_sanitize(struct drm_device *dev)
390{ 397{
391 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 398 /* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -823,6 +830,22 @@ __gen4_write(64)
823#undef REG_WRITE_FOOTER 830#undef REG_WRITE_FOOTER
824#undef REG_WRITE_HEADER 831#undef REG_WRITE_HEADER
825 832
833#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
834do { \
835 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
836 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
837 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
838 dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
839} while (0)
840
841#define ASSIGN_READ_MMIO_VFUNCS(x) \
842do { \
843 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
844 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
845 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
846 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
847} while (0)
848
826void intel_uncore_init(struct drm_device *dev) 849void intel_uncore_init(struct drm_device *dev)
827{ 850{
828 struct drm_i915_private *dev_priv = dev->dev_private; 851 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -830,7 +853,7 @@ void intel_uncore_init(struct drm_device *dev)
830 setup_timer(&dev_priv->uncore.force_wake_timer, 853 setup_timer(&dev_priv->uncore.force_wake_timer,
831 gen6_force_wake_timer, (unsigned long)dev_priv); 854 gen6_force_wake_timer, (unsigned long)dev_priv);
832 855
833 intel_uncore_early_sanitize(dev, false); 856 __intel_uncore_early_sanitize(dev, false);
834 857
835 if (IS_VALLEYVIEW(dev)) { 858 if (IS_VALLEYVIEW(dev)) {
836 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 859 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -879,76 +902,44 @@ void intel_uncore_init(struct drm_device *dev)
879 switch (INTEL_INFO(dev)->gen) { 902 switch (INTEL_INFO(dev)->gen) {
880 default: 903 default:
881 if (IS_CHERRYVIEW(dev)) { 904 if (IS_CHERRYVIEW(dev)) {
882 dev_priv->uncore.funcs.mmio_writeb = chv_write8; 905 ASSIGN_WRITE_MMIO_VFUNCS(chv);
883 dev_priv->uncore.funcs.mmio_writew = chv_write16; 906 ASSIGN_READ_MMIO_VFUNCS(chv);
884 dev_priv->uncore.funcs.mmio_writel = chv_write32;
885 dev_priv->uncore.funcs.mmio_writeq = chv_write64;
886 dev_priv->uncore.funcs.mmio_readb = chv_read8;
887 dev_priv->uncore.funcs.mmio_readw = chv_read16;
888 dev_priv->uncore.funcs.mmio_readl = chv_read32;
889 dev_priv->uncore.funcs.mmio_readq = chv_read64;
890 907
891 } else { 908 } else {
892 dev_priv->uncore.funcs.mmio_writeb = gen8_write8; 909 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
893 dev_priv->uncore.funcs.mmio_writew = gen8_write16; 910 ASSIGN_READ_MMIO_VFUNCS(gen6);
894 dev_priv->uncore.funcs.mmio_writel = gen8_write32;
895 dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
896 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
897 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
898 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
899 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
900 } 911 }
901 break; 912 break;
902 case 7: 913 case 7:
903 case 6: 914 case 6:
904 if (IS_HASWELL(dev)) { 915 if (IS_HASWELL(dev)) {
905 dev_priv->uncore.funcs.mmio_writeb = hsw_write8; 916 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
906 dev_priv->uncore.funcs.mmio_writew = hsw_write16;
907 dev_priv->uncore.funcs.mmio_writel = hsw_write32;
908 dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
909 } else { 917 } else {
910 dev_priv->uncore.funcs.mmio_writeb = gen6_write8; 918 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
911 dev_priv->uncore.funcs.mmio_writew = gen6_write16;
912 dev_priv->uncore.funcs.mmio_writel = gen6_write32;
913 dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
914 } 919 }
915 920
916 if (IS_VALLEYVIEW(dev)) { 921 if (IS_VALLEYVIEW(dev)) {
917 dev_priv->uncore.funcs.mmio_readb = vlv_read8; 922 ASSIGN_READ_MMIO_VFUNCS(vlv);
918 dev_priv->uncore.funcs.mmio_readw = vlv_read16;
919 dev_priv->uncore.funcs.mmio_readl = vlv_read32;
920 dev_priv->uncore.funcs.mmio_readq = vlv_read64;
921 } else { 923 } else {
922 dev_priv->uncore.funcs.mmio_readb = gen6_read8; 924 ASSIGN_READ_MMIO_VFUNCS(gen6);
923 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
924 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
925 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
926 } 925 }
927 break; 926 break;
928 case 5: 927 case 5:
929 dev_priv->uncore.funcs.mmio_writeb = gen5_write8; 928 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
930 dev_priv->uncore.funcs.mmio_writew = gen5_write16; 929 ASSIGN_READ_MMIO_VFUNCS(gen5);
931 dev_priv->uncore.funcs.mmio_writel = gen5_write32;
932 dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
933 dev_priv->uncore.funcs.mmio_readb = gen5_read8;
934 dev_priv->uncore.funcs.mmio_readw = gen5_read16;
935 dev_priv->uncore.funcs.mmio_readl = gen5_read32;
936 dev_priv->uncore.funcs.mmio_readq = gen5_read64;
937 break; 930 break;
938 case 4: 931 case 4:
939 case 3: 932 case 3:
940 case 2: 933 case 2:
941 dev_priv->uncore.funcs.mmio_writeb = gen4_write8; 934 ASSIGN_WRITE_MMIO_VFUNCS(gen4);
942 dev_priv->uncore.funcs.mmio_writew = gen4_write16; 935 ASSIGN_READ_MMIO_VFUNCS(gen4);
943 dev_priv->uncore.funcs.mmio_writel = gen4_write32;
944 dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
945 dev_priv->uncore.funcs.mmio_readb = gen4_read8;
946 dev_priv->uncore.funcs.mmio_readw = gen4_read16;
947 dev_priv->uncore.funcs.mmio_readl = gen4_read32;
948 dev_priv->uncore.funcs.mmio_readq = gen4_read64;
949 break; 936 break;
950 } 937 }
938
939 i915_check_and_clear_faults(dev);
951} 940}
941#undef ASSIGN_WRITE_MMIO_VFUNCS
942#undef ASSIGN_READ_MMIO_VFUNCS
952 943
953void intel_uncore_fini(struct drm_device *dev) 944void intel_uncore_fini(struct drm_device *dev)
954{ 945{