-rw-r--r--  MAINTAINERS                                  |    4
-rw-r--r--  drivers/char/agp/intel-agp.c                 |    4
-rw-r--r--  drivers/gpu/drm/i915/Makefile                |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |    9
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              |    4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |   33
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |   11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |   39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c        |   14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              |  174
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |   46
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c          |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c             |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c             |  755
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         |  371
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |   33
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c            |  278
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c             |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c              |   92
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c            |   11
22 files changed, 1627 insertions(+), 292 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 707163365a93..90e7e226a90c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2373,10 +2373,10 @@ F: drivers/gpu/drm/
2373F: include/drm/ 2373F: include/drm/
2374 2374
2375INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 2375INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
2376M: Keith Packard <keithp@keithp.com> 2376M: Daniel Vetter <daniel.vetter@ffwll.ch>
2377L: intel-gfx@lists.freedesktop.org (subscribers-only) 2377L: intel-gfx@lists.freedesktop.org (subscribers-only)
2378L: dri-devel@lists.freedesktop.org 2378L: dri-devel@lists.freedesktop.org
2379T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux.git 2379T: git git://people.freedesktop.org/~danvet/drm-intel
2380S: Supported 2380S: Supported
2381F: drivers/gpu/drm/i915 2381F: drivers/gpu/drm/i915
2382F: include/drm/i915* 2382F: include/drm/i915*
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 74c2d9274c53..764f70c5e690 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -908,6 +908,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
908 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB), 908 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
909 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB), 909 ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
910 ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB), 910 ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
911 ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
912 ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
913 ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
914 ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
911 { } 915 { }
912}; 916};
913 917
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 8b8bbc70f86b..0ca7f7646ab5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
19 intel_crt.o \ 19 intel_crt.o \
20 intel_lvds.o \ 20 intel_lvds.o \
21 intel_bios.o \ 21 intel_bios.o \
22 intel_ddi.o \
22 intel_dp.o \ 23 intel_dp.o \
23 intel_hdmi.o \ 24 intel_hdmi.o \
24 intel_sdvo.o \ 25 intel_sdvo.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 950f72a0d729..eb2b3c25b9e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -699,6 +699,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
699 struct drm_device *dev = error_priv->dev; 699 struct drm_device *dev = error_priv->dev;
700 drm_i915_private_t *dev_priv = dev->dev_private; 700 drm_i915_private_t *dev_priv = dev->dev_private;
701 struct drm_i915_error_state *error = error_priv->error; 701 struct drm_i915_error_state *error = error_priv->error;
702 struct intel_ring_buffer *ring;
702 int i, j, page, offset, elt; 703 int i, j, page, offset, elt;
703 704
704 if (!error) { 705 if (!error) {
@@ -706,7 +707,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
706 return 0; 707 return 0;
707 } 708 }
708 709
709
710 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 710 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
711 error->time.tv_usec); 711 error->time.tv_usec);
712 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 712 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
@@ -722,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
722 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 722 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
723 } 723 }
724 724
725 i915_ring_error_state(m, dev, error, RCS); 725 for_each_ring(ring, dev_priv, i)
726 if (HAS_BLT(dev)) 726 i915_ring_error_state(m, dev, error, i);
727 i915_ring_error_state(m, dev, error, BCS);
728 if (HAS_BSD(dev))
729 i915_ring_error_state(m, dev, error, VCS);
730 727
731 if (error->active_bo) 728 if (error->active_bo)
732 print_error_buffers(m, "Active", 729 print_error_buffers(m, "Active",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 42d91e841629..f94792626b94 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -980,10 +980,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
980 value = 1; 980 value = 1;
981 break; 981 break;
982 case I915_PARAM_HAS_BSD: 982 case I915_PARAM_HAS_BSD:
983 value = HAS_BSD(dev); 983 value = intel_ring_initialized(&dev_priv->ring[VCS]);
984 break; 984 break;
985 case I915_PARAM_HAS_BLT: 985 case I915_PARAM_HAS_BLT:
986 value = HAS_BLT(dev); 986 value = intel_ring_initialized(&dev_priv->ring[BCS]);
987 break; 987 break;
988 case I915_PARAM_HAS_RELAXED_FENCING: 988 case I915_PARAM_HAS_RELAXED_FENCING:
989 value = 1; 989 value = 1;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 77b7a50e2014..d3e194853061 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -345,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
345 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 345 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
346 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 346 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
347 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 347 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
348 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
349 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
350 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
351 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
352 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
353 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
354 INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
348 {0, 0, 0} 355 {0, 0, 0}
349}; 356};
350 357
@@ -407,9 +414,11 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
407 if (i915_semaphores >= 0) 414 if (i915_semaphores >= 0)
408 return i915_semaphores; 415 return i915_semaphores;
409 416
417#ifdef CONFIG_INTEL_IOMMU
410 /* Enable semaphores on SNB when IO remapping is off */ 418 /* Enable semaphores on SNB when IO remapping is off */
411 if (INTEL_INFO(dev)->gen == 6) 419 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
412 return !intel_iommu_enabled; 420 return false;
421#endif
413 422
414 return 1; 423 return 1;
415} 424}
@@ -622,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
622 631
623 /* KMS EnterVT equivalent */ 632 /* KMS EnterVT equivalent */
624 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 633 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
634 if (HAS_PCH_SPLIT(dev))
635 ironlake_init_pch_refclk(dev);
636
625 mutex_lock(&dev->struct_mutex); 637 mutex_lock(&dev->struct_mutex);
626 dev_priv->mm.suspended = 0; 638 dev_priv->mm.suspended = 0;
627 639
628 error = i915_gem_init_hw(dev); 640 error = i915_gem_init_hw(dev);
629 mutex_unlock(&dev->struct_mutex); 641 mutex_unlock(&dev->struct_mutex);
630 642
631 if (HAS_PCH_SPLIT(dev)) 643 intel_modeset_init_hw(dev);
632 ironlake_init_pch_refclk(dev);
633
634 drm_mode_config_reset(dev); 644 drm_mode_config_reset(dev);
635 drm_irq_install(dev); 645 drm_irq_install(dev);
636 646
@@ -638,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
638 mutex_lock(&dev->mode_config.mutex); 648 mutex_lock(&dev->mode_config.mutex);
639 drm_helper_resume_force_mode(dev); 649 drm_helper_resume_force_mode(dev);
640 mutex_unlock(&dev->mode_config.mutex); 650 mutex_unlock(&dev->mode_config.mutex);
641
642 if (IS_IRONLAKE_M(dev))
643 ironlake_enable_rc6(dev);
644 } 651 }
645 652
646 intel_opregion_init(dev); 653 intel_opregion_init(dev);
@@ -886,15 +893,15 @@ int i915_reset(struct drm_device *dev)
886 */ 893 */
887 if (drm_core_check_feature(dev, DRIVER_MODESET) || 894 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
888 !dev_priv->mm.suspended) { 895 !dev_priv->mm.suspended) {
896 struct intel_ring_buffer *ring;
897 int i;
898
889 dev_priv->mm.suspended = 0; 899 dev_priv->mm.suspended = 0;
890 900
891 i915_gem_init_swizzling(dev); 901 i915_gem_init_swizzling(dev);
892 902
893 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); 903 for_each_ring(ring, dev_priv, i)
894 if (HAS_BSD(dev)) 904 ring->init(ring);
895 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
896 if (HAS_BLT(dev))
897 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
898 905
899 i915_gem_init_ppgtt(dev); 906 i915_gem_init_ppgtt(dev);
900 907
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e03a4f80c5c9..11c7a6a330c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -243,6 +243,8 @@ struct drm_i915_display_funcs {
243 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 243 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
244 uint32_t sprite_width, int pixel_size); 244 uint32_t sprite_width, int pixel_size);
245 void (*sanitize_pm)(struct drm_device *dev); 245 void (*sanitize_pm)(struct drm_device *dev);
246 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
247 struct drm_display_mode *mode);
246 int (*crtc_mode_set)(struct drm_crtc *crtc, 248 int (*crtc_mode_set)(struct drm_crtc *crtc,
247 struct drm_display_mode *mode, 249 struct drm_display_mode *mode,
248 struct drm_display_mode *adjusted_mode, 250 struct drm_display_mode *adjusted_mode,
@@ -408,9 +410,7 @@ typedef struct drm_i915_private {
408#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 410#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
409 struct timer_list hangcheck_timer; 411 struct timer_list hangcheck_timer;
410 int hangcheck_count; 412 int hangcheck_count;
411 uint32_t last_acthd; 413 uint32_t last_acthd[I915_NUM_RINGS];
412 uint32_t last_acthd_bsd;
413 uint32_t last_acthd_blt;
414 uint32_t last_instdone; 414 uint32_t last_instdone;
415 uint32_t last_instdone1; 415 uint32_t last_instdone1;
416 416
@@ -818,6 +818,11 @@ typedef struct drm_i915_private {
818 struct drm_property *force_audio_property; 818 struct drm_property *force_audio_property;
819} drm_i915_private_t; 819} drm_i915_private_t;
820 820
821/* Iterate over initialised rings */
822#define for_each_ring(ring__, dev_priv__, i__) \
823 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
824 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
825
821enum hdmi_force_audio { 826enum hdmi_force_audio {
822 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 827 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
823 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 828 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 44a5f241b1a0..6d2180cf3da5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1655,10 +1655,11 @@ void i915_gem_reset(struct drm_device *dev)
1655{ 1655{
1656 struct drm_i915_private *dev_priv = dev->dev_private; 1656 struct drm_i915_private *dev_priv = dev->dev_private;
1657 struct drm_i915_gem_object *obj; 1657 struct drm_i915_gem_object *obj;
1658 struct intel_ring_buffer *ring;
1658 int i; 1659 int i;
1659 1660
1660 for (i = 0; i < I915_NUM_RINGS; i++) 1661 for_each_ring(ring, dev_priv, i)
1661 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); 1662 i915_gem_reset_ring_lists(dev_priv, ring);
1662 1663
1663 /* Remove anything from the flushing lists. The GPU cache is likely 1664 /* Remove anything from the flushing lists. The GPU cache is likely
1664 * to be lost on reset along with the data, so simply move the 1665 * to be lost on reset along with the data, so simply move the
@@ -1763,10 +1764,11 @@ void
1763i915_gem_retire_requests(struct drm_device *dev) 1764i915_gem_retire_requests(struct drm_device *dev)
1764{ 1765{
1765 drm_i915_private_t *dev_priv = dev->dev_private; 1766 drm_i915_private_t *dev_priv = dev->dev_private;
1767 struct intel_ring_buffer *ring;
1766 int i; 1768 int i;
1767 1769
1768 for (i = 0; i < I915_NUM_RINGS; i++) 1770 for_each_ring(ring, dev_priv, i)
1769 i915_gem_retire_requests_ring(&dev_priv->ring[i]); 1771 i915_gem_retire_requests_ring(ring);
1770} 1772}
1771 1773
1772static void 1774static void
@@ -1774,6 +1776,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1774{ 1776{
1775 drm_i915_private_t *dev_priv; 1777 drm_i915_private_t *dev_priv;
1776 struct drm_device *dev; 1778 struct drm_device *dev;
1779 struct intel_ring_buffer *ring;
1777 bool idle; 1780 bool idle;
1778 int i; 1781 int i;
1779 1782
@@ -1793,9 +1796,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1793 * objects indefinitely. 1796 * objects indefinitely.
1794 */ 1797 */
1795 idle = true; 1798 idle = true;
1796 for (i = 0; i < I915_NUM_RINGS; i++) { 1799 for_each_ring(ring, dev_priv, i) {
1797 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1798
1799 if (!list_empty(&ring->gpu_write_list)) { 1800 if (!list_empty(&ring->gpu_write_list)) {
1800 struct drm_i915_gem_request *request; 1801 struct drm_i915_gem_request *request;
1801 int ret; 1802 int ret;
@@ -2137,13 +2138,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
2137int i915_gpu_idle(struct drm_device *dev) 2138int i915_gpu_idle(struct drm_device *dev)
2138{ 2139{
2139 drm_i915_private_t *dev_priv = dev->dev_private; 2140 drm_i915_private_t *dev_priv = dev->dev_private;
2141 struct intel_ring_buffer *ring;
2140 int ret, i; 2142 int ret, i;
2141 2143
2142 /* Flush everything onto the inactive list. */ 2144 /* Flush everything onto the inactive list. */
2143 for (i = 0; i < I915_NUM_RINGS; i++) { 2145 for_each_ring(ring, dev_priv, i) {
2144 ret = i915_ring_idle(&dev_priv->ring[i]); 2146 ret = i915_ring_idle(ring);
2145 if (ret) 2147 if (ret)
2146 return ret; 2148 return ret;
2149
2150 /* Is the device fubar? */
2151 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2152 return -EBUSY;
2147 } 2153 }
2148 2154
2149 return 0; 2155 return 0;
@@ -3463,9 +3469,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
3463 /* GFX_MODE is per-ring on gen7+ */ 3469 /* GFX_MODE is per-ring on gen7+ */
3464 } 3470 }
3465 3471
3466 for (i = 0; i < I915_NUM_RINGS; i++) { 3472 for_each_ring(ring, dev_priv, i) {
3467 ring = &dev_priv->ring[i];
3468
3469 if (INTEL_INFO(dev)->gen >= 7) 3473 if (INTEL_INFO(dev)->gen >= 7)
3470 I915_WRITE(RING_MODE_GEN7(ring), 3474 I915_WRITE(RING_MODE_GEN7(ring),
3471 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 3475 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -3581,10 +3585,11 @@ void
3581i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3585i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3582{ 3586{
3583 drm_i915_private_t *dev_priv = dev->dev_private; 3587 drm_i915_private_t *dev_priv = dev->dev_private;
3588 struct intel_ring_buffer *ring;
3584 int i; 3589 int i;
3585 3590
3586 for (i = 0; i < I915_NUM_RINGS; i++) 3591 for_each_ring(ring, dev_priv, i)
3587 intel_cleanup_ring_buffer(&dev_priv->ring[i]); 3592 intel_cleanup_ring_buffer(ring);
3588} 3593}
3589 3594
3590int 3595int
@@ -3592,7 +3597,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3592 struct drm_file *file_priv) 3597 struct drm_file *file_priv)
3593{ 3598{
3594 drm_i915_private_t *dev_priv = dev->dev_private; 3599 drm_i915_private_t *dev_priv = dev->dev_private;
3595 int ret, i; 3600 int ret;
3596 3601
3597 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3602 if (drm_core_check_feature(dev, DRIVER_MODESET))
3598 return 0; 3603 return 0;
@@ -3614,10 +3619,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3614 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3619 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3615 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 3620 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3616 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 3621 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3617 for (i = 0; i < I915_NUM_RINGS; i++) {
3618 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3619 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3620 }
3621 mutex_unlock(&dev->struct_mutex); 3622 mutex_unlock(&dev->struct_mutex);
3622 3623
3623 ret = drm_irq_install(dev); 3624 ret = drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3bcf0451d07c..ae7c24e12e52 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
168 drm_i915_private_t *dev_priv = dev->dev_private; 168 drm_i915_private_t *dev_priv = dev->dev_private;
169 struct drm_i915_gem_object *obj, *next; 169 struct drm_i915_gem_object *obj, *next;
170 bool lists_empty; 170 bool lists_empty;
171 int ret,i; 171 int ret;
172 172
173 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 173 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
174 list_empty(&dev_priv->mm.flushing_list) && 174 list_empty(&dev_priv->mm.flushing_list) &&
@@ -178,17 +178,13 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
178 178
179 trace_i915_gem_evict_everything(dev, purgeable_only); 179 trace_i915_gem_evict_everything(dev, purgeable_only);
180 180
181 ret = i915_gpu_idle(dev);
182 if (ret)
183 return ret;
184
185 /* The gpu_idle will flush everything in the write domain to the 181 /* The gpu_idle will flush everything in the write domain to the
186 * active list. Then we must move everything off the active list 182 * active list. Then we must move everything off the active list
187 * with retire requests. 183 * with retire requests.
188 */ 184 */
189 for (i = 0; i < I915_NUM_RINGS; i++) 185 ret = i915_gpu_idle(dev);
190 if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list))) 186 if (ret)
191 return -EBUSY; 187 return ret;
192 188
193 i915_gem_retire_requests(dev); 189 i915_gem_retire_requests(dev);
194 190
@@ -203,5 +199,5 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
203 } 199 }
204 } 200 }
205 201
206 return ret; 202 return 0;
207} 203}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 206b9bbe6979..974a9f1068a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -967,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
967 obj->pending_gpu_write = true; 967 obj->pending_gpu_write = true;
968 list_move_tail(&obj->gpu_write_list, 968 list_move_tail(&obj->gpu_write_list,
969 &ring->gpu_write_list); 969 &ring->gpu_write_list);
970 intel_mark_busy(ring->dev, obj); 970 if (obj->pin_count) /* check for potential scanout */
971 intel_mark_busy(ring->dev, obj);
971 } 972 }
972 973
973 trace_i915_gem_object_change_domain(obj, old_read, old_write); 974 trace_i915_gem_object_change_domain(obj, old_read, old_write);
974 } 975 }
976
977 intel_mark_busy(ring->dev, NULL);
975} 978}
976 979
977static void 980static void
@@ -1061,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1061 ring = &dev_priv->ring[RCS]; 1064 ring = &dev_priv->ring[RCS];
1062 break; 1065 break;
1063 case I915_EXEC_BSD: 1066 case I915_EXEC_BSD:
1064 if (!HAS_BSD(dev)) {
1065 DRM_DEBUG("execbuf with invalid ring (BSD)\n");
1066 return -EINVAL;
1067 }
1068 ring = &dev_priv->ring[VCS]; 1067 ring = &dev_priv->ring[VCS];
1069 break; 1068 break;
1070 case I915_EXEC_BLT: 1069 case I915_EXEC_BLT:
1071 if (!HAS_BLT(dev)) {
1072 DRM_DEBUG("execbuf with invalid ring (BLT)\n");
1073 return -EINVAL;
1074 }
1075 ring = &dev_priv->ring[BCS]; 1070 ring = &dev_priv->ring[BCS];
1076 break; 1071 break;
1077 default: 1072 default:
@@ -1079,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1079 (int)(args->flags & I915_EXEC_RING_MASK)); 1074 (int)(args->flags & I915_EXEC_RING_MASK));
1080 return -EINVAL; 1075 return -EINVAL;
1081 } 1076 }
1077 if (!intel_ring_initialized(ring)) {
1078 DRM_DEBUG("execbuf with invalid ring: %d\n",
1079 (int)(args->flags & I915_EXEC_RING_MASK));
1080 return -EINVAL;
1081 }
1082 1082
1083 mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1083 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1084 mask = I915_EXEC_CONSTANTS_MASK; 1084 mask = I915_EXEC_CONSTANTS_MASK;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b4999b5288e8..cc4a63307611 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -533,14 +533,11 @@ out:
533 return ret; 533 return ret;
534} 534}
535 535
536static void pch_irq_handler(struct drm_device *dev) 536static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
537{ 537{
538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
539 u32 pch_iir;
540 int pipe; 539 int pipe;
541 540
542 pch_iir = I915_READ(SDEIIR);
543
544 if (pch_iir & SDE_AUDIO_POWER_MASK) 541 if (pch_iir & SDE_AUDIO_POWER_MASK)
545 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 542 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
546 (pch_iir & SDE_AUDIO_POWER_MASK) >> 543 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -580,72 +577,61 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
580{ 577{
581 struct drm_device *dev = (struct drm_device *) arg; 578 struct drm_device *dev = (struct drm_device *) arg;
582 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 579 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
583 int ret = IRQ_NONE; 580 u32 de_iir, gt_iir, de_ier, pm_iir;
584 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 581 irqreturn_t ret = IRQ_NONE;
582 int i;
585 583
586 atomic_inc(&dev_priv->irq_received); 584 atomic_inc(&dev_priv->irq_received);
587 585
588 /* disable master interrupt before clearing iir */ 586 /* disable master interrupt before clearing iir */
589 de_ier = I915_READ(DEIER); 587 de_ier = I915_READ(DEIER);
590 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 588 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
591 POSTING_READ(DEIER);
592 589
593 de_iir = I915_READ(DEIIR);
594 gt_iir = I915_READ(GTIIR); 590 gt_iir = I915_READ(GTIIR);
595 pch_iir = I915_READ(SDEIIR); 591 if (gt_iir) {
596 pm_iir = I915_READ(GEN6_PMIIR); 592 snb_gt_irq_handler(dev, dev_priv, gt_iir);
597 593 I915_WRITE(GTIIR, gt_iir);
598 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) 594 ret = IRQ_HANDLED;
599 goto done;
600
601 ret = IRQ_HANDLED;
602
603 snb_gt_irq_handler(dev, dev_priv, gt_iir);
604
605 if (de_iir & DE_GSE_IVB)
606 intel_opregion_gse_intr(dev);
607
608 if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
609 intel_prepare_page_flip(dev, 0);
610 intel_finish_page_flip_plane(dev, 0);
611 }
612
613 if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
614 intel_prepare_page_flip(dev, 1);
615 intel_finish_page_flip_plane(dev, 1);
616 } 595 }
617 596
618 if (de_iir & DE_PLANEC_FLIP_DONE_IVB) { 597 de_iir = I915_READ(DEIIR);
619 intel_prepare_page_flip(dev, 2); 598 if (de_iir) {
620 intel_finish_page_flip_plane(dev, 2); 599 if (de_iir & DE_GSE_IVB)
621 } 600 intel_opregion_gse_intr(dev);
601
602 for (i = 0; i < 3; i++) {
603 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
604 intel_prepare_page_flip(dev, i);
605 intel_finish_page_flip_plane(dev, i);
606 }
607 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
608 drm_handle_vblank(dev, i);
609 }
622 610
623 if (de_iir & DE_PIPEA_VBLANK_IVB) 611 /* check event from PCH */
624 drm_handle_vblank(dev, 0); 612 if (de_iir & DE_PCH_EVENT_IVB) {
613 u32 pch_iir = I915_READ(SDEIIR);
625 614
626 if (de_iir & DE_PIPEB_VBLANK_IVB) 615 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
627 drm_handle_vblank(dev, 1); 616 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
617 pch_irq_handler(dev, pch_iir);
628 618
629 if (de_iir & DE_PIPEC_VBLANK_IVB) 619 /* clear PCH hotplug event before clear CPU irq */
630 drm_handle_vblank(dev, 2); 620 I915_WRITE(SDEIIR, pch_iir);
621 }
631 622
632 /* check event from PCH */ 623 I915_WRITE(DEIIR, de_iir);
633 if (de_iir & DE_PCH_EVENT_IVB) { 624 ret = IRQ_HANDLED;
634 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
635 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
636 pch_irq_handler(dev);
637 } 625 }
638 626
639 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 627 pm_iir = I915_READ(GEN6_PMIIR);
640 gen6_queue_rps_work(dev_priv, pm_iir); 628 if (pm_iir) {
641 629 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
642 /* should clear PCH hotplug event before clear CPU irq */ 630 gen6_queue_rps_work(dev_priv, pm_iir);
643 I915_WRITE(SDEIIR, pch_iir); 631 I915_WRITE(GEN6_PMIIR, pm_iir);
644 I915_WRITE(GTIIR, gt_iir); 632 ret = IRQ_HANDLED;
645 I915_WRITE(DEIIR, de_iir); 633 }
646 I915_WRITE(GEN6_PMIIR, pm_iir);
647 634
648done:
649 I915_WRITE(DEIER, de_ier); 635 I915_WRITE(DEIER, de_ier);
650 POSTING_READ(DEIER); 636 POSTING_READ(DEIER);
651 637
@@ -721,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
721 if (de_iir & DE_PCH_EVENT) { 707 if (de_iir & DE_PCH_EVENT) {
722 if (pch_iir & hotplug_mask) 708 if (pch_iir & hotplug_mask)
723 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 709 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
724 pch_irq_handler(dev); 710 pch_irq_handler(dev, pch_iir);
725 } 711 }
726 712
727 if (de_iir & DE_PCU_EVENT) { 713 if (de_iir & DE_PCU_EVENT) {
@@ -1036,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
1036 struct drm_i915_error_state *error) 1022 struct drm_i915_error_state *error)
1037{ 1023{
1038 struct drm_i915_private *dev_priv = dev->dev_private; 1024 struct drm_i915_private *dev_priv = dev->dev_private;
1025 struct intel_ring_buffer *ring;
1039 struct drm_i915_gem_request *request; 1026 struct drm_i915_gem_request *request;
1040 int i, count; 1027 int i, count;
1041 1028
1042 for (i = 0; i < I915_NUM_RINGS; i++) { 1029 for_each_ring(ring, dev_priv, i) {
1043 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1044
1045 if (ring->obj == NULL)
1046 continue;
1047
1048 i915_record_ring_state(dev, error, ring); 1030 i915_record_ring_state(dev, error, ring);
1049 1031
1050 error->ring[i].batchbuffer = 1032 error->ring[i].batchbuffer =
@@ -1309,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1309void i915_handle_error(struct drm_device *dev, bool wedged) 1291void i915_handle_error(struct drm_device *dev, bool wedged)
1310{ 1292{
1311 struct drm_i915_private *dev_priv = dev->dev_private; 1293 struct drm_i915_private *dev_priv = dev->dev_private;
1294 struct intel_ring_buffer *ring;
1295 int i;
1312 1296
1313 i915_capture_error_state(dev); 1297 i915_capture_error_state(dev);
1314 i915_report_and_clear_eir(dev); 1298 i915_report_and_clear_eir(dev);
@@ -1320,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1320 /* 1304 /*
1321 * Wakeup waiting processes so they don't hang 1305 * Wakeup waiting processes so they don't hang
1322 */ 1306 */
1323 wake_up_all(&dev_priv->ring[RCS].irq_queue); 1307 for_each_ring(ring, dev_priv, i)
1324 if (HAS_BSD(dev)) 1308 wake_up_all(&ring->irq_queue);
1325 wake_up_all(&dev_priv->ring[VCS].irq_queue);
1326 if (HAS_BLT(dev))
1327 wake_up_all(&dev_priv->ring[BCS].irq_queue);
1328 } 1309 }
1329 1310
1330 queue_work(dev_priv->wq, &dev_priv->error_work); 1311 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1529,11 +1510,6 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1529 1510
1530static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1511static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1531{ 1512{
1532 /* We don't check whether the ring even exists before calling this
1533 * function. Hence check whether it's initialized. */
1534 if (ring->obj == NULL)
1535 return true;
1536
1537 if (list_empty(&ring->request_list) || 1513 if (list_empty(&ring->request_list) ||
1538 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1514 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1539 /* Issue a wake-up to catch stuck h/w. */ 1515 /* Issue a wake-up to catch stuck h/w. */
@@ -1567,26 +1543,25 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
1567 drm_i915_private_t *dev_priv = dev->dev_private; 1543 drm_i915_private_t *dev_priv = dev->dev_private;
1568 1544
1569 if (dev_priv->hangcheck_count++ > 1) { 1545 if (dev_priv->hangcheck_count++ > 1) {
1546 bool hung = true;
1547
1570 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1548 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1571 i915_handle_error(dev, true); 1549 i915_handle_error(dev, true);
1572 1550
1573 if (!IS_GEN2(dev)) { 1551 if (!IS_GEN2(dev)) {
1552 struct intel_ring_buffer *ring;
1553 int i;
1554
1574 /* Is the chip hanging on a WAIT_FOR_EVENT? 1555 /* Is the chip hanging on a WAIT_FOR_EVENT?
1575 * If so we can simply poke the RB_WAIT bit 1556 * If so we can simply poke the RB_WAIT bit
1576 * and break the hang. This should work on 1557 * and break the hang. This should work on
1577 * all but the second generation chipsets. 1558 * all but the second generation chipsets.
1578 */ 1559 */
1579 if (kick_ring(&dev_priv->ring[RCS])) 1560 for_each_ring(ring, dev_priv, i)
1580 return false; 1561 hung &= !kick_ring(ring);
1581
1582 if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
1583 return false;
1584
1585 if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
1586 return false;
1587 } 1562 }
1588 1563
1589 return true; 1564 return hung;
1590 } 1565 }
1591 1566
1592 return false; 1567 return false;
@@ -1602,16 +1577,23 @@ void i915_hangcheck_elapsed(unsigned long data)
1602{ 1577{
1603 struct drm_device *dev = (struct drm_device *)data; 1578 struct drm_device *dev = (struct drm_device *)data;
1604 drm_i915_private_t *dev_priv = dev->dev_private; 1579 drm_i915_private_t *dev_priv = dev->dev_private;
1605 uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; 1580 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
1606 bool err = false; 1581 struct intel_ring_buffer *ring;
1582 bool err = false, idle;
1583 int i;
1607 1584
1608 if (!i915_enable_hangcheck) 1585 if (!i915_enable_hangcheck)
1609 return; 1586 return;
1610 1587
1588 memset(acthd, 0, sizeof(acthd));
1589 idle = true;
1590 for_each_ring(ring, dev_priv, i) {
1591 idle &= i915_hangcheck_ring_idle(ring, &err);
1592 acthd[i] = intel_ring_get_active_head(ring);
1593 }
1594
1611 /* If all work is done then ACTHD clearly hasn't advanced. */ 1595 /* If all work is done then ACTHD clearly hasn't advanced. */
1612 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && 1596 if (idle) {
1613 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1614 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1615 if (err) { 1597 if (err) {
1616 if (i915_hangcheck_hung(dev)) 1598 if (i915_hangcheck_hung(dev))
1617 return; 1599 return;
@@ -1630,15 +1612,8 @@ void i915_hangcheck_elapsed(unsigned long data)
1630 instdone = I915_READ(INSTDONE_I965); 1612 instdone = I915_READ(INSTDONE_I965);
1631 instdone1 = I915_READ(INSTDONE1); 1613 instdone1 = I915_READ(INSTDONE1);
1632 } 1614 }
1633 acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
1634 acthd_bsd = HAS_BSD(dev) ?
1635 intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
1636 acthd_blt = HAS_BLT(dev) ?
1637 intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
1638 1615
1639 if (dev_priv->last_acthd == acthd && 1616 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1640 dev_priv->last_acthd_bsd == acthd_bsd &&
1641 dev_priv->last_acthd_blt == acthd_blt &&
1642 dev_priv->last_instdone == instdone && 1617 dev_priv->last_instdone == instdone &&
1643 dev_priv->last_instdone1 == instdone1) { 1618 dev_priv->last_instdone1 == instdone1) {
1644 if (i915_hangcheck_hung(dev)) 1619 if (i915_hangcheck_hung(dev))
@@ -1646,9 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1646 } else { 1621 } else {
1647 dev_priv->hangcheck_count = 0; 1622 dev_priv->hangcheck_count = 0;
1648 1623
1649 dev_priv->last_acthd = acthd; 1624 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1650 dev_priv->last_acthd_bsd = acthd_bsd;
1651 dev_priv->last_acthd_blt = acthd_blt;
1652 dev_priv->last_instdone = instdone; 1625 dev_priv->last_instdone = instdone;
1653 dev_priv->last_instdone1 = instdone1; 1626 dev_priv->last_instdone1 = instdone1;
1654 } 1627 }
@@ -2597,8 +2570,7 @@ void intel_irq_init(struct drm_device *dev)
2597 2570
2598 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2571 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2599 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2572 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2600 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) || 2573 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2601 IS_VALLEYVIEW(dev)) {
2602 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2574 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2603 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2575 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2604 } 2576 }
@@ -2624,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev)
2624 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2596 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2625 dev->driver->enable_vblank = ivybridge_enable_vblank; 2597 dev->driver->enable_vblank = ivybridge_enable_vblank;
2626 dev->driver->disable_vblank = ivybridge_disable_vblank; 2598 dev->driver->disable_vblank = ivybridge_disable_vblank;
2599 } else if (IS_HASWELL(dev)) {
2600 /* Share interrupts handling with IVB */
2601 dev->driver->irq_handler = ivybridge_irq_handler;
2602 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2603 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2604 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2605 dev->driver->enable_vblank = ivybridge_enable_vblank;
2606 dev->driver->disable_vblank = ivybridge_disable_vblank;
2627 } else if (HAS_PCH_SPLIT(dev)) { 2607 } else if (HAS_PCH_SPLIT(dev)) {
2628 dev->driver->irq_handler = ironlake_irq_handler; 2608 dev->driver->irq_handler = ironlake_irq_handler;
2629 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2609 dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 10e71a9f8bd9..2d49b9507ed0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1697,9 +1697,12 @@
1697/* Video Data Island Packet control */ 1697/* Video Data Island Packet control */
1698#define VIDEO_DIP_DATA 0x61178 1698#define VIDEO_DIP_DATA 0x61178
1699#define VIDEO_DIP_CTL 0x61170 1699#define VIDEO_DIP_CTL 0x61170
1700/* Pre HSW: */
1700#define VIDEO_DIP_ENABLE (1 << 31) 1701#define VIDEO_DIP_ENABLE (1 << 31)
1701#define VIDEO_DIP_PORT_B (1 << 29) 1702#define VIDEO_DIP_PORT_B (1 << 29)
1702#define VIDEO_DIP_PORT_C (2 << 29) 1703#define VIDEO_DIP_PORT_C (2 << 29)
1704#define VIDEO_DIP_PORT_D (3 << 29)
1705#define VIDEO_DIP_PORT_MASK (3 << 29)
1703#define VIDEO_DIP_ENABLE_AVI (1 << 21) 1706#define VIDEO_DIP_ENABLE_AVI (1 << 21)
1704#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) 1707#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
1705#define VIDEO_DIP_ENABLE_SPD (8 << 21) 1708#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1710,6 +1713,10 @@
1710#define VIDEO_DIP_FREQ_ONCE (0 << 16) 1713#define VIDEO_DIP_FREQ_ONCE (0 << 16)
1711#define VIDEO_DIP_FREQ_VSYNC (1 << 16) 1714#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
1712#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) 1715#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
1716#define VIDEO_DIP_FREQ_MASK (3 << 16)
1717/* HSW and later: */
1718#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
1719#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
1713 1720
1714/* Panel power sequencing */ 1721/* Panel power sequencing */
1715#define PP_STATUS 0x61200 1722#define PP_STATUS 0x61200
@@ -2476,7 +2483,8 @@
2476 2483
2477/* Pipe A */ 2484/* Pipe A */
2478#define _PIPEADSL 0x70000 2485#define _PIPEADSL 0x70000
2479#define DSL_LINEMASK 0x00000fff 2486#define DSL_LINEMASK_GEN2 0x00000fff
2487#define DSL_LINEMASK_GEN3 0x00001fff
2480#define _PIPEACONF 0x70008 2488#define _PIPEACONF 0x70008
2481#define PIPECONF_ENABLE (1<<31) 2489#define PIPECONF_ENABLE (1<<31)
2482#define PIPECONF_DISABLE 0 2490#define PIPECONF_DISABLE 0
@@ -3520,6 +3528,42 @@
3520#define VLV_TVIDEO_DIP_GCP(pipe) \ 3528#define VLV_TVIDEO_DIP_GCP(pipe) \
3521 _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) 3529 _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
3522 3530
3531/* Haswell DIP controls */
3532#define HSW_VIDEO_DIP_CTL_A 0x60200
3533#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
3534#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
3535#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
3536#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
3537#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
3538#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
3539#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
3540#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
3541#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
3542#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
3543#define HSW_VIDEO_DIP_GCP_A 0x60210
3544
3545#define HSW_VIDEO_DIP_CTL_B 0x61200
3546#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
3547#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
3548#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
3549#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
3550#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
3551#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
3552#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
3553#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
3554#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
3555#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
3556#define HSW_VIDEO_DIP_GCP_B 0x61210
3557
3558#define HSW_TVIDEO_DIP_CTL(pipe) \
3559 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
3560#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
3561 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
3562#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
3563 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
3564#define HSW_TVIDEO_DIP_GCP(pipe) \
3565 _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
3566
3523#define _TRANS_HTOTAL_B 0xe1000 3567#define _TRANS_HTOTAL_B 0xe1000
3524#define _TRANS_HBLANK_B 0xe1004 3568#define _TRANS_HBLANK_B 0xe1004
3525#define _TRANS_HSYNC_B 0xe1008 3569#define _TRANS_HSYNC_B 0xe1008
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 73a5c3c12fe0..0ede02a99d91 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -876,12 +876,6 @@ int i915_restore_state(struct drm_device *dev)
876 I915_WRITE(IER, dev_priv->saveIER); 876 I915_WRITE(IER, dev_priv->saveIER);
877 I915_WRITE(IMR, dev_priv->saveIMR); 877 I915_WRITE(IMR, dev_priv->saveIMR);
878 } 878 }
879 mutex_unlock(&dev->struct_mutex);
880
881 if (drm_core_check_feature(dev, DRIVER_MODESET))
882 intel_modeset_init_hw(dev);
883
884 mutex_lock(&dev->struct_mutex);
885 879
886 /* Cache mode state */ 880 /* Cache mode state */
887 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 881 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 417ca99e697d..75a70c46ef1b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -615,7 +615,11 @@ void intel_crt_init(struct drm_device *dev)
615 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | 615 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
616 1 << INTEL_ANALOG_CLONE_BIT | 616 1 << INTEL_ANALOG_CLONE_BIT |
617 1 << INTEL_SDVO_LVDS_CLONE_BIT); 617 1 << INTEL_SDVO_LVDS_CLONE_BIT);
618 crt->base.crtc_mask = (1 << 0) | (1 << 1); 618 if (IS_HASWELL(dev))
619 crt->base.crtc_mask = (1 << 0);
620 else
621 crt->base.crtc_mask = (1 << 0) | (1 << 1);
622
619 if (IS_GEN2(dev)) 623 if (IS_GEN2(dev))
620 connector->interlace_allowed = 0; 624 connector->interlace_allowed = 0;
621 else 625 else
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 000000000000..46d1e886c692
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_drv.h"
30
31/* HDMI/DVI modes ignore everything but the last 2 items. So we share
32 * them for both DP and FDI transports, allowing those ports to
33 * automatically adapt to HDMI connections as well
34 */
35static const u32 hsw_ddi_translations_dp[] = {
36 0x00FFFFFF, 0x0006000E, /* DP parameters */
37 0x00D75FFF, 0x0005000A,
38 0x00C30FFF, 0x00040006,
39 0x80AAAFFF, 0x000B0000,
40 0x00FFFFFF, 0x0005000A,
41 0x00D75FFF, 0x000C0004,
42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46};
47
48static const u32 hsw_ddi_translations_fdi[] = {
49 0x00FFFFFF, 0x0007000E, /* FDI parameters */
50 0x00D75FFF, 0x000F000A,
51 0x00C30FFF, 0x00060006,
52 0x00AAAFFF, 0x001E0000,
53 0x00FFFFFF, 0x000F000A,
54 0x00D75FFF, 0x00160004,
55 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59};
60
61/* On Haswell, DDI port buffers must be programmed with correct values
62 * in advance. The buffer values are different for FDI and DP modes,
63 * but the HDMI/DVI fields are shared among those. So we program the DDI
64 * in either FDI or DP modes only, as HDMI connections will work with both
65 * of those
66 */
67void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
68{
69 struct drm_i915_private *dev_priv = dev->dev_private;
70 u32 reg;
71 int i;
72 const u32 *ddi_translations = ((use_fdi_mode) ?
73 hsw_ddi_translations_fdi :
74 hsw_ddi_translations_dp);
75
76 DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
77 port_name(port),
78 use_fdi_mode ? "FDI" : "DP");
79
80 WARN((use_fdi_mode && (port != PORT_E)),
81 "Programming port %c in FDI mode, this probably will not work.\n",
82 port_name(port));
83
84 for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
85 I915_WRITE(reg, ddi_translations[i]);
86 reg += 4;
87 }
88}
89
90/* Program DDI buffers translations for DP. By default, program ports A-D in DP
91 * mode and port E for FDI.
92 */
93void intel_prepare_ddi(struct drm_device *dev)
94{
95 int port;
96
97 if (IS_HASWELL(dev)) {
98 for (port = PORT_A; port < PORT_E; port++)
99 intel_prepare_ddi_buffers(dev, port, false);
100
101 /* DDI E is the suggested one to work in FDI mode, so program is as such by
102 * default. It will have to be re-programmed in case a digital DP output
103 * will be detected on it
104 */
105 intel_prepare_ddi_buffers(dev, PORT_E, true);
106 }
107}
108
109static const long hsw_ddi_buf_ctl_values[] = {
110 DDI_BUF_EMP_400MV_0DB_HSW,
111 DDI_BUF_EMP_400MV_3_5DB_HSW,
112 DDI_BUF_EMP_400MV_6DB_HSW,
113 DDI_BUF_EMP_400MV_9_5DB_HSW,
114 DDI_BUF_EMP_600MV_0DB_HSW,
115 DDI_BUF_EMP_600MV_3_5DB_HSW,
116 DDI_BUF_EMP_600MV_6DB_HSW,
117 DDI_BUF_EMP_800MV_0DB_HSW,
118 DDI_BUF_EMP_800MV_3_5DB_HSW
119};
120
121
122/* Starting with Haswell, different DDI ports can work in FDI mode for
123 * connection to the PCH-located connectors. For this, it is necessary to train
124 * both the DDI port and PCH receiver for the desired DDI buffer settings.
125 *
126 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
127 * please note that when FDI mode is active on DDI E, it shares 2 lines with
128 * DDI A (which is used for eDP)
129 */
130
131void hsw_fdi_link_train(struct drm_crtc *crtc)
132{
133 struct drm_device *dev = crtc->dev;
134 struct drm_i915_private *dev_priv = dev->dev_private;
135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
136 int pipe = intel_crtc->pipe;
137 u32 reg, temp, i;
138
139 /* Configure CPU PLL, wait for warmup */
140 I915_WRITE(SPLL_CTL,
141 SPLL_PLL_ENABLE |
142 SPLL_PLL_FREQ_1350MHz |
143 SPLL_PLL_SCC);
144
145 /* Use SPLL to drive the output when in FDI mode */
146 I915_WRITE(PORT_CLK_SEL(PORT_E),
147 PORT_CLK_SEL_SPLL);
148 I915_WRITE(PIPE_CLK_SEL(pipe),
149 PIPE_CLK_SEL_PORT(PORT_E));
150
151 udelay(20);
152
153 /* Start the training iterating through available voltages and emphasis */
154 for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
155 /* Configure DP_TP_CTL with auto-training */
156 I915_WRITE(DP_TP_CTL(PORT_E),
157 DP_TP_CTL_FDI_AUTOTRAIN |
158 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
159 DP_TP_CTL_LINK_TRAIN_PAT1 |
160 DP_TP_CTL_ENABLE);
161
162 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
163 temp = I915_READ(DDI_BUF_CTL(PORT_E));
164 temp = (temp & ~DDI_BUF_EMP_MASK);
165 I915_WRITE(DDI_BUF_CTL(PORT_E),
166 temp |
167 DDI_BUF_CTL_ENABLE |
168 DDI_PORT_WIDTH_X2 |
169 hsw_ddi_buf_ctl_values[i]);
170
171 udelay(600);
172
173 /* Enable CPU FDI Receiver with auto-training */
174 reg = FDI_RX_CTL(pipe);
175 I915_WRITE(reg,
176 I915_READ(reg) |
177 FDI_LINK_TRAIN_AUTO |
178 FDI_RX_ENABLE |
179 FDI_LINK_TRAIN_PATTERN_1_CPT |
180 FDI_RX_ENHANCE_FRAME_ENABLE |
181 FDI_PORT_WIDTH_2X_LPT |
182 FDI_RX_PLL_ENABLE);
183 POSTING_READ(reg);
184 udelay(100);
185
186 temp = I915_READ(DP_TP_STATUS(PORT_E));
187 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
188 DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
189
190 /* Enable normal pixel sending for FDI */
191 I915_WRITE(DP_TP_CTL(PORT_E),
192 DP_TP_CTL_FDI_AUTOTRAIN |
193 DP_TP_CTL_LINK_TRAIN_NORMAL |
194 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
195 DP_TP_CTL_ENABLE);
196
197 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
198 temp = I915_READ(DDI_FUNC_CTL(pipe));
199 temp &= ~PIPE_DDI_PORT_MASK;
200 temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
201 PIPE_DDI_MODE_SELECT_FDI |
202 PIPE_DDI_FUNC_ENABLE |
203 PIPE_DDI_PORT_WIDTH_X2;
204 I915_WRITE(DDI_FUNC_CTL(pipe),
205 temp);
206 break;
207 } else {
208 DRM_ERROR("Error training BUF_CTL %d\n", i);
209
210 /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
211 I915_WRITE(DP_TP_CTL(PORT_E),
212 I915_READ(DP_TP_CTL(PORT_E)) &
213 ~DP_TP_CTL_ENABLE);
214 I915_WRITE(FDI_RX_CTL(pipe),
215 I915_READ(FDI_RX_CTL(pipe)) &
216 ~FDI_RX_PLL_ENABLE);
217 continue;
218 }
219 }
220
221 DRM_DEBUG_KMS("FDI train done.\n");
222}
223
224/* For DDI connections, it is possible to support different outputs over the
225 * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
226 * the time the output is detected what exactly is on the other end of it. This
227 * function aims at providing support for this detection and proper output
228 * configuration.
229 */
230void intel_ddi_init(struct drm_device *dev, enum port port)
231{
232 /* For now, we don't do any proper output detection and assume that we
233 * handle HDMI only */
234
235 switch(port){
236 case PORT_A:
237 /* We don't handle eDP and DP yet */
238 DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
239 break;
240 /* Assume that the ports B, C and D are working in HDMI mode for now */
241 case PORT_B:
242 case PORT_C:
243 case PORT_D:
244 intel_hdmi_init(dev, DDI_BUF_CTL(port));
245 break;
246 default:
247 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
248 port);
249 break;
250 }
251}
252
253/* WRPLL clock dividers */
254struct wrpll_tmds_clock {
255 u32 clock;
256 u16 p; /* Post divider */
257 u16 n2; /* Feedback divider */
258 u16 r2; /* Reference divider */
259};
260
261/* Table of matching values for WRPLL clocks programming for each frequency */
262static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
263 {19750, 38, 25, 18},
264 {20000, 48, 32, 18},
265 {21000, 36, 21, 15},
266 {21912, 42, 29, 17},
267 {22000, 36, 22, 15},
268 {23000, 36, 23, 15},
269 {23500, 40, 40, 23},
270 {23750, 26, 16, 14},
271 {23750, 26, 16, 14},
272 {24000, 36, 24, 15},
273 {25000, 36, 25, 15},
274 {25175, 26, 40, 33},
275 {25200, 30, 21, 15},
276 {26000, 36, 26, 15},
277 {27000, 30, 21, 14},
278 {27027, 18, 100, 111},
279 {27500, 30, 29, 19},
280 {28000, 34, 30, 17},
281 {28320, 26, 30, 22},
282 {28322, 32, 42, 25},
283 {28750, 24, 23, 18},
284 {29000, 30, 29, 18},
285 {29750, 32, 30, 17},
286 {30000, 30, 25, 15},
287 {30750, 30, 41, 24},
288 {31000, 30, 31, 18},
289 {31500, 30, 28, 16},
290 {32000, 30, 32, 18},
291 {32500, 28, 32, 19},
292 {33000, 24, 22, 15},
293 {34000, 28, 30, 17},
294 {35000, 26, 32, 19},
295 {35500, 24, 30, 19},
296 {36000, 26, 26, 15},
297 {36750, 26, 46, 26},
298 {37000, 24, 23, 14},
299 {37762, 22, 40, 26},
300 {37800, 20, 21, 15},
301 {38000, 24, 27, 16},
302 {38250, 24, 34, 20},
303 {39000, 24, 26, 15},
304 {40000, 24, 32, 18},
305 {40500, 20, 21, 14},
306 {40541, 22, 147, 89},
307 {40750, 18, 19, 14},
308 {41000, 16, 17, 14},
309 {41500, 22, 44, 26},
310 {41540, 22, 44, 26},
311 {42000, 18, 21, 15},
312 {42500, 22, 45, 26},
313 {43000, 20, 43, 27},
314 {43163, 20, 24, 15},
315 {44000, 18, 22, 15},
316 {44900, 20, 108, 65},
317 {45000, 20, 25, 15},
318 {45250, 20, 52, 31},
319 {46000, 18, 23, 15},
320 {46750, 20, 45, 26},
321 {47000, 20, 40, 23},
322 {48000, 18, 24, 15},
323 {49000, 18, 49, 30},
324 {49500, 16, 22, 15},
325 {50000, 18, 25, 15},
326 {50500, 18, 32, 19},
327 {51000, 18, 34, 20},
328 {52000, 18, 26, 15},
329 {52406, 14, 34, 25},
330 {53000, 16, 22, 14},
331 {54000, 16, 24, 15},
332 {54054, 16, 173, 108},
333 {54500, 14, 24, 17},
334 {55000, 12, 22, 18},
335 {56000, 14, 45, 31},
336 {56250, 16, 25, 15},
337 {56750, 14, 25, 17},
338 {57000, 16, 27, 16},
339 {58000, 16, 43, 25},
340 {58250, 16, 38, 22},
341 {58750, 16, 40, 23},
342 {59000, 14, 26, 17},
343 {59341, 14, 40, 26},
344 {59400, 16, 44, 25},
345 {60000, 16, 32, 18},
346 {60500, 12, 39, 29},
347 {61000, 14, 49, 31},
348 {62000, 14, 37, 23},
349 {62250, 14, 42, 26},
350 {63000, 12, 21, 15},
351 {63500, 14, 28, 17},
352 {64000, 12, 27, 19},
353 {65000, 14, 32, 19},
354 {65250, 12, 29, 20},
355 {65500, 12, 32, 22},
356 {66000, 12, 22, 15},
357 {66667, 14, 38, 22},
358 {66750, 10, 21, 17},
359 {67000, 14, 33, 19},
360 {67750, 14, 58, 33},
361 {68000, 14, 30, 17},
362 {68179, 14, 46, 26},
363 {68250, 14, 46, 26},
364 {69000, 12, 23, 15},
365 {70000, 12, 28, 18},
366 {71000, 12, 30, 19},
367 {72000, 12, 24, 15},
368 {73000, 10, 23, 17},
369 {74000, 12, 23, 14},
370 {74176, 8, 100, 91},
371 {74250, 10, 22, 16},
372 {74481, 12, 43, 26},
373 {74500, 10, 29, 21},
374 {75000, 12, 25, 15},
375 {75250, 10, 39, 28},
376 {76000, 12, 27, 16},
377 {77000, 12, 53, 31},
378 {78000, 12, 26, 15},
379 {78750, 12, 28, 16},
380 {79000, 10, 38, 26},
381 {79500, 10, 28, 19},
382 {80000, 12, 32, 18},
383 {81000, 10, 21, 14},
384 {81081, 6, 100, 111},
385 {81624, 8, 29, 24},
386 {82000, 8, 17, 14},
387 {83000, 10, 40, 26},
388 {83950, 10, 28, 18},
389 {84000, 10, 28, 18},
390 {84750, 6, 16, 17},
391 {85000, 6, 17, 18},
392 {85250, 10, 30, 19},
393 {85750, 10, 27, 17},
394 {86000, 10, 43, 27},
395 {87000, 10, 29, 18},
396 {88000, 10, 44, 27},
397 {88500, 10, 41, 25},
398 {89000, 10, 28, 17},
399 {89012, 6, 90, 91},
400 {89100, 10, 33, 20},
401 {90000, 10, 25, 15},
402 {91000, 10, 32, 19},
403 {92000, 10, 46, 27},
404 {93000, 10, 31, 18},
405 {94000, 10, 40, 23},
406 {94500, 10, 28, 16},
407 {95000, 10, 44, 25},
408 {95654, 10, 39, 22},
409 {95750, 10, 39, 22},
410 {96000, 10, 32, 18},
411 {97000, 8, 23, 16},
412 {97750, 8, 42, 29},
413 {98000, 8, 45, 31},
414 {99000, 8, 22, 15},
415 {99750, 8, 34, 23},
416 {100000, 6, 20, 18},
417 {100500, 6, 19, 17},
418 {101000, 6, 37, 33},
419 {101250, 8, 21, 14},
420 {102000, 6, 17, 15},
421 {102250, 6, 25, 22},
422 {103000, 8, 29, 19},
423 {104000, 8, 37, 24},
424 {105000, 8, 28, 18},
425 {106000, 8, 22, 14},
426 {107000, 8, 46, 29},
427 {107214, 8, 27, 17},
428 {108000, 8, 24, 15},
429 {108108, 8, 173, 108},
430 {109000, 6, 23, 19},
431 {109000, 6, 23, 19},
432 {110000, 6, 22, 18},
433 {110013, 6, 22, 18},
434 {110250, 8, 49, 30},
435 {110500, 8, 36, 22},
436 {111000, 8, 23, 14},
437 {111264, 8, 150, 91},
438 {111375, 8, 33, 20},
439 {112000, 8, 63, 38},
440 {112500, 8, 25, 15},
441 {113100, 8, 57, 34},
442 {113309, 8, 42, 25},
443 {114000, 8, 27, 16},
444 {115000, 6, 23, 18},
445 {116000, 8, 43, 25},
446 {117000, 8, 26, 15},
447 {117500, 8, 40, 23},
448 {118000, 6, 38, 29},
449 {119000, 8, 30, 17},
450 {119500, 8, 46, 26},
451 {119651, 8, 39, 22},
452 {120000, 8, 32, 18},
453 {121000, 6, 39, 29},
454 {121250, 6, 31, 23},
455 {121750, 6, 23, 17},
456 {122000, 6, 42, 31},
457 {122614, 6, 30, 22},
458 {123000, 6, 41, 30},
459 {123379, 6, 37, 27},
460 {124000, 6, 51, 37},
461 {125000, 6, 25, 18},
462 {125250, 4, 13, 14},
463 {125750, 4, 27, 29},
464 {126000, 6, 21, 15},
465 {127000, 6, 24, 17},
466 {127250, 6, 41, 29},
467 {128000, 6, 27, 19},
468 {129000, 6, 43, 30},
469 {129859, 4, 25, 26},
470 {130000, 6, 26, 18},
471 {130250, 6, 42, 29},
472 {131000, 6, 32, 22},
473 {131500, 6, 38, 26},
474 {131850, 6, 41, 28},
475 {132000, 6, 22, 15},
476 {132750, 6, 28, 19},
477 {133000, 6, 34, 23},
478 {133330, 6, 37, 25},
479 {134000, 6, 61, 41},
480 {135000, 6, 21, 14},
481 {135250, 6, 167, 111},
482 {136000, 6, 62, 41},
483 {137000, 6, 35, 23},
484 {138000, 6, 23, 15},
485 {138500, 6, 40, 26},
486 {138750, 6, 37, 24},
487 {139000, 6, 34, 22},
488 {139050, 6, 34, 22},
489 {139054, 6, 34, 22},
490 {140000, 6, 28, 18},
491 {141000, 6, 36, 23},
492 {141500, 6, 22, 14},
493 {142000, 6, 30, 19},
494 {143000, 6, 27, 17},
495 {143472, 4, 17, 16},
496 {144000, 6, 24, 15},
497 {145000, 6, 29, 18},
498 {146000, 6, 47, 29},
499 {146250, 6, 26, 16},
500 {147000, 6, 49, 30},
501 {147891, 6, 23, 14},
502 {148000, 6, 23, 14},
503 {148250, 6, 28, 17},
504 {148352, 4, 100, 91},
505 {148500, 6, 33, 20},
506 {149000, 6, 48, 29},
507 {150000, 6, 25, 15},
508 {151000, 4, 19, 17},
509 {152000, 6, 27, 16},
510 {152280, 6, 44, 26},
511 {153000, 6, 34, 20},
512 {154000, 6, 53, 31},
513 {155000, 6, 31, 18},
514 {155250, 6, 50, 29},
515 {155750, 6, 45, 26},
516 {156000, 6, 26, 15},
517 {157000, 6, 61, 35},
518 {157500, 6, 28, 16},
519 {158000, 6, 65, 37},
520 {158250, 6, 44, 25},
521 {159000, 6, 53, 30},
522 {159500, 6, 39, 22},
523 {160000, 6, 32, 18},
524 {161000, 4, 31, 26},
525 {162000, 4, 18, 15},
526 {162162, 4, 131, 109},
527 {162500, 4, 53, 44},
528 {163000, 4, 29, 24},
529 {164000, 4, 17, 14},
530 {165000, 4, 22, 18},
531 {166000, 4, 32, 26},
532 {167000, 4, 26, 21},
533 {168000, 4, 46, 37},
534 {169000, 4, 104, 83},
535 {169128, 4, 64, 51},
536 {169500, 4, 39, 31},
537 {170000, 4, 34, 27},
538 {171000, 4, 19, 15},
539 {172000, 4, 51, 40},
540 {172750, 4, 32, 25},
541 {172800, 4, 32, 25},
542 {173000, 4, 41, 32},
543 {174000, 4, 49, 38},
544 {174787, 4, 22, 17},
545 {175000, 4, 35, 27},
546 {176000, 4, 30, 23},
547 {177000, 4, 38, 29},
548 {178000, 4, 29, 22},
549 {178500, 4, 37, 28},
550 {179000, 4, 53, 40},
551 {179500, 4, 73, 55},
552 {180000, 4, 20, 15},
553 {181000, 4, 55, 41},
554 {182000, 4, 31, 23},
555 {183000, 4, 42, 31},
556 {184000, 4, 30, 22},
557 {184750, 4, 26, 19},
558 {185000, 4, 37, 27},
559 {186000, 4, 51, 37},
560 {187000, 4, 36, 26},
561 {188000, 4, 32, 23},
562 {189000, 4, 21, 15},
563 {190000, 4, 38, 27},
564 {190960, 4, 41, 29},
565 {191000, 4, 41, 29},
566 {192000, 4, 27, 19},
567 {192250, 4, 37, 26},
568 {193000, 4, 20, 14},
569 {193250, 4, 53, 37},
570 {194000, 4, 23, 16},
571 {194208, 4, 23, 16},
572 {195000, 4, 26, 18},
573 {196000, 4, 45, 31},
574 {197000, 4, 35, 24},
575 {197750, 4, 41, 28},
576 {198000, 4, 22, 15},
577 {198500, 4, 25, 17},
578 {199000, 4, 28, 19},
579 {200000, 4, 37, 25},
580 {201000, 4, 61, 41},
581 {202000, 4, 112, 75},
582 {202500, 4, 21, 14},
583 {203000, 4, 146, 97},
584 {204000, 4, 62, 41},
585 {204750, 4, 44, 29},
586 {205000, 4, 38, 25},
587 {206000, 4, 29, 19},
588 {207000, 4, 23, 15},
589 {207500, 4, 40, 26},
590 {208000, 4, 37, 24},
591 {208900, 4, 48, 31},
592 {209000, 4, 48, 31},
593 {209250, 4, 31, 20},
594 {210000, 4, 28, 18},
595 {211000, 4, 25, 16},
596 {212000, 4, 22, 14},
597 {213000, 4, 30, 19},
598 {213750, 4, 38, 24},
599 {214000, 4, 46, 29},
600 {214750, 4, 35, 22},
601 {215000, 4, 43, 27},
602 {216000, 4, 24, 15},
603 {217000, 4, 37, 23},
604 {218000, 4, 42, 26},
605 {218250, 4, 42, 26},
606 {218750, 4, 34, 21},
607 {219000, 4, 47, 29},
608 {219000, 4, 47, 29},
609 {220000, 4, 44, 27},
610 {220640, 4, 49, 30},
611 {220750, 4, 36, 22},
612 {221000, 4, 36, 22},
613 {222000, 4, 23, 14},
614 {222525, 4, 28, 17},
615 {222750, 4, 33, 20},
616 {227000, 4, 37, 22},
617 {230250, 4, 29, 17},
618 {233500, 4, 38, 22},
619 {235000, 4, 40, 23},
620 {238000, 4, 30, 17},
621 {241500, 2, 17, 19},
622 {245250, 2, 20, 22},
623 {247750, 2, 22, 24},
624 {253250, 2, 15, 16},
625 {256250, 2, 18, 19},
626 {262500, 2, 31, 32},
627 {267250, 2, 66, 67},
628 {268500, 2, 94, 95},
629 {270000, 2, 14, 14},
630 {272500, 2, 77, 76},
631 {273750, 2, 57, 56},
632 {280750, 2, 24, 23},
633 {281250, 2, 23, 22},
634 {286000, 2, 17, 16},
635 {291750, 2, 26, 24},
636 {296703, 2, 56, 51},
637 {297000, 2, 22, 20},
638 {298000, 2, 21, 19},
639};
640
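The table above pairs each supported TMDS clock with WRPLL divider values (post divider p, feedback divider n2, reference divider r2) against the 2700 MHz LCPLL reference. As a rough sanity check of the entries (a sketch only, not part of the patch; the 540000 kHz scaling constant is inferred from the table rows rather than taken from this code):

/* Hypothetical helper: approximate the TMDS clock (in kHz) produced by a
 * divider triple, assuming clock ~= 540000 * n2 / (p * r2).
 * For example p=2, n2=14, r2=14 gives 540000 * 14 / (2 * 14) = 270000 kHz,
 * matching the {270000, 2, 14, 14} entry above.
 */
static unsigned int wrpll_approx_clock(unsigned int p, unsigned int n2,
				       unsigned int r2)
{
	return 540000 * n2 / (p * r2);
}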
641void intel_ddi_mode_set(struct drm_encoder *encoder,
642 struct drm_display_mode *mode,
643 struct drm_display_mode *adjusted_mode)
644{
645 struct drm_device *dev = encoder->dev;
646 struct drm_i915_private *dev_priv = dev->dev_private;
647 struct drm_crtc *crtc = encoder->crtc;
648 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
649 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
650 int port = intel_hdmi->ddi_port;
651 int pipe = intel_crtc->pipe;
652 int p, n2, r2, valid=0;
653 u32 temp, i;
654
655 /* On Haswell, we need to enable the clocks and prepare DDI function to
656 * work in HDMI mode for this pipe.
657 */
658 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
659
660 for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
661 if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
662 p = wrpll_tmds_clock_table[i].p;
663 n2 = wrpll_tmds_clock_table[i].n2;
664 r2 = wrpll_tmds_clock_table[i].r2;
665
666 DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
667 crtc->mode.clock,
668 p, n2, r2);
669
670 valid = 1;
671 break;
672 }
673 }
674
675 if (!valid) {
676 DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
677 crtc->mode.clock);
678 return;
679 }
680
681 /* Enable LCPLL if disabled */
682 temp = I915_READ(LCPLL_CTL);
683 if (temp & LCPLL_PLL_DISABLE)
684 I915_WRITE(LCPLL_CTL,
685 temp & ~LCPLL_PLL_DISABLE);
686
687 /* Configure WR PLL 1, program the correct divider values for
688 * the desired frequency and wait for warmup */
689 I915_WRITE(WRPLL_CTL1,
690 WRPLL_PLL_ENABLE |
691 WRPLL_PLL_SELECT_LCPLL_2700 |
692 WRPLL_DIVIDER_REFERENCE(r2) |
693 WRPLL_DIVIDER_FEEDBACK(n2) |
694 WRPLL_DIVIDER_POST(p));
695
696 udelay(20);
697
698 /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
699 * this port for connection.
700 */
701 I915_WRITE(PORT_CLK_SEL(port),
702 PORT_CLK_SEL_WRPLL1);
703 I915_WRITE(PIPE_CLK_SEL(pipe),
704 PIPE_CLK_SEL_PORT(port));
705
706 udelay(20);
707
708 if (intel_hdmi->has_audio) {
 709		/* Proper support for digital audio needs new logic and a new set
710 * of registers, so we leave it for future patch bombing.
711 */
712 DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
713 pipe_name(intel_crtc->pipe));
714 }
715
716 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
717 temp = I915_READ(DDI_FUNC_CTL(pipe));
718 temp &= ~PIPE_DDI_PORT_MASK;
719 temp &= ~PIPE_DDI_BPC_12;
720 temp |= PIPE_DDI_SELECT_PORT(port) |
721 PIPE_DDI_MODE_SELECT_HDMI |
722 ((intel_crtc->bpp > 24) ?
723 PIPE_DDI_BPC_12 :
724 PIPE_DDI_BPC_8) |
725 PIPE_DDI_FUNC_ENABLE;
726
727 I915_WRITE(DDI_FUNC_CTL(pipe), temp);
728
729 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
730 intel_hdmi_set_spd_infoframe(encoder);
731}
732
733void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
734{
735 struct drm_device *dev = encoder->dev;
736 struct drm_i915_private *dev_priv = dev->dev_private;
737 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
738 int port = intel_hdmi->ddi_port;
739 u32 temp;
740
741 temp = I915_READ(DDI_BUF_CTL(port));
742
743 if (mode != DRM_MODE_DPMS_ON) {
744 temp &= ~DDI_BUF_CTL_ENABLE;
745 } else {
746 temp |= DDI_BUF_CTL_ENABLE;
747 }
748
749 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
750 * and swing/emphasis values are ignored so nothing special needs
751 * to be done besides enabling the port.
752 */
753 I915_WRITE(DDI_BUF_CTL(port),
754 temp);
755}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6b4139064f9c..3c71850ddf20 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -784,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
784 return true; 784 return true;
785} 785}
786 786
787static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
788{
789 struct drm_i915_private *dev_priv = dev->dev_private;
790 u32 frame, frame_reg = PIPEFRAME(pipe);
791
792 frame = I915_READ(frame_reg);
793
794 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
795 DRM_DEBUG_KMS("vblank wait timed out\n");
796}
797
787/** 798/**
788 * intel_wait_for_vblank - wait for vblank on a given pipe 799 * intel_wait_for_vblank - wait for vblank on a given pipe
789 * @dev: drm device 800 * @dev: drm device
@@ -797,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
797 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = dev->dev_private;
798 int pipestat_reg = PIPESTAT(pipe); 809 int pipestat_reg = PIPESTAT(pipe);
799 810
811 if (INTEL_INFO(dev)->gen >= 5) {
812 ironlake_wait_for_vblank(dev, pipe);
813 return;
814 }
815
800 /* Clear existing vblank status. Note this will clear any other 816 /* Clear existing vblank status. Note this will clear any other
801 * sticky status fields as well. 817 * sticky status fields as well.
802 * 818 *
@@ -849,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
849 100)) 865 100))
850 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 866 DRM_DEBUG_KMS("pipe_off wait timed out\n");
851 } else { 867 } else {
852 u32 last_line; 868 u32 last_line, line_mask;
853 int reg = PIPEDSL(pipe); 869 int reg = PIPEDSL(pipe);
854 unsigned long timeout = jiffies + msecs_to_jiffies(100); 870 unsigned long timeout = jiffies + msecs_to_jiffies(100);
855 871
872 if (IS_GEN2(dev))
873 line_mask = DSL_LINEMASK_GEN2;
874 else
875 line_mask = DSL_LINEMASK_GEN3;
876
856 /* Wait for the display line to settle */ 877 /* Wait for the display line to settle */
857 do { 878 do {
858 last_line = I915_READ(reg) & DSL_LINEMASK; 879 last_line = I915_READ(reg) & line_mask;
859 mdelay(5); 880 mdelay(5);
860 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && 881 } while (((I915_READ(reg) & line_mask) != last_line) &&
861 time_after(timeout, jiffies)); 882 time_after(timeout, jiffies));
862 if (time_after(jiffies, timeout)) 883 if (time_after(jiffies, timeout))
863 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 884 DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -895,6 +916,11 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
895 u32 val; 916 u32 val;
896 bool cur_state; 917 bool cur_state;
897 918
919 if (HAS_PCH_LPT(dev_priv->dev)) {
920 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
921 return;
922 }
923
898 if (!intel_crtc->pch_pll) { 924 if (!intel_crtc->pch_pll) {
899 WARN(1, "asserting PCH PLL enabled with no PLL\n"); 925 WARN(1, "asserting PCH PLL enabled with no PLL\n");
900 return; 926 return;
@@ -927,9 +953,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
927 u32 val; 953 u32 val;
928 bool cur_state; 954 bool cur_state;
929 955
930 reg = FDI_TX_CTL(pipe); 956 if (IS_HASWELL(dev_priv->dev)) {
931 val = I915_READ(reg); 957 /* On Haswell, DDI is used instead of FDI_TX_CTL */
932 cur_state = !!(val & FDI_TX_ENABLE); 958 reg = DDI_FUNC_CTL(pipe);
959 val = I915_READ(reg);
960 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
961 } else {
962 reg = FDI_TX_CTL(pipe);
963 val = I915_READ(reg);
964 cur_state = !!(val & FDI_TX_ENABLE);
965 }
933 WARN(cur_state != state, 966 WARN(cur_state != state,
934 "FDI TX state assertion failure (expected %s, current %s)\n", 967 "FDI TX state assertion failure (expected %s, current %s)\n",
935 state_string(state), state_string(cur_state)); 968 state_string(state), state_string(cur_state));
@@ -944,9 +977,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
944 u32 val; 977 u32 val;
945 bool cur_state; 978 bool cur_state;
946 979
947 reg = FDI_RX_CTL(pipe); 980 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
948 val = I915_READ(reg); 981 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
949 cur_state = !!(val & FDI_RX_ENABLE); 982 return;
983 } else {
984 reg = FDI_RX_CTL(pipe);
985 val = I915_READ(reg);
986 cur_state = !!(val & FDI_RX_ENABLE);
987 }
950 WARN(cur_state != state, 988 WARN(cur_state != state,
951 "FDI RX state assertion failure (expected %s, current %s)\n", 989 "FDI RX state assertion failure (expected %s, current %s)\n",
952 state_string(state), state_string(cur_state)); 990 state_string(state), state_string(cur_state));
@@ -964,6 +1002,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
964 if (dev_priv->info->gen == 5) 1002 if (dev_priv->info->gen == 5)
965 return; 1003 return;
966 1004
1005 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1006 if (IS_HASWELL(dev_priv->dev))
1007 return;
1008
967 reg = FDI_TX_CTL(pipe); 1009 reg = FDI_TX_CTL(pipe);
968 val = I915_READ(reg); 1010 val = I915_READ(reg);
969 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1011 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -975,6 +1017,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
975 int reg; 1017 int reg;
976 u32 val; 1018 u32 val;
977 1019
1020 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1021 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1022 return;
1023 }
978 reg = FDI_RX_CTL(pipe); 1024 reg = FDI_RX_CTL(pipe);
979 val = I915_READ(reg); 1025 val = I915_READ(reg);
980 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1026 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1080,6 +1126,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1080 u32 val; 1126 u32 val;
1081 bool enabled; 1127 bool enabled;
1082 1128
1129 if (HAS_PCH_LPT(dev_priv->dev)) {
1130		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1131 return;
1132 }
1133
1083 val = I915_READ(PCH_DREF_CONTROL); 1134 val = I915_READ(PCH_DREF_CONTROL);
1084 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1135 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1085 DREF_SUPERSPREAD_SOURCE_MASK)); 1136 DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1278,6 +1329,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1278 POSTING_READ(reg); 1329 POSTING_READ(reg);
1279} 1330}
1280 1331
1332/* SBI access */
1333static void
1334intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1335{
1336 unsigned long flags;
1337
1338 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1339 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1340 100)) {
1341 DRM_ERROR("timeout waiting for SBI to become ready\n");
1342 goto out_unlock;
1343 }
1344
1345 I915_WRITE(SBI_ADDR,
1346 (reg << 16));
1347 I915_WRITE(SBI_DATA,
1348 value);
1349 I915_WRITE(SBI_CTL_STAT,
1350 SBI_BUSY |
1351 SBI_CTL_OP_CRWR);
1352
1353 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1354 100)) {
1355 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1356 goto out_unlock;
1357 }
1358
1359out_unlock:
1360 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1361}
1362
1363static u32
1364intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1365{
1366 unsigned long flags;
1367 u32 value;
1368
1369 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1370 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1371 100)) {
1372 DRM_ERROR("timeout waiting for SBI to become ready\n");
1373 goto out_unlock;
1374 }
1375
1376 I915_WRITE(SBI_ADDR,
1377 (reg << 16));
1378 I915_WRITE(SBI_CTL_STAT,
1379 SBI_BUSY |
1380 SBI_CTL_OP_CRRD);
1381
1382 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1383 100)) {
1384 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1385 goto out_unlock;
1386 }
1387
1388 value = I915_READ(SBI_DATA);
1389
1390out_unlock:
1391 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1392 return value;
1393}
1394
1281/** 1395/**
1282 * intel_enable_pch_pll - enable PCH PLL 1396 * intel_enable_pch_pll - enable PCH PLL
1283 * @dev_priv: i915 private structure 1397 * @dev_priv: i915 private structure
@@ -1289,14 +1403,18 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1289static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) 1403static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1290{ 1404{
1291 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1405 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1292 struct intel_pch_pll *pll = intel_crtc->pch_pll; 1406 struct intel_pch_pll *pll;
1293 int reg; 1407 int reg;
1294 u32 val; 1408 u32 val;
1295 1409
1296 /* PCH only available on ILK+ */ 1410 /* PCH PLLs only available on ILK, SNB and IVB */
1297 BUG_ON(dev_priv->info->gen < 5); 1411 BUG_ON(dev_priv->info->gen < 5);
1298 BUG_ON(pll == NULL); 1412 pll = intel_crtc->pch_pll;
1299 BUG_ON(pll->refcount == 0); 1413 if (pll == NULL)
1414 return;
1415
1416 if (WARN_ON(pll->refcount == 0))
1417 return;
1300 1418
1301 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n", 1419 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1302 pll->pll_reg, pll->active, pll->on, 1420 pll->pll_reg, pll->active, pll->on,
@@ -1334,13 +1452,18 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1334 if (pll == NULL) 1452 if (pll == NULL)
1335 return; 1453 return;
1336 1454
1337 BUG_ON(pll->refcount == 0); 1455 if (WARN_ON(pll->refcount == 0))
1456 return;
1338 1457
1339 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n", 1458 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1340 pll->pll_reg, pll->active, pll->on, 1459 pll->pll_reg, pll->active, pll->on,
1341 intel_crtc->base.base.id); 1460 intel_crtc->base.base.id);
1342 1461
1343 BUG_ON(pll->active == 0); 1462 if (WARN_ON(pll->active == 0)) {
1463 assert_pch_pll_disabled(dev_priv, intel_crtc);
1464 return;
1465 }
1466
1344 if (--pll->active) { 1467 if (--pll->active) {
1345 assert_pch_pll_enabled(dev_priv, intel_crtc); 1468 assert_pch_pll_enabled(dev_priv, intel_crtc);
1346 return; 1469 return;
@@ -1378,6 +1501,10 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1378 assert_fdi_tx_enabled(dev_priv, pipe); 1501 assert_fdi_tx_enabled(dev_priv, pipe);
1379 assert_fdi_rx_enabled(dev_priv, pipe); 1502 assert_fdi_rx_enabled(dev_priv, pipe);
1380 1503
1504 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1505 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1506 return;
1507 }
1381 reg = TRANSCONF(pipe); 1508 reg = TRANSCONF(pipe);
1382 val = I915_READ(reg); 1509 val = I915_READ(reg);
1383 pipeconf_val = I915_READ(PIPECONF(pipe)); 1510 pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1896,16 +2023,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1896 return 0; 2023 return 0;
1897 } 2024 }
1898 2025
1899 switch (intel_crtc->plane) { 2026 if(intel_crtc->plane > dev_priv->num_pipe) {
1900 case 0: 2027 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
1901 case 1: 2028 intel_crtc->plane,
1902 break; 2029 dev_priv->num_pipe);
1903 case 2:
1904 if (IS_IVYBRIDGE(dev))
1905 break;
1906 /* fall through otherwise */
1907 default:
1908 DRM_ERROR("no plane for crtc\n");
1909 return -EINVAL; 2030 return -EINVAL;
1910 } 2031 }
1911 2032
@@ -2426,14 +2547,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2426 POSTING_READ(reg); 2547 POSTING_READ(reg);
2427 udelay(200); 2548 udelay(200);
2428 2549
2429 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2550 /* On Haswell, the PLL configuration for ports and pipes is handled
2430 reg = FDI_TX_CTL(pipe); 2551 * separately, as part of DDI setup */
2431 temp = I915_READ(reg); 2552 if (!IS_HASWELL(dev)) {
2432 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2553 /* Enable CPU FDI TX PLL, always on for Ironlake */
2433 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2554 reg = FDI_TX_CTL(pipe);
2555 temp = I915_READ(reg);
2556 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2557 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2434 2558
2435 POSTING_READ(reg); 2559 POSTING_READ(reg);
2436 udelay(100); 2560 udelay(100);
2561 }
2437 } 2562 }
2438} 2563}
2439 2564
@@ -2532,6 +2657,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2532 if (encoder->base.crtc != crtc) 2657 if (encoder->base.crtc != crtc)
2533 continue; 2658 continue;
2534 2659
2660 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2661 * CPU handles all others */
2662 if (IS_HASWELL(dev)) {
2663 /* It is still unclear how this will work on PPT, so throw up a warning */
2664 WARN_ON(!HAS_PCH_LPT(dev));
2665
2666 if (encoder->type == DRM_MODE_ENCODER_DAC) {
 2667			DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming it is PCH\n");
2668 return true;
2669 } else {
 2670			DRM_DEBUG_KMS("Haswell detected encoder %d, assuming it is CPU\n",
2671 encoder->type);
2672 return false;
2673 }
2674 }
2675
2535 switch (encoder->type) { 2676 switch (encoder->type) {
2536 case INTEL_OUTPUT_EDP: 2677 case INTEL_OUTPUT_EDP:
2537 if (!intel_encoder_is_pch_edp(&encoder->base)) 2678 if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2543,6 +2684,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2543 return true; 2684 return true;
2544} 2685}
2545 2686
2687/* Program iCLKIP clock to the desired frequency */
2688static void lpt_program_iclkip(struct drm_crtc *crtc)
2689{
2690 struct drm_device *dev = crtc->dev;
2691 struct drm_i915_private *dev_priv = dev->dev_private;
2692 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2693 u32 temp;
2694
2695 /* It is necessary to ungate the pixclk gate prior to programming
2696 * the divisors, and gate it back when it is done.
2697 */
2698 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2699
2700 /* Disable SSCCTL */
2701 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2702 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2703 SBI_SSCCTL_DISABLE);
2704
2705 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2706 if (crtc->mode.clock == 20000) {
2707 auxdiv = 1;
2708 divsel = 0x41;
2709 phaseinc = 0x20;
2710 } else {
2711 /* The iCLK virtual clock root frequency is in MHz,
 2712		 * but the crtc->mode.clock is in KHz. To get the divisors,
 2713		 * it is necessary to divide one by the other, so we
 2714		 * convert the virtual clock root frequency to KHz here for
 2715		 * higher precision.
2716 */
2717 u32 iclk_virtual_root_freq = 172800 * 1000;
2718 u32 iclk_pi_range = 64;
2719 u32 desired_divisor, msb_divisor_value, pi_value;
2720
2721 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2722 msb_divisor_value = desired_divisor / iclk_pi_range;
2723 pi_value = desired_divisor % iclk_pi_range;
2724
2725 auxdiv = 0;
2726 divsel = msb_divisor_value - 2;
2727 phaseinc = pi_value;
2728 }
2729
2730 /* This should not happen with any sane values */
2731 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2732 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2733 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2734 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2735
2736 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2737 crtc->mode.clock,
2738 auxdiv,
2739 divsel,
2740 phasedir,
2741 phaseinc);
2742
2743 /* Program SSCDIVINTPHASE6 */
2744 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2745 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2746 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2747 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2748 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2749 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2750 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2751
2752 intel_sbi_write(dev_priv,
2753 SBI_SSCDIVINTPHASE6,
2754 temp);
2755
2756 /* Program SSCAUXDIV */
2757 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2758 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2759 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2760 intel_sbi_write(dev_priv,
2761 SBI_SSCAUXDIV6,
2762 temp);
2763
2764
2765 /* Enable modulator and associated divider */
2766 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2767 temp &= ~SBI_SSCCTL_DISABLE;
2768 intel_sbi_write(dev_priv,
2769 SBI_SSCCTL6,
2770 temp);
2771
2772 /* Wait for initialization time */
2773 udelay(24);
2774
2775 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2776}
2777
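As a worked example of the divisor math above (a sketch with hypothetical numbers, not part of the patch): for a 108000 kHz pixel clock,

	/* desired_divisor   = 172800000 / 108000 = 1600
	 * msb_divisor_value = 1600 / 64 = 25  ->  divsel   = 25 - 2 = 23
	 * pi_value          = 1600 % 64 = 0   ->  phaseinc = 0
	 * auxdiv stays 0 in this (non-20MHz) branch.
	 */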
2546/* 2778/*
2547 * Enable PCH resources required for PCH ports: 2779 * Enable PCH resources required for PCH ports:
2548 * - PCH PLLs 2780 * - PCH PLLs
@@ -2559,12 +2791,17 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2559 int pipe = intel_crtc->pipe; 2791 int pipe = intel_crtc->pipe;
2560 u32 reg, temp; 2792 u32 reg, temp;
2561 2793
2794 assert_transcoder_disabled(dev_priv, pipe);
2795
2562 /* For PCH output, training FDI link */ 2796 /* For PCH output, training FDI link */
2563 dev_priv->display.fdi_link_train(crtc); 2797 dev_priv->display.fdi_link_train(crtc);
2564 2798
2565 intel_enable_pch_pll(intel_crtc); 2799 intel_enable_pch_pll(intel_crtc);
2566 2800
2567 if (HAS_PCH_CPT(dev)) { 2801 if (HAS_PCH_LPT(dev)) {
2802 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2803 lpt_program_iclkip(crtc);
2804 } else if (HAS_PCH_CPT(dev)) {
2568 u32 sel; 2805 u32 sel;
2569 2806
2570 temp = I915_READ(PCH_DPLL_SEL); 2807 temp = I915_READ(PCH_DPLL_SEL);
@@ -2601,7 +2838,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2601 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2838 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2602 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 2839 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
2603 2840
2604 intel_fdi_normal_train(crtc); 2841 if (!IS_HASWELL(dev))
2842 intel_fdi_normal_train(crtc);
2605 2843
2606 /* For PCH DP, enable TRANS_DP_CTL */ 2844 /* For PCH DP, enable TRANS_DP_CTL */
2607 if (HAS_PCH_CPT(dev) && 2845 if (HAS_PCH_CPT(dev) &&
@@ -2673,6 +2911,17 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3
2673 goto prepare; 2911 goto prepare;
2674 } 2912 }
2675 2913
2914 if (HAS_PCH_IBX(dev_priv->dev)) {
2915 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2916 i = intel_crtc->pipe;
2917 pll = &dev_priv->pch_plls[i];
2918
2919 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2920 intel_crtc->base.base.id, pll->pll_reg);
2921
2922 goto found;
2923 }
2924
2676 for (i = 0; i < dev_priv->num_pch_pll; i++) { 2925 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2677 pll = &dev_priv->pch_plls[i]; 2926 pll = &dev_priv->pch_plls[i];
2678 2927
@@ -3120,8 +3369,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
3120{ 3369{
3121 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3370 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3122 struct drm_device *dev = encoder->dev; 3371 struct drm_device *dev = encoder->dev;
3123 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3372 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3124 struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3125 3373
3126 /* lvds has its own version of commit see intel_lvds_commit */ 3374 /* lvds has its own version of commit see intel_lvds_commit */
3127 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 3375 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -4312,8 +4560,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4312 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 4560 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4313 drm_mode_debug_printmodeline(mode); 4561 drm_mode_debug_printmodeline(mode);
4314 4562
4315 /* CPU eDP is the only output that doesn't need a PCH PLL of its own */ 4563 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4316 if (!is_cpu_edp) { 4564 * pre-Haswell/LPT generation */
4565 if (HAS_PCH_LPT(dev)) {
4566 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4567 pipe);
4568 } else if (!is_cpu_edp) {
4317 struct intel_pch_pll *pll; 4569 struct intel_pch_pll *pll;
4318 4570
4319 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 4571 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -4473,6 +4725,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4473 4725
4474 intel_update_watermarks(dev); 4726 intel_update_watermarks(dev);
4475 4727
4728 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4729
4476 return ret; 4730 return ret;
4477} 4731}
4478 4732
@@ -5538,6 +5792,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5538 mod_timer(&dev_priv->idle_timer, jiffies + 5792 mod_timer(&dev_priv->idle_timer, jiffies +
5539 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 5793 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5540 5794
5795 if (obj == NULL)
5796 return;
5797
5541 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5798 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5542 if (!crtc->fb) 5799 if (!crtc->fb)
5543 continue; 5800 continue;
@@ -5987,6 +6244,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5987 goto cleanup_pending; 6244 goto cleanup_pending;
5988 6245
5989 intel_disable_fbc(dev); 6246 intel_disable_fbc(dev);
6247 intel_mark_busy(dev, obj);
5990 mutex_unlock(&dev->struct_mutex); 6248 mutex_unlock(&dev->struct_mutex);
5991 6249
5992 trace_i915_flip_request(intel_crtc->plane, obj); 6250 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6015,10 +6273,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
6015{ 6273{
6016 struct drm_i915_private *dev_priv = dev->dev_private; 6274 struct drm_i915_private *dev_priv = dev->dev_private;
6017 u32 reg, val; 6275 u32 reg, val;
6276 int i;
6018 6277
6019 /* Clear any frame start delays used for debugging left by the BIOS */ 6278 /* Clear any frame start delays used for debugging left by the BIOS */
6020 for_each_pipe(pipe) { 6279 for_each_pipe(i) {
6021 reg = PIPECONF(pipe); 6280 reg = PIPECONF(i);
6022 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 6281 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6023 } 6282 }
6024 6283
@@ -6238,7 +6497,26 @@ static void intel_setup_outputs(struct drm_device *dev)
6238 6497
6239 intel_crt_init(dev); 6498 intel_crt_init(dev);
6240 6499
6241 if (HAS_PCH_SPLIT(dev)) { 6500 if (IS_HASWELL(dev)) {
6501 int found;
6502
6503 /* Haswell uses DDI functions to detect digital outputs */
6504 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6505 /* DDI A only supports eDP */
6506 if (found)
6507 intel_ddi_init(dev, PORT_A);
6508
6509 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
6510 * register */
6511 found = I915_READ(SFUSE_STRAP);
6512
6513 if (found & SFUSE_STRAP_DDIB_DETECTED)
6514 intel_ddi_init(dev, PORT_B);
6515 if (found & SFUSE_STRAP_DDIC_DETECTED)
6516 intel_ddi_init(dev, PORT_C);
6517 if (found & SFUSE_STRAP_DDID_DETECTED)
6518 intel_ddi_init(dev, PORT_D);
6519 } else if (HAS_PCH_SPLIT(dev)) {
6242 int found; 6520 int found;
6243 6521
6244 if (I915_READ(HDMIB) & PORT_DETECTED) { 6522 if (I915_READ(HDMIB) & PORT_DETECTED) {
@@ -6467,6 +6745,9 @@ static void intel_init_display(struct drm_device *dev)
6467 /* FIXME: detect B0+ stepping and use auto training */ 6745 /* FIXME: detect B0+ stepping and use auto training */
6468 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 6746 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
6469 dev_priv->display.write_eld = ironlake_write_eld; 6747 dev_priv->display.write_eld = ironlake_write_eld;
6748 } else if (IS_HASWELL(dev)) {
6749 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
6750 dev_priv->display.write_eld = ironlake_write_eld;
6470 } else 6751 } else
6471 dev_priv->display.update_wm = NULL; 6752 dev_priv->display.update_wm = NULL;
6472 } else if (IS_VALLEYVIEW(dev)) { 6753 } else if (IS_VALLEYVIEW(dev)) {
@@ -6634,6 +6915,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
6634 6915
6635 if (IS_IRONLAKE_M(dev)) { 6916 if (IS_IRONLAKE_M(dev)) {
6636 ironlake_enable_drps(dev); 6917 ironlake_enable_drps(dev);
6918 ironlake_enable_rc6(dev);
6637 intel_init_emon(dev); 6919 intel_init_emon(dev);
6638 } 6920 }
6639 6921
@@ -6665,6 +6947,8 @@ void intel_modeset_init(struct drm_device *dev)
6665 6947
6666 intel_init_pm(dev); 6948 intel_init_pm(dev);
6667 6949
6950 intel_prepare_ddi(dev);
6951
6668 intel_init_display(dev); 6952 intel_init_display(dev);
6669 6953
6670 if (IS_GEN2(dev)) { 6954 if (IS_GEN2(dev)) {
@@ -6695,8 +6979,6 @@ void intel_modeset_init(struct drm_device *dev)
6695 i915_disable_vga(dev); 6979 i915_disable_vga(dev);
6696 intel_setup_outputs(dev); 6980 intel_setup_outputs(dev);
6697 6981
6698 intel_modeset_init_hw(dev);
6699
6700 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6982 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6701 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6983 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6702 (unsigned long)dev); 6984 (unsigned long)dev);
@@ -6704,8 +6986,7 @@ void intel_modeset_init(struct drm_device *dev)
6704 6986
6705void intel_modeset_gem_init(struct drm_device *dev) 6987void intel_modeset_gem_init(struct drm_device *dev)
6706{ 6988{
6707 if (IS_IRONLAKE_M(dev)) 6989 intel_modeset_init_hw(dev);
6708 ironlake_enable_rc6(dev);
6709 6990
6710 intel_setup_overlay(dev); 6991 intel_setup_overlay(dev);
6711} 6992}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e5ee166e2faf..3e0918834e7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,16 +280,29 @@ struct dip_infoframe {
280 uint16_t bottom_bar_start; 280 uint16_t bottom_bar_start;
281 uint16_t left_bar_end; 281 uint16_t left_bar_end;
282 uint16_t right_bar_start; 282 uint16_t right_bar_start;
283 } avi; 283 } __attribute__ ((packed)) avi;
284 struct { 284 struct {
285 uint8_t vn[8]; 285 uint8_t vn[8];
286 uint8_t pd[16]; 286 uint8_t pd[16];
287 uint8_t sdi; 287 uint8_t sdi;
288 } spd; 288 } __attribute__ ((packed)) spd;
289 uint8_t payload[27]; 289 uint8_t payload[27];
290 } __attribute__ ((packed)) body; 290 } __attribute__ ((packed)) body;
291} __attribute__((packed)); 291} __attribute__((packed));
292 292
293struct intel_hdmi {
294 struct intel_encoder base;
295 u32 sdvox_reg;
296 int ddc_bus;
297 int ddi_port;
298 uint32_t color_range;
299 bool has_hdmi_sink;
300 bool has_audio;
301 enum hdmi_force_audio force_audio;
302 void (*write_infoframe)(struct drm_encoder *encoder,
303 struct dip_infoframe *frame);
304};
305
293static inline struct drm_crtc * 306static inline struct drm_crtc *
294intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 307intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
295{ 308{
@@ -329,7 +342,11 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
329 342
330extern void intel_crt_init(struct drm_device *dev); 343extern void intel_crt_init(struct drm_device *dev);
331extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 344extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
332void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 345extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
346extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
347 struct drm_display_mode *adjusted_mode);
348extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
349extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
333extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 350extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
334 bool is_sdvob); 351 bool is_sdvob);
335extern void intel_dvo_init(struct drm_device *dev); 352extern void intel_dvo_init(struct drm_device *dev);
@@ -446,12 +463,17 @@ extern void intel_init_clock_gating(struct drm_device *dev);
446extern void intel_write_eld(struct drm_encoder *encoder, 463extern void intel_write_eld(struct drm_encoder *encoder,
447 struct drm_display_mode *mode); 464 struct drm_display_mode *mode);
448extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); 465extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
466extern void intel_prepare_ddi(struct drm_device *dev);
467extern void hsw_fdi_link_train(struct drm_crtc *crtc);
468extern void intel_ddi_init(struct drm_device *dev, enum port port);
449 469
450/* For use by IVB LP watermark workaround in intel_sprite.c */ 470/* For use by IVB LP watermark workaround in intel_sprite.c */
451extern void intel_update_watermarks(struct drm_device *dev); 471extern void intel_update_watermarks(struct drm_device *dev);
452extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 472extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
453 uint32_t sprite_width, 473 uint32_t sprite_width,
454 int pixel_size); 474 int pixel_size);
475extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
476 struct drm_display_mode *mode);
455 477
456extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 478extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
457 struct drm_file *file_priv); 479 struct drm_file *file_priv);
@@ -475,4 +497,9 @@ extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
475extern void gen6_disable_rps(struct drm_device *dev); 497extern void gen6_disable_rps(struct drm_device *dev);
476extern void intel_init_emon(struct drm_device *dev); 498extern void intel_init_emon(struct drm_device *dev);
477 499
500extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
501extern void intel_ddi_mode_set(struct drm_encoder *encoder,
502 struct drm_display_mode *mode,
503 struct drm_display_mode *adjusted_mode);
504
478#endif /* __INTEL_DRV_H__ */ 505#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bf218753cbaf..2ead3bf7c21d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40struct intel_hdmi { 40struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
41 struct intel_encoder base;
42 u32 sdvox_reg;
43 int ddc_bus;
44 uint32_t color_range;
45 bool has_hdmi_sink;
46 bool has_audio;
47 enum hdmi_force_audio force_audio;
48 void (*write_infoframe)(struct drm_encoder *encoder,
49 struct dip_infoframe *frame);
50};
51
52static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
53{ 41{
54 return container_of(encoder, struct intel_hdmi, base.base); 42 return container_of(encoder, struct intel_hdmi, base.base);
55} 43}
@@ -75,107 +63,182 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
75 frame->checksum = 0x100 - sum; 63 frame->checksum = 0x100 - sum;
76} 64}
77 65
78static u32 intel_infoframe_index(struct dip_infoframe *frame) 66static u32 g4x_infoframe_index(struct dip_infoframe *frame)
79{ 67{
80 u32 flags = 0;
81
82 switch (frame->type) { 68 switch (frame->type) {
83 case DIP_TYPE_AVI: 69 case DIP_TYPE_AVI:
84 flags |= VIDEO_DIP_SELECT_AVI; 70 return VIDEO_DIP_SELECT_AVI;
85 break;
86 case DIP_TYPE_SPD: 71 case DIP_TYPE_SPD:
87 flags |= VIDEO_DIP_SELECT_SPD; 72 return VIDEO_DIP_SELECT_SPD;
88 break;
89 default: 73 default:
90 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 74 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
91 break; 75 return 0;
92 } 76 }
93
94 return flags;
95} 77}
96 78
97static u32 intel_infoframe_flags(struct dip_infoframe *frame) 79static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
98{ 80{
99 u32 flags = 0; 81 switch (frame->type) {
82 case DIP_TYPE_AVI:
83 return VIDEO_DIP_ENABLE_AVI;
84 case DIP_TYPE_SPD:
85 return VIDEO_DIP_ENABLE_SPD;
86 default:
87 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
88 return 0;
89 }
90}
100 91
92static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
93{
101 switch (frame->type) { 94 switch (frame->type) {
102 case DIP_TYPE_AVI: 95 case DIP_TYPE_AVI:
103 flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; 96 return VIDEO_DIP_ENABLE_AVI_HSW;
104 break;
105 case DIP_TYPE_SPD: 97 case DIP_TYPE_SPD:
106 flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC; 98 return VIDEO_DIP_ENABLE_SPD_HSW;
107 break;
108 default: 99 default:
109 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 100 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
110 break; 101 return 0;
111 } 102 }
103}
112 104
113 return flags; 105static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
106{
107 switch (frame->type) {
108 case DIP_TYPE_AVI:
109 return HSW_TVIDEO_DIP_AVI_DATA(pipe);
110 case DIP_TYPE_SPD:
111 return HSW_TVIDEO_DIP_SPD_DATA(pipe);
112 default:
113 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
114 return 0;
115 }
114} 116}
115 117
116static void i9xx_write_infoframe(struct drm_encoder *encoder, 118static void g4x_write_infoframe(struct drm_encoder *encoder,
117 struct dip_infoframe *frame) 119 struct dip_infoframe *frame)
118{ 120{
119 uint32_t *data = (uint32_t *)frame; 121 uint32_t *data = (uint32_t *)frame;
120 struct drm_device *dev = encoder->dev; 122 struct drm_device *dev = encoder->dev;
121 struct drm_i915_private *dev_priv = dev->dev_private; 123 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 124 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
123 u32 port, flags, val = I915_READ(VIDEO_DIP_CTL); 125 u32 val = I915_READ(VIDEO_DIP_CTL);
124 unsigned i, len = DIP_HEADER_SIZE + frame->len; 126 unsigned i, len = DIP_HEADER_SIZE + frame->len;
125 127
126 128 val &= ~VIDEO_DIP_PORT_MASK;
127 /* XXX first guess at handling video port, is this corrent? */
128 if (intel_hdmi->sdvox_reg == SDVOB) 129 if (intel_hdmi->sdvox_reg == SDVOB)
129 port = VIDEO_DIP_PORT_B; 130 val |= VIDEO_DIP_PORT_B;
130 else if (intel_hdmi->sdvox_reg == SDVOC) 131 else if (intel_hdmi->sdvox_reg == SDVOC)
131 port = VIDEO_DIP_PORT_C; 132 val |= VIDEO_DIP_PORT_C;
132 else 133 else
133 return; 134 return;
134 135
135 flags = intel_infoframe_index(frame); 136 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
137 val |= g4x_infoframe_index(frame);
136 138
137 val &= ~VIDEO_DIP_SELECT_MASK; 139 val &= ~g4x_infoframe_enable(frame);
140 val |= VIDEO_DIP_ENABLE;
138 141
139 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); 142 I915_WRITE(VIDEO_DIP_CTL, val);
140 143
141 for (i = 0; i < len; i += 4) { 144 for (i = 0; i < len; i += 4) {
142 I915_WRITE(VIDEO_DIP_DATA, *data); 145 I915_WRITE(VIDEO_DIP_DATA, *data);
143 data++; 146 data++;
144 } 147 }
145 148
146 flags |= intel_infoframe_flags(frame); 149 val |= g4x_infoframe_enable(frame);
150 val &= ~VIDEO_DIP_FREQ_MASK;
151 val |= VIDEO_DIP_FREQ_VSYNC;
147 152
148 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); 153 I915_WRITE(VIDEO_DIP_CTL, val);
149} 154}
150 155
151static void ironlake_write_infoframe(struct drm_encoder *encoder, 156static void ibx_write_infoframe(struct drm_encoder *encoder,
152 struct dip_infoframe *frame) 157 struct dip_infoframe *frame)
153{ 158{
154 uint32_t *data = (uint32_t *)frame; 159 uint32_t *data = (uint32_t *)frame;
155 struct drm_device *dev = encoder->dev; 160 struct drm_device *dev = encoder->dev;
156 struct drm_i915_private *dev_priv = dev->dev_private; 161 struct drm_i915_private *dev_priv = dev->dev_private;
157 struct drm_crtc *crtc = encoder->crtc; 162 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 163 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
159 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 164 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
160 unsigned i, len = DIP_HEADER_SIZE + frame->len; 165 unsigned i, len = DIP_HEADER_SIZE + frame->len;
161 u32 flags, val = I915_READ(reg); 166 u32 val = I915_READ(reg);
167
168 val &= ~VIDEO_DIP_PORT_MASK;
169 switch (intel_hdmi->sdvox_reg) {
170 case HDMIB:
171 val |= VIDEO_DIP_PORT_B;
172 break;
173 case HDMIC:
174 val |= VIDEO_DIP_PORT_C;
175 break;
176 case HDMID:
177 val |= VIDEO_DIP_PORT_D;
178 break;
179 default:
180 return;
181 }
162 182
163 intel_wait_for_vblank(dev, intel_crtc->pipe); 183 intel_wait_for_vblank(dev, intel_crtc->pipe);
164 184
165 flags = intel_infoframe_index(frame); 185 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
186 val |= g4x_infoframe_index(frame);
187
188 val &= ~g4x_infoframe_enable(frame);
189 val |= VIDEO_DIP_ENABLE;
190
191 I915_WRITE(reg, val);
192
193 for (i = 0; i < len; i += 4) {
194 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
195 data++;
196 }
197
198 val |= g4x_infoframe_enable(frame);
199 val &= ~VIDEO_DIP_FREQ_MASK;
200 val |= VIDEO_DIP_FREQ_VSYNC;
201
202 I915_WRITE(reg, val);
203}
204
205static void cpt_write_infoframe(struct drm_encoder *encoder,
206 struct dip_infoframe *frame)
207{
208 uint32_t *data = (uint32_t *)frame;
209 struct drm_device *dev = encoder->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
212 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
213 unsigned i, len = DIP_HEADER_SIZE + frame->len;
214 u32 val = I915_READ(reg);
215
216 intel_wait_for_vblank(dev, intel_crtc->pipe);
166 217
167 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 218 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
219 val |= g4x_infoframe_index(frame);
220
221 /* The DIP control register spec says that we need to update the AVI
222 * infoframe without clearing its enable bit */
223 if (frame->type == DIP_TYPE_AVI)
224 val |= VIDEO_DIP_ENABLE_AVI;
225 else
226 val &= ~g4x_infoframe_enable(frame);
227
228 val |= VIDEO_DIP_ENABLE;
168 229
169 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 230 I915_WRITE(reg, val);
170 231
171 for (i = 0; i < len; i += 4) { 232 for (i = 0; i < len; i += 4) {
172 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 233 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
173 data++; 234 data++;
174 } 235 }
175 236
176 flags |= intel_infoframe_flags(frame); 237 val |= g4x_infoframe_enable(frame);
238 val &= ~VIDEO_DIP_FREQ_MASK;
239 val |= VIDEO_DIP_FREQ_VSYNC;
177 240
178 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 241 I915_WRITE(reg, val);
179} 242}
180 243
181static void vlv_write_infoframe(struct drm_encoder *encoder, 244static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -184,28 +247,60 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
184 uint32_t *data = (uint32_t *)frame; 247 uint32_t *data = (uint32_t *)frame;
185 struct drm_device *dev = encoder->dev; 248 struct drm_device *dev = encoder->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private; 249 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct drm_crtc *crtc = encoder->crtc; 250 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
189 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 251 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
190 unsigned i, len = DIP_HEADER_SIZE + frame->len; 252 unsigned i, len = DIP_HEADER_SIZE + frame->len;
191 u32 flags, val = I915_READ(reg); 253 u32 val = I915_READ(reg);
192 254
193 intel_wait_for_vblank(dev, intel_crtc->pipe); 255 intel_wait_for_vblank(dev, intel_crtc->pipe);
194 256
195 flags = intel_infoframe_index(frame);
196
197 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 257 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
258 val |= g4x_infoframe_index(frame);
198 259
199 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 260 val &= ~g4x_infoframe_enable(frame);
261 val |= VIDEO_DIP_ENABLE;
262
263 I915_WRITE(reg, val);
200 264
201 for (i = 0; i < len; i += 4) { 265 for (i = 0; i < len; i += 4) {
202 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 266 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
203 data++; 267 data++;
204 } 268 }
205 269
206 flags |= intel_infoframe_flags(frame); 270 val |= g4x_infoframe_enable(frame);
271 val &= ~VIDEO_DIP_FREQ_MASK;
272 val |= VIDEO_DIP_FREQ_VSYNC;
207 273
208 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 274 I915_WRITE(reg, val);
275}
276
277static void hsw_write_infoframe(struct drm_encoder *encoder,
278 struct dip_infoframe *frame)
279{
280 uint32_t *data = (uint32_t *)frame;
281 struct drm_device *dev = encoder->dev;
282 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
284 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
285 u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
286 unsigned int i, len = DIP_HEADER_SIZE + frame->len;
287 u32 val = I915_READ(ctl_reg);
288
289 if (data_reg == 0)
290 return;
291
292 intel_wait_for_vblank(dev, intel_crtc->pipe);
293
294 val &= ~hsw_infoframe_enable(frame);
295 I915_WRITE(ctl_reg, val);
296
297 for (i = 0; i < len; i += 4) {
298 I915_WRITE(data_reg + i, *data);
299 data++;
300 }
301
302 val |= hsw_infoframe_enable(frame);
303 I915_WRITE(ctl_reg, val);
209} 304}
210 305
211static void intel_set_infoframe(struct drm_encoder *encoder, 306static void intel_set_infoframe(struct drm_encoder *encoder,
@@ -220,7 +315,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
220 intel_hdmi->write_infoframe(encoder, frame); 315 intel_hdmi->write_infoframe(encoder, frame);
221} 316}
222 317
223static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 318void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
224 struct drm_display_mode *adjusted_mode) 319 struct drm_display_mode *adjusted_mode)
225{ 320{
226 struct dip_infoframe avi_if = { 321 struct dip_infoframe avi_if = {
@@ -235,7 +330,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
235 intel_set_infoframe(encoder, &avi_if); 330 intel_set_infoframe(encoder, &avi_if);
236} 331}
237 332
238static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) 333void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
239{ 334{
240 struct dip_infoframe spd_if; 335 struct dip_infoframe spd_if;
241 336
@@ -256,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
256{ 351{
257 struct drm_device *dev = encoder->dev; 352 struct drm_device *dev = encoder->dev;
258 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
259 struct drm_crtc *crtc = encoder->crtc; 354 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
261 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 355 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
262 u32 sdvox; 356 u32 sdvox;
263 357
@@ -431,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
431 525
432static int 526static int
433intel_hdmi_set_property(struct drm_connector *connector, 527intel_hdmi_set_property(struct drm_connector *connector,
434 struct drm_property *property, 528 struct drm_property *property,
435 uint64_t val) 529 uint64_t val)
436{ 530{
437 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 531 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
438 struct drm_i915_private *dev_priv = connector->dev->dev_private; 532 struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -491,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
491 kfree(connector); 585 kfree(connector);
492} 586}
493 587
588static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
589 .dpms = intel_ddi_dpms,
590 .mode_fixup = intel_hdmi_mode_fixup,
591 .prepare = intel_encoder_prepare,
592 .mode_set = intel_ddi_mode_set,
593 .commit = intel_encoder_commit,
594};
595
494static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 596static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
495 .dpms = intel_hdmi_dpms, 597 .dpms = intel_hdmi_dpms,
496 .mode_fixup = intel_hdmi_mode_fixup, 598 .mode_fixup = intel_hdmi_mode_fixup,
@@ -580,24 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
580 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 682 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
581 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 683 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
582 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 684 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
685 } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
686 DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
687 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
688 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
689 intel_hdmi->ddi_port = PORT_B;
690 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
691 } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
692 DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
693 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
694 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
695 intel_hdmi->ddi_port = PORT_C;
696 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
697 } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
698 DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
699 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
700 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
701 intel_hdmi->ddi_port = PORT_D;
702 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
703 } else {
704 /* If we got an unknown sdvox_reg, things are pretty much broken
705 * in a way that we should let the kernel know about it */
706 BUG();
583 } 707 }
584 708
585 intel_hdmi->sdvox_reg = sdvox_reg; 709 intel_hdmi->sdvox_reg = sdvox_reg;
586 710
587 if (!HAS_PCH_SPLIT(dev)) { 711 if (!HAS_PCH_SPLIT(dev)) {
588 intel_hdmi->write_infoframe = i9xx_write_infoframe; 712 intel_hdmi->write_infoframe = g4x_write_infoframe;
589 I915_WRITE(VIDEO_DIP_CTL, 0); 713 I915_WRITE(VIDEO_DIP_CTL, 0);
590 } else if (IS_VALLEYVIEW(dev)) { 714 } else if (IS_VALLEYVIEW(dev)) {
591 intel_hdmi->write_infoframe = vlv_write_infoframe; 715 intel_hdmi->write_infoframe = vlv_write_infoframe;
592 for_each_pipe(i) 716 for_each_pipe(i)
593 I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); 717 I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
594 } else { 718 } else if (IS_HASWELL(dev)) {
595 intel_hdmi->write_infoframe = ironlake_write_infoframe; 719 /* FIXME: Haswell has a new set of DIP frame registers, but we are
720 * just doing the minimal required for HDMI to work at this stage.
721 */
722 intel_hdmi->write_infoframe = hsw_write_infoframe;
723 for_each_pipe(i)
724 I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
725 } else if (HAS_PCH_IBX(dev)) {
726 intel_hdmi->write_infoframe = ibx_write_infoframe;
727 for_each_pipe(i)
728 I915_WRITE(TVIDEO_DIP_CTL(i), 0);
729 } else {
730 intel_hdmi->write_infoframe = cpt_write_infoframe;
596 for_each_pipe(i) 731 for_each_pipe(i)
597 I915_WRITE(TVIDEO_DIP_CTL(i), 0); 732 I915_WRITE(TVIDEO_DIP_CTL(i), 0);
598 } 733 }
599 734
600 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 735 if (IS_HASWELL(dev))
736 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
737 else
738 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
601 739
602 intel_hdmi_add_properties(intel_hdmi, connector); 740 intel_hdmi_add_properties(intel_hdmi, connector);
603 741
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index e04255edc801..4a9707dd0f9c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -490,6 +490,10 @@ int intel_setup_gmbus(struct drm_device *dev)
490 /* By default use a conservative clock rate */ 490 /* By default use a conservative clock rate */
491 bus->reg0 = port | GMBUS_RATE_100KHZ; 491 bus->reg0 = port | GMBUS_RATE_100KHZ;
492 492
493 /* gmbus seems to be broken on i830 */
494 if (IS_I830(dev))
495 bus->force_bit = true;
496
493 intel_gpio_setup(bus, port); 497 intel_gpio_setup(bus, port);
494 } 498 }
495 499
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 43892341079a..8e79ff67ec98 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1803,8 +1803,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1803 enabled |= 2; 1803 enabled |= 2;
1804 } 1804 }
1805 1805
1806 /* IVB has 3 pipes */ 1806 if ((dev_priv->num_pipe == 3) &&
1807 if (IS_IVYBRIDGE(dev) &&
1808 g4x_compute_wm0(dev, 2, 1807 g4x_compute_wm0(dev, 2,
1809 &sandybridge_display_wm_info, latency, 1808 &sandybridge_display_wm_info, latency,
1810 &sandybridge_cursor_wm_info, latency, 1809 &sandybridge_cursor_wm_info, latency,
@@ -1884,6 +1883,33 @@ static void sandybridge_update_wm(struct drm_device *dev)
1884 cursor_wm); 1883 cursor_wm);
1885} 1884}
1886 1885
1886static void
1887haswell_update_linetime_wm(struct drm_device *dev, int pipe,
1888 struct drm_display_mode *mode)
1889{
1890 struct drm_i915_private *dev_priv = dev->dev_private;
1891 u32 temp;
1892
1893 temp = I915_READ(PIPE_WM_LINETIME(pipe));
1894 temp &= ~PIPE_WM_LINETIME_MASK;
1895
1896	/* The watermark is computed based on how long it takes to fill a single
1897	 * row at the given clock rate, multiplied by 8.
1898	 */
1899 temp |= PIPE_WM_LINETIME_TIME(
1900 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
1901
1902 /* IPS watermarks are only used by pipe A, and are ignored by
1903 * pipes B and C. They are calculated similarly to the common
1904 * linetime values, except that we are using CD clock frequency
1905 * in MHz instead of pixel rate for the division.
1906 *
1907 * This is a placeholder for the IPS watermark calculation code.
1908 */
1909
1910 I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
1911}
1912
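For reference, a worked example of the linetime value written above (a sketch with hypothetical mode numbers): a mode with crtc_hdisplay = 1920 and a 148500 kHz pixel clock gives

	/* (1920 * 1000 / 148500) * 8 = 12 * 8 = 96, which is the value
	 * PIPE_WM_LINETIME_TIME() encodes into the register.
	 */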
1887static bool 1913static bool
1888sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, 1914sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1889 uint32_t sprite_width, int pixel_size, 1915 uint32_t sprite_width, int pixel_size,
@@ -2079,6 +2105,15 @@ void intel_update_watermarks(struct drm_device *dev)
2079 dev_priv->display.update_wm(dev); 2105 dev_priv->display.update_wm(dev);
2080} 2106}
2081 2107
2108void intel_update_linetime_watermarks(struct drm_device *dev,
2109 int pipe, struct drm_display_mode *mode)
2110{
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112
2113 if (dev_priv->display.update_linetime_wm)
2114 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2115}
2116
2082void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 2117void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2083 uint32_t sprite_width, int pixel_size) 2118 uint32_t sprite_width, int pixel_size)
2084{ 2119{
@@ -2291,6 +2326,7 @@ int intel_enable_rc6(const struct drm_device *dev)
2291 2326
2292void gen6_enable_rps(struct drm_i915_private *dev_priv) 2327void gen6_enable_rps(struct drm_i915_private *dev_priv)
2293{ 2328{
2329 struct intel_ring_buffer *ring;
2294 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 2330 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2295 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2331 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2296 u32 pcu_mbox, rc6_mask = 0; 2332 u32 pcu_mbox, rc6_mask = 0;
@@ -2325,8 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
2325 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 2361 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2326 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 2362 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2327 2363
2328 for (i = 0; i < I915_NUM_RINGS; i++) 2364 for_each_ring(ring, dev_priv, i)
2329 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); 2365 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2330 2366
2331 I915_WRITE(GEN6_RC_SLEEP, 0); 2367 I915_WRITE(GEN6_RC_SLEEP, 0);
2332 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 2368 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -3560,6 +3596,37 @@ void intel_sanitize_pm(struct drm_device *dev)
3560 dev_priv->display.sanitize_pm(dev); 3596 dev_priv->display.sanitize_pm(dev);
3561} 3597}
3562 3598
3599/* Starting with Haswell, we have different power wells for
3600 * different parts of the GPU. This attempts to enable them all.
3601 */
3602void intel_init_power_wells(struct drm_device *dev)
3603{
3604 struct drm_i915_private *dev_priv = dev->dev_private;
3605 unsigned long power_wells[] = {
3606 HSW_PWR_WELL_CTL1,
3607 HSW_PWR_WELL_CTL2,
3608 HSW_PWR_WELL_CTL4
3609 };
3610 int i;
3611
3612 if (!IS_HASWELL(dev))
3613 return;
3614
3615 mutex_lock(&dev->struct_mutex);
3616
3617 for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
3618 int well = I915_READ(power_wells[i]);
3619
3620 if ((well & HSW_PWR_WELL_STATE) == 0) {
3621 I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
3622 if (wait_for(I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE, 20))
3623 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3624 }
3625 }
3626
3627 mutex_unlock(&dev->struct_mutex);
3628}
3629
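The enable path above is a read-modify-write of the request bit followed by polling the status bit with a timeout. A generic sketch of that pattern, using a toy register file and placeholder accessors instead of the driver's I915_READ/I915_WRITE and wait_for helpers:

        #include <stdint.h>

        /* Toy register file indexed by a small register number; it only
         * stands in for the real MMIO helpers in this sketch. */
        static uint32_t example_regs[64];
        static uint32_t example_read(uint32_t reg)              { return example_regs[reg]; }
        static void example_write(uint32_t reg, uint32_t val)   { example_regs[reg] = val; }

        /* Request the well, then poll its status bit up to 'tries' times. */
        static int example_enable_power_well(uint32_t reg, uint32_t request_bit,
                                             uint32_t status_bit, unsigned int tries)
        {
                example_write(reg, example_read(reg) | request_bit);

                while (tries--) {
                        if (example_read(reg) & status_bit)
                                return 0;       /* well reports powered up */
                        /* real code would sleep or cpu_relax() between polls */
                }
                return -1;                      /* timed out */
        }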
3563/* Set up chip specific power management-related functions */ 3630/* Set up chip specific power management-related functions */
3564void intel_init_pm(struct drm_device *dev) 3631void intel_init_pm(struct drm_device *dev)
3565{ 3632{
@@ -3655,6 +3722,18 @@ void intel_init_pm(struct drm_device *dev)
3655 } 3722 }
3656 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 3723 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3657 dev_priv->display.sanitize_pm = gen6_sanitize_pm; 3724 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3725 } else if (IS_HASWELL(dev)) {
3726 if (SNB_READ_WM0_LATENCY()) {
3727 dev_priv->display.update_wm = sandybridge_update_wm;
3728 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3729 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
3730 } else {
3731 DRM_DEBUG_KMS("Failed to read display plane latency. "
3732 "Disable CxSR\n");
3733 dev_priv->display.update_wm = NULL;
3734 }
3735 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3736 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3658 } else 3737 } else
3659 dev_priv->display.update_wm = NULL; 3738 dev_priv->display.update_wm = NULL;
3660 } else if (IS_VALLEYVIEW(dev)) { 3739 } else if (IS_VALLEYVIEW(dev)) {
@@ -3708,5 +3787,10 @@ void intel_init_pm(struct drm_device *dev)
3708 else 3787 else
3709 dev_priv->display.get_fifo_size = i830_get_fifo_size; 3788 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3710 } 3789 }
3790
3791 /* We attempt to init the necessary power wells early during initialization,
3792 * so that the subsystems that expect power to be enabled can work.
3793 */
3794 intel_init_power_wells(dev);
3711} 3795}
3712 3796
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index baba75714578..55d3da26bae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -119,6 +119,12 @@ struct intel_ring_buffer {
119 void *private; 119 void *private;
120}; 120};
121 121
122static inline bool
123intel_ring_initialized(struct intel_ring_buffer *ring)
124{
125 return ring->obj != NULL;
126}
127
122static inline unsigned 128static inline unsigned
123intel_ring_flag(struct intel_ring_buffer *ring) 129intel_ring_flag(struct intel_ring_buffer *ring)
124{ 130{
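intel_ring_initialized() gives callers such as the gen6_enable_rps() hunk earlier a cheap way to skip rings that were never set up; the for_each_ring() iterator used there presumably builds on a check like this. The macro below is only a guess at such an iterator, written against a hypothetical ring array rather than the driver's actual definition:

        struct example_ring {
                void *obj;              /* NULL until the ring has been initialized */
                unsigned int mmio_base;
        };

        /* Visit only the initialized rings in a fixed-size array; the empty
         * if/else keeps the macro safe to use with a single trailing statement. */
        #define example_for_each_ring(ring, rings, count, i)                    \
                for ((i) = 0; (i) < (count); (i)++)                             \
                        if (((ring) = &(rings)[(i)])->obj == NULL) {} else

        static void example_touch_rings(struct example_ring *rings, unsigned int count)
        {
                struct example_ring *ring;
                unsigned int i;

                example_for_each_ring(ring, rings, count, i)
                        ring->mmio_base += 0;   /* placeholder for a per-ring register write */
        }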
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7d3f238e8265..125228e77c50 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -887,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
887 }; 887 };
888 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; 888 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
889 uint8_t set_buf_index[2] = { 1, 0 }; 889 uint8_t set_buf_index[2] = { 1, 0 };
890 uint64_t *data = (uint64_t *)&avi_if; 890 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
891 uint64_t *data = (uint64_t *)sdvo_data;
891 unsigned i; 892 unsigned i;
892 893
893 intel_dip_infoframe_csum(&avi_if); 894 intel_dip_infoframe_csum(&avi_if);
894 895
896 /* The SDVO spec says that the ECC is handled by the hw, and it looks
897 * like we must not send the ECC field either. */
898 memcpy(sdvo_data, &avi_if, 3);
899 sdvo_data[3] = avi_if.checksum;
900 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
901
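The buffer assembled above ends up 17 bytes long: three header bytes, the checksum, then the 13-byte AVI payload, with the ECC byte omitted because the hardware inserts it. A small standalone sketch of that packing, using a hypothetical frame layout (sizes assumed to match a standard AVI infoframe) rather than the driver's avi_if structure:

        #include <stdint.h>
        #include <string.h>

        /* Hypothetical frame layout: 3 header bytes, checksum, 13-byte AVI body. */
        struct example_avi_infoframe {
                uint8_t type, version, len;
                uint8_t checksum;
                uint8_t avi[13];
        };

        static size_t example_pack_for_sdvo(const struct example_avi_infoframe *frame,
                                            uint8_t out[4 + 13])
        {
                memcpy(out, frame, 3);                          /* type, version, length */
                out[3] = frame->checksum;                       /* checksum, no ECC byte */
                memcpy(&out[4], frame->avi, sizeof(frame->avi));/* 13-byte AVI body */
                return 4 + sizeof(frame->avi);                  /* 17 bytes total */
        }

The 17-byte result is then pushed to the hardware in 8-byte chunks, as in the loop that follows.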
895 if (!intel_sdvo_set_value(intel_sdvo, 902 if (!intel_sdvo_set_value(intel_sdvo,
896 SDVO_CMD_SET_HBUF_INDEX, 903 SDVO_CMD_SET_HBUF_INDEX,
897 set_buf_index, 2)) 904 set_buf_index, 2))
898 return false; 905 return false;
899 906
900 for (i = 0; i < sizeof(avi_if); i += 8) { 907 for (i = 0; i < sizeof(sdvo_data); i += 8) {
901 if (!intel_sdvo_set_value(intel_sdvo, 908 if (!intel_sdvo_set_value(intel_sdvo,
902 SDVO_CMD_SET_HBUF_DATA, 909 SDVO_CMD_SET_HBUF_DATA,
903 data, 8)) 910 data, 8))