-rw-r--r--  drivers/gpu/drm/drm_edid.c                  |  19
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |  13
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c            |  45
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 355
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          |  21
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     | 126
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |  30
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |  31
-rw-r--r--  include/drm/drm_crtc.h                      |   1
24 files changed, 591 insertions, 280 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 484c36a4b7a5..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2079,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+	uint8_t i;
+
+	for (i = 0; i < drm_num_cea_modes; i++)
+		if (drm_mode_equal(mode, &edid_cea_modes[i]))
+			return i + 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
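[Editor's note, not part of the patch: drm_mode_cea_vic() added above maps a display mode to its CEA-861 VIC index, returning 0 for non-CEA modes. A minimal, hypothetical caller (the helper below is illustrative only and not taken from this series) could look like:

static bool mode_is_cea(const struct drm_display_mode *mode)
{
	uint8_t vic = drm_mode_cea_vic(mode);

	/* Non-zero VIC means the mode matched one of the CEA-861 modes. */
	if (vic)
		DRM_DEBUG_KMS("mode has CEA-861 VIC %u\n", vic);
	return vic != 0;
}
]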
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4568e7d8a060..e6a11ca85eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 			   pipe, plane);
 	} else {
-		if (!work->pending) {
+		if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 			seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				seq_printf(m, "Stall check enabled, ");
 			else
 				seq_printf(m, "Stall check waiting for page flip ioctl, ");
-			seq_printf(m, "%d prepares\n", work->pending);
+			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
 			if (work->old_fb_obj) {
 				struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
 	if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
 		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		seq_printf(m, " SYNC_0: 0x%08x\n",
-			   error->semaphore_mboxes[ring][0]);
-		seq_printf(m, " SYNC_1: 0x%08x\n",
-			   error->semaphore_mboxes[ring][1]);
+		seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][0],
+			   error->semaphore_seqno[ring][0]);
+		seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][1],
+			   error->semaphore_seqno[ring][1]);
 	}
 	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
 	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a48e4910ea2c..8f63cd5de4b4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -141,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 
 	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + 8);
+	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
 	if (ring->space < 0)
 		ring->space += ring->size;
 
@@ -592,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 static int i915_quiescent(struct drm_device *dev)
 {
-	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_idle(ring);
+	return intel_ring_idle(LP_RING(dev->dev_private));
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1045,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_ring_buffer *ring;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
@@ -1065,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
+	ring = LP_RING(dev_priv);
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->dri1.gfx_hws_cpu_addr =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6745c7f976db..530db83ef320 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -554,8 +554,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-			ironlake_init_pch_refclk(dev);
+		intel_init_pch_refclk(dev);
 
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
@@ -564,7 +563,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_modeset_init_hw(dev);
-		intel_modeset_setup_hw_state(dev);
+		intel_modeset_setup_hw_state(dev, false);
 		drm_irq_install(dev);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87c06f97fa89..557843dd4b2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -197,6 +197,7 @@ struct drm_i915_error_state {
 	u32 instdone[I915_NUM_RINGS];
 	u32 acthd[I915_NUM_RINGS];
 	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
 	/* our own tracking of ring head and tail */
 	u32 cpu_ring_head[I915_NUM_RINGS];
@@ -381,6 +382,11 @@ enum intel_pch {
 	PCH_LPT,	/* Lynxpoint PCH */
 };
 
+enum intel_sbi_destination {
+	SBI_ICLK,
+	SBI_MPHY,
+};
+
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -909,6 +915,8 @@ typedef struct drm_i915_private {
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
 
+	bool fdi_rx_polarity_reversed;
+
 	struct i915_suspend_saved_registers regfile;
 
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1417,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring,
-				    u32 seqno);
+				    struct intel_ring_buffer *ring);
 
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
@@ -1436,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1652,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 bool force_restore);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0016bb65631..c1f691958f89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1696,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
-	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -1857,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring,
-			       u32 seqno)
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1922,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno = dev_priv->next_seqno;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, j;
 
-	/* reserve 0 for non-seqno */
-	if (++dev_priv->next_seqno == 0)
-		dev_priv->next_seqno = 1;
+	/* The hardware uses various monotonic 32-bit counters, if we
+	 * detect that they will wraparound we need to idle the GPU
+	 * and reset those counters.
+	 */
+	ret = 0;
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ret |= ring->sync_seqno[j] != 0;
+	}
+	if (ret == 0)
+		return ret;
+
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev);
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ring->sync_seqno[j] = 0;
+	}
 
-	return seqno;
+	return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-	if (ring->outstanding_lazy_request == 0)
-		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_handle_seqno_wrap(dev);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
 
-	return ring->outstanding_lazy_request;
+	*seqno = dev_priv->next_seqno++;
+	return 0;
 }
 
 int
@@ -1952,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 request_ring_position;
-	u32 seqno;
 	int was_empty;
 	int ret;
 
@@ -1971,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	if (request == NULL)
 		return -ENOMEM;
 
-	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
@@ -1980,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 */
 	request_ring_position = intel_ring_get_tail(ring);
 
-	ret = ring->add_request(ring, &seqno);
+	ret = ring->add_request(ring);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	trace_i915_gem_request_add(ring, seqno);
-
-	request->seqno = seqno;
+	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->tail = request_ring_position;
 	request->emitted_jiffies = jiffies;
@@ -2006,6 +2034,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;
 
 	if (!dev_priv->mm.suspended) {
@@ -2022,7 +2051,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 	}
 
 	if (out_seqno)
-		*out_seqno = seqno;
+		*out_seqno = request->seqno;
 	return 0;
 }
 
@@ -2120,7 +2149,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
-	int i;
 
 	if (list_empty(&ring->request_list))
 		return;
@@ -2129,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	seqno = ring->get_seqno(ring, true);
 
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-		if (seqno >= ring->sync_seqno[i])
-			ring->sync_seqno[i] = 0;
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2377,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
-		from->sync_seqno[idx] = seqno;
+		/* We use last_read_seqno because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->sync_seqno[idx] = obj->last_read_seqno;
 
 	return ret;
 }
@@ -2460,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-	if (list_empty(&ring->active_list))
-		return 0;
-
-	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2480,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		ret = i915_ring_idle(ring);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
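[Editor's note, not part of the patch: with the rework above, seqno allocation moves from the per-ring i915_gem_next_request_seqno() to a fallible device-wide i915_gem_get_seqno(dev, &seqno). A hypothetical caller sketch, simplified and not taken from the series:

	u32 seqno;
	int ret;

	/* May idle the GPU via i915_gem_handle_seqno_wrap() and can fail. */
	ret = i915_gem_get_seqno(dev, &seqno);
	if (ret)
		return ret;
]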
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 0e510df80d73..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
-		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_gem_object_move_to_active(from_obj, ring, seqno);
+		i915_gem_object_move_to_active(from_obj, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 48e4317e72dc..ee8f97f0539e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct intel_ring_buffer *ring,
-				   u32 seqno)
+				   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -726,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		i915_gem_object_move_to_active(obj, ring, seqno);
+		i915_gem_object_move_to_active(obj, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = seqno;
+			obj->last_write_seqno = intel_ring_get_seqno(ring);
 			if (obj->pin_count) /* check for potential scanout */
 				intel_mark_fb_busy(obj);
 		}
@@ -789,7 +788,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
-	u32 seqno;
 	u32 mask;
 	u32 flags;
 	int ret, mode, i;
@@ -994,22 +992,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	seqno = i915_gem_next_request_seqno(ring);
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
-		if (seqno < ring->sync_seqno[i]) {
-			/* The GPU can not handle its semaphore value wrapping,
-			 * so every billion or so execbuffers, we need to stall
-			 * the GPU in order to reset the counters.
-			 */
-			ret = i915_gpu_idle(dev);
-			if (ret)
-				goto err;
-			i915_gem_retire_requests(dev);
-
-			BUG_ON(ring->sync_seqno[i]);
-		}
-	}
-
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1035,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, seqno, flags);
-
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -1060,7 +1040,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(&objects, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f7ac61ee1504..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -639,6 +639,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 
+#ifdef CONFIG_INTEL_IOMMU
+	dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
 	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
 	gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
 	dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2604867e6b7d..a4dc97f8b9f0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 			= I915_READ(RING_SYNC_0(ring->mmio_base));
 		error->semaphore_mboxes[ring->id][1]
 			= I915_READ(RING_SYNC_1(ring->mmio_base));
+		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 
-	if (work == NULL || work->pending || !work->enable_stall_check) {
+	if (work == NULL ||
+	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+	    !work->enable_stall_check) {
 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 97fbd9d1823b..3f75cfaf1c3f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3843,7 +3843,9 @@
 #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
 #define FDI_BC_BIFURCATION_SELECT (1 << 12)
 #define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
 
 #define _FDI_RXA_CHICKEN 0xc200c
 #define _FDI_RXB_CHICKEN 0xc2010
@@ -3915,6 +3917,7 @@
 #define FDI_FS_ERRC_ENABLE (1<<27)
 #define FDI_FE_ERRC_ENABLE (1<<26)
 #define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
 #define FDI_8BPC (0<<16)
 #define FDI_10BPC (1<<16)
 #define FDI_6BPC (2<<16)
@@ -4534,6 +4537,10 @@
 #define SBI_ADDR 0xC6000
 #define SBI_DATA 0xC6004
 #define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
 #define SBI_CTL_OP_CRRD (0x6<<8)
 #define SBI_CTL_OP_CRWR (0x7<<8)
 #define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4551,10 +4558,12 @@
 #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
 #define SBI_SSCCTL 0x020c
 #define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
 #define SBI_SSCCTL_DISABLE (1<<0)
 #define SBI_SSCAUXDIV6 0x0610
 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
 #define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE 0xC6020
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3bf51d58319d..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-	if (!IS_IVYBRIDGE(dev))
+	if (!HAS_L3_GPU_CACHE(dev))
 		return -EPERM;
 
 	if (offset % 4 != 0)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 331af3bc6894..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;
 
+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
+
 	return MODE_OK;
 }
 
@@ -793,4 +798,12 @@ void intel_crt_init(struct drm_device *dev)
 	crt->force_hotplug_required = 0;
 
 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity reversal bit or not, instead of relying on the BIOS.
+	 */
+	if (HAS_PCH_LPT(dev))
+		dev_priv->fdi_rx_polarity_reversed =
+		     !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
 }
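[Editor's note, not part of the patch: a worked example of the LPT lane check above, using the ironlake_get_lanes_required() helper introduced later in this series. For a 154000 kHz pixel clock at 24 bpp over the 270000 (x10 kHz) FDI link, bps = 154000 * 24 * 21 / 20 = 3880800, and 3880800 / (270000 * 8) + 1 = 2 lanes, which still fits the two FDI lanes available on LPT, so the mode is not rejected.]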
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 852012b6fc5b..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -138,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
 	DDI_BUF_EMP_800MV_3_5DB_HSW
 };
 
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	uint32_t reg = DDI_BUF_CTL(port);
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		udelay(1);
+		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+			return;
+	}
+	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
 
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
@@ -167,6 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	/* Enable the PCH Receiver FDI PLL */
 	rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
 		     ((intel_crtc->fdi_lanes - 1) << 19);
+	if (dev_priv->fdi_rx_polarity_reversed)
+		rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
 	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 	POSTING_READ(_FDI_RXA_CTL);
 	udelay(220);
@@ -231,18 +246,30 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 			return;
 		}
 
+		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
 		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
-		I915_WRITE(DP_TP_CTL(PORT_E),
-			   I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE);
+		temp = I915_READ(DP_TP_CTL(PORT_E));
+		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(PORT_E), temp);
+		POSTING_READ(DP_TP_CTL(PORT_E));
+
+		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
 		rx_ctl_val &= ~FDI_RX_ENABLE;
 		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+		POSTING_READ(_FDI_RXA_CTL);
 
 		/* Reset FDI_RX_MISC pwrdn lanes */
 		temp = I915_READ(_FDI_RXA_MISC);
 		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
 		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
 		I915_WRITE(_FDI_RXA_MISC, temp);
+		POSTING_READ(_FDI_RXA_MISC);
 	}
 
 	DRM_ERROR("FDI link training failed!\n");
@@ -1222,20 +1249,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
-				    enum port port)
-{
-	uint32_t reg = DDI_BUF_CTL(port);
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		udelay(1);
-		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
-			return;
-	}
-	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
-}
-
 static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index de51489de23c..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1506,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 /* SBI access */
 static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		enum intel_sbi_destination destination)
 {
 	unsigned long flags;
+	u32 tmp;
 
 	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		goto out_unlock;
 	}
 
-	I915_WRITE(SBI_ADDR,
-			(reg << 16));
-	I915_WRITE(SBI_DATA,
-			value);
-	I915_WRITE(SBI_CTL_STAT,
-			SBI_BUSY |
-			SBI_CTL_OP_CRWR);
+	I915_WRITE(SBI_ADDR, (reg << 16));
+	I915_WRITE(SBI_DATA, value);
+
+	if (destination == SBI_ICLK)
+		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	else
+		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
@@ -1536,23 +1538,25 @@ out_unlock:
 }
 
 static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+	       enum intel_sbi_destination destination)
 {
 	unsigned long flags;
 	u32 value = 0;
 
 	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		goto out_unlock;
 	}
 
-	I915_WRITE(SBI_ADDR,
-			(reg << 16));
-	I915_WRITE(SBI_CTL_STAT,
-			SBI_BUSY |
-			SBI_CTL_OP_CRRD);
+	I915_WRITE(SBI_ADDR, (reg << 16));
+
+	if (destination == SBI_ICLK)
+		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+	else
+		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
@@ -2424,18 +2428,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 				   FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-	flags |= FDI_PHASE_SYNC_OVR(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-	flags |= FDI_PHASE_SYNC_EN(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-	POSTING_READ(SOUTH_CHICKEN1);
-}
-
 static void ivb_modeset_global_resources(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2610,8 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2744,8 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2884,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 	udelay(100);
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-	POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2921,8 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	/* Ironlake workaround, disable clock pointer after downing FDI */
 	if (HAS_PCH_IBX(dev)) {
 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-	} else if (HAS_PCH_CPT(dev)) {
-		cpt_phase_pointer_disable(dev, pipe);
 	}
 
 	/* still set train pattern 1 */
@@ -3024,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 
 	/* Disable SSCCTL */
 	intel_sbi_write(dev_priv, SBI_SSCCTL6,
-			intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-				SBI_SSCCTL_DISABLE);
+			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+				SBI_SSCCTL_DISABLE,
+			SBI_ICLK);
 
 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
 	if (crtc->mode.clock == 20000) {
@@ -3066,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 			phaseinc);
 
 	/* Program SSCDIVINTPHASE6 */
-	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
-	intel_sbi_write(dev_priv,
-			SBI_SSCDIVINTPHASE6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
 	/* Program SSCAUXDIV */
-	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-	intel_sbi_write(dev_priv,
-			SBI_SSCAUXDIV6,
-			temp);
-
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
 	/* Enable modulator and associated divider */
-	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
 	temp &= ~SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv,
-			SBI_SSCCTL6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
 	/* Wait for initialization time */
 	udelay(24);
@@ -4878,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	return ret;
 }
 
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4995,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
 	}
 }
 
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+	bool has_vga = false;
+	bool is_sdv = false;
+	u32 tmp;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			has_vga = true;
+			break;
+		}
+	}
+
+	if (!has_vga)
+		return;
+
+	/* XXX: Rip out SDV support once Haswell ships for real. */
+	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+		is_sdv = true;
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_DISABLE;
+	tmp |= SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	udelay(24);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	if (!is_sdv) {
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+			DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+					FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+				       100))
+			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp &= ~(0xFF << 24);
+	tmp |= (0x12 << 24);
+	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+		tmp &= ~(0x3 << 6);
+		tmp |= (1 << 6) | (1 << 0);
+		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+	}
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+		tmp |= 0x7FFF;
+		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+	}
+
+	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+	tmp |= SBI_DBUFF0_ENABLE;
+	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ironlake_init_pch_refclk(dev);
+	else if (HAS_PCH_LPT(dev))
+		lpt_init_pch_refclk(dev);
+}
+
 static int ironlake_get_refclk(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -5239,6 +5380,17 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
 	}
 }
 
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+	/*
+	 * Account for spread spectrum to avoid
+	 * oversubscribing the link. Max center spread
+	 * is 2.5%; use 5% for safety's sake.
+	 */
+	u32 bps = target_clock * bpp * 21 / 20;
+	return bps / (link_bw * 8) + 1;
+}
+
 static void ironlake_set_m_n(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode)
@@ -5292,15 +5444,9 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
 	else
 		target_clock = adjusted_mode->clock;
 
-	if (!lane) {
-		/*
-		 * Account for spread spectrum to avoid
-		 * oversubscribing the link. Max center spread
-		 * is 2.5%; use 5% for safety's sake.
-		 */
-		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-		lane = bps / (link_bw * 8) + 1;
-	}
+	if (!lane)
+		lane = ironlake_get_lanes_required(target_clock, link_bw,
+						   intel_crtc->bpp);
 
 	intel_crtc->fdi_lanes = lane;
 
@@ -6940,11 +7086,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
-	if (work == NULL || !work->pending) {
+
+	/* Ensure we don't miss a work->pending update ... */
+	smp_rmb();
+
+	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
 
+	/* and that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
+
 	intel_crtc->unpin_work = NULL;
 
 	if (work->event)
@@ -6988,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 	unsigned long flags;
 
+	/* NB: An MMIO update of the plane base pointer will also
+	 * generate a page-flip completion irq, i.e. every modeset
+	 * is also accompanied by a spurious intel_prepare_page_flip().
+	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work) {
-		if ((++intel_crtc->unpin_work->pending) > 1)
-			DRM_ERROR("Prepared flip multiple times\n");
-	} else {
-		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-	}
+	if (intel_crtc->unpin_work)
+		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+	/* Ensure that the work item is consistent when activating it ... */
+	smp_wmb();
+	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+	/* and that it is marked active as soon as the irq could fire. */
+	smp_wmb();
+}
+
 static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -7031,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, fb->pitches[0]);
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -7071,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, MI_NOOP);
 
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -7117,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -7159,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -7213,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, (MI_NOOP));
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -8394,8 +8565,7 @@ static void intel_setup_outputs(struct drm_device *dev)
8394 intel_encoder_clones(encoder); 8565 intel_encoder_clones(encoder);
8395 } 8566 }
8396 8567
8397 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 8568 intel_init_pch_refclk(dev);
8398 ironlake_init_pch_refclk(dev);
8399 8569
8400 drm_helper_move_panel_connectors_to_head(dev); 8570 drm_helper_move_panel_connectors_to_head(dev);
8401} 8571}
@@ -8999,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
8999 9169
9000/* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9170/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9001 * and i915 state tracking structures. */ 9171 * and i915 state tracking structures. */
9002void intel_modeset_setup_hw_state(struct drm_device *dev) 9172void intel_modeset_setup_hw_state(struct drm_device *dev,
9173 bool force_restore)
9003{ 9174{
9004 struct drm_i915_private *dev_priv = dev->dev_private; 9175 struct drm_i915_private *dev_priv = dev->dev_private;
9005 enum pipe pipe; 9176 enum pipe pipe;
@@ -9098,7 +9269,15 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
9098 intel_sanitize_crtc(crtc); 9269 intel_sanitize_crtc(crtc);
9099 } 9270 }
9100 9271
9101 intel_modeset_update_staged_output_state(dev); 9272 if (force_restore) {
9273 for_each_pipe(pipe) {
9274 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9275 intel_set_mode(&crtc->base, &crtc->base.mode,
9276 crtc->base.x, crtc->base.y, crtc->base.fb);
9277 }
9278 } else {
9279 intel_modeset_update_staged_output_state(dev);
9280 }
9102 9281
9103 intel_modeset_check_state(dev); 9282 intel_modeset_check_state(dev);
9104 9283
@@ -9111,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
9111 9290
9112 intel_setup_overlay(dev); 9291 intel_setup_overlay(dev);
9113 9292
9114 intel_modeset_setup_hw_state(dev); 9293 intel_modeset_setup_hw_state(dev, false);
9115} 9294}
9116 9295
9117void intel_modeset_cleanup(struct drm_device *dev) 9296void intel_modeset_cleanup(struct drm_device *dev)
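The intel_display.c hunks above replace the bare unpin_work->pending counter with a small three-state machine (INTEL_FLIP_INACTIVE -> PENDING -> COMPLETE) fenced by smp_wmb()/smp_rmb(), so the spurious completion interrupt raised by an MMIO plane-base update cannot be mistaken for the flip that was just queued. Below is a stand-alone user-space sketch of that idea; it uses C11 sequentially-consistent atomics in place of the explicit barriers, and all names are illustrative rather than the driver's.

/* flip_state.c -- illustrative model only, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

enum { FLIP_INACTIVE = 0, FLIP_PENDING = 1, FLIP_COMPLETE = 2 };

static atomic_int pending = FLIP_INACTIVE;

/* The flip commands have been written into the ring: arm the state machine. */
static void mark_flip_active(void)
{
	atomic_store(&pending, FLIP_PENDING);
}

/* "Prepare" interrupt: advance the state only if a flip is actually armed,
 * mirroring atomic_inc_not_zero() -- a spurious irq while INACTIVE (e.g.
 * from an MMIO base update during a modeset) is simply ignored. */
static void prepare_page_flip(void)
{
	int cur = atomic_load(&pending);

	while (cur != FLIP_INACTIVE &&
	       !atomic_compare_exchange_weak(&pending, &cur, cur + 1))
		;	/* retry on contention */
}

/* Vblank handler: only finish work whose flip has really completed. */
static void finish_page_flip(void)
{
	if (atomic_load(&pending) < FLIP_COMPLETE) {
		puts("no completed flip, ignoring");
		return;
	}
	atomic_store(&pending, FLIP_INACTIVE);
	puts("flip finished, unpin work would run here");
}

int main(void)
{
	prepare_page_flip();	/* spurious irq before any flip: ignored */
	finish_page_flip();	/* nothing to finish */
	mark_flip_active();	/* flip queued in the ring */
	prepare_page_flip();	/* real irq: PENDING -> COMPLETE */
	finish_page_flip();	/* completes */
	return 0;
}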
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 522061ca0685..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -401,7 +401,10 @@ struct intel_unpin_work {
401 struct drm_i915_gem_object *old_fb_obj; 401 struct drm_i915_gem_object *old_fb_obj;
402 struct drm_i915_gem_object *pending_flip_obj; 402 struct drm_i915_gem_object *pending_flip_obj;
403 struct drm_pending_vblank_event *event; 403 struct drm_pending_vblank_event *event;
404 int pending; 404 atomic_t pending;
405#define INTEL_FLIP_INACTIVE 0
406#define INTEL_FLIP_PENDING 1
407#define INTEL_FLIP_COMPLETE 2
405 bool enable_stall_check; 408 bool enable_stall_check;
406}; 409};
407 410
@@ -556,6 +559,7 @@ intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
556 enum pipe pipe); 559 enum pipe pipe);
557extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 560extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
558extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 561extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
562extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
559 563
560struct intel_load_detect_pipe { 564struct intel_load_detect_pipe {
561 struct drm_framebuffer *release_fb; 565 struct drm_framebuffer *release_fb;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5c279b48df97..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -340,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
342 342
343 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
344
343 intel_set_infoframe(encoder, &avi_if); 345 intel_set_infoframe(encoder, &avi_if);
344} 346}
345 347
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 81502e8be26b..b9a660a53677 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -532,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
532 dev_priv->modeset_on_lid = 0; 532 dev_priv->modeset_on_lid = 0;
533 533
534 mutex_lock(&dev->mode_config.mutex); 534 mutex_lock(&dev->mode_config.mutex);
535 intel_modeset_check_state(dev); 535 intel_modeset_setup_hw_state(dev, true);
536 mutex_unlock(&dev->mode_config.mutex); 536 mutex_unlock(&dev->mode_config.mutex);
537 537
538 return NOTIFY_OK; 538 return NOTIFY_OK;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c758ad277473..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,8 +130,9 @@ static int is_backlight_combination_mode(struct drm_device *dev)
130 return 0; 130 return 0;
131} 131}
132 132
133static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 133static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
134{ 134{
135 struct drm_i915_private *dev_priv = dev->dev_private;
135 u32 val; 136 u32 val;
136 137
 137 /* Restore the CTL value if it was lost, e.g. after a GPU reset */ 138
@@ -141,21 +142,22 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
141 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { 142 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
142 dev_priv->regfile.saveBLC_PWM_CTL2 = val; 143 dev_priv->regfile.saveBLC_PWM_CTL2 = val;
143 } else if (val == 0) { 144 } else if (val == 0) {
144 I915_WRITE(BLC_PWM_PCH_CTL2,
145 dev_priv->regfile.saveBLC_PWM_CTL2);
146 val = dev_priv->regfile.saveBLC_PWM_CTL2; 145 val = dev_priv->regfile.saveBLC_PWM_CTL2;
146 I915_WRITE(BLC_PWM_PCH_CTL2, val);
147 } 147 }
148 } else { 148 } else {
149 val = I915_READ(BLC_PWM_CTL); 149 val = I915_READ(BLC_PWM_CTL);
150 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { 150 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
151 dev_priv->regfile.saveBLC_PWM_CTL = val; 151 dev_priv->regfile.saveBLC_PWM_CTL = val;
152 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 152 if (INTEL_INFO(dev)->gen >= 4)
153 dev_priv->regfile.saveBLC_PWM_CTL2 =
154 I915_READ(BLC_PWM_CTL2);
153 } else if (val == 0) { 155 } else if (val == 0) {
154 I915_WRITE(BLC_PWM_CTL,
155 dev_priv->regfile.saveBLC_PWM_CTL);
156 I915_WRITE(BLC_PWM_CTL2,
157 dev_priv->regfile.saveBLC_PWM_CTL2);
158 val = dev_priv->regfile.saveBLC_PWM_CTL; 156 val = dev_priv->regfile.saveBLC_PWM_CTL;
157 I915_WRITE(BLC_PWM_CTL, val);
158 if (INTEL_INFO(dev)->gen >= 4)
159 I915_WRITE(BLC_PWM_CTL2,
160 dev_priv->regfile.saveBLC_PWM_CTL2);
159 } 161 }
160 } 162 }
161 163
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
164 166
165static u32 _intel_panel_get_max_backlight(struct drm_device *dev) 167static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
166{ 168{
167 struct drm_i915_private *dev_priv = dev->dev_private;
168 u32 max; 169 u32 max;
169 170
170 max = i915_read_blc_pwm_ctl(dev_priv); 171 max = i915_read_blc_pwm_ctl(dev);
171 172
172 if (HAS_PCH_SPLIT(dev)) { 173 if (HAS_PCH_SPLIT(dev)) {
173 max >>= 16; 174 max >>= 16;
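The intel_panel.c hunk reorganizes a simple idea: remember BLC_PWM_CTL (and, on gen4+ only, BLC_PWM_CTL2) the first time it is read, and if a later read comes back as zero, typically after a GPU reset, write the saved value back before using it. A minimal user-space model of the save-once/restore-if-lost part follows; the register access is stubbed out and the names are invented for the example.

/* blc_restore.c -- illustrative model only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_reg = 0x12345678;	/* stand-in for BLC_PWM_CTL */
static uint32_t saved;			/* stand-in for the regfile save slot */

static uint32_t read_blc_pwm_ctl(void)
{
	uint32_t val = hw_reg;

	if (saved == 0) {
		saved = val;		/* first read: remember the value */
	} else if (val == 0) {
		val = saved;		/* register lost (e.g. GPU reset): restore */
		hw_reg = val;
	}
	return val;
}

int main(void)
{
	printf("0x%08x\n", read_blc_pwm_ctl());	/* saves 0x12345678 */
	hw_reg = 0;				/* simulate a reset clearing the register */
	printf("0x%08x\n", read_blc_pwm_ctl());	/* restored from the saved copy */
	return 0;
}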
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9e619ada0567..496caa73eb70 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1325,10 +1325,11 @@ static void valleyview_update_wm(struct drm_device *dev)
1325 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1325 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1326 planea_wm); 1326 planea_wm);
1327 I915_WRITE(DSPFW2, 1327 I915_WRITE(DSPFW2,
1328 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1328 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1329 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1329 (cursora_wm << DSPFW_CURSORA_SHIFT));
1330 I915_WRITE(DSPFW3, 1330 I915_WRITE(DSPFW3,
1331 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); 1331 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1332 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1332} 1333}
1333 1334
1334static void g4x_update_wm(struct drm_device *dev) 1335static void g4x_update_wm(struct drm_device *dev)
@@ -1374,11 +1375,11 @@ static void g4x_update_wm(struct drm_device *dev)
1374 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1375 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1375 planea_wm); 1376 planea_wm);
1376 I915_WRITE(DSPFW2, 1377 I915_WRITE(DSPFW2,
1377 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1378 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1378 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1379 (cursora_wm << DSPFW_CURSORA_SHIFT));
1379 /* HPLL off in SR has some issues on G4x... disable it */ 1380 /* HPLL off in SR has some issues on G4x... disable it */
1380 I915_WRITE(DSPFW3, 1381 I915_WRITE(DSPFW3,
1381 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | 1382 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1382 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1383 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1383} 1384}
1384 1385
@@ -2647,6 +2648,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2647{ 2648{
2648 struct drm_i915_private *dev_priv = dev->dev_private; 2649 struct drm_i915_private *dev_priv = dev->dev_private;
2649 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 2650 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2651 bool was_interruptible;
2650 int ret; 2652 int ret;
2651 2653
2652 /* rc6 disabled by default due to repeated reports of hanging during 2654 /* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2663,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2661 if (ret) 2663 if (ret)
2662 return; 2664 return;
2663 2665
2666 was_interruptible = dev_priv->mm.interruptible;
2667 dev_priv->mm.interruptible = false;
2668
2664 /* 2669 /*
2665 * GPU can automatically power down the render unit if given a page 2670 * GPU can automatically power down the render unit if given a page
2666 * to save state. 2671 * to save state.
@@ -2668,6 +2673,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2668 ret = intel_ring_begin(ring, 6); 2673 ret = intel_ring_begin(ring, 6);
2669 if (ret) { 2674 if (ret) {
2670 ironlake_teardown_rc6(dev); 2675 ironlake_teardown_rc6(dev);
2676 dev_priv->mm.interruptible = was_interruptible;
2671 return; 2677 return;
2672 } 2678 }
2673 2679
@@ -2688,7 +2694,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2688 * does an implicit flush, combined with MI_FLUSH above, it should be 2694 * does an implicit flush, combined with MI_FLUSH above, it should be
2689 * safe to assume that renderctx is valid 2695 * safe to assume that renderctx is valid
2690 */ 2696 */
2691 ret = intel_wait_ring_idle(ring); 2697 ret = intel_ring_idle(ring);
2698 dev_priv->mm.interruptible = was_interruptible;
2692 if (ret) { 2699 if (ret) {
2693 DRM_ERROR("failed to enable ironlake power power savings\n"); 2700 DRM_ERROR("failed to enable ironlake power power savings\n");
2694 ironlake_teardown_rc6(dev); 2701 ironlake_teardown_rc6(dev);
@@ -3440,6 +3447,11 @@ static void cpt_init_clock_gating(struct drm_device *dev)
3440 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 3447 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3441 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 3448 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3442 DPLS_EDP_PPS_FIX_DIS); 3449 DPLS_EDP_PPS_FIX_DIS);
3450 /* The below fixes the weird display corruption, a few pixels shifted
3451 * downward, on (only) LVDS of some HP laptops with IVY.
3452 */
3453 for_each_pipe(pipe)
3454 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
3443 /* WADP0ClockGatingDisable */ 3455 /* WADP0ClockGatingDisable */
3444 for_each_pipe(pipe) { 3456 for_each_pipe(pipe) {
3445 I915_WRITE(TRANS_CHICKEN1(pipe), 3457 I915_WRITE(TRANS_CHICKEN1(pipe),
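The DSPFW changes above are read-modify-write corrections: the old code masked with the field instead of its complement (or or-ed the new value in without clearing anything), so stale cursor and self-refresh watermark bits could survive the update. A tiny stand-alone illustration of the difference, using a made-up register layout rather than the real DSPFW bit definitions:

/* rmw_field.c -- illustrative only; the shift/mask values are invented. */
#include <stdint.h>
#include <stdio.h>

#define CURSOR_SR_SHIFT	24
#define CURSOR_SR_MASK	(0x3fu << CURSOR_SR_SHIFT)

/* Correct read-modify-write: clear the field, then insert the new value. */
static uint32_t set_cursor_sr(uint32_t reg, uint32_t wm)
{
	return (reg & ~CURSOR_SR_MASK) | (wm << CURSOR_SR_SHIFT);
}

int main(void)
{
	uint32_t reg = 0x3f000000u | 0x1234u;	/* stale SR watermark + unrelated fields */

	/* Buggy variant: or-ing without clearing keeps the stale bits set. */
	uint32_t buggy = reg | (0x05u << CURSOR_SR_SHIFT);
	uint32_t fixed = set_cursor_sr(reg, 0x05u);

	printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
	return 0;
}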
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 987eb5fdaf39..2346b920bd86 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
45 45
46static inline int ring_space(struct intel_ring_buffer *ring) 46static inline int ring_space(struct intel_ring_buffer *ring)
47{ 47{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); 48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
49 if (space < 0) 49 if (space < 0)
50 space += ring->size; 50 space += ring->size;
51 return space; 51 return space;
@@ -555,12 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
555 555
556static void 556static void
557update_mboxes(struct intel_ring_buffer *ring, 557update_mboxes(struct intel_ring_buffer *ring,
558 u32 seqno, 558 u32 mmio_offset)
559 u32 mmio_offset)
560{ 559{
561 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 560 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
562 intel_ring_emit(ring, mmio_offset); 561 intel_ring_emit(ring, mmio_offset);
563 intel_ring_emit(ring, seqno); 562 intel_ring_emit(ring, ring->outstanding_lazy_request);
564} 563}
565 564
566/** 565/**
@@ -573,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
573 * This acts like a signal in the canonical semaphore. 572 * This acts like a signal in the canonical semaphore.
574 */ 573 */
575static int 574static int
576gen6_add_request(struct intel_ring_buffer *ring, 575gen6_add_request(struct intel_ring_buffer *ring)
577 u32 *seqno)
578{ 576{
579 u32 mbox1_reg; 577 u32 mbox1_reg;
580 u32 mbox2_reg; 578 u32 mbox2_reg;
@@ -587,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
587 mbox1_reg = ring->signal_mbox[0]; 585 mbox1_reg = ring->signal_mbox[0];
588 mbox2_reg = ring->signal_mbox[1]; 586 mbox2_reg = ring->signal_mbox[1];
589 587
590 *seqno = i915_gem_next_request_seqno(ring); 588 update_mboxes(ring, mbox1_reg);
591 589 update_mboxes(ring, mbox2_reg);
592 update_mboxes(ring, *seqno, mbox1_reg);
593 update_mboxes(ring, *seqno, mbox2_reg);
594 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 590 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
595 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 591 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
596 intel_ring_emit(ring, *seqno); 592 intel_ring_emit(ring, ring->outstanding_lazy_request);
597 intel_ring_emit(ring, MI_USER_INTERRUPT); 593 intel_ring_emit(ring, MI_USER_INTERRUPT);
598 intel_ring_advance(ring); 594 intel_ring_advance(ring);
599 595
@@ -650,10 +646,8 @@ do { \
650} while (0) 646} while (0)
651 647
652static int 648static int
653pc_render_add_request(struct intel_ring_buffer *ring, 649pc_render_add_request(struct intel_ring_buffer *ring)
654 u32 *result)
655{ 650{
656 u32 seqno = i915_gem_next_request_seqno(ring);
657 struct pipe_control *pc = ring->private; 651 struct pipe_control *pc = ring->private;
658 u32 scratch_addr = pc->gtt_offset + 128; 652 u32 scratch_addr = pc->gtt_offset + 128;
659 int ret; 653 int ret;
@@ -674,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
674 PIPE_CONTROL_WRITE_FLUSH | 668 PIPE_CONTROL_WRITE_FLUSH |
675 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 669 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
676 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 670 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
677 intel_ring_emit(ring, seqno); 671 intel_ring_emit(ring, ring->outstanding_lazy_request);
678 intel_ring_emit(ring, 0); 672 intel_ring_emit(ring, 0);
679 PIPE_CONTROL_FLUSH(ring, scratch_addr); 673 PIPE_CONTROL_FLUSH(ring, scratch_addr);
680 scratch_addr += 128; /* write to separate cachelines */ 674 scratch_addr += 128; /* write to separate cachelines */
@@ -693,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
693 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 687 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
694 PIPE_CONTROL_NOTIFY); 688 PIPE_CONTROL_NOTIFY);
695 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 689 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
696 intel_ring_emit(ring, seqno); 690 intel_ring_emit(ring, ring->outstanding_lazy_request);
697 intel_ring_emit(ring, 0); 691 intel_ring_emit(ring, 0);
698 intel_ring_advance(ring); 692 intel_ring_advance(ring);
699 693
700 *result = seqno;
701 return 0; 694 return 0;
702} 695}
703 696
@@ -885,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
885} 878}
886 879
887static int 880static int
888i9xx_add_request(struct intel_ring_buffer *ring, 881i9xx_add_request(struct intel_ring_buffer *ring)
889 u32 *result)
890{ 882{
891 u32 seqno;
892 int ret; 883 int ret;
893 884
894 ret = intel_ring_begin(ring, 4); 885 ret = intel_ring_begin(ring, 4);
895 if (ret) 886 if (ret)
896 return ret; 887 return ret;
897 888
898 seqno = i915_gem_next_request_seqno(ring);
899
900 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 889 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
901 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 890 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
902 intel_ring_emit(ring, seqno); 891 intel_ring_emit(ring, ring->outstanding_lazy_request);
903 intel_ring_emit(ring, MI_USER_INTERRUPT); 892 intel_ring_emit(ring, MI_USER_INTERRUPT);
904 intel_ring_advance(ring); 893 intel_ring_advance(ring);
905 894
906 *result = seqno;
907 return 0; 895 return 0;
908} 896}
909 897
@@ -1110,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1110 INIT_LIST_HEAD(&ring->active_list); 1098 INIT_LIST_HEAD(&ring->active_list);
1111 INIT_LIST_HEAD(&ring->request_list); 1099 INIT_LIST_HEAD(&ring->request_list);
1112 ring->size = 32 * PAGE_SIZE; 1100 ring->size = 32 * PAGE_SIZE;
1101 memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1113 1102
1114 init_waitqueue_head(&ring->irq_queue); 1103 init_waitqueue_head(&ring->irq_queue);
1115 1104
@@ -1186,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1186 1175
1187 /* Disable the ring buffer. The ring must be idle at this point */ 1176 /* Disable the ring buffer. The ring must be idle at this point */
1188 dev_priv = ring->dev->dev_private; 1177 dev_priv = ring->dev->dev_private;
1189 ret = intel_wait_ring_idle(ring); 1178 ret = intel_ring_idle(ring);
1190 if (ret) 1179 if (ret)
1191 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1180 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1192 ring->name, ret); 1181 ring->name, ret);
@@ -1205,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1205 cleanup_status_page(ring); 1194 cleanup_status_page(ring);
1206} 1195}
1207 1196
1208static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1209{
1210 uint32_t __iomem *virt;
1211 int rem = ring->size - ring->tail;
1212
1213 if (ring->space < rem) {
1214 int ret = intel_wait_ring_buffer(ring, rem);
1215 if (ret)
1216 return ret;
1217 }
1218
1219 virt = ring->virtual_start + ring->tail;
1220 rem /= 4;
1221 while (rem--)
1222 iowrite32(MI_NOOP, virt++);
1223
1224 ring->tail = 0;
1225 ring->space = ring_space(ring);
1226
1227 return 0;
1228}
1229
1230static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1197static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1231{ 1198{
1232 int ret; 1199 int ret;
@@ -1260,7 +1227,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1260 if (request->tail == -1) 1227 if (request->tail == -1)
1261 continue; 1228 continue;
1262 1229
1263 space = request->tail - (ring->tail + 8); 1230 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1264 if (space < 0) 1231 if (space < 0)
1265 space += ring->size; 1232 space += ring->size;
1266 if (space >= n) { 1233 if (space >= n) {
@@ -1295,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1295 return 0; 1262 return 0;
1296} 1263}
1297 1264
1298int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) 1265static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1299{ 1266{
1300 struct drm_device *dev = ring->dev; 1267 struct drm_device *dev = ring->dev;
1301 struct drm_i915_private *dev_priv = dev->dev_private; 1268 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1338,6 +1305,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1338 return -EBUSY; 1305 return -EBUSY;
1339} 1306}
1340 1307
1308static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1309{
1310 uint32_t __iomem *virt;
1311 int rem = ring->size - ring->tail;
1312
1313 if (ring->space < rem) {
1314 int ret = ring_wait_for_space(ring, rem);
1315 if (ret)
1316 return ret;
1317 }
1318
1319 virt = ring->virtual_start + ring->tail;
1320 rem /= 4;
1321 while (rem--)
1322 iowrite32(MI_NOOP, virt++);
1323
1324 ring->tail = 0;
1325 ring->space = ring_space(ring);
1326
1327 return 0;
1328}
1329
1330int intel_ring_idle(struct intel_ring_buffer *ring)
1331{
1332 u32 seqno;
1333 int ret;
1334
1335 /* We need to add any requests required to flush the objects and ring */
1336 if (ring->outstanding_lazy_request) {
1337 ret = i915_add_request(ring, NULL, NULL);
1338 if (ret)
1339 return ret;
1340 }
1341
1342 /* Wait upon the last request to be completed */
1343 if (list_empty(&ring->request_list))
1344 return 0;
1345
1346 seqno = list_entry(ring->request_list.prev,
1347 struct drm_i915_gem_request,
1348 list)->seqno;
1349
1350 return i915_wait_seqno(ring, seqno);
1351}
1352
1353static int
1354intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1355{
1356 if (ring->outstanding_lazy_request)
1357 return 0;
1358
1359 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1360}
1361
1341int intel_ring_begin(struct intel_ring_buffer *ring, 1362int intel_ring_begin(struct intel_ring_buffer *ring,
1342 int num_dwords) 1363 int num_dwords)
1343{ 1364{
@@ -1349,6 +1370,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1349 if (ret) 1370 if (ret)
1350 return ret; 1371 return ret;
1351 1372
1373 /* Preallocate the olr before touching the ring */
1374 ret = intel_ring_alloc_seqno(ring);
1375 if (ret)
1376 return ret;
1377
1352 if (unlikely(ring->tail + n > ring->effective_size)) { 1378 if (unlikely(ring->tail + n > ring->effective_size)) {
1353 ret = intel_wrap_ring_buffer(ring); 1379 ret = intel_wrap_ring_buffer(ring);
1354 if (unlikely(ret)) 1380 if (unlikely(ret))
@@ -1356,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1356 } 1382 }
1357 1383
1358 if (unlikely(ring->space < n)) { 1384 if (unlikely(ring->space < n)) {
1359 ret = intel_wait_ring_buffer(ring, n); 1385 ret = ring_wait_for_space(ring, n);
1360 if (unlikely(ret)) 1386 if (unlikely(ret))
1361 return ret; 1387 return ret;
1362 } 1388 }
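The add_request() rework above stops threading a seqno out through a pointer; instead, intel_ring_begin() pre-allocates ring->outstanding_lazy_request before any dwords are emitted, and the request and semaphore-mailbox emission simply read it back. The following user-space sketch models that flow; the structure and function names are invented for the example, the seqno source is just a counter, and the int return mirrors the kernel helper's error-code convention without ever failing here.

/* lazy_request.c -- illustrative model only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t next_seqno;			/* stand-in for the global seqno counter */
	uint32_t outstanding_lazy_request;	/* 0 means "no request open" */
};

static int ring_alloc_seqno(struct ring *ring)
{
	if (ring->outstanding_lazy_request)
		return 0;		/* this request already has a seqno */
	ring->outstanding_lazy_request = ++ring->next_seqno;
	return 0;
}

static int ring_begin(struct ring *ring)
{
	/* Preallocate the lazy seqno before touching the ring. */
	return ring_alloc_seqno(ring);
}

static void ring_add_request(struct ring *ring)
{
	/* The emitted MI_STORE_DWORD / mailbox writes would use this value. */
	printf("emit request, seqno=%u\n",
	       (unsigned int)ring->outstanding_lazy_request);
	ring->outstanding_lazy_request = 0;	/* request closed */
}

int main(void)
{
	struct ring ring = { .next_seqno = 100 };

	ring_begin(&ring);	/* reserves seqno 101 */
	ring_begin(&ring);	/* no-op: the open request keeps its seqno */
	ring_add_request(&ring);
	return 0;
}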
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5af65b89765f..526182ed0c6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4/*
5 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
6 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
7 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
8 *
9 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
10 * cacheline, the Head Pointer must not be greater than the Tail
11 * Pointer."
12 */
13#define I915_RING_FREE_SPACE 64
14
4struct intel_hw_status_page { 15struct intel_hw_status_page {
5 u32 *page_addr; 16 u32 *page_addr;
6 unsigned int gfx_addr; 17 unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
70 int __must_check (*flush)(struct intel_ring_buffer *ring, 81 int __must_check (*flush)(struct intel_ring_buffer *ring,
71 u32 invalidate_domains, 82 u32 invalidate_domains,
72 u32 flush_domains); 83 u32 flush_domains);
73 int (*add_request)(struct intel_ring_buffer *ring, 84 int (*add_request)(struct intel_ring_buffer *ring);
74 u32 *seqno);
75 /* Some chipsets are not quite as coherent as advertised and need 85 /* Some chipsets are not quite as coherent as advertised and need
76 * an expensive kick to force a true read of the up-to-date seqno. 86 * an expensive kick to force a true read of the up-to-date seqno.
77 * However, the up-to-date seqno is not always required and the last 87 * However, the up-to-date seqno is not always required and the last
@@ -188,24 +198,16 @@ intel_read_status_page(struct intel_ring_buffer *ring,
188 198
189void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 199void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
190 200
191int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
192static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
193{
194 return intel_wait_ring_buffer(ring, ring->size - 8);
195}
196
197int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 201int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
198
199static inline void intel_ring_emit(struct intel_ring_buffer *ring, 202static inline void intel_ring_emit(struct intel_ring_buffer *ring,
200 u32 data) 203 u32 data)
201{ 204{
202 iowrite32(data, ring->virtual_start + ring->tail); 205 iowrite32(data, ring->virtual_start + ring->tail);
203 ring->tail += 4; 206 ring->tail += 4;
204} 207}
205
206void intel_ring_advance(struct intel_ring_buffer *ring); 208void intel_ring_advance(struct intel_ring_buffer *ring);
209int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
207 210
208u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
209int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 211int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
210int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 212int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
211 213
@@ -221,6 +223,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
221 return ring->tail; 223 return ring->tail;
222} 224}
223 225
226static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
227{
228 BUG_ON(ring->outstanding_lazy_request == 0);
229 return ring->outstanding_lazy_request;
230}
231
224static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 232static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
225{ 233{
226 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) 234 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4b07401540ef..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
510 void *response, int response_len) 510 void *response, int response_len)
511{ 511{
512 u8 retry = 5; 512 u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
513 u8 status; 513 u8 status;
514 int i; 514 int i;
515 515
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
522 * command to be complete. 522 * command to be complete.
523 * 523 *
524 * Check 5 times in case the hardware failed to read the docs. 524 * Check 5 times in case the hardware failed to read the docs.
525 *
526 * Also beware that the first response by many devices is to
527 * reply PENDING and stall for time. TVs are notorious for
528 * requiring longer than specified to complete their replies.
529 * Originally (in the DDX long ago), the delay was only ever 15ms
530 * with an additional delay of 30ms applied for TVs added later after
531 * many experiments. To accommodate both sets of delays, we do a
532 * sequence of slow checks if the device is falling behind and fails
533 * to reply within 5*15µs.
525 */ 534 */
526 if (!intel_sdvo_read_byte(intel_sdvo, 535 if (!intel_sdvo_read_byte(intel_sdvo,
527 SDVO_I2C_CMD_STATUS, 536 SDVO_I2C_CMD_STATUS,
528 &status)) 537 &status))
529 goto log_fail; 538 goto log_fail;
530 539
531 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 540 while (status == SDVO_CMD_STATUS_PENDING && --retry) {
532 udelay(15); 541 if (retry < 10)
542 msleep(15);
543 else
544 udelay(15);
545
533 if (!intel_sdvo_read_byte(intel_sdvo, 546 if (!intel_sdvo_read_byte(intel_sdvo,
534 SDVO_I2C_CMD_STATUS, 547 SDVO_I2C_CMD_STATUS,
535 &status)) 548 &status))
@@ -1535,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1535 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1548 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1536 enum drm_connector_status ret; 1549 enum drm_connector_status ret;
1537 1550
1538 if (!intel_sdvo_write_cmd(intel_sdvo, 1551 if (!intel_sdvo_get_value(intel_sdvo,
1539 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1552 SDVO_CMD_GET_ATTACHED_DISPLAYS,
1540 return connector_status_unknown; 1553 &response, 2))
1541
1542 /* add 30ms delay when the output type might be TV */
1543 if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
1544 msleep(30);
1545
1546 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1547 return connector_status_unknown; 1554 return connector_status_unknown;
1548 1555
1549 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", 1556 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
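The SDVO change above folds the old fixed "5 x udelay(15)" poll plus the separate 30 ms sleep for TV outputs into one loop: a few quick microsecond-scale checks, then millisecond sleeps for devices that keep answering PENDING. A small user-space sketch of that two-phase polling follows; the device read is stubbed out and usleep() stands in for udelay()/msleep().

/* sdvo_poll.c -- illustrative model only, not driver code. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_PENDING	1
#define STATUS_DONE	0

static int read_status(void)
{
	static int calls;
	return ++calls < 8 ? STATUS_PENDING : STATUS_DONE;	/* stubbed device */
}

static bool wait_for_response(void)
{
	int retry = 15;	/* a few quick checks, then longer waits */
	int status = read_status();

	while (status == STATUS_PENDING && --retry) {
		if (retry < 10)
			usleep(15 * 1000);	/* slow path: ~15 ms per check */
		else
			usleep(15);		/* fast path: ~15 us per check */
		status = read_status();
	}
	return status == STATUS_DONE;
}

int main(void)
{
	printf("response %s\n", wait_for_response() ? "ok" : "timed out");
	return 0;
}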
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ee9b0b59237f..00d78b5161c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1047,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
1047 int GTF_2C, int GTF_K, int GTF_2J); 1047 int GTF_2C, int GTF_K, int GTF_2J);
1048extern int drm_add_modes_noedid(struct drm_connector *connector, 1048extern int drm_add_modes_noedid(struct drm_connector *connector,
1049 int hdisplay, int vdisplay); 1049 int hdisplay, int vdisplay);
1050extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
1050 1051
1051extern int drm_edid_header_is_valid(const u8 *raw_edid); 1052extern int drm_edid_header_is_valid(const u8 *raw_edid);
1052extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); 1053extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);