path: root/drivers/gpu/drm/i915
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  46
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  37
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c     |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  51
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c           |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 228
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            |  36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          |  39
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c      |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |  37
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h    |  16
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c          | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c            |  43
22 files changed, 618 insertions(+), 263 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 int max_freq;

 /* RPSTAT1 is in the GT power well */
-__gen6_force_wake_get(dev_priv);
+__gen6_gt_force_wake_get(dev_priv);

 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 max_freq * 100);

-__gen6_force_wake_put(dev_priv);
+__gen6_gt_force_wake_put(dev_priv);
 } else {
 seq_printf(m, "no P-state info available\n");
 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 drm_i915_private_t *dev_priv = dev->dev_private;
 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-struct intel_ring_buffer *ring = LP_RING(dev_priv);
+int ret;

 master_priv->sarea = drm_getsarea(dev);
 if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 }

 if (init->ring_size != 0) {
-if (ring->obj != NULL) {
+if (LP_RING(dev_priv)->obj != NULL) {
 i915_dma_cleanup(dev);
 DRM_ERROR("Client tried to initialize ringbuffer in "
 "GEM mode\n");
 return -EINVAL;
 }

-ring->size = init->ring_size;
-
-ring->map.offset = init->ring_start;
-ring->map.size = init->ring_size;
-ring->map.type = 0;
-ring->map.flags = 0;
-ring->map.mtrr = 0;
-
-drm_core_ioremap_wc(&ring->map, dev);
-
-if (ring->map.handle == NULL) {
+ret = intel_render_ring_init_dri(dev,
+init->ring_start,
+init->ring_size);
+if (ret) {
 i915_dma_cleanup(dev);
-DRM_ERROR("can not ioremap virtual address for"
-" ring buffer\n");
-return -ENOMEM;
+return ret;
 }
 }

-ring->virtual_start = ring->map.handle;
-
 dev_priv->cpp = init->cpp;
 dev_priv->back_offset = init->back_offset;
 dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 if (ret)
 DRM_INFO("failed to find VBIOS tables\n");

-/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+/* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-if (ret)
+if (ret && ret != -ENODEV)
 goto cleanup_ringbuffer;

 intel_register_dsm_handler();
@@ -1900,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 if (IS_GEN2(dev))
 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

+/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 mmio_bar = IS_GEN2(dev) ? 1 : 0;
 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2bcfc4f..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);

+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);

@@ -60,7 +66,7 @@ extern int intel_agp_enabled;

 #define INTEL_VGA_DEVICE(id, info) { \
 .class = PCI_CLASS_DISPLAY_VGA << 8, \
-.class_mask = 0xffff00, \
+.class_mask = 0xff0000, \
 .vendor = 0x8086, \
 .device = id, \
 .subvendor = PCI_ANY_ID, \
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
 }
 }

-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 int count;

@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 udelay(10);
 }

-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 I915_WRITE_NOTRACE(FORCEWAKE, 0);
 POSTING_READ(FORCEWAKE);
 }

+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+int loop = 500;
+u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+while (fifo < 20 && loop--) {
+udelay(10);
+fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -354,12 +370,13 @@ static int i915_drm_thaw(struct drm_device *dev)
 error = i915_gem_init_ringbuffer(dev);
 mutex_unlock(&dev->struct_mutex);

+drm_mode_config_reset(dev);
 drm_irq_install(dev);

 /* Resume the modeset for every activated CRTC */
 drm_helper_resume_force_mode(dev);

-if (dev_priv->renderctx && dev_priv->pwrctx)
+if (IS_IRONLAKE_M(dev))
 ironlake_enable_rc6(dev);
 }

@@ -542,6 +559,7 @@ int i915_reset(struct drm_device *dev, u8 flags)

 mutex_unlock(&dev->struct_mutex);
 drm_irq_uninstall(dev);
+drm_mode_config_reset(dev);
 drm_irq_install(dev);
 mutex_lock(&dev->struct_mutex);
 }
@@ -566,6 +584,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+/* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This causes
+ * us confusion instead, especially on the systems where both
+ * functions have the same PCI-ID!
+ */
+if (PCI_FUNC(pdev->devfn))
+return -ENODEV;
+
 return drm_get_pci_dev(pdev, ent, &driver);
 }

@@ -752,6 +778,9 @@ static int __init i915_init(void)
 driver.driver_features &= ~DRIVER_MODESET;
 #endif

+if (!(driver.driver_features & DRIVER_MODESET))
+driver.get_vblank_timestamp = NULL;
+
 return drm_init(&driver);
 }

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46ac2d6..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
 /** List of all objects in gtt_space. Used to restore gtt
 * mappings on resume */
 struct list_head gtt_list;
-/** End of mappable part of GTT */
+
+/** Usable portion of the GTT for GEM */
+unsigned long gtt_start;
 unsigned long gtt_mappable_end;
+unsigned long gtt_end;

 struct io_mapping *gtt_mapping;
 int gtt_mtrr;
@@ -953,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;

 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1173,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);

+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1349,22 +1357,32 @@ __i915_write(64, q)
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 u32 val;

 if (dev_priv->info->gen >= 6) {
-__gen6_force_wake_get(dev_priv);
+__gen6_gt_force_wake_get(dev_priv);
 val = I915_READ(reg);
-__gen6_force_wake_put(dev_priv);
+__gen6_gt_force_wake_put(dev_priv);
 } else
 val = I915_READ(reg);

 return val;
 }

+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+u32 reg, u32 val)
+{
+if (dev_priv->info->gen >= 6)
+__gen6_gt_wait_for_fifo(dev_priv);
+I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
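
The renamed __gen6_gt_force_wake_get()/__gen6_gt_force_wake_put() pair and the new __gen6_gt_wait_for_fifo() above encode a simple access protocol for Gen6 GT registers: hold force-wake around reads so a powered-down GT cannot return stale values, and check for free write-FIFO entries before posting writes. What follows is a minimal standalone C sketch of that protocol, not kernel code; the fake_gt structure and the mmio_read()/mmio_write() stubs are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's device state and MMIO accessors. */
struct fake_gt {
    int gen;
    unsigned forcewake_refs;
    unsigned fifo_free;              /* models GT_FIFO_FREE_ENTRIES */
    uint32_t regs[16];
};

static uint32_t mmio_read(struct fake_gt *gt, unsigned reg) { return gt->regs[reg]; }
static void mmio_write(struct fake_gt *gt, unsigned reg, uint32_t val) { gt->regs[reg] = val; }

/* Keep the GT power well awake for the duration of a read. */
static void gt_force_wake_get(struct fake_gt *gt) { gt->forcewake_refs++; }
static void gt_force_wake_put(struct fake_gt *gt) { gt->forcewake_refs--; }

/* Writes should not be posted while the GT write FIFO is nearly full. */
static void gt_wait_for_fifo(struct fake_gt *gt)
{
    int loop = 500;
    while (gt->fifo_free < 20 && loop--)
        gt->fifo_free++;             /* hardware drains this over time; here we just model progress */
}

static uint32_t gt_read(struct fake_gt *gt, unsigned reg)
{
    uint32_t val;
    if (gt->gen >= 6) {
        gt_force_wake_get(gt);
        val = mmio_read(gt, reg);
        gt_force_wake_put(gt);
    } else {
        val = mmio_read(gt, reg);
    }
    return val;
}

static void gt_write(struct fake_gt *gt, unsigned reg, uint32_t val)
{
    if (gt->gen >= 6)
        gt_wait_for_fifo(gt);
    mmio_write(gt, reg, val);
}

int main(void)
{
    struct fake_gt gt = { .gen = 6, .fifo_free = 4 };
    gt_write(&gt, 3, 0xdeadbeef);
    printf("reg3 = 0x%08x, forcewake refs now %u\n", (unsigned)gt_read(&gt, 3), gt.forcewake_refs);
    return 0;
}

The i915_gt_read()/i915_gt_write() inline helpers in the hunk above have the same shape, with the kernel's real MMIO macros in place of the stubs.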
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
 drm_i915_private_t *dev_priv = dev->dev_private;

-drm_mm_init(&dev_priv->mm.gtt_space, start,
-end - start);
+drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

+dev_priv->mm.gtt_start = start;
+dev_priv->mm.gtt_mappable_end = mappable_end;
+dev_priv->mm.gtt_end = end;
 dev_priv->mm.gtt_total = end - start;
 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-dev_priv->mm.gtt_mappable_end = mappable_end;
+
+/* Take over this portion of the GTT */
+intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }

 int
@@ -1394,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 struct drm_device *dev = obj->base.dev;
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,

 seqno = ring->get_seqno(ring);

-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 if (seqno >= ring->sync_seqno[i])
 ring->sync_seqno[i] = 0;

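
The bookkeeping added to i915_gem_do_init() above is plain interval arithmetic: GEM manages the range [start, end), and only the part below mappable_end is CPU-visible through the aperture. A standalone sketch with invented aperture sizes (the real values come from the GTT probe, not from this example):

#include <stdio.h>

int main(void)
{
    /* Hypothetical GTT layout, in bytes; the numbers are illustrative only. */
    unsigned long start        = 64ul << 20;   /* first 64 MiB reserved, e.g. for stolen memory */
    unsigned long mappable_end = 256ul << 20;  /* end of the CPU-visible aperture */
    unsigned long end          = 512ul << 20;  /* end of the GTT handed to GEM */

    unsigned long gtt_total          = end - start;
    unsigned long min_end            = end < mappable_end ? end : mappable_end;
    unsigned long mappable_gtt_total = min_end - start;

    printf("GEM-managed GTT: %lu MiB, of which %lu MiB is CPU-mappable\n",
           gtt_total >> 20, mappable_gtt_total >> 20);
    return 0;
}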
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf4151b6d..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 if (from == NULL || to == from)
 return 0;

-/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 return i915_gem_object_wait_rendering(obj, true);

 idx = intel_ring_sync_index(from, to);
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 goto err;

 seqno = i915_gem_next_request_seqno(dev, ring);
-for (i = 0; i < I915_NUM_RINGS-1; i++) {
+for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 if (seqno < ring->sync_seqno[i]) {
 /* The GPU can not handle its semaphore value wrapping,
 * so every billion or so execbuffers, we need to stall
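
Both ARRAY_SIZE() loops above walk the per-ring sync_seqno[] table; the execbuffer one stalls when the next request seqno is numerically smaller than a recorded value, which only happens once the 32-bit seqno counter wraps (the "every billion or so execbuffers" case in the comment). A minimal standalone sketch of that wrap check, with invented seqno values:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Seqnos increase monotonically, so a numerically smaller "next" seqno
 * means the 32-bit counter has wrapped since sync_seqno was recorded. */
static bool seqno_wrapped(uint32_t next_seqno, uint32_t sync_seqno)
{
    return next_seqno < sync_seqno;
}

int main(void)
{
    uint32_t sync_seqno = 0xfffffff0u;   /* recorded shortly before the wrap */
    uint32_t next_seqno = 0x00000010u;   /* allocated just after the counter wrapped */

    printf("wrapped: %s\n", seqno_wrapped(next_seqno, sync_seqno) ? "yes" : "no");
    return 0;
}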
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct drm_i915_gem_object *obj;

+/* First fill our portion of the GTT with scratch pages */
+intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 i915_gem_clflush_object(obj);

diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 i915_gem_object_fence_ok(obj, args->tiling_mode));

-obj->tiling_changed = true;
-obj->tiling_mode = args->tiling_mode;
-obj->stride = args->stride;
+/* Rebind if we need a change of alignment */
+if (!obj->map_and_fenceable) {
+u32 unfenced_alignment =
+i915_gem_get_unfenced_gtt_alignment(obj);
+if (obj->gtt_offset & (unfenced_alignment - 1))
+ret = i915_gem_object_unbind(obj);
+}
+
+if (ret == 0) {
+obj->tiling_changed = true;
+obj->tiling_mode = args->tiling_mode;
+obj->stride = args->stride;
+}
 }
+/* we have to maintain this existing ABI... */
+args->stride = obj->stride;
+args->tiling_mode = obj->tiling_mode;
 drm_gem_object_unreference(&obj->base);
 mutex_unlock(&dev->struct_mutex);

-return 0;
+return ret;
 }

 /**
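
The rebind test added to i915_gem_set_tiling() above uses the usual power-of-two alignment trick: offset & (alignment - 1) is non-zero exactly when offset is not a multiple of alignment. A standalone sketch with made-up offsets and alignment:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* True if offset is aligned to 'alignment', which must be a power of two. */
static bool is_aligned(uint64_t offset, uint64_t alignment)
{
    return (offset & (alignment - 1)) == 0;
}

int main(void)
{
    uint64_t unfenced_alignment = 4096;               /* illustrative value */
    uint64_t offsets[] = { 0x10000, 0x10800, 0x20000 };

    for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
        printf("0x%llx %s need a rebind\n",
               (unsigned long long)offsets[i],
               is_aligned(offsets[i], unfenced_alignment) ? "does not" : "would");
    return 0;
}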
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509ae065e..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 return ret;
 }

-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 int *max_error,
 struct timeval *vblank_time,
 unsigned flags)
 {
-struct drm_crtc *drmcrtc;
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_crtc *crtc;

-if (crtc < 0 || crtc >= dev->num_crtcs) {
-DRM_ERROR("Invalid crtc %d\n", crtc);
+if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+DRM_ERROR("Invalid crtc %d\n", pipe);
 return -EINVAL;
 }

 /* Get drm_crtc to timestamp: */
-drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+crtc = intel_get_crtc_for_pipe(dev, pipe);
+if (crtc == NULL) {
+DRM_ERROR("Invalid crtc %d\n", pipe);
+return -EINVAL;
+}
+
+if (!crtc->enabled) {
+DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+return -EBUSY;
+}

 /* Helper routine in DRM core does all the work: */
-return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-vblank_time, flags, drmcrtc);
+return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+vblank_time, flags,
+crtc);
 }

 /*
@@ -305,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 struct drm_mode_config *mode_config = &dev->mode_config;
 struct intel_encoder *encoder;

+DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 if (encoder->hot_plug)
 encoder->hot_plug(encoder);
@@ -348,8 +361,12 @@ static void notify_ring(struct drm_device *dev,
 struct intel_ring_buffer *ring)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
-u32 seqno = ring->get_seqno(ring);
+u32 seqno;

+if (ring->obj == NULL)
+return;
+
+seqno = ring->get_seqno(ring);
 trace_i915_gem_request_complete(dev, seqno);

 ring->irq_seqno = seqno;
@@ -831,6 +848,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 i++;
 error->pinned_bo_count = i - error->active_bo_count;

+error->active_bo = NULL;
+error->pinned_bo = NULL;
 if (i) {
 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 GFP_ATOMIC);
@@ -1179,18 +1198,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 intel_finish_page_flip_plane(dev, 1);
 }

-if (pipea_stats & vblank_status) {
+if (pipea_stats & vblank_status &&
+drm_handle_vblank(dev, 0)) {
 vblank++;
-drm_handle_vblank(dev, 0);
 if (!dev_priv->flip_pending_is_done) {
 i915_pageflip_stall_check(dev, 0);
 intel_finish_page_flip(dev, 0);
 }
 }

-if (pipeb_stats & vblank_status) {
+if (pipeb_stats & vblank_status &&
+drm_handle_vblank(dev, 1)) {
 vblank++;
-drm_handle_vblank(dev, 1);
 if (!dev_priv->flip_pending_is_done) {
 i915_pageflip_stall_check(dev, 1);
 intel_finish_page_flip(dev, 1);
@@ -1278,12 +1297,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 if (master_priv->sarea_priv)
 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

-ret = -ENODEV;
 if (ring->irq_get(ring)) {
 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 READ_BREADCRUMB(dev_priv) >= irq_nr);
 ring->irq_put(ring);
-}
+} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ret = -EBUSY;

 if (ret == -EBUSY) {
 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1632,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 } else {
 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-I915_WRITE(FDI_RXA_IMR, 0);
-I915_WRITE(FDI_RXB_IMR, 0);
+hotplug_mask |= SDE_AUX_MASK;
 }

 dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..3e6f486f4605 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
 */
 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_INVALIDATE_BSD (1<<7)
 #define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
 #define MI_BATCH_NON_SECURE (1)
 #define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -513,6 +515,10 @@
 #define GEN6_BLITTER_SYNC_STATUS (1 << 24)
 #define GEN6_BLITTER_USER_INTERRUPT (1 << 22)

+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -1547,17 +1553,7 @@

 /* Backlight control */
 #define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
 #define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
 /*
 * This is the number of cycles out of the backlight modulation cycle for which
 * the backlight is on.
@@ -2626,6 +2622,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014

 #define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)

@@ -3263,6 +3261,8 @@
 #define FORCEWAKE 0xA18C
 #define FORCEWAKE_ACK 0x130090

+#define GT_FIFO_FREE_ENTRIES 0x120008
+
 #define GEN6_RPNSWREQ 0xA008
 #define GEN6_TURBO_DISABLE (1<<31)
 #define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
 return 0;
 }

+static void intel_crt_reset(struct drm_connector *connector)
+{
+struct drm_device *dev = connector->dev;
+struct intel_crt *crt = intel_attached_crt(connector);
+
+if (HAS_PCH_SPLIT(dev))
+crt->force_hotplug_required = 1;
+}
+
 /*
 * Routines for controlling stuff on the analog port
 */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
 };

 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+.reset = intel_crt_reset,
 .dpms = drm_helper_connector_dpms,
 .detect = intel_crt_detect,
 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b7724..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }

+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+u32 blt_ecoskpd;
+
+/* Make sure blitter notifies FBC of writes */
+__gen6_gt_force_wake_get(dev_priv);
+blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+GEN6_BLITTER_LOCK_SHIFT;
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+GEN6_BLITTER_LOCK_SHIFT);
+I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+POSTING_READ(GEN6_BLITTER_ECOSKPD);
+__gen6_gt_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 I915_WRITE(SNB_DPFC_CTL_SA,
 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+sandybridge_blit_fbc_update(dev);
 }

 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -1609,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

 wait_event(dev_priv->pending_flip_queue,
+atomic_read(&dev_priv->mm.wedged) ||
 atomic_read(&obj->pending_flip) == 0);

 /* Big Hammer, we also need to ensure that any pending
 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 * current scanout is retired before unpinning the old
 * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
 */
 ret = i915_gem_object_flush_gpu(obj, false);
-if (ret) {
-i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-mutex_unlock(&dev->struct_mutex);
-return ret;
-}
+(void) ret;
 }

 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2024,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 atomic_read(&obj->pending_flip) == 0);
 }

+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+struct drm_device *dev = crtc->dev;
+struct drm_mode_config *mode_config = &dev->mode_config;
+struct intel_encoder *encoder;
+
+/*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+if (encoder->base.crtc != crtc)
+continue;
+
+switch (encoder->type) {
+case INTEL_OUTPUT_EDP:
+if (!intel_encoder_is_pch_edp(&encoder->base))
+return false;
+continue;
+}
+}
+
+return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
@@ -2032,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 int pipe = intel_crtc->pipe;
 int plane = intel_crtc->plane;
 u32 reg, temp;
+bool is_pch_port = false;

 if (intel_crtc->active)
 return;
@@ -2045,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 }

-ironlake_fdi_enable(crtc);
+is_pch_port = intel_crtc_driving_pch(crtc);
+
+if (is_pch_port)
+ironlake_fdi_enable(crtc);
+else {
+/* disable CPU FDI tx and PCH FDI rx */
+reg = FDI_TX_CTL(pipe);
+temp = I915_READ(reg);
+I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+POSTING_READ(reg);
+
+reg = FDI_RX_CTL(pipe);
+temp = I915_READ(reg);
+temp &= ~(0x7 << 16);
+temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+POSTING_READ(reg);
+udelay(100);
+
+/* Ironlake workaround, disable clock pointer after downing FDI */
+if (HAS_PCH_IBX(dev))
+I915_WRITE(FDI_RX_CHICKEN(pipe),
+I915_READ(FDI_RX_CHICKEN(pipe) &
+~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+/* still set train pattern 1 */
+reg = FDI_TX_CTL(pipe);
+temp = I915_READ(reg);
+temp &= ~FDI_LINK_TRAIN_NONE;
+temp |= FDI_LINK_TRAIN_PATTERN_1;
+I915_WRITE(reg, temp);
+
+reg = FDI_RX_CTL(pipe);
+temp = I915_READ(reg);
+if (HAS_PCH_CPT(dev)) {
+temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+} else {
+temp &= ~FDI_LINK_TRAIN_NONE;
+temp |= FDI_LINK_TRAIN_PATTERN_1;
+}
+/* BPC in FDI rx is consistent with that in PIPECONF */
+temp &= ~(0x07 << 16);
+temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+I915_WRITE(reg, temp);
+
+POSTING_READ(reg);
+udelay(100);
+}

 /* Enable panel fitting for LVDS */
 if (dev_priv->pch_pf_size &&
@@ -2079,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 intel_flush_display_plane(dev, plane);
 }

+/* Skip the PCH stuff if possible */
+if (!is_pch_port)
+goto done;
+
 /* For PCH output, training FDI link */
 if (IS_GEN6(dev))
 gen6_fdi_link_train(crtc);
@@ -2163,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 I915_WRITE(reg, temp | TRANS_ENABLE);
 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 intel_crtc_load_lut(crtc);
 intel_update_fbc(dev);
 intel_crtc_update_cursor(crtc, true);
@@ -5530,6 +5630,16 @@ cleanup_work:
 return ret;
 }

+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+/* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+intel_crtc->dpms_mode = -1;
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 .dpms = intel_crtc_dpms,
 .mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
 };

 static const struct drm_crtc_funcs intel_crtc_funcs = {
+.reset = intel_crtc_reset,
 .cursor_set = intel_crtc_cursor_set,
 .cursor_move = intel_crtc_cursor_move,
 .gamma_set = intel_crtc_gamma_set,
@@ -5631,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

-intel_crtc->cursor_addr = 0;
-intel_crtc->dpms_mode = -1;
+intel_crtc_reset(&intel_crtc->base);
 intel_crtc->active = true; /* force the pipe off on setup_init_config */

 if (HAS_PCH_SPLIT(dev)) {
@@ -6172,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 * userspace...
 */
 I915_WRITE(GEN6_RC_STATE, 0);
-__gen6_force_wake_get(dev_priv);
+__gen6_gt_force_wake_get(dev_priv);

 /* disable the counters and set deterministic thresholds */
 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6270,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 /* enable all PM interrupts */
 I915_WRITE(GEN6_PMINTRMSK, 0);

-__gen6_force_wake_put(dev_priv);
+__gen6_gt_force_wake_put(dev_priv);
 }

 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6286,7 +6396,9 @@ void intel_enable_clock_gating(struct drm_device *dev)

 if (IS_GEN5(dev)) {
 /* Required for FBC */
-dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+DPFCRUNIT_CLOCK_GATE_DISABLE |
+DPFDUNIT_CLOCK_GATE_DISABLE;
 /* Required for CxSR */
 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

@@ -6429,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
 }
 }

-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;

 if (dev_priv->renderctx) {
-struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-I915_WRITE(CCID, 0);
-POSTING_READ(CCID);
-
-i915_gem_object_unpin(obj);
-drm_gem_object_unreference(&obj->base);
+i915_gem_object_unpin(dev_priv->renderctx);
+drm_gem_object_unreference(&dev_priv->renderctx->base);
 dev_priv->renderctx = NULL;
 }

 if (dev_priv->pwrctx) {
-struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+i915_gem_object_unpin(dev_priv->pwrctx);
+drm_gem_object_unreference(&dev_priv->pwrctx->base);
+dev_priv->pwrctx = NULL;
+}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+
+if (I915_READ(PWRCTXA)) {
+/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+50);

 I915_WRITE(PWRCTXA, 0);
 POSTING_READ(PWRCTXA);

-i915_gem_object_unpin(obj);
-drm_gem_object_unreference(&obj->base);
-dev_priv->pwrctx = NULL;
+I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+POSTING_READ(RSTDBYCTL);
 }
+
+ironlake_teardown_rc6(dev);
 }

-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;

-/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-10);
-POSTING_READ(CCID);
-I915_WRITE(PWRCTXA, 0);
-POSTING_READ(PWRCTXA);
-I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-POSTING_READ(RSTDBYCTL);
-i915_gem_object_unpin(dev_priv->renderctx);
-drm_gem_object_unreference(&dev_priv->renderctx->base);
-dev_priv->renderctx = NULL;
-i915_gem_object_unpin(dev_priv->pwrctx);
-drm_gem_object_unreference(&dev_priv->pwrctx->base);
-dev_priv->pwrctx = NULL;
+if (dev_priv->renderctx == NULL)
+dev_priv->renderctx = intel_alloc_context_page(dev);
+if (!dev_priv->renderctx)
+return -ENOMEM;
+
+if (dev_priv->pwrctx == NULL)
+dev_priv->pwrctx = intel_alloc_context_page(dev);
+if (!dev_priv->pwrctx) {
+ironlake_teardown_rc6(dev);
+return -ENOMEM;
+}
+
+return 0;
 }

 void ironlake_enable_rc6(struct drm_device *dev)
@@ -6482,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
 struct drm_i915_private *dev_priv = dev->dev_private;
 int ret;

+/* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+if (!i915_enable_rc6)
+return;
+
+ret = ironlake_setup_rc6(dev);
+if (ret)
+return;
+
 /*
 * GPU can automatically power down the render unit if given a page
 * to save state.
 */
 ret = BEGIN_LP_RING(6);
 if (ret) {
-ironlake_disable_rc6(dev);
+ironlake_teardown_rc6(dev);
 return;
 }
+
 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 OUT_RING(MI_SET_CONTEXT);
 OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6507,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }

+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6749,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
 if (IS_GEN6(dev))
 gen6_enable_rps(dev_priv);

-if (IS_IRONLAKE_M(dev)) {
-dev_priv->renderctx = intel_alloc_context_page(dev);
-if (!dev_priv->renderctx)
-goto skip_rc6;
-dev_priv->pwrctx = intel_alloc_context_page(dev);
-if (!dev_priv->pwrctx) {
-i915_gem_object_unpin(dev_priv->renderctx);
-drm_gem_object_unreference(&dev_priv->renderctx->base);
-dev_priv->renderctx = NULL;
-goto skip_rc6;
-}
+if (IS_IRONLAKE_M(dev))
 ironlake_enable_rc6(dev);
-}

-skip_rc6:
 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 (unsigned long)dev);
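
The RC6 rework above splits allocation (ironlake_setup_rc6) from unwinding (ironlake_teardown_rc6) so a partial allocation can always be cleaned up from one place, on both the enable path and the error paths. Below is a standalone sketch of that shape, with plain malloc/free standing in for intel_alloc_context_page() and the GEM unpin/unreference calls; it is an illustration of the pattern, not the driver code itself.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two context pages RC6 needs; invented for illustration. */
struct ctx_pages {
    void *renderctx;
    void *pwrctx;
};

static void teardown_rc6(struct ctx_pages *c)
{
    /* Safe to call with either page missing, mirroring ironlake_teardown_rc6(). */
    free(c->renderctx); c->renderctx = NULL;
    free(c->pwrctx);    c->pwrctx = NULL;
}

static int setup_rc6(struct ctx_pages *c)
{
    if (!c->renderctx)
        c->renderctx = malloc(4096);
    if (!c->renderctx)
        return -1;

    if (!c->pwrctx)
        c->pwrctx = malloc(4096);
    if (!c->pwrctx) {
        teardown_rc6(c);    /* undo the partial allocation */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ctx_pages c = { 0 };
    printf("setup_rc6: %s\n", setup_rc6(&c) == 0 ? "ok" : "failed");
    teardown_rc6(&c);
    return 0;
}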
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..51cb4e36997f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 return 0;
 }

+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+struct intel_dp *intel_dp = intel_attached_dp(connector);
+struct edid *edid;
+bool has_audio = false;
+
+edid = drm_get_edid(connector, &intel_dp->adapter);
+if (edid) {
+has_audio = drm_detect_monitor_audio(edid);
+
+connector->display_info.raw_edid = NULL;
+kfree(edid);
+}
+
+return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
 struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
 return ret;

 if (property == intel_dp->force_audio_property) {
-if (val == intel_dp->force_audio)
+int i = val;
+bool has_audio;
+
+if (i == intel_dp->force_audio)
 return 0;

-intel_dp->force_audio = val;
+intel_dp->force_audio = i;

-if (val > 0 && intel_dp->has_audio)
-return 0;
-if (val < 0 && !intel_dp->has_audio)
+if (i == 0)
+has_audio = intel_dp_detect_audio(connector);
+else
+has_audio = i > 0;
+
+if (has_audio == intel_dp->has_audio)
 return 0;

-intel_dp->has_audio = val > 0;
+intel_dp->has_audio = has_audio;
 goto done;
 }

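
The force_audio handling above (and the matching intel_hdmi.c change later in this diff) applies one tri-state rule: a negative value forces audio off, a positive value forces it on, and zero means "ask the sink" by reading its EDID. A standalone sketch of just that decision, with the EDID probe replaced by a boolean parameter:

#include <stdio.h>
#include <stdbool.h>

/* force_audio: <0 force off, 0 auto-detect from EDID, >0 force on. */
static bool resolve_has_audio(int force_audio, bool edid_reports_audio)
{
    if (force_audio == 0)
        return edid_reports_audio;
    return force_audio > 0;
}

int main(void)
{
    int settings[] = { -1, 0, 1 };
    for (unsigned i = 0; i < 3; i++)
        printf("force_audio=%2d, EDID says yes -> has_audio=%d\n",
               settings[i], resolve_has_audio(settings[i], true));
    return 0;
}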
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..2c431049963c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..c635c9e357b9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }

+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+struct drm_i915_private *dev_priv = connector->dev->dev_private;
+struct edid *edid;
+bool has_audio = false;
+
+edid = drm_get_edid(connector,
+&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+if (edid) {
+if (edid->input & DRM_EDID_INPUT_DIGITAL)
+has_audio = drm_detect_monitor_audio(edid);
+
+connector->display_info.raw_edid = NULL;
+kfree(edid);
+}
+
+return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
 struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
 return ret;

 if (property == intel_hdmi->force_audio_property) {
-if (val == intel_hdmi->force_audio)
+int i = val;
+bool has_audio;
+
+if (i == intel_hdmi->force_audio)
 return 0;

-intel_hdmi->force_audio = val;
+intel_hdmi->force_audio = i;

-if (val > 0 && intel_hdmi->has_audio)
-return 0;
-if (val < 0 && !intel_hdmi->has_audio)
+if (i == 0)
+has_audio = intel_hdmi_detect_audio(connector);
+else
+has_audio = i > 0;
+
+if (has_audio == intel_hdmi->has_audio)
 return 0;

-intel_hdmi->has_audio = val > 0;
+intel_hdmi->has_audio = has_audio;
 goto done;
 }

diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..bcdba7bd5cfa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 return true;
 }

-/* Make sure pre-965s set dither correctly */
-if (INTEL_INFO(dev)->gen < 4) {
-if (dev_priv->lvds_dither)
-pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-}
-
 /* Native modes don't need fitting */
 if (adjusted_mode->hdisplay == mode->hdisplay &&
 adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 }

 out:
+/* If not enabling scaling, be consistent and always use 0. */
 if ((pfit_control & PFIT_ENABLE) == 0) {
 pfit_control = 0;
 pfit_pgm_ratios = 0;
 }
+
+/* Make sure pre-965 set dither correctly */
+if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 if (pfit_control != intel_lvds->pfit_control ||
 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 intel_lvds->pfit_control = pfit_control;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf9..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
 */

 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>

 #include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
 return -ENOTSUPP;
 }

-base = ioremap(asls, OPREGION_SIZE);
+base = acpi_os_ioremap(asls, OPREGION_SIZE);
 if (!base)
 return -ENOMEM;

diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@

 #include "intel_drv.h"

-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
112 dev_priv->pch_pf_size = (width << 16) | height; 110 dev_priv->pch_pf_size = (width << 16) | height;
113} 111}
114 112
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 113static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
129{ 114{
130 u32 val; 115 u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
181 if (INTEL_INFO(dev)->gen < 4) 166 if (INTEL_INFO(dev)->gen < 4)
182 max &= ~1; 167 max &= ~1;
183 } 168 }
184
185 if (is_backlight_combination_mode(dev))
186 max *= 0xff;
187 } 169 }
188 170
189 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 171 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
202 if (IS_PINEVIEW(dev)) 184 if (IS_PINEVIEW(dev))
203 val >>= 1; 185 val >>= 1;
204
205 if (is_backlight_combination_mode(dev)){
206 u8 lbpc;
207
208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc;
211 val >>= 1;
212 }
213 } 186 }
214 187
215 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 188 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
232 205
233 if (HAS_PCH_SPLIT(dev)) 206 if (HAS_PCH_SPLIT(dev))
234 return intel_pch_panel_set_backlight(dev, level); 207 return intel_pch_panel_set_backlight(dev, level);
235
236 if (is_backlight_combination_mode(dev)){
237 u32 max = intel_panel_get_max_backlight(dev);
238 u8 lpbc;
239
240 lpbc = level * 0xfe / max + 1;
241 level /= lpbc;
242 pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
243 }
244
245 tmp = I915_READ(BLC_PWM_CTL); 208 tmp = I915_READ(BLC_PWM_CTL);
246 if (IS_PINEVIEW(dev)) { 209 if (IS_PINEVIEW(dev)) {
247 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 210 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
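The intel_panel.c hunks above remove the legacy "combination mode" backlight handling, in which the requested brightness was factored into an LBPC byte written to PCI config offset 0xf4 and a reduced PWM duty cycle. A standalone example of the arithmetic being deleted; the numbers are made up for the example run, not hardware values:

#include <stdio.h>

/*
 * Arithmetic of the removed combination mode: the requested level used
 * to be split into an LBPC byte and a reduced PWM duty cycle.
 */
int main(void)
{
        unsigned int max = 0x1000;      /* example max PWM duty cycle */
        unsigned int level = 0x0800;    /* requested brightness */
        unsigned int lbpc = level * 0xfe / max + 1;

        level /= lbpc;
        printf("lbpc=0x%02x pwm=0x%03x (lbpc * pwm ~ requested level)\n",
               lbpc, level);
        return 0;
}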
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b9baa6a63d..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37static inline int ring_space(struct intel_ring_buffer *ring)
38{
39 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40 if (space < 0)
41 space += ring->size;
42 return space;
43}
44
37static u32 i915_gem_get_seqno(struct drm_device *dev) 45static u32 i915_gem_get_seqno(struct drm_device *dev)
38{ 46{
39 drm_i915_private_t *dev_priv = dev->dev_private; 47 drm_i915_private_t *dev_priv = dev->dev_private;
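ring_space(), added above, centralises the free-space computation for the circular ring: consumer head minus producer tail, minus an 8-byte guard so the tail can never catch the head, wrapped by the ring size when the result goes negative. A standalone version with two worked cases (the HEAD_ADDR masking is omitted here):

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + 8);  /* 8-byte guard before the head */

        if (space < 0)
                space += size;          /* wrap around the ring */
        return space;
}

int main(void)
{
        printf("%d\n", ring_space(0x100, 0x0f0, 0x1000));  /* head ahead: 8 */
        printf("%d\n", ring_space(0x010, 0xff0, 0x1000));  /* wrapped: 24  */
        return 0;
}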
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
204 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 212 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
205 i915_kernel_lost_context(ring->dev); 213 i915_kernel_lost_context(ring->dev);
206 else { 214 else {
207 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 215 ring->head = I915_READ_HEAD(ring);
208 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 216 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
209 ring->space = ring->head - (ring->tail + 8); 217 ring->space = ring_space(ring);
210 if (ring->space < 0)
211 ring->space += ring->size;
212 } 218 }
213 219
214 return 0; 220 return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
921 } 927 }
922 928
923 ring->tail = 0; 929 ring->tail = 0;
924 ring->space = ring->head - 8; 930 ring->space = ring_space(ring);
925 931
926 return 0; 932 return 0;
927} 933}
928 934
929int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) 935int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
930{ 936{
931 int reread = 0;
932 struct drm_device *dev = ring->dev; 937 struct drm_device *dev = ring->dev;
933 struct drm_i915_private *dev_priv = dev->dev_private; 938 struct drm_i915_private *dev_priv = dev->dev_private;
934 unsigned long end; 939 unsigned long end;
935 u32 head; 940 u32 head;
936 941
942 /* If the reported head position has wrapped or hasn't advanced,
943 * fallback to the slow and accurate path.
944 */
945 head = intel_read_status_page(ring, 4);
946 if (head > ring->head) {
947 ring->head = head;
948 ring->space = ring_space(ring);
949 if (ring->space >= n)
950 return 0;
951 }
952
937 trace_i915_ring_wait_begin (dev); 953 trace_i915_ring_wait_begin (dev);
938 end = jiffies + 3 * HZ; 954 end = jiffies + 3 * HZ;
939 do { 955 do {
940 /* If the reported head position has wrapped or hasn't advanced, 956 ring->head = I915_READ_HEAD(ring);
941 * fallback to the slow and accurate path. 957 ring->space = ring_space(ring);
942 */
943 head = intel_read_status_page(ring, 4);
944 if (reread)
945 head = I915_READ_HEAD(ring);
946 ring->head = head & HEAD_ADDR;
947 ring->space = ring->head - (ring->tail + 8);
948 if (ring->space < 0)
949 ring->space += ring->size;
950 if (ring->space >= n) { 958 if (ring->space >= n) {
951 trace_i915_ring_wait_end(dev); 959 trace_i915_ring_wait_end(dev);
952 return 0; 960 return 0;
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
961 msleep(1); 969 msleep(1);
962 if (atomic_read(&dev_priv->mm.wedged)) 970 if (atomic_read(&dev_priv->mm.wedged))
963 return -EAGAIN; 971 return -EAGAIN;
964 reread = 1;
965 } while (!time_after(jiffies, end)); 972 } while (!time_after(jiffies, end));
966 trace_i915_ring_wait_end (dev); 973 trace_i915_ring_wait_end (dev);
967 return -EBUSY; 974 return -EBUSY;
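intel_wait_ring_buffer() now tries a cheap fast path first: if the head value auto-reported into the status page has advanced, the space is recomputed from it, and only otherwise does the loop poll the real head register until the timeout. The shape of that logic as a generic, self-contained sketch; read_cached_head(), read_mmio_head() and timed_out() are hypothetical stand-ins, not driver functions:

#include <stdbool.h>

struct ring { int head, tail, size, space; };

static int space_of(const struct ring *r)
{
        int space = r->head - (r->tail + 8);
        return space < 0 ? space + r->size : space;
}

bool wait_for_space(struct ring *r, int n,
                    int (*read_cached_head)(void),
                    int (*read_mmio_head)(void),
                    bool (*timed_out)(void))
{
        int head = read_cached_head();

        if (head > r->head) {                   /* fast path */
                r->head = head;
                r->space = space_of(r);
                if (r->space >= n)
                        return true;
        }

        while (!timed_out()) {                  /* slow, accurate path */
                r->head = read_mmio_head();
                r->space = space_of(r);
                if (r->space >= n)
                        return true;
        }
        return false;
}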
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1052} 1059}
1053 1060
1054static int gen6_ring_flush(struct intel_ring_buffer *ring, 1061static int gen6_ring_flush(struct intel_ring_buffer *ring,
1055 u32 invalidate_domains, 1062 u32 invalidate, u32 flush)
1056 u32 flush_domains)
1057{ 1063{
1064 uint32_t cmd;
1058 int ret; 1065 int ret;
1059 1066
1060 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1067 if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1061 return 0; 1068 return 0;
1062 1069
1063 ret = intel_ring_begin(ring, 4); 1070 ret = intel_ring_begin(ring, 4);
1064 if (ret) 1071 if (ret)
1065 return ret; 1072 return ret;
1066 1073
1067 intel_ring_emit(ring, MI_FLUSH_DW); 1074 cmd = MI_FLUSH_DW;
1068 intel_ring_emit(ring, 0); 1075 if (invalidate & I915_GEM_GPU_DOMAINS)
1076 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1077 intel_ring_emit(ring, cmd);
1069 intel_ring_emit(ring, 0); 1078 intel_ring_emit(ring, 0);
1070 intel_ring_emit(ring, 0); 1079 intel_ring_emit(ring, 0);
1080 intel_ring_emit(ring, MI_NOOP);
1071 intel_ring_advance(ring); 1081 intel_ring_advance(ring);
1072 return 0; 1082 return 0;
1073} 1083}
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1223} 1233}
1224 1234
1225static int blt_ring_flush(struct intel_ring_buffer *ring, 1235static int blt_ring_flush(struct intel_ring_buffer *ring,
1226 u32 invalidate_domains, 1236 u32 invalidate, u32 flush)
1227 u32 flush_domains)
1228{ 1237{
1238 uint32_t cmd;
1229 int ret; 1239 int ret;
1230 1240
1231 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1241 if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1232 return 0; 1242 return 0;
1233 1243
1234 ret = blt_ring_begin(ring, 4); 1244 ret = blt_ring_begin(ring, 4);
1235 if (ret) 1245 if (ret)
1236 return ret; 1246 return ret;
1237 1247
1238 intel_ring_emit(ring, MI_FLUSH_DW); 1248 cmd = MI_FLUSH_DW;
1239 intel_ring_emit(ring, 0); 1249 if (invalidate & I915_GEM_DOMAIN_RENDER)
1250 cmd |= MI_INVALIDATE_TLB;
1251 intel_ring_emit(ring, cmd);
1240 intel_ring_emit(ring, 0); 1252 intel_ring_emit(ring, 0);
1241 intel_ring_emit(ring, 0); 1253 intel_ring_emit(ring, 0);
1254 intel_ring_emit(ring, MI_NOOP);
1242 intel_ring_advance(ring); 1255 intel_ring_advance(ring);
1243 return 0; 1256 return 0;
1244} 1257}
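Both flush hooks above now build the MI_FLUSH_DW command word conditionally, OR-ing in TLB (and, on the BSD ring, BSD) invalidation only when an invalidate domain was actually requested, and pad the four-dword packet with MI_NOOP. A sketch of that assembly; the bit positions below are placeholders for illustration, not the real MI_* encodings from i915_reg.h:

#include <inttypes.h>
#include <stdio.h>

#define MI_FLUSH_DW             (0x26u << 23)   /* placeholder encoding */
#define MI_INVALIDATE_TLB       (1u << 18)      /* placeholder bit */
#define MI_INVALIDATE_BSD       (1u << 7)       /* placeholder bit */
#define MI_NOOP                 0u

static uint32_t build_flush_cmd(uint32_t invalidate, uint32_t domain_mask,
                                int is_bsd_ring)
{
        uint32_t cmd = MI_FLUSH_DW;

        if (invalidate & domain_mask) {
                cmd |= MI_INVALIDATE_TLB;
                if (is_bsd_ring)
                        cmd |= MI_INVALIDATE_BSD;
        }
        return cmd;
}

int main(void)
{
        /* the emitted packet is always four dwords: cmd, 0, 0, MI_NOOP */
        uint32_t dw[4] = { build_flush_cmd(0x2, 0x3, 1), 0, 0, MI_NOOP };

        printf("0x%08" PRIx32 " 0x%08" PRIx32 " 0x%08" PRIx32 " 0x%08" PRIx32 "\n",
               dw[0], dw[1], dw[2], dw[3]);
        return 0;
}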
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1292 return intel_init_ring_buffer(dev, ring); 1305 return intel_init_ring_buffer(dev, ring);
1293} 1306}
1294 1307
1308int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1309{
1310 drm_i915_private_t *dev_priv = dev->dev_private;
1311 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1312
1313 *ring = render_ring;
1314 if (INTEL_INFO(dev)->gen >= 6) {
1315 ring->add_request = gen6_add_request;
1316 ring->irq_get = gen6_render_ring_get_irq;
1317 ring->irq_put = gen6_render_ring_put_irq;
1318 } else if (IS_GEN5(dev)) {
1319 ring->add_request = pc_render_add_request;
1320 ring->get_seqno = pc_render_get_seqno;
1321 }
1322
1323 ring->dev = dev;
1324 INIT_LIST_HEAD(&ring->active_list);
1325 INIT_LIST_HEAD(&ring->request_list);
1326 INIT_LIST_HEAD(&ring->gpu_write_list);
1327
1328 ring->size = size;
1329 ring->effective_size = ring->size;
1330 if (IS_I830(ring->dev))
1331 ring->effective_size -= 128;
1332
1333 ring->map.offset = start;
1334 ring->map.size = size;
1335 ring->map.type = 0;
1336 ring->map.flags = 0;
1337 ring->map.mtrr = 0;
1338
1339 drm_core_ioremap_wc(&ring->map, dev);
1340 if (ring->map.handle == NULL) {
1341 DRM_ERROR("can not ioremap virtual address for"
1342 " ring buffer\n");
1343 return -ENOMEM;
1344 }
1345
1346 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1347 return 0;
1348}
1349
1295int intel_init_bsd_ring_buffer(struct drm_device *dev) 1350int intel_init_bsd_ring_buffer(struct drm_device *dev)
1296{ 1351{
1297 drm_i915_private_t *dev_priv = dev->dev_private; 1352 drm_i915_private_t *dev_priv = dev->dev_private;
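intel_render_ring_init_dri() reuses the render ring descriptor for the legacy DRI path but maps a caller-supplied range and, on i830, keeps the last 128 bytes of the ring unusable, presumably so the tail can never completely fill the ring on that hardware. A tiny example of the resulting size bookkeeping:

#include <stdio.h>

int main(void)
{
        unsigned int size = 128 * 1024;         /* example ring size */
        unsigned int effective_size = size;
        int is_i830 = 1;                        /* pretend we are on i830 */

        if (is_i830)
                effective_size -= 128;          /* last 128 bytes unusable */

        printf("size=%u usable=%u\n", size, effective_size);
        return 0;
}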
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5b0abfa881fc..34306865a5df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
14 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
15}; 15};
16 16
17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
18#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
18 19
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
21 22
22#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
24 25
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
27 28
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
30 31
31#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
33#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
33 34
34#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
35#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 36#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -166,4 +167,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
166u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 167u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
167void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 168void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
168 169
170/* DRI warts */
171int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
172
169#endif /* _INTEL_RINGBUFFER_H_ */ 173#endif /* _INTEL_RINGBUFFER_H_ */
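The ring register accessors above now funnel through i915_gt_read()/i915_gt_write() rather than plain MMIO, which presumably keeps the GT power well awake for the duration of each access on chips that need it. A hedged sketch of the read-side wrapper pattern only; the struct and helpers are hypothetical and are not the driver's implementation:

#include <stdint.h>

struct gt { int wake_refs; volatile uint32_t *mmio; };

static void gt_wake_get(struct gt *gt) { gt->wake_refs++; }
static void gt_wake_put(struct gt *gt) { gt->wake_refs--; }

uint32_t gt_read(struct gt *gt, unsigned int reg)
{
        uint32_t val;

        gt_wake_get(gt);                /* make sure the GT answers MMIO */
        val = gt->mmio[reg / 4];
        gt_wake_put(gt);
        return val;
}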
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a37..7c50cdce84f0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
46 SDVO_TV_MASK) 46 SDVO_TV_MASK)
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
51 52
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
473 return false; 474 return false;
474 } 475 }
475 476
476 i = 3;
477 while (status == SDVO_CMD_STATUS_PENDING && i--) {
478 if (!intel_sdvo_read_byte(intel_sdvo,
479 SDVO_I2C_CMD_STATUS,
480 &status))
481 return false;
482 }
483 if (status != SDVO_CMD_STATUS_SUCCESS) {
484 DRM_DEBUG_KMS("command returns response %s [%d]\n",
485 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
486 status);
487 return false;
488 }
489
490 return true; 477 return true;
491} 478}
492 479
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
497 u8 status; 484 u8 status;
498 int i; 485 int i;
499 486
487 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
488
500 /* 489 /*
501 * The documentation states that all commands will be 490 * The documentation states that all commands will be
502 * processed within 15µs, and that we need only poll 491 * processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
505 * 494 *
506 * Check 5 times in case the hardware failed to read the docs. 495 * Check 5 times in case the hardware failed to read the docs.
507 */ 496 */
508 do { 497 if (!intel_sdvo_read_byte(intel_sdvo,
498 SDVO_I2C_CMD_STATUS,
499 &status))
500 goto log_fail;
501
502 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
503 udelay(15);
509 if (!intel_sdvo_read_byte(intel_sdvo, 504 if (!intel_sdvo_read_byte(intel_sdvo,
510 SDVO_I2C_CMD_STATUS, 505 SDVO_I2C_CMD_STATUS,
511 &status)) 506 &status))
512 return false; 507 goto log_fail;
513 } while (status == SDVO_CMD_STATUS_PENDING && --retry); 508 }
514 509
515 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
516 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 510 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
517 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 511 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
518 else 512 else
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
533 return true; 527 return true;
534 528
535log_fail: 529log_fail:
536 DRM_LOG_KMS("\n"); 530 DRM_LOG_KMS("... failed\n");
537 return false; 531 return false;
538} 532}
539 533
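With the hunks above, intel_sdvo_write_cmd() no longer polls for completion; intel_sdvo_read_response() does, re-reading the status byte with a 15µs delay for a bounded number of retries while the device still reports "pending". A simplified standalone sketch of that polling shape; read_status() and delay_us() are hypothetical stand-ins, and the error handling is reduced to a boolean:

#include <stdbool.h>

enum { STATUS_PENDING = 1, STATUS_DONE = 0 };

bool wait_for_response(int (*read_status)(void),
                       void (*delay_us)(unsigned int),
                       int retries)
{
        int status = read_status();

        while (status == STATUS_PENDING && retries--) {
                delay_us(15);           /* spec: commands finish in ~15us */
                status = read_status();
        }
        return status == STATUS_DONE;
}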
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
550static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, 544static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
551 u8 ddc_bus) 545 u8 ddc_bus)
552{ 546{
547 /* This must be the immediately preceding write before the i2c xfer */
553 return intel_sdvo_write_cmd(intel_sdvo, 548 return intel_sdvo_write_cmd(intel_sdvo,
554 SDVO_CMD_SET_CONTROL_BUS_SWITCH, 549 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &ddc_bus, 1); 550 &ddc_bus, 1);
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
557 552
558static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 553static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
559{ 554{
560 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); 555 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
556 return false;
557
558 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
561} 559}
562 560
563static bool 561static bool
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
859 857
860 intel_dip_infoframe_csum(&avi_if); 858 intel_dip_infoframe_csum(&avi_if);
861 859
862 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, 860 if (!intel_sdvo_set_value(intel_sdvo,
861 SDVO_CMD_SET_HBUF_INDEX,
863 set_buf_index, 2)) 862 set_buf_index, 2))
864 return false; 863 return false;
865 864
866 for (i = 0; i < sizeof(avi_if); i += 8) { 865 for (i = 0; i < sizeof(avi_if); i += 8) {
867 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, 866 if (!intel_sdvo_set_value(intel_sdvo,
867 SDVO_CMD_SET_HBUF_DATA,
868 data, 8)) 868 data, 8))
869 return false; 869 return false;
870 data++; 870 data++;
871 } 871 }
872 872
873 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, 873 return intel_sdvo_set_value(intel_sdvo,
874 SDVO_CMD_SET_HBUF_TXRATE,
874 &tx_rate, 1); 875 &tx_rate, 1);
875} 876}
876 877
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1359 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1360 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1360 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1361 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1361 } 1362 }
1362 } 1363 } else
1364 status = connector_status_disconnected;
1363 connector->display_info.raw_edid = NULL; 1365 connector->display_info.raw_edid = NULL;
1364 kfree(edid); 1366 kfree(edid);
1365 } 1367 }
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1407 1409
1408 if ((intel_sdvo_connector->output_flag & response) == 0) 1410 if ((intel_sdvo_connector->output_flag & response) == 0)
1409 ret = connector_status_disconnected; 1411 ret = connector_status_disconnected;
1410 else if (response & SDVO_TMDS_MASK) 1412 else if (IS_TMDS(intel_sdvo_connector))
1411 ret = intel_sdvo_hdmi_sink_detect(connector); 1413 ret = intel_sdvo_hdmi_sink_detect(connector);
1412 else 1414 else {
1413 ret = connector_status_connected; 1415 struct edid *edid;
1416
1417 /* if we have an edid check it matches the connection */
1418 edid = intel_sdvo_get_edid(connector);
1419 if (edid == NULL)
1420 edid = intel_sdvo_get_analog_edid(connector);
1421 if (edid != NULL) {
1422 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1423 ret = connector_status_disconnected;
1424 else
1425 ret = connector_status_connected;
1426 connector->display_info.raw_edid = NULL;
1427 kfree(edid);
1428 } else
1429 ret = connector_status_connected;
1430 }
1414 1431
1415 /* May update encoder flag for like clock for SDVO TV, etc.*/ 1432 /* May update encoder flag for like clock for SDVO TV, etc.*/
1416 if (ret == connector_status_connected) { 1433 if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1446 edid = intel_sdvo_get_analog_edid(connector); 1463 edid = intel_sdvo_get_analog_edid(connector);
1447 1464
1448 if (edid != NULL) { 1465 if (edid != NULL) {
1449 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1466 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1467 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1468 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1469
1470 if (connector_is_digital == monitor_is_digital) {
1450 drm_mode_connector_update_edid_property(connector, edid); 1471 drm_mode_connector_update_edid_property(connector, edid);
1451 drm_add_edid_modes(connector, edid); 1472 drm_add_edid_modes(connector, edid);
1452 } 1473 }
1474
1453 connector->display_info.raw_edid = NULL; 1475 connector->display_info.raw_edid = NULL;
1454 kfree(edid); 1476 kfree(edid);
1455 } 1477 }
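Both the detect() and get_ddc_modes() hunks above apply the same rule: an EDID is only trusted when the sink type it reports (digital vs. analog) matches the connector being probed, so a digital EDID fetched over the analog fallback path cannot mark an analog connector as connected. Reduced to its decision:

#include <stdbool.h>

/*
 * Only treat an EDID as proof of connection (or as a source of modes)
 * when the sink type it reports matches the connector being probed.
 */
bool edid_matches_connector(bool connector_is_digital,
                            bool monitor_is_digital)
{
        return connector_is_digital == monitor_is_digital;
}

In the detect() hunk a mismatch maps to connector_status_disconnected; in get_ddc_modes() it simply skips adding the EDID modes.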
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1668 kfree(connector); 1690 kfree(connector);
1669} 1691}
1670 1692
1693static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1694{
1695 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1696 struct edid *edid;
1697 bool has_audio = false;
1698
1699 if (!intel_sdvo->is_hdmi)
1700 return false;
1701
1702 edid = intel_sdvo_get_edid(connector);
1703 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1704 has_audio = drm_detect_monitor_audio(edid);
1705
1706 return has_audio;
1707}
1708
1671static int 1709static int
1672intel_sdvo_set_property(struct drm_connector *connector, 1710intel_sdvo_set_property(struct drm_connector *connector,
1673 struct drm_property *property, 1711 struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
1684 return ret; 1722 return ret;
1685 1723
1686 if (property == intel_sdvo_connector->force_audio_property) { 1724 if (property == intel_sdvo_connector->force_audio_property) {
1687 if (val == intel_sdvo_connector->force_audio) 1725 int i = val;
1726 bool has_audio;
1727
1728 if (i == intel_sdvo_connector->force_audio)
1688 return 0; 1729 return 0;
1689 1730
1690 intel_sdvo_connector->force_audio = val; 1731 intel_sdvo_connector->force_audio = i;
1691 1732
1692 if (val > 0 && intel_sdvo->has_hdmi_audio) 1733 if (i == 0)
1693 return 0; 1734 has_audio = intel_sdvo_detect_hdmi_audio(connector);
1694 if (val < 0 && !intel_sdvo->has_hdmi_audio) 1735 else
1736 has_audio = i > 0;
1737
1738 if (has_audio == intel_sdvo->has_hdmi_audio)
1695 return 0; 1739 return 0;
1696 1740
1697 intel_sdvo->has_hdmi_audio = val > 0; 1741 intel_sdvo->has_hdmi_audio = has_audio;
1698 goto done; 1742 goto done;
1699 } 1743 }
1700 1744
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..fe4a53a50b83 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
1234 * \return false if TV is disconnected. 1234 * \return false if TV is disconnected.
1235 */ 1235 */
1236static int 1236static int
1237intel_tv_detect_type (struct intel_tv *intel_tv) 1237intel_tv_detect_type (struct intel_tv *intel_tv,
1238 struct drm_connector *connector)
1238{ 1239{
1239 struct drm_encoder *encoder = &intel_tv->base.base; 1240 struct drm_encoder *encoder = &intel_tv->base.base;
1240 struct drm_device *dev = encoder->dev; 1241 struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1246 int type;
1246 1247
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1248 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1249 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1249 i915_disable_pipestat(dev_priv, 0, 1250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1251 i915_disable_pipestat(dev_priv, 0,
1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1252 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1253 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1254 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1255 }
1253 1256
1254 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1257 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1258 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1302 I915_WRITE(TV_CTL, save_tv_ctl); 1305 I915_WRITE(TV_CTL, save_tv_ctl);
1303 1306
1304 /* Restore interrupt config */ 1307 /* Restore interrupt config */
1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1308 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1306 i915_enable_pipestat(dev_priv, 0, 1309 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1310 i915_enable_pipestat(dev_priv, 0,
1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1311 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1312 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1314 }
1310 1315
1311 return type; 1316 return type;
1312} 1317}
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1356 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1361 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1357 1362
1358 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1363 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
1359 type = intel_tv_detect_type(intel_tv); 1364 type = intel_tv_detect_type(intel_tv, connector);
1360 } else if (force) { 1365 } else if (force) {
1361 struct drm_crtc *crtc; 1366 struct drm_crtc *crtc;
1362 int dpms_mode; 1367 int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1364 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, 1369 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
1365 &mode, &dpms_mode); 1370 &mode, &dpms_mode);
1366 if (crtc) { 1371 if (crtc) {
1367 type = intel_tv_detect_type(intel_tv); 1372 type = intel_tv_detect_type(intel_tv, connector);
1368 intel_release_load_detect_pipe(&intel_tv->base, connector, 1373 intel_release_load_detect_pipe(&intel_tv->base, connector,
1369 dpms_mode); 1374 dpms_mode);
1370 } else 1375 } else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
1658 intel_encoder = &intel_tv->base; 1663 intel_encoder = &intel_tv->base;
1659 connector = &intel_connector->base; 1664 connector = &intel_connector->base;
1660 1665
1666 /* The documentation, for the older chipsets at least, recommend
1667 * using a polling method rather than hotplug detection for TVs.
1668 * This is because in order to perform the hotplug detection, the PLLs
1669 * for the TV must be kept alive increasing power drain and starving
1670 * bandwidth from other encoders. Notably for instance, it causes
1671 * pipe underruns on Crestline when this encoder is supposedly idle.
1672 *
1673 * More recent chipsets favour HDMI rather than integrated S-Video.
1674 */
1675 connector->polled =
1676 DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
1677
1661 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1678 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1662 DRM_MODE_CONNECTOR_SVIDEO); 1679 DRM_MODE_CONNECTOR_SVIDEO);
1663 1680
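Finally, the TV connector is switched to periodic connect/disconnect polling (for the reasons given in the comment in the last hunk), and intel_tv_detect_type() only masks the hotplug pipestat bits when the connector actually uses hotplug detection, so the polled case no longer disables interrupts it never relied on. A small illustration of that guard; the flag values are illustrative, the real DRM_CONNECTOR_POLL_* macros live in drm_crtc.h:

#include <stdbool.h>
#include <stdio.h>

#define POLL_HPD                (1u << 0)       /* illustrative values */
#define POLL_CONNECT            (1u << 1)
#define POLL_DISCONNECT         (1u << 2)

static bool needs_hotplug_masking(unsigned int polled)
{
        /* only touch hotplug interrupts if hotplug is actually in use */
        return polled & POLL_HPD;
}

int main(void)
{
        unsigned int tv_polled = POLL_CONNECT | POLL_DISCONNECT;

        printf("mask hotplug irqs around load detect: %s\n",
               needs_hotplug_masking(tv_polled) ? "yes" : "no");
        return 0;
}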