Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c      |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c      | 152
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      | 145
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h      |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c    |  35
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h    |  40
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c     |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 356
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c      |  75
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c    |  50
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c    |  33
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c    |  91
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c      |   2
17 files changed, 716 insertions, 401 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
         list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
                 obj = obj_priv->obj;
                 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-                        ret = i915_gem_object_get_pages(obj);
+                        ret = i915_gem_object_get_pages(obj, 0);
                         if (ret) {
                                 DRM_ERROR("Failed to get pages: %d\n", ret);
                                 spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbe47812e4b6..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
+        if (IS_I965G(dev))
+                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+                                             0xf0;
+
         I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
         DRM_DEBUG_DRIVER("Enabled hardware status page\n");
         return 0;
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
         if (cmdbuf->num_cliprects) {
                 cliprects = kcalloc(cmdbuf->num_cliprects,
                                     sizeof(struct drm_clip_rect), GFP_KERNEL);
-                if (cliprects == NULL)
+                if (cliprects == NULL) {
+                        ret = -ENOMEM;
                         goto fail_batch_free;
+                }
 
                 ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                      cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index be631cc3e4dc..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -45,6 +45,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0400);
 
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
 static struct drm_driver driver;
 
 #define INTEL_VGA_DEVICE(id, info) { \
@@ -117,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
         .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-        .has_pipe_cxsr = 1,
+        .need_gfx_hws = 1,
         .has_hotplug = 1,
 };
 
@@ -171,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
 
-        if (!dev || !dev_priv) {
-                DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-                DRM_ERROR("DRM not initialized, aborting suspend.\n");
-                return -ENODEV;
-        }
-
-        if (state.event == PM_EVENT_PRETHAW)
-                return 0;
-
         pci_save_state(dev->pdev);
 
         /* If KMS is active, we do the leavevt stuff here */
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-                if (i915_gem_idle(dev))
+                int error = i915_gem_idle(dev);
+                if (error) {
                         dev_err(&dev->pdev->dev,
-                                "GEM idle failed, resume may fail\n");
+                                "GEM idle failed, resume might fail\n");
+                        return error;
+                }
                 drm_irq_uninstall(dev);
         }
 
@@ -198,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
         intel_opregion_free(dev, 1);
 
+        /* Modeset on resume, not lid events */
+        dev_priv->modeset_on_lid = 0;
+
+        return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+        int error;
+
+        if (!dev || !dev->dev_private) {
+                DRM_ERROR("dev: %p\n", dev);
+                DRM_ERROR("DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
+
+        if (state.event == PM_EVENT_PRETHAW)
+                return 0;
+
+        error = i915_drm_freeze(dev);
+        if (error)
+                return error;
+
         if (state.event == PM_EVENT_SUSPEND) {
                 /* Shut down the device */
                 pci_disable_device(dev->pdev);
                 pci_set_power_state(dev->pdev, PCI_D3hot);
         }
 
-        /* Modeset on resume, not lid events */
-        dev_priv->modeset_on_lid = 0;
-
         return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        int ret = 0;
-
-        if (pci_enable_device(dev->pdev))
-                return -1;
-        pci_set_master(dev->pdev);
+        int error = 0;
 
         i915_restore_state(dev);
 
@@ -228,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
         mutex_lock(&dev->struct_mutex);
         dev_priv->mm.suspended = 0;
 
-        ret = i915_gem_init_ringbuffer(dev);
-        if (ret != 0)
-                ret = -1;
+        error = i915_gem_init_ringbuffer(dev);
         mutex_unlock(&dev->struct_mutex);
 
         drm_irq_install(dev);
-        }
-        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
                 /* Resume the modeset for every activated CRTC */
                 drm_helper_resume_force_mode(dev);
         }
 
         dev_priv->modeset_on_lid = 0;
 
-        return ret;
+        return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+        if (pci_enable_device(dev->pdev))
+                return -EIO;
+
+        pci_set_master(dev->pdev);
+
+        return i915_drm_thaw(dev);
 }
 
 /**
@@ -383,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
         drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-        struct drm_device *dev = pci_get_drvdata(pdev);
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+        int error;
 
-        return i915_suspend(dev, state);
-}
+        if (!drm_dev || !drm_dev->dev_private) {
+                dev_err(dev, "DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-        struct drm_device *dev = pci_get_drvdata(pdev);
+        error = i915_drm_freeze(drm_dev);
+        if (error)
+                return error;
 
-        return i915_resume(dev);
-}
+        pci_disable_device(pdev);
+        pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+        return 0;
 }
 
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-        return i915_pci_resume(to_pci_dev(dev));
-}
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-static int
-i915_pm_freeze(struct device *dev)
-{
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+        return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-        /* thaw during hibernate, do nothing! */
-        return 0;
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        if (!drm_dev || !drm_dev->dev_private) {
+                dev_err(dev, "DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
+
+        return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-        return i915_pci_resume(to_pci_dev(dev));
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -442,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
         .freeze = i915_pm_freeze,
         .thaw = i915_pm_thaw,
         .poweroff = i915_pm_poweroff,
-        .restore = i915_pm_restore,
+        .restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
@@ -464,8 +489,11 @@ static struct drm_driver driver = {
         .lastclose = i915_driver_lastclose,
         .preclose = i915_driver_preclose,
         .postclose = i915_driver_postclose,
+
+        /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
         .suspend = i915_suspend,
         .resume = i915_resume,
+
         .device_is_agp = i915_driver_device_is_agp,
         .enable_vblank = i915_enable_vblank,
         .disable_vblank = i915_disable_vblank,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 29dd67626967..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -283,6 +283,7 @@ typedef struct drm_i915_private {
         unsigned int lvds_use_ssc:1;
         unsigned int edp_support:1;
         int lvds_ssc_freq;
+        int edp_bpp;
 
         struct notifier_block lid_notifier;
 
@@ -492,6 +493,15 @@ typedef struct drm_i915_private {
         struct list_head flushing_list;
 
         /**
+         * List of objects currently pending a GPU write flush.
+         *
+         * All elements on this list will belong to either the
+         * active_list or flushing_list, last_rendering_seqno can
+         * be used to differentiate between the two elements.
+         */
+        struct list_head gpu_write_list;
+
+        /**
          * LRU list of objects which are not in the ringbuffer and
          * are ready to unbind, but are still in the GTT.
          *
@@ -591,6 +601,8 @@ struct drm_i915_gem_object {
 
         /** This object's place on the active/flushing/inactive lists */
         struct list_head list;
+        /** This object's place on GPU write list */
+        struct list_head gpu_write_list;
 
         /** This object's place on the fenced object LRU */
         struct list_head fence_list;
@@ -722,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
 
 extern void i915_save_display(struct drm_device *dev);
 extern void i915_restore_display(struct drm_device *dev);
@@ -864,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                       int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj, int id);
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                  struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b3..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret != 0)
                 goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
         return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-        return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-        mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
         int ret;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
         /* If we've insufficient memory to map in the pages, attempt
          * to make some space by throwing out some old buffers.
          */
         if (ret == -ENOMEM) {
                 struct drm_device *dev = obj->dev;
-                gfp_t gfp;
 
                 ret = i915_gem_evict_something(dev, obj->size);
                 if (ret)
                         return ret;
 
-                gfp = i915_gem_object_get_page_gfp_mask(obj);
-                i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-                ret = i915_gem_object_get_pages(obj);
-                i915_gem_object_set_page_gfp_mask (obj, gfp);
+                ret = i915_gem_object_get_pages(obj, 0);
         }
 
         return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret != 0)
                 goto fail_unlock;
 
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
         else
                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
         obj_priv->last_rendering_seqno = 0;
         if (obj_priv->active) {
                 obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 struct drm_i915_gem_object *obj_priv, *next;
 
                 list_for_each_entry_safe(obj_priv, next,
-                                         &dev_priv->mm.flushing_list, list) {
+                                         &dev_priv->mm.gpu_write_list,
+                                         gpu_write_list) {
                         struct drm_gem_object *obj = obj_priv->obj;
 
                         if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                                 uint32_t old_write_domain = obj->write_domain;
 
                                 obj->write_domain = 0;
+                                list_del_init(&obj_priv->gpu_write_list);
                                 i915_gem_object_move_to_active(obj, seqno);
 
                                 trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        uint32_t seqno;
         int ret;
+        uint32_t seqno;
         bool lists_empty;
 
         spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
         if (ret)
                 return ret;
 
+        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
         ret = i915_gem_evict_from_inactive_list(dev);
         if (ret)
                 return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+                          gfp_t gfpmask)
 {
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
         inode = obj->filp->f_path.dentry->d_inode;
         mapping = inode->i_mapping;
         for (i = 0; i < page_count; i++) {
-                page = read_mapping_page(mapping, i, NULL);
+                page = read_cache_page_gfp(mapping, i,
+                                           mapping_gfp_mask (mapping) |
+                                           __GFP_COLD |
+                                           gfpmask);
                 if (IS_ERR(page)) {
                         ret = PTR_ERR(page);
                         i915_gem_object_put_pages(obj);
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         struct drm_mm_node *free_space;
-        bool retry_alloc = false;
+        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
         int ret;
 
         if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         DRM_INFO("Binding object of size %zd at 0x%08x\n",
                  obj->size, obj_priv->gtt_offset);
 #endif
-        if (retry_alloc) {
-                i915_gem_object_set_page_gfp_mask (obj,
-                                                   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-        }
-        ret = i915_gem_object_get_pages(obj);
-        if (retry_alloc) {
-                i915_gem_object_set_page_gfp_mask (obj,
-                                                   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-        }
+        ret = i915_gem_object_get_pages(obj, gfpmask);
         if (ret) {
                 drm_mm_put_block(obj_priv->gtt_space);
                 obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                 ret = i915_gem_evict_something(dev, obj->size);
                 if (ret) {
                         /* now try to shrink everyone else */
-                        if (! retry_alloc) {
-                                retry_alloc = true;
+                        if (gfpmask) {
+                                gfpmask = 0;
                                 goto search_free;
                         }
 
                         return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
         old_write_domain = obj->write_domain;
         i915_gem_flush(dev, 0, obj->write_domain);
         seqno = i915_add_request(dev, NULL, obj->write_domain);
-        obj->write_domain = 0;
+        BUG_ON(obj->write_domain);
         i915_gem_object_move_to_active(obj, seqno);
 
         trace_i915_gem_object_change_domain(obj,
@@ -2837,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        uint32_t old_write_domain, old_read_domains;
+        int ret;
+
+        /* Not valid to be called on unbound objects. */
+        if (obj_priv->gtt_space == NULL)
+                return -EINVAL;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+
+        /* Wait on any GPU rendering and flushing to occur. */
+        if (obj_priv->active) {
+#if WATCH_BUF
+                DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+                ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+                if (ret != 0)
+                        return ret;
+        }
+
+        old_write_domain = obj->write_domain;
+        old_read_domains = obj->read_domains;
+
+        obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+        i915_gem_object_flush_cpu_write_domain(obj);
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+        obj->read_domains |= I915_GEM_DOMAIN_GTT;
+        obj->write_domain = I915_GEM_DOMAIN_GTT;
+        obj_priv->dirty = 1;
+
+        trace_i915_gem_object_change_domain(obj,
+                                            old_read_domains,
+                                            old_write_domain);
+
+        return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -3533,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
         uint32_t reloc_count = 0, i;
         int ret = 0;
 
+        if (relocs == NULL)
+                return 0;
+
         for (i = 0; i < buffer_count; i++) {
                 struct drm_i915_gem_relocation_entry __user *user_relocs;
                 int unwritten;
@@ -3622,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         struct drm_gem_object *batch_obj;
         struct drm_i915_gem_object *obj_priv;
         struct drm_clip_rect *cliprects = NULL;
-        struct drm_i915_gem_relocation_entry *relocs;
+        struct drm_i915_gem_relocation_entry *relocs = NULL;
         int ret = 0, ret2, i, pinned = 0;
         uint64_t exec_offset;
         uint32_t seqno, flush_domains, reloc_index;
@@ -3648,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (args->num_cliprects != 0) {
                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
                                     GFP_KERNEL);
-                if (cliprects == NULL)
+                if (cliprects == NULL) {
+                        ret = -ENOMEM;
                         goto pre_mutex_err;
+                }
 
                 ret = copy_from_user(cliprects,
                                      (struct drm_clip_rect __user *)
@@ -3691,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 if (object_list[i] == NULL) {
                         DRM_ERROR("Invalid object handle %d at index %d\n",
                                    exec_list[i].handle, i);
+                        /* prevent error path from reading uninitialized data */
+                        args->buffer_count = i + 1;
                         ret = -EBADF;
                         goto err;
                 }
@@ -3699,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 if (obj_priv->in_execbuffer) {
                         DRM_ERROR("Object %p appears more than once in object list\n",
                                    object_list[i]);
+                        /* prevent error path from reading uninitialized data */
+                        args->buffer_count = i + 1;
                         ret = -EBADF;
                         goto err;
                 }
@@ -3812,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 i915_gem_flush(dev,
                                dev->invalidate_domains,
                                dev->flush_domains);
-                if (dev->flush_domains)
+                if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
                         (void)i915_add_request(dev, file_priv,
                                                dev->flush_domains);
         }
 
         for (i = 0; i < args->buffer_count; i++) {
                 struct drm_gem_object *obj = object_list[i];
+                struct drm_i915_gem_object *obj_priv = obj->driver_private;
                 uint32_t old_write_domain = obj->write_domain;
 
                 obj->write_domain = obj->pending_write_domain;
+                if (obj->write_domain)
+                        list_move_tail(&obj_priv->gpu_write_list,
+                                       &dev_priv->mm.gpu_write_list);
+                else
+                        list_del_init(&obj_priv->gpu_write_list);
+
                 trace_i915_gem_object_change_domain(obj,
                                                     obj->read_domains,
                                                     old_write_domain);
@@ -3895,6 +3948,7 @@ err:
 
         mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
         /* Copy the updated relocations out regardless of current error
          * state. Failure to update the relocs would mean that the next
          * time userland calls execbuf, it would do so with presumed offset
@@ -3909,7 +3963,6 @@ err:
                 ret = ret2;
         }
 
-pre_mutex_err:
         drm_free_large(object_list);
         kfree(cliprects);
 
@@ -4000,8 +4053,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
4000 "back to user (%d)\n", 4053 "back to user (%d)\n",
4001 args->buffer_count, ret); 4054 args->buffer_count, ret);
4002 } 4055 }
4003 } else {
4004 DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
4005 } 4056 }
4006 4057
4007 drm_free_large(exec_list); 4058 drm_free_large(exec_list);
@@ -4334,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
         obj_priv->obj = obj;
         obj_priv->fence_reg = I915_FENCE_REG_NONE;
         INIT_LIST_HEAD(&obj_priv->list);
+        INIT_LIST_HEAD(&obj_priv->gpu_write_list);
         INIT_LIST_HEAD(&obj_priv->fence_list);
         obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4785,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
         spin_lock_init(&dev_priv->mm.active_list_lock);
         INIT_LIST_HEAD(&dev_priv->mm.active_list);
         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
         INIT_LIST_HEAD(&dev_priv->mm.request_list);
         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4897,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
         if (!obj_priv->phys_obj)
                 return;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret)
                 goto out;
 
@@ -4955,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
         obj_priv->phys_obj->cur_obj = obj;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret) {
                 DRM_ERROR("failed to get page list\n");
                 goto out;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7cd8110051b6..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         int ret = IRQ_NONE;
         u32 de_iir, gt_iir, de_ier, pch_iir;
-        u32 new_de_iir, new_gt_iir, new_pch_iir;
         struct drm_i915_master_private *master_priv;
 
         /* disable master interrupt before clearing iir */
@@ -286,51 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         gt_iir = I915_READ(GTIIR);
         pch_iir = I915_READ(SDEIIR);
 
-        for (;;) {
-                if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
-                        break;
-
-                ret = IRQ_HANDLED;
-
-                /* should clear PCH hotplug event before clear CPU irq */
-                I915_WRITE(SDEIIR, pch_iir);
-                new_pch_iir = I915_READ(SDEIIR);
-
-                I915_WRITE(DEIIR, de_iir);
-                new_de_iir = I915_READ(DEIIR);
-                I915_WRITE(GTIIR, gt_iir);
-                new_gt_iir = I915_READ(GTIIR);
-
-                if (dev->primary->master) {
-                        master_priv = dev->primary->master->driver_priv;
-                        if (master_priv->sarea_priv)
-                                master_priv->sarea_priv->last_dispatch =
-                                        READ_BREADCRUMB(dev_priv);
-                }
-
-                if (gt_iir & GT_USER_INTERRUPT) {
-                        u32 seqno = i915_get_gem_seqno(dev);
-                        dev_priv->mm.irq_gem_seqno = seqno;
-                        trace_i915_gem_request_complete(dev, seqno);
-                        DRM_WAKEUP(&dev_priv->irq_queue);
-                        dev_priv->hangcheck_count = 0;
-                        mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-                }
-
-                if (de_iir & DE_GSE)
-                        ironlake_opregion_gse_intr(dev);
-
-                /* check event from PCH */
-                if ((de_iir & DE_PCH_EVENT) &&
-                    (pch_iir & SDE_HOTPLUG_MASK)) {
-                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-                }
-
-                de_iir = new_de_iir;
-                gt_iir = new_gt_iir;
-                pch_iir = new_pch_iir;
+        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+                goto done;
+
+        ret = IRQ_HANDLED;
+
+        if (dev->primary->master) {
+                master_priv = dev->primary->master->driver_priv;
+                if (master_priv->sarea_priv)
+                        master_priv->sarea_priv->last_dispatch =
+                                READ_BREADCRUMB(dev_priv);
+        }
+
+        if (gt_iir & GT_USER_INTERRUPT) {
+                u32 seqno = i915_get_gem_seqno(dev);
+                dev_priv->mm.irq_gem_seqno = seqno;
+                trace_i915_gem_request_complete(dev, seqno);
+                DRM_WAKEUP(&dev_priv->irq_queue);
+                dev_priv->hangcheck_count = 0;
+                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+        }
+
+        if (de_iir & DE_GSE)
+                ironlake_opregion_gse_intr(dev);
+
+        if (de_iir & DE_PLANEA_FLIP_DONE) {
+                intel_prepare_page_flip(dev, 0);
+                intel_finish_page_flip(dev, 0);
+        }
+
+        if (de_iir & DE_PLANEB_FLIP_DONE) {
+                intel_prepare_page_flip(dev, 1);
+                intel_finish_page_flip(dev, 1);
+        }
+
+        if (de_iir & DE_PIPEA_VBLANK)
+                drm_handle_vblank(dev, 0);
+
+        if (de_iir & DE_PIPEB_VBLANK)
+                drm_handle_vblank(dev, 1);
+
+        /* check event from PCH */
+        if ((de_iir & DE_PCH_EVENT) &&
+            (pch_iir & SDE_HOTPLUG_MASK)) {
+                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
         }
 
+        /* should clear PCH hotplug event before clear CPU irq */
+        I915_WRITE(SDEIIR, pch_iir);
+        I915_WRITE(GTIIR, gt_iir);
+        I915_WRITE(DEIIR, de_iir);
+
+done:
         I915_WRITE(DEIER, de_ier);
         (void)I915_READ(DEIER);
 
@@ -854,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
         if (!(pipeconf & PIPEACONF_ENABLE))
                 return -EINVAL;
 
-        if (IS_IRONLAKE(dev))
-                return 0;
-
         spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-        if (IS_I965G(dev))
+        if (IS_IRONLAKE(dev))
+                ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+        else if (IS_I965G(dev))
                 i915_enable_pipestat(dev_priv, pipe,
                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
         else
@@ -876,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         unsigned long irqflags;
 
-        if (IS_IRONLAKE(dev))
-                return;
-
         spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-        i915_disable_pipestat(dev_priv, pipe,
-                              PIPE_VBLANK_INTERRUPT_ENABLE |
-                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
+        if (IS_IRONLAKE(dev))
+                ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+                                             DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+        else
+                i915_disable_pipestat(dev_priv, pipe,
+                                      PIPE_VBLANK_INTERRUPT_ENABLE |
+                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
         spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1025,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         /* enable kind of interrupts always enabled */
-        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
         u32 render_mask = GT_USER_INTERRUPT;
         u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                            SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
         dev_priv->irq_mask_reg = ~display_mask;
-        dev_priv->de_irq_enable_reg = display_mask;
+        dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
         /* should always can generate irq */
         I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 149d360d64a3..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC      (1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE           (1<<13)
 #define   FBC_CTL_STRIDE_SHIFT  (5)
 #define   FBC_CTL_FENCENO       (1<<0)
 #define FBC_COMMAND             0x0320c
@@ -1815,7 +1816,7 @@
 #define   DSPFW_PLANEB_SHIFT    8
 #define DSPFW2                  0x70038
 #define   DSPFW_CURSORA_MASK    0x00003f00
-#define   DSPFW_CURSORA_SHIFT   16
+#define   DSPFW_CURSORA_SHIFT   8
 #define DSPFW3                  0x7003c
 #define   DSPFW_HPLL_SR_EN      (1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT 24
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f27567747580..15fbc1b5a83e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
 #define SLAVE_ADDR1     0x70
 #define SLAVE_ADDR2     0x72
 
+static int panel_type;
+
 static void *
 find_section(struct bdb_header *bdb, int section_id)
 {
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
         dev_priv->lvds_dither = lvds_options->pixel_dither;
         if (lvds_options->panel_type == 0xff)
                 return;
+        panel_type = lvds_options->panel_type;
 
         lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
         if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
                 memset(temp_mode, 0, sizeof(*temp_mode));
         }
         kfree(temp_mode);
-        if (temp_downclock < panel_fixed_mode->clock) {
+        if (temp_downclock < panel_fixed_mode->clock &&
+            i915_lvds_downclock) {
                 dev_priv->lvds_downclock_avail = 1;
                 dev_priv->lvds_downclock = temp_downclock;
                 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 }
 
 static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+        struct bdb_edp *edp;
+
+        edp = find_section(bdb, BDB_EDP);
+        if (!edp) {
+                if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+                        DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
+                                       assume 18bpp panel color depth.\n");
+                        dev_priv->edp_bpp = 18;
+                }
+                return;
+        }
+
+        switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+        case EDP_18BPP:
+                dev_priv->edp_bpp = 18;
+                break;
+        case EDP_24BPP:
+                dev_priv->edp_bpp = 24;
+                break;
+        case EDP_30BPP:
+                dev_priv->edp_bpp = 30;
+                break;
+        }
+}
+
+static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
                      struct bdb_header *bdb)
 {
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
         parse_sdvo_device_mapping(dev_priv, bdb);
         parse_device_mapping(dev_priv, bdb);
         parse_driver_features(dev_priv, bdb);
+        parse_edp(dev_priv, bdb);
 
         pci_unmap_rom(pdev, bios);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f724..4c18514f6f80 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
 #define BDB_SDVO_LVDS_PNP_IDS    24
 #define BDB_SDVO_LVDS_POWER_SEQ  25
 #define BDB_TV_OPTIONS           26
+#define BDB_EDP                  27
 #define BDB_LVDS_OPTIONS         40
 #define BDB_LVDS_LFP_DATA_PTRS   41
 #define BDB_LVDS_LFP_DATA        42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
         u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP       0
+#define EDP_24BPP       1
+#define EDP_30BPP       2
+#define EDP_RATE_1_62   0
+#define EDP_RATE_2_7    1
+#define EDP_LANE_1      0
+#define EDP_LANE_2      1
+#define EDP_LANE_4      3
+#define EDP_PREEMPHASIS_NONE    0
+#define EDP_PREEMPHASIS_3_5dB   1
+#define EDP_PREEMPHASIS_6dB     2
+#define EDP_PREEMPHASIS_9_5dB   3
+#define EDP_VSWING_0_4V         0
+#define EDP_VSWING_0_6V         1
+#define EDP_VSWING_0_8V         2
+#define EDP_VSWING_1_2V         3
+
+struct edp_power_seq {
+        u16 t3;
+        u16 t7;
+        u16 t9;
+        u16 t10;
+        u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+        u8 rate:4;
+        u8 lanes:4;
+        u8 preemphasis:4;
+        u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+        struct edp_power_seq power_seqs[16];
+        u32 color_depth;
+        u32 sdrrs_msa_timing_delay;
+        struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 bool intel_init_bios(struct drm_device *dev);
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
         adpa = I915_READ(PCH_ADPA);
 
         adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+        /* disable HPD first */
+        I915_WRITE(PCH_ADPA, adpa);
+        (void)I915_READ(PCH_ADPA);
 
         adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
                  ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 002612fae717..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
         intel_p2_t          p2;
         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
                           int, int, intel_clock_t *);
-        bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
-                                  int, int, intel_clock_t *);
 };
 
 #define I8XX_DOT_MIN              25000
@@ -242,41 +240,91 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX         350000
 #define IRONLAKE_VCO_MIN         1760000
 #define IRONLAKE_VCO_MAX         3510000
-#define IRONLAKE_N_MIN           1
-#define IRONLAKE_N_MAX           5
-#define IRONLAKE_M_MIN           79
-#define IRONLAKE_M_MAX           118
 #define IRONLAKE_M1_MIN          12
-#define IRONLAKE_M1_MAX          23
+#define IRONLAKE_M1_MAX          22
 #define IRONLAKE_M2_MIN          5
 #define IRONLAKE_M2_MAX          9
-#define IRONLAKE_P_SDVO_DAC_MIN  5
-#define IRONLAKE_P_SDVO_DAC_MAX  80
-#define IRONLAKE_P_LVDS_MIN      28
-#define IRONLAKE_P_LVDS_MAX      112
-#define IRONLAKE_P1_MIN          1
-#define IRONLAKE_P1_MAX          8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN      1
+#define IRONLAKE_DAC_N_MAX      5
+#define IRONLAKE_DAC_M_MIN      79
+#define IRONLAKE_DAC_M_MAX      127
+#define IRONLAKE_DAC_P_MIN      5
+#define IRONLAKE_DAC_P_MAX      80
+#define IRONLAKE_DAC_P1_MIN     1
+#define IRONLAKE_DAC_P1_MAX     8
+#define IRONLAKE_DAC_P2_SLOW    10
+#define IRONLAKE_DAC_P2_FAST    5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN   1
+#define IRONLAKE_LVDS_S_N_MAX   3
+#define IRONLAKE_LVDS_S_M_MIN   79
+#define IRONLAKE_LVDS_S_M_MAX   118
+#define IRONLAKE_LVDS_S_P_MIN   28
+#define IRONLAKE_LVDS_S_P_MAX   112
+#define IRONLAKE_LVDS_S_P1_MIN  2
+#define IRONLAKE_LVDS_S_P1_MAX  8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN   1
+#define IRONLAKE_LVDS_D_N_MAX   3
+#define IRONLAKE_LVDS_D_M_MIN   79
+#define IRONLAKE_LVDS_D_M_MAX   127
+#define IRONLAKE_LVDS_D_P_MIN   14
+#define IRONLAKE_LVDS_D_P_MAX   56
+#define IRONLAKE_LVDS_D_P1_MIN  2
+#define IRONLAKE_LVDS_D_P1_MAX  8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN       1
+#define IRONLAKE_LVDS_S_SSC_N_MAX       2
+#define IRONLAKE_LVDS_S_SSC_M_MIN       79
+#define IRONLAKE_LVDS_S_SSC_M_MAX       126
+#define IRONLAKE_LVDS_S_SSC_P_MIN       28
+#define IRONLAKE_LVDS_S_SSC_P_MAX       112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN      2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX      8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW     14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST     14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN       1
+#define IRONLAKE_LVDS_D_SSC_N_MAX       3
+#define IRONLAKE_LVDS_D_SSC_M_MIN       79
+#define IRONLAKE_LVDS_D_SSC_M_MAX       126
+#define IRONLAKE_LVDS_D_SSC_P_MIN       14
+#define IRONLAKE_LVDS_D_SSC_P_MAX       42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN      2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX      6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW     7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST     7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN       1
+#define IRONLAKE_DP_N_MAX       2
+#define IRONLAKE_DP_M_MIN       81
+#define IRONLAKE_DP_M_MAX       90
+#define IRONLAKE_DP_P_MIN       10
+#define IRONLAKE_DP_P_MAX       20
+#define IRONLAKE_DP_P2_FAST     10
+#define IRONLAKE_DP_P2_SLOW     10
+#define IRONLAKE_DP_P2_LIMIT    0
+#define IRONLAKE_DP_P1_MIN      1
+#define IRONLAKE_DP_P1_MAX      2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                     int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                            int target, int refclk, intel_clock_t *best_clock);
-static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int target, int refclk, intel_clock_t *best_clock);
 
@@ -299,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
         .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                  .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -314,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
         .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                  .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -329,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
         .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -347,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
         .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
     /* below parameter and function is for G4X Chipset Family*/
@@ -365,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
                  .p2_fast = G4X_P2_SDVO_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -382,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
                  .p2_fast = G4X_P2_HDMI_DAC_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -407,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
                  .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -432,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
                  .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
@@ -470,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
470 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 510 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
471 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 511 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
472 .find_pll = intel_find_best_PLL, 512 .find_pll = intel_find_best_PLL,
473 .find_reduced_pll = intel_find_best_reduced_PLL,
474}; 513};
475 514
476static const intel_limit_t intel_limits_pineview_lvds = { 515static const intel_limit_t intel_limits_pineview_lvds = {
@@ -486,36 +525,80 @@ static const intel_limit_t intel_limits_pineview_lvds = {
486 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 525 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
487 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 526 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
488 .find_pll = intel_find_best_PLL, 527 .find_pll = intel_find_best_PLL,
489 .find_reduced_pll = intel_find_best_reduced_PLL,
490}; 528};
491 529
492static const intel_limit_t intel_limits_ironlake_sdvo = { 530static const intel_limit_t intel_limits_ironlake_dac = {
531 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
532 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
533 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
534 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
535 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
536 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
537 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
538 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
539 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
540 .p2_slow = IRONLAKE_DAC_P2_SLOW,
541 .p2_fast = IRONLAKE_DAC_P2_FAST },
542 .find_pll = intel_g4x_find_best_PLL,
543};
544
545static const intel_limit_t intel_limits_ironlake_single_lvds = {
546 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
547 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
548 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
549 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
550 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
551 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
552 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
553 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
554 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
555 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
556 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
557 .find_pll = intel_g4x_find_best_PLL,
558};
559
560static const intel_limit_t intel_limits_ironlake_dual_lvds = {
561 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
562 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
563 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
564 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
565 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
566 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
567 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
568 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
569 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
570 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
571 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
572 .find_pll = intel_g4x_find_best_PLL,
573};
574
575static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
493 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 576 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
494 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 577 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
495 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 578 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
496 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 579 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
497 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 580 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
498 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 581 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
499 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, 582 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
500 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 583 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
501 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 584 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
502 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 585 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
503 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 586 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
504 .find_pll = intel_g4x_find_best_PLL, 587 .find_pll = intel_g4x_find_best_PLL,
505}; 588};
506 589
507static const intel_limit_t intel_limits_ironlake_lvds = { 590static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
508 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 591 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
509 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 592 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
510 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 593 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
511 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 594 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
512 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 595 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
513 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 596 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
514 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, 597 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
515 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 598 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
516 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 599 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
517 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 600 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
518 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 601 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
519 .find_pll = intel_g4x_find_best_PLL, 602 .find_pll = intel_g4x_find_best_PLL,
520}; 603};
521 604
@@ -524,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
524 .max = IRONLAKE_DOT_MAX }, 607 .max = IRONLAKE_DOT_MAX },
525 .vco = { .min = IRONLAKE_VCO_MIN, 608 .vco = { .min = IRONLAKE_VCO_MIN,
526 .max = IRONLAKE_VCO_MAX}, 609 .max = IRONLAKE_VCO_MAX},
527 .n = { .min = IRONLAKE_N_MIN, 610 .n = { .min = IRONLAKE_DP_N_MIN,
528 .max = IRONLAKE_N_MAX }, 611 .max = IRONLAKE_DP_N_MAX },
529 .m = { .min = IRONLAKE_M_MIN, 612 .m = { .min = IRONLAKE_DP_M_MIN,
530 .max = IRONLAKE_M_MAX }, 613 .max = IRONLAKE_DP_M_MAX },
531 .m1 = { .min = IRONLAKE_M1_MIN, 614 .m1 = { .min = IRONLAKE_M1_MIN,
532 .max = IRONLAKE_M1_MAX }, 615 .max = IRONLAKE_M1_MAX },
533 .m2 = { .min = IRONLAKE_M2_MIN, 616 .m2 = { .min = IRONLAKE_M2_MIN,
534 .max = IRONLAKE_M2_MAX }, 617 .max = IRONLAKE_M2_MAX },
535 .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, 618 .p = { .min = IRONLAKE_DP_P_MIN,
536 .max = IRONLAKE_P_DISPLAY_PORT_MAX }, 619 .max = IRONLAKE_DP_P_MAX },
537 .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, 620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
538 .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, 621 .max = IRONLAKE_DP_P1_MAX},
539 .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, 622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
540 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, 623 .p2_slow = IRONLAKE_DP_P2_SLOW,
541 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, 624 .p2_fast = IRONLAKE_DP_P2_FAST },
542 .find_pll = intel_find_pll_ironlake_dp, 625 .find_pll = intel_find_pll_ironlake_dp,
543}; 626};
544 627
545static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
546{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
547 const intel_limit_t *limit; 632 const intel_limit_t *limit;
548 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
549 limit = &intel_limits_ironlake_lvds; 634
550 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
551 HAS_eDP) 653 HAS_eDP)
552 limit = &intel_limits_ironlake_display_port; 654 limit = &intel_limits_ironlake_display_port;
553 else 655 else
554 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
555 657
556 return limit; 658 return limit;
557} 659}
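For readability, the selection the rewritten intel_ironlake_limit() performs can be summarized as follows (a sketch derived from the hunk above; table and field names as in the patch):

	/*
	 * Ironlake PLL limit selection after this change (sketch):
	 *
	 *   output        refclk    limit table
	 *   LVDS single   120 MHz   intel_limits_ironlake_single_lvds
	 *   LVDS single   100 MHz   intel_limits_ironlake_single_lvds_100m
	 *   LVDS dual     120 MHz   intel_limits_ironlake_dual_lvds
	 *   LVDS dual     100 MHz   intel_limits_ironlake_dual_lvds_100m
	 *   DP / eDP      either    intel_limits_ironlake_display_port
	 *   anything else either    intel_limits_ironlake_dac
	 *
	 * Dual channel is detected from LVDS_CLKB_POWER_UP in PCH_LVDS, and
	 * the 100 MHz case from lvds_use_ssc && lvds_ssc_freq == 100.
	 */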
@@ -768,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
768 return (err != target); 870 return (err != target);
769} 871}
770 872
771
772static bool
773intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
774 int target, int refclk, intel_clock_t *best_clock)
775
776{
777 struct drm_device *dev = crtc->dev;
778 intel_clock_t clock;
779 int err = target;
780 bool found = false;
781
782 memcpy(&clock, best_clock, sizeof(intel_clock_t));
783
784 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
785 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
786 /* m1 is always 0 in Pineview */
787 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
788 break;
789 for (clock.n = limit->n.min; clock.n <= limit->n.max;
790 clock.n++) {
791 int this_err;
792
793 intel_clock(dev, refclk, &clock);
794
795 if (!intel_PLL_is_valid(crtc, &clock))
796 continue;
797
798 this_err = abs(clock.dot - target);
799 if (this_err < err) {
800 *best_clock = clock;
801 err = this_err;
802 found = true;
803 }
804 }
805 }
806 }
807
808 return found;
809}
810
811static bool 873static bool
812intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 874intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
813 int target, int refclk, intel_clock_t *best_clock) 875 int target, int refclk, intel_clock_t *best_clock)
@@ -969,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
969 1031
970 /* enable it... */ 1032 /* enable it... */
971 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
972 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
973 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
974 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1262,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1262 return ret; 1326 return ret;
1263 } 1327 }
1264 1328
1265 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 1329 ret = i915_gem_object_set_to_display_plane(obj);
1266 if (ret != 0) { 1330 if (ret != 0) {
1267 i915_gem_object_unpin(obj); 1331 i915_gem_object_unpin(obj);
1268 mutex_unlock(&dev->struct_mutex); 1332 mutex_unlock(&dev->struct_mutex);
@@ -1693,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1693 case DRM_MODE_DPMS_OFF: 1757 case DRM_MODE_DPMS_OFF:
1694 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1758 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1695 1759
1760 drm_vblank_off(dev, pipe);
1696 /* Disable display plane */ 1761 /* Disable display plane */
1697 temp = I915_READ(dspcntr_reg); 1762 temp = I915_READ(dspcntr_reg);
1698 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1763 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
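The drm_vblank_off() call is inserted ahead of the plane and pipe shutdown. A note on the assumed intent (standard DRM core behaviour; not stated in the hunk itself):

	/*
	 * Sketch: drm_vblank_off(dev, pipe) disables the pipe's vblank
	 * interrupt and wakes any sleepers waiting on it, so nothing is
	 * left blocked on a vblank that can no longer arrive once the
	 * pipe is off.
	 */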
@@ -2574,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2574 sr_entries = roundup(sr_entries / cacheline_size, 1); 2639 sr_entries = roundup(sr_entries / cacheline_size, 1);
2575 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2640 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2576 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2641 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2642 } else {
2643 /* Turn off self refresh if both pipes are enabled */
2644 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2645 & ~FW_BLC_SELF_EN);
2577 } 2646 }
2578 2647
2579 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2648 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2617,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2617 srwm = 1; 2686 srwm = 1;
2618 srwm &= 0x3f; 2687 srwm &= 0x3f;
2619 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2620 } 2693 }
2621 2694
2622 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2685,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2685 if (srwm < 0) 2758 if (srwm < 0)
2686 srwm = 1; 2759 srwm = 1;
2687 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2688 } 2765 }
2689 2766
2690 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
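The same else branch lands in all three watermark routines above (g4x, i965, i9xx). The hunks state the what; a sketch of the presumable why:

	/*
	 * Sketch (inference): FW_BLC_SELF_EN self-refresh watermarks are
	 * only meaningful while a single pipe is scanning out, so when a
	 * second pipe is active the enable bit is explicitly cleared
	 * instead of being left set from earlier single-pipe operation:
	 *
	 *   I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
	 */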
@@ -2910,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2910 return -EINVAL; 2987 return -EINVAL;
2911 } 2988 }
2912 2989
2913 if (is_lvds && limit->find_reduced_pll && 2990 if (is_lvds && dev_priv->lvds_downclock_avail) {
2914 dev_priv->lvds_downclock_avail) { 2991 has_reduced_clock = limit->find_pll(limit, crtc,
2915 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
2916 has_reduced_clock = limit->find_reduced_pll(limit, crtc,
2917 dev_priv->lvds_downclock, 2992 dev_priv->lvds_downclock,
2918 refclk, 2993 refclk,
2919 &reduced_clock); 2994 &reduced_clock);
@@ -2981,6 +3056,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2981 temp |= PIPE_8BPC; 3056 temp |= PIPE_8BPC;
2982 else 3057 else
2983 temp |= PIPE_6BPC; 3058 temp |= PIPE_6BPC;
3059 } else if (is_edp) {
3060 switch (dev_priv->edp_bpp/3) {
3061 case 8:
3062 temp |= PIPE_8BPC;
3063 break;
3064 case 10:
3065 temp |= PIPE_10BPC;
3066 break;
3067 case 6:
3068 temp |= PIPE_6BPC;
3069 break;
3070 case 12:
3071 temp |= PIPE_12BPC;
3072 break;
3073 }
2984 } else 3074 } else
2985 temp |= PIPE_8BPC; 3075 temp |= PIPE_8BPC;
2986 I915_WRITE(pipeconf_reg, temp); 3076 I915_WRITE(pipeconf_reg, temp);
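The new eDP branch maps the panel's total bits per pixel onto the per-component pipe depth fields. A worked mapping, assuming edp_bpp is the total bpp reported by the VBT:

	/*
	 * edp_bpp / 3 = bits per colour component (sketch):
	 *
	 *   edp_bpp 18 ->  6 bpc -> PIPE_6BPC
	 *   edp_bpp 24 ->  8 bpc -> PIPE_8BPC
	 *   edp_bpp 30 -> 10 bpc -> PIPE_10BPC
	 *   edp_bpp 36 -> 12 bpc -> PIPE_12BPC
	 */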
@@ -3991,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3991struct intel_unpin_work { 4081struct intel_unpin_work {
3992 struct work_struct work; 4082 struct work_struct work;
3993 struct drm_device *dev; 4083 struct drm_device *dev;
3994 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
3995 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
3996 int pending; 4087 int pending;
3997}; 4088};
@@ -4002,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
4002 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
4003 4094
4004 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
4005 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
4006 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
4007 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
4008 kfree(work); 4100 kfree(work);
4009} 4101}
@@ -4026,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4026 spin_lock_irqsave(&dev->event_lock, flags); 4118 spin_lock_irqsave(&dev->event_lock, flags);
4027 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
4028 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4121 if (work && !work->pending) {
4122 obj_priv = work->pending_flip_obj->driver_private;
4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4124 obj_priv,
4125 atomic_read(&obj_priv->pending_flip));
4126 }
4029 spin_unlock_irqrestore(&dev->event_lock, flags); 4127 spin_unlock_irqrestore(&dev->event_lock, flags);
4030 return; 4128 return;
4031 } 4129 }
@@ -4046,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4046 4144
4047 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4048 4146
4049 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4050 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4148
4149 /* Initial scanout buffer will have a 0 pending flip count */
4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4151 atomic_dec_and_test(&obj_priv->pending_flip))
4051 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4152 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4052 schedule_work(&work->work); 4153 schedule_work(&work->work);
4053} 4154}
@@ -4060,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4060 unsigned long flags; 4161 unsigned long flags;
4061 4162
4062 spin_lock_irqsave(&dev->event_lock, flags); 4163 spin_lock_irqsave(&dev->event_lock, flags);
4063 if (intel_crtc->unpin_work) 4164 if (intel_crtc->unpin_work) {
4064 intel_crtc->unpin_work->pending = 1; 4165 intel_crtc->unpin_work->pending = 1;
4166 } else {
4167 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4168 }
4065 spin_unlock_irqrestore(&dev->event_lock, flags); 4169 spin_unlock_irqrestore(&dev->event_lock, flags);
4066} 4170}
4067 4171
@@ -4077,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4077 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4078 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4079 unsigned long flags; 4183 unsigned long flags;
4080 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4081 RING_LOCALS; 4186 RING_LOCALS;
4082 4187
4083 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4089,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4089 work->event = event; 4194 work->event = event;
4090 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4091 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4092 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4093 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4094 4199
4095 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
4096 spin_lock_irqsave(&dev->event_lock, flags); 4201 spin_lock_irqsave(&dev->event_lock, flags);
4097 if (intel_crtc->unpin_work) { 4202 if (intel_crtc->unpin_work) {
4203 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4098 spin_unlock_irqrestore(&dev->event_lock, flags); 4204 spin_unlock_irqrestore(&dev->event_lock, flags);
4099 kfree(work); 4205 kfree(work);
4100 mutex_unlock(&dev->struct_mutex); 4206 mutex_unlock(&dev->struct_mutex);
@@ -4108,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4108 4214
4109 ret = intel_pin_and_fence_fb_obj(dev, obj); 4215 ret = intel_pin_and_fence_fb_obj(dev, obj);
4110 if (ret != 0) { 4216 if (ret != 0) {
4217 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4218 obj->driver_private);
4111 kfree(work); 4219 kfree(work);
4220 intel_crtc->unpin_work = NULL;
4112 mutex_unlock(&dev->struct_mutex); 4221 mutex_unlock(&dev->struct_mutex);
4113 return ret; 4222 return ret;
4114 } 4223 }
4115 4224
4116 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4117 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4118 4228
4119 crtc->fb = fb; 4229 crtc->fb = fb;
4120 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4121 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4122 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4123 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4124 4235
4125 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4126 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4128,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4128 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4129 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4130 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4131 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4132 } else { 4244 } else {
4133 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4134 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
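Taken together, the page-flip hunks above split the single work->obj into two references. A condensed sketch of the resulting lifecycle (names as in the patch):

	/*
	 * Flip bookkeeping after this change (sketch):
	 *
	 *   queue   intel_crtc_page_flip():
	 *               work->old_fb_obj       = intel_fb->obj;  current scanout
	 *               work->pending_flip_obj = obj;            new buffer
	 *               atomic_inc(&obj_priv->pending_flip);
	 *   irq     intel_prepare_page_flip():
	 *               work->pending = 1;
	 *   vblank  intel_finish_page_flip():
	 *               pending_flip == 0 (initial scanout buffer) or the
	 *               final atomic_dec_and_test() wakes
	 *               pending_flip_queue, then schedule_work() unpins
	 *               old_fb_obj and drops both references.
	 */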
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1349d9fd01c4..439506cefc14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
125 125
126/* I think this is a fiction */ 126/* I think this is a fiction */
127static int 127static int
128intel_dp_link_required(int pixel_clock) 128intel_dp_link_required(struct drm_device *dev,
129 struct intel_output *intel_output, int pixel_clock)
129{ 130{
130 return pixel_clock * 3; 131 struct drm_i915_private *dev_priv = dev->dev_private;
132
133 if (IS_eDP(intel_output))
134 return (pixel_clock * dev_priv->edp_bpp) / 8;
135 else
136 return pixel_clock * 3;
131} 137}
132 138
133static int 139static int
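As a sanity check of the new formula (hypothetical numbers, same units as mode->clock, i.e. kHz):

	/*
	 * Required link bandwidth (sketch), pixel clock 121750:
	 *
	 *   non-eDP:              121750 * 3        = 365250
	 *   eDP, edp_bpp == 18:  (121750 * 18) / 8  = 273937
	 *
	 * A 6-bpc eDP panel now needs 25% less link capacity than the
	 * old hard-coded 24 bpp assumption, so modes the old check
	 * rejected can validate against max_link_clock * max_lanes.
	 */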
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
138 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
139 int max_lanes = intel_dp_max_lane_count(intel_output); 145 int max_lanes = intel_dp_max_lane_count(intel_output);
140 146
141 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) 147 if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
148 > max_link_clock * max_lanes)
142 return MODE_CLOCK_HIGH; 149 return MODE_CLOCK_HIGH;
143 150
144 if (mode->clock < 10000) 151 if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
492 for (clock = 0; clock <= max_clock; clock++) { 499 for (clock = 0; clock <= max_clock; clock++) {
493 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 500 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
494 501
495 if (intel_dp_link_required(mode->clock) <= link_avail) { 502 if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
503 <= link_avail) {
496 dp_priv->link_bw = bws[clock]; 504 dp_priv->link_bw = bws[clock];
497 dp_priv->lane_count = lane_count; 505 dp_priv->lane_count = lane_count;
498 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 506 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
1289 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1297 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1290 intel_dp_check_link_status(intel_output); 1298 intel_dp_check_link_status(intel_output);
1291} 1299}
1292/* 1300
1293 * Enumerate the child dev array parsed from VBT to check whether
1294 * the given DP is present.
1295 * If it is present, return 1.
1296 * If it is not present, return false.
1297 * If no child dev is parsed from VBT, it is assumed that the given
1298 * DP is present.
1299 */
1300static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
1301{
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 struct child_device_config *p_child;
1304 int i, dp_port, ret;
1305
1306 if (!dev_priv->child_dev_num)
1307 return 1;
1308
1309 dp_port = 0;
1310 if (dp_reg == DP_B || dp_reg == PCH_DP_B)
1311 dp_port = PORT_IDPB;
1312 else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
1313 dp_port = PORT_IDPC;
1314 else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
1315 dp_port = PORT_IDPD;
1316
1317 ret = 0;
1318 for (i = 0; i < dev_priv->child_dev_num; i++) {
1319 p_child = dev_priv->child_dev + i;
1320 /*
1321 * If the device type is not DP, continue.
1322 */
1323 if (p_child->device_type != DEVICE_TYPE_DP &&
1324 p_child->device_type != DEVICE_TYPE_eDP)
1325 continue;
1326 /* Find the eDP port */
1327 if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
1328 ret = 1;
1329 break;
1330 }
1331 /* Find the DP port */
1332 if (p_child->dvo_port == dp_port) {
1333 ret = 1;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1339void 1301void
1340intel_dp_init(struct drm_device *dev, int output_reg) 1302intel_dp_init(struct drm_device *dev, int output_reg)
1341{ 1303{
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1345 struct intel_dp_priv *dp_priv; 1307 struct intel_dp_priv *dp_priv;
1346 const char *name = NULL; 1308 const char *name = NULL;
1347 1309
1348 if (!dp_is_present_in_vbt(dev, output_reg)) {
1349 DRM_DEBUG_KMS("DP is not present. Ignore it\n");
1350 return;
1351 }
1352 intel_output = kcalloc(sizeof(struct intel_output) + 1310 intel_output = kcalloc(sizeof(struct intel_output) +
1353 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1311 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1354 if (!intel_output) 1312 if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1373 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1331 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1374 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1332 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1375 1333
1376 if (IS_eDP(intel_output)) { 1334 if (IS_eDP(intel_output))
1377 intel_output->crtc_mask = (1 << 1);
1378 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1335 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1379 } else 1336
1380 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1337 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1381 connector->interlace_allowed = true; 1338 connector->interlace_allowed = true;
1382 connector->doublescan_allowed = 0; 1339 connector->doublescan_allowed = 0;
1383 1340
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 06431941b233..0e268deed761 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 225 .destroy = intel_hdmi_enc_destroy,
226}; 226};
227 227
228/*
229 * Enumerate the child dev array parsed from VBT to check whether
230 * the given HDMI is present.
231 * If it is present, return 1.
232 * If it is not present, return false.
233 * If no child dev is parsed from VBT, it assumes that the given
234 * HDMI is present.
235 */
236static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct child_device_config *p_child;
240 int i, hdmi_port, ret;
241
242 if (!dev_priv->child_dev_num)
243 return 1;
244
245 if (hdmi_reg == SDVOB)
246 hdmi_port = DVO_B;
247 else if (hdmi_reg == SDVOC)
248 hdmi_port = DVO_C;
249 else if (hdmi_reg == HDMIB)
250 hdmi_port = DVO_B;
251 else if (hdmi_reg == HDMIC)
252 hdmi_port = DVO_C;
253 else if (hdmi_reg == HDMID)
254 hdmi_port = DVO_D;
255 else
256 return 0;
257
258 ret = 0;
259 for (i = 0; i < dev_priv->child_dev_num; i++) {
260 p_child = dev_priv->child_dev + i;
261 /*
262 * If the device type is not HDMI, continue.
263 */
264 if (p_child->device_type != DEVICE_TYPE_HDMI)
265 continue;
266 /* Find the HDMI port */
267 if (p_child->dvo_port == hdmi_port) {
268 ret = 1;
269 break;
270 }
271 }
272 return ret;
273}
274void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 228void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
275{ 229{
276 struct drm_i915_private *dev_priv = dev->dev_private; 230 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
278 struct intel_output *intel_output; 232 struct intel_output *intel_output;
279 struct intel_hdmi_priv *hdmi_priv; 233 struct intel_hdmi_priv *hdmi_priv;
280 234
281 if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
282 DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
283 return;
284 }
285 intel_output = kcalloc(sizeof(struct intel_output) + 235 intel_output = kcalloc(sizeof(struct intel_output) +
286 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 236 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
287 if (!intel_output) 237 if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f4b4aa242df1..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,6 +602,20 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
602/* Some lid devices report incorrect lid status, assume they're connected */ 602/* Some lid devices report incorrect lid status, assume they're connected */
603static const struct dmi_system_id bad_lid_status[] = { 603static const struct dmi_system_id bad_lid_status[] = {
604 { 604 {
605 .ident = "Compaq nx9020",
606 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
608 DMI_MATCH(DMI_BOARD_NAME, "3084"),
609 },
610 },
611 {
612 .ident = "Samsung SX20S",
613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 },
617 },
618 {
605 .ident = "Aspire One", 619 .ident = "Aspire One",
606 .matches = { 620 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 621 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -609,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
609 }, 623 },
610 }, 624 },
611 { 625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
612 .ident = "PC-81005", 633 .ident = "PC-81005",
613 .matches = { 634 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), 635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
615 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
616 }, 637 },
617 }, 638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
618 { } 646 { }
619}; 647};
620 648
@@ -629,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
629{ 657{
630 enum drm_connector_status status = connector_status_connected; 658 enum drm_connector_status status = connector_status_connected;
631 659
632 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
633 status = connector_status_disconnected; 661 status = connector_status_disconnected;
634 662
635 return status; 663 return status;
@@ -912,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
912 } 940 }
913 } 941 }
914 mutex_unlock(&dev->mode_config.mutex); 942 mutex_unlock(&dev->mode_config.mutex);
915 if (temp_downclock < panel_fixed_mode->clock) { 943 if (temp_downclock < panel_fixed_mode->clock &&
944 i915_lvds_downclock) {
916 /* We found the downclock for LVDS. */ 945 /* We found the downclock for LVDS. */
917 dev_priv->lvds_downclock_avail = 1; 946 dev_priv->lvds_downclock_avail = 1;
918 dev_priv->lvds_downclock = temp_downclock; 947 dev_priv->lvds_downclock = temp_downclock;
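The extra i915_lvds_downclock condition makes the LVDS downclock opt-in. A usage note (assuming the variable is wired to a module parameter named lvds_downclock, per the i915_ prefix convention above; not shown in this hunk):

	/*
	 * Sketch: dev_priv->lvds_downclock_avail is now only set when
	 * both a lower-clock EDID mode exists and the user opted in,
	 * e.g. booting with i915.lvds_downclock=1 (assumed name); the
	 * default of 0 keeps the panel at its fixed mode clock.
	 */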
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de5144c8c153..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
462} 462}
463 463
464/** 464/**
465 * Don't check status code from this as it switches the bus back to the 465 * Try to read the response after issuing the DDC switch command. Note
466 * SDVO chips which defeats the purpose of doing a bus switch in the first 466 * that reading the response and issuing the DDC switch command must
467 * place. 467 * happen in one I2C transaction; otherwise, starting another I2C
468 * transaction after issuing the DDC bus switch will switch the bus
469 * back to the internal SDVO register.
468 */ 470 */
469static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 471static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
470 u8 target) 472 u8 target)
471{ 473{
472 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); 474 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
475 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
476 struct i2c_msg msgs[] = {
477 {
478 .addr = sdvo_priv->slave_addr >> 1,
479 .flags = 0,
480 .len = 2,
481 .buf = out_buf,
482 },
483 /* the following two messages read back the response */
484 {
485 .addr = sdvo_priv->slave_addr >> 1,
486 .flags = 0,
487 .len = 1,
488 .buf = cmd_buf,
489 },
490 {
491 .addr = sdvo_priv->slave_addr >> 1,
492 .flags = I2C_M_RD,
493 .len = 1,
494 .buf = ret_value,
495 },
496 };
497
498 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
499 &target, 1);
500 /* write the DDC switch command argument */
501 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
502
503 out_buf[0] = SDVO_I2C_OPCODE;
504 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
505 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
506 cmd_buf[1] = 0;
507 ret_value[0] = 0;
508 ret_value[1] = 0;
509
510 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
511 if (ret != 3) {
512 /* failure in I2C transfer */
513 DRM_DEBUG_KMS("I2C transfer returned %d\n", ret);
514 return;
515 }
516 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
517 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
518 ret_value[0]);
519 return;
520 }
521 return;
473} 522}
474 523
475static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 524static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
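A sketch of the wire-level sequence the three i2c_msg entries above produce, and why it must be a single transaction (reasoning from the rewritten comment):

	/*
	 * One combined i2c_transfer() (sketch):
	 *
	 *   msgs[0]  write  { SDVO_I2C_OPCODE, SDVO_CMD_SET_CONTROL_BUS_SWITCH }
	 *   msgs[1]  write  { SDVO_I2C_CMD_STATUS }
	 *   msgs[2]  read   1 byte -> expect SDVO_CMD_STATUS_SUCCESS
	 *
	 * Issuing msgs[1]/msgs[2] as a separate transaction would switch
	 * the bus back to the internal SDVO registers, undoing the DDC
	 * switch the command just requested.
	 */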
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1579 edid = drm_get_edid(&intel_output->base, 1628 edid = drm_get_edid(&intel_output->base,
1580 intel_output->ddc_bus); 1629 intel_output->ddc_bus);
1581 1630
1631 /* This only applies to SDVO cards with multiple outputs */
1632 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
1633 uint8_t saved_ddc, temp_ddc;
1634 saved_ddc = sdvo_priv->ddc_bus;
1635 temp_ddc = sdvo_priv->ddc_bus >> 1;
1636 /*
1637 * Don't use 1 as the argument of the DDC bus switch to get
1638 * the EDID; that value selects the SDVO SPD ROM.
1639 */
1640 while (temp_ddc > 1) {
1641 sdvo_priv->ddc_bus = temp_ddc;
1642 edid = drm_get_edid(&intel_output->base,
1643 intel_output->ddc_bus);
1644 if (edid) {
1645 /*
1646 * If we can read an EDID, this is probably the
1647 * correct DDC bus; record it.
1648 */
1649 sdvo_priv->ddc_bus = temp_ddc;
1650 break;
1651 }
1652 temp_ddc >>= 1;
1653 }
1654 if (edid == NULL)
1655 sdvo_priv->ddc_bus = saved_ddc;
1656 }
1582 /* when there is no edid and no monitor is connected with VGA 1657 /* when there is no edid and no monitor is connected with VGA
1583 * port, try to use the CRT ddc to read the EDID for DVI-connector 1658 * port, try to use the CRT ddc to read the EDID for DVI-connector
1584 */ 1659 */
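The probing loop above walks the DDC bus value downwards. A sketch of the assumed convention (one bit per candidate bus; the value 1 reserved for the SPD ROM):

	/*
	 * Sketch, saved_ddc == 0x08:
	 *
	 *   temp_ddc: 0x04 -> try EDID -> 0x02 -> try EDID -> stop at 0x01
	 *
	 * The value 1 is never used as the switch argument because it
	 * selects the SDVO SPD ROM rather than a monitor DDC channel; if
	 * no bus yields an EDID, saved_ddc is restored.
	 */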
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2270 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2271 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2272 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2273 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2274 2357
2275 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 1d5b9b7b033f..552ec110b741 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1840,8 +1840,6 @@ intel_tv_init(struct drm_device *dev)
1840 drm_connector_attach_property(connector, 1840 drm_connector_attach_property(connector,
1841 dev->mode_config.tv_bottom_margin_property, 1841 dev->mode_config.tv_bottom_margin_property,
1842 tv_priv->margin[TV_MARGIN_BOTTOM]); 1842 tv_priv->margin[TV_MARGIN_BOTTOM]);
1843
1844 dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
1845out: 1843out:
1846 drm_sysfs_connector_add(connector); 1844 drm_sysfs_connector_add(connector);
1847} 1845}