author    Linus Torvalds <torvalds@linux-foundation.org>  2010-02-12 12:32:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-02-12 12:32:10 -0500
commit    26b23ace8b54d836763bad3495fe8ed1a9d4354d (patch)
tree      35bc17fd4235e32fb1ab2c37ca8156ea778329bd /drivers/gpu
parent    22a8cdd60339d931d0dca54427712b2714e5ba8b (diff)
parent    75dfca80a610e4e87d3b9ccfb3f520692808697d (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: hold ref on flip object until it completes
  drm/i915: Fix crash while aborting hibernation
  drm/i915: Correctly return -ENOMEM on allocation failure in cmdbuf ioctls.
  drm/i915: fix pipe source image setting in flip command
  drm/i915: fix flip done interrupt on Ironlake
  drm/i915: untangle page flip completion
  drm/i915: handle FBC and self-refresh better
  drm/i915: Increase fb alignment to 64k
  drm/i915: Update write_domains on active list after flush.
  drm/i915: Rework DPLL calculation parameters for Ironlake
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/i915/i915_dma.c      |   4
 drivers/gpu/drm/i915/i915_drv.c      | 168
 drivers/gpu/drm/i915/i915_drv.h      |  11
 drivers/gpu/drm/i915/i915_gem.c      |  27
 drivers/gpu/drm/i915/i915_irq.c      |  16
 drivers/gpu/drm/i915/i915_reg.h      |   1
 drivers/gpu/drm/i915/intel_display.c | 245
 drivers/gpu/drm/i915/intel_fb.c      |   2
 8 files changed, 332 insertions(+), 142 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
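The bug fixed here (and again in the execbuffer ioctl below) is a stale-return-value goto: on kcalloc() failure the code jumped to the cleanup label with ret still zero, so the ioctl unwound its state yet reported success to userspace. A standalone sketch of the corrected idiom, with illustrative names rather than the kernel's:

#include <errno.h>
#include <stdlib.h>

/* Every branch that jumps to the cleanup label must first store a
 * meaningful error code; "ret" defaulting to 0 is exactly what made
 * the original failure path look like success. */
static int submit_with_cliprects(size_t num, size_t size)
{
	void *cliprects = NULL;
	int ret = 0;

	if (num) {
		cliprects = calloc(num, size);
		if (cliprects == NULL) {
			ret = -ENOMEM;	/* the previously missing assignment */
			goto fail_free;
		}
	}

	/* ... copy the user's rects and emit the batch here ... */

fail_free:
	free(cliprects);	/* free(NULL) is a no-op */
	return ret;
}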
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ecac882e1d54..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
 	i915_save_state(dev);
 
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(dev);
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	i915_drm_suspend(drm_dev);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_resume(struct device *dev)
-{
-	return i915_pci_resume(to_pci_dev(dev));
+	return 0;
 }
 
-static int
-i915_pm_freeze(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
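Taken together, the i915_drv.c changes split the old monolithic suspend/resume pair into composable helpers: i915_drm_freeze() quiesces the GPU and saves state, i915_drm_suspend() does the final opregion/lid bookkeeping, and i915_drm_thaw() re-initializes the ring and modeset state. Each dev_pm_ops phase then composes only what its transition needs, so hibernation's freeze phase no longer powers the device down and its thaw is no longer a no-op (the old "thaw during hibernate, do nothing!" is the crash-on-aborted-hibernation being fixed). A minimal sketch of that composition pattern, using hypothetical foo_* names rather than the driver's:

#include <linux/pm.h>

/* Illustrative stand-ins for i915_drm_freeze()/_suspend()/_thaw(). */
static int foo_freeze(struct device *dev)      { /* quiesce HW, save state */ return 0; }
static void foo_power_down(struct device *dev) { /* final shutdown work */ }
static int foo_thaw(struct device *dev)        { /* re-init HW and modeset */ return 0; }

static int foo_pm_suspend(struct device *dev)  /* suspend-to-RAM: freeze, then cut power */
{
	int error = foo_freeze(dev);
	if (error)
		return error;
	foo_power_down(dev);
	return 0;
}

static int foo_pm_freeze(struct device *dev)   /* hibernate: keep power while the image is written */
{
	return foo_freeze(dev);
}

static int foo_pm_thaw(struct device *dev)     /* image written (or aborted): bring HW back up */
{
	return foo_thaw(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_pm_suspend,
	.freeze   = foo_pm_freeze,
	.thaw     = foo_pm_thaw,
	.poweroff = foo_pm_suspend,	/* freeze + power down, as in the diff above */
};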
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aaf934d96f21..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
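The new gpu_write_list field gives each GEM object a second, independent list membership: obj_priv->list keeps tracking active/flushing/inactive state while gpu_write_list tracks only "has an unflushed GPU write". The embedded list_head idiom makes this cheap; a sketch under those assumptions (field names follow the diff, the helpers are illustrative):

#include <linux/list.h>

/* One object on two lists at once, via two embedded nodes. */
struct obj {
	struct list_head list;           /* active/flushing/inactive membership */
	struct list_head gpu_write_list; /* pending-GPU-write membership */
};

static void mark_gpu_write(struct obj *o, struct list_head *gpu_writes)
{
	/* Safe whether or not the node is already queued: INIT_LIST_HEAD()
	 * leaves it self-linked, and list_move_tail() unlinks first. */
	list_move_tail(&o->gpu_write_list, gpu_writes);
}

static void retire_gpu_write(struct obj *o)
{
	/* list_del_init() (not list_del()) keeps a later list_empty()
	 * check meaningful, which the BUG_ON() added in i915_gem.c
	 * relies on. */
	list_del_init(&o->gpu_write_list);
}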
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b4c8c0230689..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.flushing_list, list) {
+				 &dev_priv->mm.gpu_write_list,
+				 gpu_write_list) {
 		struct drm_gem_object *obj = obj_priv->obj;
 
 		if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
+			list_del_init(&obj_priv->gpu_write_list);
 			i915_gem_object_move_to_active(obj, seqno);
 
 			trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3682,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3850,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -4370,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4821,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
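With those hooks in place, the request code no longer scans the entire flushing_list and blindly clears write domains; only objects actually queued for a GPU write are touched, and each is moved to the active list under the flush's seqno. A condensed sketch of the resulting walk in i915_add_request() (locking and tracepoints omitted; the real code is in the hunks above):

/* Sketch of the post-flush retirement walk introduced above. */
static void retire_flushed_writes(drm_i915_private_t *dev_priv,
				  uint32_t flush_domains, uint32_t seqno)
{
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &dev_priv->mm.gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = obj_priv->obj;

		if ((obj->write_domain & flush_domains) == obj->write_domain) {
			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, seqno);
		}
	}
}

This is also why i915_gem_object_flush_gpu_write_domain() can now assert BUG_ON(obj->write_domain) instead of clearing it: by the time i915_add_request() returns, the walk above has already done so.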
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 50ddf4a95c5e..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,21 +309,21 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
-	if (de_iir & DE_PLANEA_FLIP_DONE)
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
 
-	if (de_iir & DE_PLANEB_FLIP_DONE)
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
 
-	if (de_iir & DE_PIPEA_VBLANK) {
+	if (de_iir & DE_PIPEA_VBLANK)
 		drm_handle_vblank(dev, 0);
-		intel_finish_page_flip(dev, 0);
-	}
 
-	if (de_iir & DE_PIPEB_VBLANK) {
+	if (de_iir & DE_PIPEB_VBLANK)
 		drm_handle_vblank(dev, 1);
-		intel_finish_page_flip(dev, 1);
-	}
 
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
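This is the "fix flip done interrupt on Ironlake" change: flip completion is driven by the flip-done bits rather than the following vblank, so a flip retires as soon as the hardware reports it latched instead of up to a frame later, and a vblank with no flip outstanding no longer wanders into the completion path. Schematically, the handler now pairs prepare and finish on the same event (a sketch of the shape, not new driver code):

/* Sketch: prepare/finish paired on flip-done; vblank handling is
 * reduced to the vblank counter itself. Plane/pipe B is identical
 * with index 1. */
static void ironlake_display_irqs(struct drm_device *dev, u32 de_iir)
{
	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip(dev, 0);
	}
	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);
}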
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define FBC_CTL_PERIODIC	(1<<30)
 #define FBC_CTL_INTERVAL_SHIFT	(16)
 #define FBC_CTL_UNCOMPRESSIBLE	(1<<14)
+#define FBC_C3_IDLE		(1<<13)
 #define FBC_CTL_STRIDE_SHIFT	(5)
 #define FBC_CTL_FENCENO		(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 12775df1bbfd..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX         350000
 #define IRONLAKE_VCO_MIN         1760000
 #define IRONLAKE_VCO_MAX         3510000
-#define IRONLAKE_N_MIN           1
-#define IRONLAKE_N_MAX           6
-#define IRONLAKE_M_MIN           79
-#define IRONLAKE_M_MAX           127
 #define IRONLAKE_M1_MIN          12
 #define IRONLAKE_M1_MAX          22
 #define IRONLAKE_M2_MIN          5
 #define IRONLAKE_M2_MAX          9
-#define IRONLAKE_P_SDVO_DAC_MIN  5
-#define IRONLAKE_P_SDVO_DAC_MAX  80
-#define IRONLAKE_P_LVDS_MIN      28
-#define IRONLAKE_P_LVDS_MAX      112
-#define IRONLAKE_P1_MIN          1
-#define IRONLAKE_P1_MAX          8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+	.m   = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+	.p1  = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
+		 .p2_fast = IRONLAKE_DAC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
+		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
 	.max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,
 		 .max = IRONLAKE_VCO_MAX},
-	.n   = { .min = IRONLAKE_N_MIN,
-		 .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,
-		 .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DP_N_MIN,
+		 .max = IRONLAKE_DP_N_MAX },
+	.m   = { .min = IRONLAKE_DP_M_MIN,
+		 .max = IRONLAKE_DP_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,
 		 .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,
 		 .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
-	.p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
-	.p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
-		 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+	.p   = { .min = IRONLAKE_DP_P_MIN,
+		 .max = IRONLAKE_DP_P_MAX },
+	.p1  = { .min = IRONLAKE_DP_P1_MIN,
+		 .max = IRONLAKE_DP_P1_MAX},
+	.p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+		 .p2_slow = IRONLAKE_DP_P2_SLOW,
+		 .p2_fast = IRONLAKE_DP_P2_FAST },
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_ironlake_lvds;
-	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	int refclk = 120;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+			refclk = 100;
+
+		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP) {
+			/* LVDS dual channel */
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 		   HAS_eDP)
 		limit = &intel_limits_ironlake_display_port;
 	else
-		limit = &intel_limits_ironlake_sdvo;
+		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	/* enable it... */
 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -3962,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *old_fb_obj;
+	struct drm_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 };
@@ -3973,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		container_of(__work, struct intel_unpin_work, work);
 
 	mutex_lock(&work->dev->struct_mutex);
-	i915_gem_object_unpin(work->obj);
-	drm_gem_object_unreference(work->obj);
+	i915_gem_object_unpin(work->old_fb_obj);
+	drm_gem_object_unreference(work->pending_flip_obj);
+	drm_gem_object_unreference(work->old_fb_obj);
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -3998,7 +4119,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
 		if (work && !work->pending) {
-			obj_priv = work->obj->driver_private;
+			obj_priv = work->pending_flip_obj->driver_private;
 			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
 					 obj_priv,
 					 atomic_read(&obj_priv->pending_flip));
@@ -4023,7 +4144,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->obj->driver_private;
+	obj_priv = work->pending_flip_obj->driver_private;
 
 	/* Initial scanout buffer will have a 0 pending flip count */
 	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4060,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
-	int ret;
+	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int ret, pipesrc;
 	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4072,7 +4194,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->obj = intel_fb->obj;
+	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	/* We borrow the event spin lock for protecting unpin_work */
@@ -4100,14 +4222,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	/* Reference the old fb object for the scheduled work. */
-	drm_gem_object_reference(work->obj);
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(work->old_fb_obj);
+	drm_gem_object_reference(obj);
 
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
 	obj_priv = obj->driver_private;
 	atomic_inc(&obj_priv->pending_flip);
+	work->pending_flip_obj = obj;
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_DISPLAY_FLIP |
@@ -4115,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	OUT_RING(fb->pitch);
 	if (IS_I965G(dev)) {
 		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-		OUT_RING((fb->width << 16) | fb->height);
+		pipesrc = I915_READ(pipesrc_reg);
+		OUT_RING(pipesrc & 0x0fff0fff);
 	} else {
 		OUT_RING(obj_priv->gtt_offset);
 		OUT_RING(MI_NOOP);
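Two of the flip fixes meet in this file. First, the unpin work now references both the outgoing framebuffer object and the object being flipped in, so neither can be freed while the flip is in flight ("hold ref on flip object until it completes"). Second, on i965 the flip packet's size dword is read back from the pipe's source register instead of using the new framebuffer's own width/height, since the pipe may be scanning out a source size that differs from the fb ("fix pipe source image setting in flip command"). A sketch of the emission after the change, reusing names from the hunks above rather than standing alone:

/* Sketch: the PIPE[AB]SRC readback, masked to its 12-bit height and
 * width fields, supplies the flip's source-image size dword, keeping
 * the flip consistent with the current mode. */
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
u32 pipesrc = I915_READ(pipesrc_reg) & 0x0fff0fff;

OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
OUT_RING(pipesrc);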
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+	ret = i915_gem_object_pin(fbo, 64*1024);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
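This one-liner is the "Increase fb alignment to 64k" fix: pinning the fbdev scanout buffer at PAGE_SIZE (4KiB on x86) does not generally satisfy the display engine's base-address alignment requirement, so the pin now asks for 64KiB explicitly, which covers both tiled and untiled scanout. Expressed with a named constant (our naming, not the driver's):

#define FB_BASE_ALIGN (64 * 1024)	/* display surface base alignment */

ret = i915_gem_object_pin(fbo, FB_BASE_ALIGN);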