author		Dave Airlie <airlied@redhat.com>	2013-01-17 05:34:08 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-01-17 05:34:08 -0500
commit		b5cc6c0387b2f8d269c1df1e68c97c958dd22fed (patch)
tree		697f2335b3a10f55e0ea226dcd044ee4ff3f0f7f
parent		9931faca02c604c22335f5a935a501bb2ace6e20 (diff)
parent		c0c36b941b6f0be6ac74f340040cbb29d6a0b06c (diff)
Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen mem support from Chris Wilson, one of the many steps to get to real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits&pieces

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
  drm/i915: Return the real error code from intel_set_mode()
  drm/i915: Make GSM void
  drm/i915: Move GSM mapping into dev_priv
  drm/i915: Move even more gtt code to i915_gem_gtt
  drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
  drm/i915: Introduce i915_gem_set_seqno()
  drm/i915: Always clear semaphore mboxes on seqno wrap
  drm/i915: Initialize hardware semaphore state on ring init
  drm/i915: Introduce ring set_seqno
  drm/i915: Missed conversion to gtt_pte_t
  drm/i915: Bug on unsupported swizzled platforms
  drm/i915: BUG() if fences are used on unsupported platform
  drm/i915: fixup overlay stolen memory leak
  drm/i915: clean up PIPECONF bpc #defines
  drm/i915: add intel_dp_set_signal_levels
  drm/i915: remove leftover display.update_wm assignment
  drm/i915: check for the PCH when setting pch_transcoder
  drm/i915: Clear the stolen fb before enabling
  drm/i915: Access to snooped system memory through the GTT is incoherent
  drm/i915: Remove stale comment about intel_dp_detect()
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
-rw-r--r--	drivers/char/agp/intel-gtt.c	1
-rw-r--r--	drivers/gpu/drm/drm_mm.c	96
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	95
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	50
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	82
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	183
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c	5
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	53
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	108
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_stolen.c	305
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_tiling.c	12
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	162
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	58
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c	24
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	648
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	295
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	11
-rw-r--r--	drivers/gpu/drm/i915/intel_fb.c	21
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c	20
-rw-r--r--	drivers/gpu/drm/i915/intel_i2c.c	101
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c	165
-rw-r--r--	drivers/gpu/drm/i915/intel_modes.c	1
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	6
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	15
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	99
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	11
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	7
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c	3
-rw-r--r--	include/drm/drm_mm.h	40
-rw-r--r--	include/drm/intel-gtt.h	2
32 files changed, 1572 insertions, 1111 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..c8d9dcb15db0 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -602,7 +602,6 @@ static int intel_gtt_init(void)
 		iounmap(intel_private.registers);
 		return -ENOMEM;
 	}
-	intel_private.base.gtt = intel_private.gtt;
 
 	global_cache_flush();	/* FIXME: ? */
 
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b..86272f04b82f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
-	return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
-	struct drm_mm_node *next_node =
-		list_entry(hole_node->node_list.next, struct drm_mm_node,
-			   node_list);
-
-	return next_node->start;
-}
-
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	unsigned long adj_start = hole_start;
 	unsigned long adj_end = hole_end;
 
-	BUG_ON(!hole_node->hole_follows || node->allocated);
+	BUG_ON(node->allocated);
 
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
 }
 
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					unsigned long start,
+					unsigned long size,
+					bool atomic)
+{
+	struct drm_mm_node *hole, *node;
+	unsigned long end = start + size;
+	unsigned long hole_start;
+	unsigned long hole_end;
+
+	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+		if (hole_start > start || hole_end < end)
+			continue;
+
+		node = drm_mm_kmalloc(mm, atomic);
+		if (unlikely(node == NULL))
+			return NULL;
+
+		node->start = start;
+		node->size = size;
+		node->mm = mm;
+		node->allocated = 1;
+
+		INIT_LIST_HEAD(&node->hole_stack);
+		list_add(&node->node_list, &hole->node_list);
+
+		if (start == hole_start) {
+			hole->hole_follows = 0;
+			list_del_init(&hole->hole_stack);
+		}
+
+		node->hole_follows = 0;
+		if (end != hole_end) {
+			list_add(&node->hole_stack, &mm->hole_stack);
+			node->hole_follows = 1;
+		}
+
+		return node;
+	}
+
+	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
@@ -251,7 +282,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
@@ -325,12 +356,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 		list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
 	if (node->hole_follows) {
-		BUG_ON(drm_mm_hole_node_start(node)
-		       == drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) ==
+		       __drm_mm_hole_node_end(node));
 		list_del(&node->hole_stack);
 	} else
-		BUG_ON(drm_mm_hole_node_start(node)
-		       != drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) !=
+		       __drm_mm_hole_node_end(node));
+
 
 	if (!prev_node->hole_follows) {
 		prev_node->hole_follows = 1;
@@ -388,6 +420,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -395,17 +429,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
 				continue;
 		}
 
-		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -432,6 +462,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -439,13 +471,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
-			start : drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
-			end : drm_mm_hole_node_end(entry);
-
-		BUG_ON(!entry->hole_follows);
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+		if (adj_start < start)
+			adj_start = start;
+		if (adj_end > end)
+			adj_end = end;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
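
The drm_mm_create_block() helper added above reserves a caller-specified range directly, instead of searching for a free hole the way drm_mm_get_block_generic() does. A minimal usage sketch follows; the manager, offset and size are hypothetical, not taken from this patch:

	struct drm_mm_node *node;

	/* Carve a fixed [0x100000, 0x140000) block out of an existing
	 * drm_mm; atomic=false means the node kmalloc may sleep. */
	node = drm_mm_create_block(&mm, 0x100000, 0x40000, false);
	if (node == NULL)
		return -ENOSPC;	/* no hole spans the requested range */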
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6a11ca85eaf..f7d88e99ebf0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -102,7 +102,7 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -124,6 +124,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->gtt_space != NULL)
 		seq_printf(m, " (gtt offset: %08x, size: %08x)",
 			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->stolen)
+		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
 	if (obj->pin_mappable || obj->fault_mappable) {
 		char s[3], *t = s;
 		if (obj->pin_mappable)
@@ -387,7 +389,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 			      struct intel_ring_buffer *ring)
 {
 	if (ring->get_seqno) {
-		seq_printf(m, "Current sequence (%s): %d\n",
+		seq_printf(m, "Current sequence (%s): %u\n",
 			   ring->name, ring->get_seqno(ring, false));
 	}
 }
@@ -544,11 +546,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	const volatile u32 __iomem *hws;
+	const u32 *hws;
 	int i;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+	hws = ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -608,7 +610,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -841,6 +843,77 @@ static const struct file_operations i915_error_state_fops = {
 	.release = i915_error_state_release,
 };
 
+static ssize_t
+i915_next_seqno_read(struct file *filp,
+		     char __user *ubuf,
+		     size_t max,
+		     loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	len = snprintf(buf, sizeof(buf),
+		       "next_seqno :  0x%x\n",
+		       dev_priv->next_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_next_seqno_write(struct file *filp,
+		      const char __user *ubuf,
+		      size_t cnt,
+		      loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	char buf[20];
+	u32 val = 1;
+	int ret;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		ret = kstrtouint(buf, 0, &val);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_set_seqno(dev, val);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret ?: cnt;
+}
+
+static const struct file_operations i915_next_seqno_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_next_seqno_read,
+	.write = i915_next_seqno_write,
+	.llseek = default_llseek,
+};
+
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1551,7 +1624,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 		return 0;
 	}
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
 	if (ret)
 		return ret;
 
@@ -1580,7 +1653,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
 		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
 
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
 }
@@ -2105,6 +2178,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
 
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_next_seqno",
+				  &i915_next_seqno_fops);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -2128,6 +2207,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105d..6d8a1dc74934 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1297,19 +1297,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_gem_stolen;
+
+	/* Important: The output setup functions called by modeset_init need
+	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
 	ret = i915_gem_init(dev);
 	if (ret)
-		goto cleanup_gem_stolen;
-
-	intel_modeset_gem_init(dev);
+		goto cleanup_irq;
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
-	ret = drm_irq_install(dev);
-	if (ret)
-		goto cleanup_gem;
+	intel_modeset_gem_init(dev);
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1319,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_gem;
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	intel_hpd_init(dev);
+
+	/*
+	 * Some ports require correctly set-up hpd registers for detection to
+	 * work properly (leading to ghost connected connector status), e.g. VGA
+	 * on gm45. Hence we can only set up the initial fbdev config after hpd
+	 * irqs are fully enabled. Now we should scan for the initial config
+	 * only once hotplug handling is enabled, but due to screwed-up locking
+	 * around kms/fbdev init we can't protect the fbdev initial config
+	 * scanning against hotplug events. Hence do this first and ignore the
+	 * tiny window where we will lose hotplug notifications.
+	 */
+	intel_fbdev_initial_config(dev);
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	dev_priv->enable_hotplug_processing = true;
 
 	drm_kms_helper_poll_init(dev);
 
@@ -1326,13 +1346,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	return 0;
 
-cleanup_irq:
-	drm_irq_uninstall(dev);
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
 cleanup_vga_switcheroo:
@@ -1582,7 +1602,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	spin_lock_init(&dev_priv->rps.lock);
-	spin_lock_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->dpio_lock);
 
 	mutex_init(&dev_priv->rps.hw_lock);
 
@@ -1614,9 +1634,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_opregion_init(dev);
 	acpi_video_register();
 
-	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
-		    (unsigned long) dev);
-
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
@@ -1723,9 +1740,6 @@ int i915_driver_unload(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_stolen(dev);
-	drm_mm_takedown(&dev_priv->mm.stolen);
-
-	intel_cleanup_overlay(dev);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		i915_free_hws(dev);
@@ -1738,6 +1752,10 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_teardown_mchbar(dev);
 
 	destroy_workqueue(dev_priv->wq);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
 
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
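
The pm_qos_remove_request() added to the unload path above pairs with a pm_qos_add_request() made during driver load; that counterpart is not visible in this hunk, but it would look roughly like the sketch below (an assumption based on the standard pm_qos API, used to keep cpu wakeup latency low for irq-driven dp aux and gmbus transfers):

	/* Load-time counterpart (assumed, not shown in this diff). */
	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);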
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1f..c8cbc32fe8db 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -565,6 +565,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		intel_modeset_init_hw(dev);
 		intel_modeset_setup_hw_state(dev, false);
 		drm_irq_install(dev);
+		intel_hpd_init(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -870,6 +871,7 @@ int i915_reset(struct drm_device *dev)
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
+		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed3059575576..b1b1b7350ca4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
+#include <uapi/drm/i915_drm.h>
+
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/pm_qos.h>
 
 /* General customization:
  */
@@ -83,7 +86,12 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
-#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+	(I915_GEM_DOMAIN_RENDER | \
+	 I915_GEM_DOMAIN_SAMPLER | \
+	 I915_GEM_DOMAIN_COMMAND | \
+	 I915_GEM_DOMAIN_INSTRUCTION | \
+	 I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
@@ -101,6 +109,19 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n);
+
 struct intel_ddi_plls {
 	int spll_refcount;
 	int wrpll1_refcount;
@@ -276,6 +297,7 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -577,6 +599,9 @@ struct intel_gen6_power_mgmt {
 	struct mutex hw_lock;
 };
 
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
 struct intel_ilk_power_mgmt {
 	u8 cur_delay;
 	u8 min_delay;
@@ -619,6 +644,7 @@ struct intel_l3_parity {
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
+	struct kmem_cache *slab;
 
 	const struct intel_device_info *info;
 
@@ -633,10 +659,11 @@ typedef struct drm_i915_private {
 	/** forcewake_count is protected by gt_lock */
 	unsigned forcewake_count;
 	/** gt_lock is also taken in irq contexts. */
-	struct spinlock gt_lock;
+	spinlock_t gt_lock;
 
 	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
+
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct mutex gmbus_mutex;
@@ -646,9 +673,11 @@ typedef struct drm_i915_private {
 	 */
 	uint32_t gpio_mmio_base;
 
+	wait_queue_head_t gmbus_wait_queue;
+
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer ring[I915_NUM_RINGS];
-	uint32_t next_seqno;
+	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
@@ -658,8 +687,11 @@ typedef struct drm_i915_private {
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
 	/* DPIO indirect register protection */
-	spinlock_t dpio_lock;
+	struct mutex dpio_lock;
 
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 pipestat[2];
@@ -669,6 +701,7 @@ typedef struct drm_i915_private {
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
+	bool enable_hotplug_processing;
 
 	int num_pipe;
 	int num_pch_pll;
@@ -710,7 +743,6 @@ typedef struct drm_i915_private {
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
@@ -771,6 +803,10 @@ typedef struct drm_i915_private {
 	unsigned long gtt_start;
 	unsigned long gtt_mappable_end;
 	unsigned long gtt_end;
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
 
 	struct io_mapping *gtt_mapping;
 	phys_addr_t gtt_base_addr;
@@ -943,6 +979,8 @@ enum i915_cache_level {
 	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+
 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
@@ -968,6 +1006,8 @@ struct drm_i915_gem_object {
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
+	/** Stolen memory for this object, instead of being backed by shmem. */
+	struct drm_mm_node *stolen;
 	struct list_head gtt_list;
 
 	/** This object's place on the active/inactive lists */
@@ -1138,7 +1178,7 @@ struct drm_i915_gem_request {
 
 struct drm_i915_file_private {
 	struct {
-		struct spinlock lock;
+		spinlock_t lock;
 		struct list_head request_list;
 	} mm;
 	struct idr context_idr;
@@ -1224,6 +1264,8 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define HAS_DDI(dev)		(IS_HASWELL(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -1320,6 +1362,7 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);
 
@@ -1388,12 +1431,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
 				     bool map_and_fenceable,
@@ -1451,8 +1497,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
@@ -1559,10 +1605,9 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
-			      unsigned long start,
-			      unsigned long mappable_end,
-			      unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
@@ -1582,9 +1627,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
+inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj->tiling_mode != I915_TILING_NONE;
+}
+
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
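
The intel_link_compute_m_n() declaration added above fills an intel_link_m_n from the raw link parameters. A hypothetical call for a 4-lane DP link follows; the numeric values and the kHz units are illustrative assumptions, not taken from this patch:

	struct intel_link_m_n m_n;

	intel_link_compute_m_n(24,	/* bits per pixel */
			       4,	/* number of lanes */
			       148500,	/* pixel clock, assumed kHz */
			       270000,	/* link clock, assumed kHz */
			       &m_n);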
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index da3c82e301b1..e6cc020ea32c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,8 +163,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		return -ENODEV;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_init_global_gtt(dev, args->gtt_start,
-				 args->gtt_end, args->gtt_end);
+	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+				  args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -192,6 +192,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	kmem_cache_free(dev_priv->slab, obj);
+}
+
 static int
 i915_gem_create(struct drm_file *file,
 		struct drm_device *dev,
@@ -215,7 +227,7 @@ i915_gem_create(struct drm_file *file,
 	if (ret) {
 		drm_gem_object_release(&obj->base);
 		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-		kfree(obj);
+		i915_gem_object_free(obj);
 		return ret;
 	}
 
@@ -259,14 +271,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 			       args->size, &args->handle);
 }
 
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
-}
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
 			const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
 	struct scatterlist *sg;
@@ -469,7 +472,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if (ret == 0)
 			goto next_page;
 
-		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -502,12 +504,6 @@ next_page:
 out:
 	i915_gem_object_unpin_pages(obj);
 
-	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-	}
-
 	return ret;
 }
 
@@ -838,12 +834,13 @@ out:
 	i915_gem_object_unpin_pages(obj);
 
 	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-		/* and flush dirty cachelines in case the object isn't in the cpu write
-		 * domain anymore. */
-		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
 			i915_gem_chipset_flush(dev);
 		}
@@ -1344,6 +1341,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	/* Now bind it into the GTT if needed */
 	ret = i915_gem_object_pin(obj, 0, true, false);
 	if (ret)
@@ -1933,30 +1936,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 }
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	int ret, i, j;
 
-	/* The hardware uses various monotonic 32-bit counters, if we
-	 * detect that they will wraparound we need to idle the GPU
-	 * and reset those counters.
-	 */
-	ret = 0;
+	/* Carefully retire all requests without writing to the rings */
 	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-			ret |= ring->sync_seqno[j] != 0;
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
 	}
-	if (ret == 0)
-		return ret;
-
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
 	i915_gem_retire_requests(dev);
+
+	/* Finally reset hw state */
 	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
 			ring->sync_seqno[j] = 0;
 	}
@@ -1964,6 +1961,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
 	return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* HWS page needs to be set less than what we
+	 * will inject to ring
+	 */
+	ret = i915_gem_init_seqno(dev, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
+
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
@@ -1971,14 +1994,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
 	/* reserve 0 for non-seqno */
 	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_handle_seqno_wrap(dev);
+		int ret = i915_gem_init_seqno(dev, 0);
 		if (ret)
 			return ret;
 
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
 	return 0;
 }
 
@@ -2648,7 +2671,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	case 4: i965_write_fence_reg(dev, reg, obj); break;
 	case 3: i915_write_fence_reg(dev, reg, obj); break;
 	case 2: i830_write_fence_reg(dev, reg, obj); break;
-	default: break;
+	default: BUG();
 	}
 }
 
@@ -2823,7 +2846,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 
 	/* On non-LLC machines we have to be careful when putting differing
 	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dieing.
+	 * crossing memory domains and dying.
 	 */
 	if (HAS_LLC(dev))
 		return true;
@@ -3698,14 +3721,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
-	u32 mask;
+	gfp_t mask;
 
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return NULL;
 
 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-		kfree(obj);
+		i915_gem_object_free(obj);
 		return NULL;
 	}
 
@@ -3777,6 +3800,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
+	i915_gem_object_release_stolen(obj);
 
 	BUG_ON(obj->pages);
 
@@ -3787,7 +3811,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
 	kfree(obj->bit_17);
-	kfree(obj);
+	i915_gem_object_free(obj);
 }
 
 int
@@ -3883,8 +3907,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
 	if (IS_GEN6(dev))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else
+	else if (IS_GEN7(dev))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else
+		BUG();
 }
 
 static bool
@@ -3919,6 +3945,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
 	i915_gem_init_swizzling(dev);
 
+	dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
+
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		return ret;
@@ -3935,8 +3963,6 @@ i915_gem_init_hw(struct drm_device *dev)
 		goto cleanup_bsd_ring;
 	}
 
-	dev_priv->next_seqno = 1;
-
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -3953,58 +3979,13 @@ cleanup_render_ring:
 	return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-	if (i915_enable_ppgtt >= 0)
-		return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-		return false;
-#endif
-
-	return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
 	int ret;
 
-	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 	mutex_lock(&dev->struct_mutex);
-	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
-		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	} else {
-		/* Let GEM Manage all of the aperture.
-		 *
-		 * However, leave one page at the end still bound to the scratch
-		 * page. There are a number of places where the hardware
-		 * apparently prefetches past the end of the object, and we've
-		 * seen multiple hangs with the GPU head pointer stuck in a
-		 * batchbuffer bound at the last page of the aperture. One page
-		 * should be enough to keep any prefetching inside of the
-		 * aperture.
-		 */
-		i915_gem_init_global_gtt(dev, 0, mappable_size,
-					 gtt_size);
-	}
-
+	i915_gem_init_global_gtt(dev);
 	ret = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
@@ -4105,8 +4086,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
 void
 i915_gem_load(struct drm_device *dev)
 {
-	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->slab =
+		kmem_cache_create("i915_gem_object",
+				  sizeof(struct drm_i915_gem_object), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
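
The seqno handling above leans on i915_seqno_passed() (shown in the i915_drv.h hunk earlier) comparing sequence numbers via signed subtraction, which keeps ordering correct across the 32-bit wrap that i915_gem_init_seqno() exercises. A worked example, not part of the patch:

	/* A seqno issued just before the wrap is still seen as passed
	 * once the hardware counter wraps around to a small value. */
	uint32_t hw_seqno = 0x00000002;		/* counter after wrapping */
	uint32_t wait_seqno = 0xfffffffe;	/* issued just before wrap */

	/* (int32_t)(0x2 - 0xfffffffe) == 4 >= 0, so it reads as passed */
	bool passed = (int32_t)(hw_seqno - wait_seqno) >= 0;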
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index abeaafef6d7e..6a5af6828624 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(attach))
 		return ERR_CAST(attach);
 
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL) {
 		ret = -ENOMEM;
 		goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
-		kfree(obj);
+		i915_gem_object_free(obj);
 		goto fail_detach;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a07393..163bb52bd3b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -150,17 +150,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
-		     reloc->write_domain != target_obj->pending_write_domain)) {
-		DRM_DEBUG("Write domain conflict: "
-			  "obj %p target %d offset %d "
-			  "new %08x old %08x\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->write_domain,
-			  target_obj->pending_write_domain);
-		return ret;
-	}
 
 	target_obj->pending_read_domains |= reloc->read_domains;
 	target_obj->pending_write_domain |= reloc->write_domain;
@@ -602,44 +591,11 @@ err:
 }
 
 static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
-static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -650,18 +606,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		i915_gem_chipset_flush(ring->dev);
 
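
The deleted wait-for-flips helper walked the set bits of a mask with a compact loop condition: iteration stops as soon as no higher bits remain. A self-contained demonstration of the idiom (the mask value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t flips = 0x5;	/* pretend planes 0 and 2 have pending flips */
	unsigned int plane;

	/* The shift makes the loop terminate after the highest set bit. */
	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		printf("would emit MI_WAIT_FOR_EVENT for plane %u\n", plane);
	}
	return 0;
}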
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a7..a4af0f79e972 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -282,7 +282,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 	uint32_t pd_offset;
 	struct intel_ring_buffer *ring;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	uint32_t __iomem *pd_addr;
+	gtt_pte_t __iomem *pd_addr;
 	uint32_t pd_entry;
 	int i;
 
@@ -290,7 +290,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 		return;
 
 
-	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 
@@ -367,7 +367,7 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	gtt_pte_t scratch_pte;
-	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry;
 	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
 	int i;
 
@@ -432,7 +432,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
 	struct scatterlist *sg = st->sgl;
 	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
 	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	gtt_pte_t __iomem *gtt_entries =
+		(gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry;
 	int unused, i = 0;
 	unsigned int len, m = 0;
 	dma_addr_t addr;
@@ -525,26 +526,103 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 	}
 }
 
-void i915_gem_init_global_gtt(struct drm_device *dev,
+void i915_gem_setup_global_gtt(struct drm_device *dev,
 			       unsigned long start,
 			       unsigned long mappable_end,
 			       unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_mm_node *entry;
+	struct drm_i915_gem_object *obj;
+	unsigned long hole_start, hole_end;
 
-	/* Substract the guard page ... */
+	/* Subtract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
 	if (!HAS_LLC(dev))
 		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
+	/* Mark any preallocated objects as occupied */
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+			      obj->gtt_offset, obj->base.size);
+
+		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+						     obj->gtt_offset,
+						     obj->base.size,
+						     false);
+		obj->has_global_gtt_mapping = 1;
+	}
+
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
 	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
-	/* ... but ensure that we clear the entire range. */
-	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	/* Clear any non-preallocated blocks */
+	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+			     hole_start, hole_end) {
+		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+			      hole_start, hole_end);
+		i915_ggtt_clear_range(dev,
+				      hole_start / PAGE_SIZE,
+				      (hole_end-hole_start) / PAGE_SIZE);
+	}
+
+	/* And finally clear the reserved guard page */
+	i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return;
+		}
+	} else {
+		/* Let GEM Manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page. There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture. One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+	}
 }
 
 static int setup_scratch_page(struct drm_device *dev)
@@ -674,9 +752,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 		goto err_out;
 	}
 
-	dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+	dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr,
 					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
-	if (!dev_priv->mm.gtt->gtt) {
+	if (!dev_priv->mm.gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		teardown_scratch_page(dev);
 		ret = -ENOMEM;
@@ -700,7 +778,7 @@ err_out:
 void i915_gem_gtt_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	iounmap(dev_priv->mm.gtt->gtt);
+	iounmap(dev_priv->mm.gsm);
 	teardown_scratch_page(dev);
 	if (INTEL_INFO(dev)->gen < 6)
 		intel_gmch_remove();
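
The new i915_gem_setup_global_gtt() above reserves the BIOS-preallocated blocks first and then scrubs only the gaps between them, instead of blindly clearing the whole range. A standalone sketch of that hole walk over a sorted, non-overlapping reservation list (the addresses and the guard-page split are invented for illustration):

#include <stdio.h>

struct block { unsigned long start, end; };	/* [start, end) */

static void clear_holes(unsigned long start, unsigned long end,
			const struct block *res, int nres)
{
	unsigned long cursor = start;
	int i;

	/* Mimics drm_mm_for_each_hole() + i915_ggtt_clear_range(). */
	for (i = 0; i < nres; i++) {
		if (cursor < res[i].start)
			printf("clear [0x%lx, 0x%lx)\n", cursor, res[i].start);
		cursor = res[i].end;
	}
	if (cursor < end)
		printf("clear [0x%lx, 0x%lx)\n", cursor, end);
}

int main(void)
{
	struct block stolen_fb = { 0x0000, 0x4000 };	/* e.g. a BIOS framebuffer */

	clear_holes(0x0, 0x10000 - 0x1000, &stolen_fb, 1);
	/* The reserved guard page at the very end is cleared separately. */
	printf("clear guard page [0x%x, 0x%x)\n", 0x10000 - 0x1000, 0x10000);
	return 0;
}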
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083b126f..f21ae17e298f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,85 +42,73 @@
  * for is a boon.
  */
 
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- *                       a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev_priv->bridge_dev;
 	u32 base;
 
-#if 0
 	/* On the machines I have tested the Graphics Base of Stolen Memory
-	 * is unreliable, so compute the base by subtracting the stolen memory
-	 * from the Top of Low Usable DRAM which is where the BIOS places
-	 * the graphics stolen memory.
+	 * is unreliable, so on those compute the base by subtracting the
+	 * stolen memory from the Top of Low Usable DRAM which is where the
+	 * BIOS places the graphics stolen memory.
+	 *
+	 * On gen2, the layout is slightly different with the Graphics Segment
+	 * immediately following Top of Memory (or Top of Usable DRAM). Note
+	 * it appears that TOUD is only reported by 865g, so we just use the
+	 * top of memory as determined by the e820 probe.
+	 *
+	 * XXX gen2 requires an unavailable symbol and 945gm fails with
+	 * its value of TOLUD.
 	 */
-	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
-		/* top 32bits are reserved = 0 */
+	base = 0;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		/* Read Base Data of Stolen Memory Register (BDSM) directly.
+		 * Note that there is also a MCHBAR miror at 0x1080c0 or
+		 * we could use device 2:0x5c instead.
+		 */
+		pci_read_config_dword(pdev, 0xB0, &base);
+		base &= ~4095; /* lower bits used for locking register */
+	} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* Read Graphics Base of Stolen Memory directly */
 		pci_read_config_dword(pdev, 0xA4, &base);
-	} else {
-		/* XXX presume 8xx is the same as i915 */
-		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
-	}
-#else
-	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
-		u16 val;
-		pci_read_config_word(pdev, 0xb0, &val);
-		base = val >> 4 << 20;
-	} else {
+#if 0
+	} else if (IS_GEN3(dev)) {
 		u8 val;
+		/* Stolen is immediately below Top of Low Usable DRAM */
 		pci_read_config_byte(pdev, 0x9c, &val);
 		base = val >> 3 << 27;
-	}
-	base -= dev_priv->mm.gtt->stolen_size;
+		base -= dev_priv->mm.gtt->stolen_size;
+	} else {
+		/* Stolen is immediately above Top of Memory */
+		base = max_low_pfn_mapped << PAGE_SHIFT;
 #endif
+	}
 
-	return base + offset;
+	return base;
 }
 
-static void i915_warn_stolen(struct drm_device *dev)
-{
-	DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
-	DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
+static int i915_setup_compression(struct drm_device *dev, int size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
-	unsigned long cfb_base;
-	unsigned long ll_base = 0;
-
-	/* Just in case the BIOS is doing something questionable. */
-	intel_disable_fbc(dev);
 
-	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	/* Try to over-allocate to reduce reallocations and fragmentation */
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+					   size <<= 1, 4096, 0);
+	if (!compressed_fb)
+		compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+						   size >>= 1, 4096, 0);
 	if (compressed_fb)
 		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
 	if (!compressed_fb)
 		goto err;
 
-	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
-	if (!cfb_base)
-		goto err_fb;
-
-	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+	if (HAS_PCH_SPLIT(dev))
+		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+	else if (IS_GM45(dev)) {
+		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+	} else {
 		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
 						    4096, 4096, 0);
 		if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		if (!compressed_llb)
 			goto err_fb;
 
-		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
-		if (!ll_base)
-			goto err_llb;
+		dev_priv->compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + compressed_fb->start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
+	dev_priv->compressed_fb = compressed_fb;
 	dev_priv->cfb_size = size;
 
-	dev_priv->compressed_fb = compressed_fb;
-	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
-	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
-	} else {
-		I915_WRITE(FBC_CFB_BASE, cfb_base);
-		I915_WRITE(FBC_LL_BASE, ll_base);
-		dev_priv->compressed_llb = compressed_llb;
-	}
+	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+		      size);
 
-	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
-		      cfb_base, ll_base, size >> 20);
-	return;
+	return 0;
 
-err_llb:
-	drm_mm_put_block(compressed_llb);
 err_fb:
 	drm_mm_put_block(compressed_fb);
 err:
-	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-	i915_warn_stolen(dev);
+	return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return -ENODEV;
+
+	if (size < dev_priv->cfb_size)
+		return 0;
+
+	/* Release any current block */
+	i915_gem_stolen_cleanup_compression(dev);
+
+	return i915_setup_compression(dev, size);
 }
 
-static void i915_cleanup_compression(struct drm_device *dev)
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	drm_mm_put_block(dev_priv->compressed_fb);
+	if (dev_priv->cfb_size == 0)
+		return;
+
+	if (dev_priv->compressed_fb)
+		drm_mm_put_block(dev_priv->compressed_fb);
+
 	if (dev_priv->compressed_llb)
 		drm_mm_put_block(dev_priv->compressed_llb);
+
+	dev_priv->cfb_size = 0;
 }
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
-	if (I915_HAS_FBC(dev) && i915_powersave)
-		i915_cleanup_compression(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_gem_stolen_cleanup_compression(dev);
+	drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+	if (dev_priv->mm.stolen_base == 0)
+		return 0;
+
+	DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+		      dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
 
 	/* Basic memrange allocator for stolen space */
-	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size);
+
+	return 0;
+}
 
-	/* Try to set up FBC with a reasonable compressed buffer size */
-	if (I915_HAS_FBC(dev) && i915_powersave) {
-		int cfb_size;
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+			     u32 offset, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st;
+	struct scatterlist *sg;
+
+	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+	BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size);
 
-		/* Leave 1M for line length buffer & misc. */
+	/* We hide that we have no struct page backing our stolen object
+	 * by wrapping the contiguous physical allocation with a fake
+	 * dma mapping in a single scatterlist.
+	 */
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return NULL;
 
-		/* Try to get a 32M buffer... */
-		if (prealloc_size > (36*1024*1024))
-			cfb_size = 32*1024*1024;
-		else /* fall back to 7/8 of the stolen space */
-			cfb_size = prealloc_size * 7 / 8;
-		i915_setup_compression(dev, cfb_size);
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return NULL;
 	}
 
-	return 0;
+	sg = st->sgl;
+	sg->offset = offset;
+	sg->length = size;
+
+	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+	sg_dma_len(sg) = size;
+
+	return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	BUG();
+	return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	/* Should only be called during free */
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+	.get_pages = i915_gem_object_get_pages_stolen,
+	.put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+			       struct drm_mm_node *stolen)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+		goto cleanup;
+
+	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+	obj->pages = i915_pages_create_for_stolen(dev,
+						  stolen->start, stolen->size);
+	if (obj->pages == NULL)
+		goto cleanup;
+
+	obj->has_dma_mapping = true;
+	obj->pages_pin_count = 1;
+	obj->stolen = stolen;
+
+	obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+	obj->cache_level = I915_CACHE_NONE;
+
+	return obj;
+
+cleanup:
+	i915_gem_object_free(obj);
+	return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return NULL;
+
+	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+	if (size == 0)
+		return NULL;
+
+	stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (stolen)
+		stolen = drm_mm_get_block(stolen, size, 4096);
+	if (stolen == NULL)
+		return NULL;
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj)
+		return obj;
+
+	drm_mm_put_block(stolen);
+	return NULL;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+	if (obj->stolen) {
+		drm_mm_put_block(obj->stolen);
+		obj->stolen = NULL;
+	}
 }
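
On gen6+ the stolen base is now read straight from the bridge device's Base Data of Stolen Memory register, whose low bits carry lock/control flags rather than address. A tiny sketch of that decode step; the register value is invented and only the masking arithmetic is the point:

#include <stdio.h>

int main(void)
{
	unsigned int bdsm = 0x7b000001;		/* sample raw value, bit 0 = BIOS lock */
	unsigned int base = bdsm & ~4095u;	/* base is 4KiB aligned; drop low 12 bits */

	printf("raw 0x%08x -> stolen base 0x%08x\n", bdsm, base);
	return 0;
}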
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3dfa..65f1d4f3f775 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -396,6 +396,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	/* we have to maintain this existing ABI... */
 	args->stride = obj->stride;
 	args->tiling_mode = obj->tiling_mode;
+
+	/* Try to preallocate memory required to save swizzling on put-pages */
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		if (obj->bit_17 == NULL) {
+			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+					      sizeof(long), GFP_KERNEL);
+		}
+	} else {
+		kfree(obj->bit_17);
+		obj->bit_17 = NULL;
+	}
+
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
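
The preallocation above sizes the bit-17 swizzle bitmap at one bit per page, rounded up to whole longs via BITS_TO_LONGS(). A standalone sketch of that arithmetic (the object size is chosen arbitrarily):

#include <stdio.h>
#include <limits.h>

#define PAGE_SHIFT	12
#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long obj_size = 1UL << 20;		/* 1 MiB object */
	unsigned long npages = obj_size >> PAGE_SHIFT;	/* one bit per page */
	unsigned long nbytes = BITS_TO_LONGS(npages) * sizeof(long);

	printf("%lu pages -> %lu longs (%lu bytes) of swizzle state\n",
	       npages, BITS_TO_LONGS(npages), nbytes);
	return 0;
}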
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2220dec3e5d9..6689a61b02a3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	/* HPD irq before everything is fully set up. */
+	if (!dev_priv->enable_hotplug_processing)
+		return;
+
 	mutex_lock(&mode_config->mutex);
 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_helper_hpd_irq_event(dev);
 }
 
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
 static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 	unsigned long irqflags;
 	int pipe;
 	u32 pipe_stats[I915_MAX_PIPES];
-	bool blc_event;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -590,8 +604,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 			I915_READ(PORT_HOTPLUG_STAT);
 		}
 
-		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
-			blc_event = true;
+		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+			gmbus_irq_handler(dev);
 
 		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
 				 SDE_AUDIO_POWER_SHIFT);
 
+	if (pch_iir & SDE_AUX_MASK)
+		dp_aux_irq_handler(dev);
+
 	if (pch_iir & SDE_GMBUS)
-		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+		gmbus_irq_handler(dev);
 
 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 				 SDE_AUDIO_POWER_SHIFT_CPT);
 
 	if (pch_iir & SDE_AUX_MASK_CPT)
-		DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+		dp_aux_irq_handler(dev);
 
 	if (pch_iir & SDE_GMBUS_CPT)
-		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+		gmbus_irq_handler(dev);
 
 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	de_iir = I915_READ(DEIIR);
 	if (de_iir) {
+		if (de_iir & DE_AUX_CHANNEL_A_IVB)
+			dp_aux_irq_handler(dev);
+
 		if (de_iir & DE_GSE_IVB)
 			intel_opregion_gse_intr(dev);
 
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+	u32 de_iir, gt_iir, de_ier, pm_iir;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
-	pch_iir = I915_READ(SDEIIR);
 	pm_iir = I915_READ(GEN6_PMIIR);
 
-	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
-	    (!IS_GEN6(dev) || pm_iir == 0))
+	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
 		goto done;
 
 	ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	else
 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
+	if (de_iir & DE_AUX_CHANNEL_A)
+		dp_aux_irq_handler(dev);
+
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
 
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 
 	/* check event from PCH */
 	if (de_iir & DE_PCH_EVENT) {
+		u32 pch_iir = I915_READ(SDEIIR);
+
 		if (HAS_PCH_CPT(dev))
 			cpt_irq_handler(dev, pch_iir);
 		else
 			ibx_irq_handler(dev, pch_iir);
+
+		/* should clear PCH hotplug event before clear CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
 	}
 
 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
 		gen6_queue_rps_work(dev_priv, pm_iir);
 
-	/* should clear PCH hotplug event before clear CPU irq */
-	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
 	I915_WRITE(DEIIR, de_iir);
 	I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -928,6 +952,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
+		} else if (src->stolen) {
+			unsigned long offset;
+
+			offset = dev_priv->mm.stolen_base;
+			offset += src->stolen->start;
+			offset += i << PAGE_SHIFT;
+
+			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
 		} else {
 			struct page *page;
 			void *s;
@@ -1074,6 +1106,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
 		error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
 		break;
 
+	default:
+		BUG();
 	}
 }
 
@@ -1854,7 +1888,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+			   DE_AUX_CHANNEL_A;
 	u32 render_irqs;
 	u32 hotplug_mask;
 
@@ -1888,12 +1923,15 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
 				SDE_PORTB_HOTPLUG_CPT |
 				SDE_PORTC_HOTPLUG_CPT |
-				SDE_PORTD_HOTPLUG_CPT);
+				SDE_PORTD_HOTPLUG_CPT |
+				SDE_GMBUS_CPT |
+				SDE_AUX_MASK_CPT);
 	} else {
 		hotplug_mask = (SDE_CRT_HOTPLUG |
 				SDE_PORTB_HOTPLUG |
 				SDE_PORTC_HOTPLUG |
 				SDE_PORTD_HOTPLUG |
+				SDE_GMBUS |
 				SDE_AUX_MASK);
 	}
 
@@ -1924,7 +1962,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
 		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
 		DE_PLANEC_FLIP_DONE_IVB |
 		DE_PLANEB_FLIP_DONE_IVB |
-		DE_PLANEA_FLIP_DONE_IVB;
+		DE_PLANEA_FLIP_DONE_IVB |
+		DE_AUX_CHANNEL_A_IVB;
 	u32 render_irqs;
 	u32 hotplug_mask;
 
@@ -1953,7 +1992,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
 	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
 			SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
-			SDE_PORTD_HOTPLUG_CPT);
+			SDE_PORTD_HOTPLUG_CPT |
+			SDE_GMBUS_CPT |
+			SDE_AUX_MASK_CPT);
 	dev_priv->pch_irq_mask = ~hotplug_mask;
 
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
@@ -1970,7 +2011,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
-	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
 	u32 render_irqs;
 	u16 msid;
@@ -1999,6 +2039,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	msid |= (1<<14);
 	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
 
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
 	I915_WRITE(VLV_IER, enable_mask);
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2007,6 +2050,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 
 	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
 
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2027,6 +2071,15 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 #endif
 
 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+	return 0;
+}
+
+static void valleyview_hpd_irq_setup(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
 	/* Note HDMI and DP share bits */
 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2044,8 +2097,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	}
 
 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
-	return 0;
 }
 
 static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2275,6 +2326,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
 			I915_USER_INTERRUPT;
 
 	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		POSTING_READ(PORT_HOTPLUG_EN);
+
 		/* Enable in IER... */
 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
 		/* and unmask in IMR */
@@ -2285,8 +2339,18 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
+	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 hotplug_en;
+
 	if (I915_HAS_HOTPLUG(dev)) {
-		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 
 		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2307,10 +2371,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
 
 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 	}
-
-	intel_opregion_enable_asle(dev);
-
-	return 0;
 }
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2470,7 +2530,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
 static int i965_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 hotplug_en;
 	u32 enable_mask;
 	u32 error_mask;
 
@@ -2491,6 +2550,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
 
 	dev_priv->pipestat[0] = 0;
 	dev_priv->pipestat[1] = 0;
+	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 
 	/*
 	 * Enable some error detection, note the instruction error mask
@@ -2511,6 +2571,19 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
+	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
+
+static void i965_hpd_irq_setup(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 hotplug_en;
+
 	/* Note HDMI and DP share hotplug bits */
 	hotplug_en = 0;
 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
@@ -2545,10 +2618,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	/* Ignore TV since it's buggy */
 
 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
-	intel_opregion_enable_asle(dev);
-
-	return 0;
 }
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2644,6 +2713,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
 			intel_opregion_asle_intr(dev);
 
+		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+			gmbus_irq_handler(dev);
+
 		/* With MSI, interrupts are only generated when iir
 		 * transitions from zero to nonzero. If another bit got
 		 * set while we were handling the existing iir bits, then
@@ -2699,6 +2771,11 @@ void intel_irq_init(struct drm_device *dev)
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
+	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+		    (unsigned long) dev);
+
+	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2719,7 +2796,8 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
 		dev->driver->enable_vblank = valleyview_enable_vblank;
 		dev->driver->disable_vblank = valleyview_disable_vblank;
-	} else if (IS_IVYBRIDGE(dev)) {
+		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		/* Share pre & uninstall handlers with ILK/SNB */
 		dev->driver->irq_handler = ivybridge_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2727,14 +2805,6 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
 		dev->driver->enable_vblank = ivybridge_enable_vblank;
 		dev->driver->disable_vblank = ivybridge_disable_vblank;
-	} else if (IS_HASWELL(dev)) {
-		/* Share interrupts handling with IVB */
-		dev->driver->irq_handler = ivybridge_irq_handler;
-		dev->driver->irq_preinstall = ironlake_irq_preinstall;
-		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-		dev->driver->irq_uninstall = ironlake_irq_uninstall;
-		dev->driver->enable_vblank = ivybridge_enable_vblank;
-		dev->driver->disable_vblank = ivybridge_disable_vblank;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2753,13 +2823,23 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_postinstall = i915_irq_postinstall;
 		dev->driver->irq_uninstall = i915_irq_uninstall;
 		dev->driver->irq_handler = i915_irq_handler;
+		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else {
 		dev->driver->irq_preinstall = i965_irq_preinstall;
 		dev->driver->irq_postinstall = i965_irq_postinstall;
 		dev->driver->irq_uninstall = i965_irq_uninstall;
 		dev->driver->irq_handler = i965_irq_handler;
+		dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
 	}
 	dev->driver->enable_vblank = i915_enable_vblank;
 	dev->driver->disable_vblank = i915_disable_vblank;
 	}
 }
+
+void intel_hpd_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+}
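
The recurring pattern in this file is worth calling out: hotplug setup moves out of the *_irq_postinstall() paths into per-platform hpd_irq_setup hooks, and intel_hpd_init() invokes the hook only if one was installed. A reduced C sketch of that optional-callback shape (the struct and function names here are stand-ins, not the driver's types):

#include <stdio.h>

struct display_funcs {
	void (*hpd_irq_setup)(void *dev);	/* NULL on platforms without hotplug */
};

static void i965_style_hpd_setup(void *dev)
{
	(void)dev;
	printf("programming PORT_HOTPLUG_EN\n");
}

static void hpd_init(const struct display_funcs *funcs, void *dev)
{
	if (funcs->hpd_irq_setup)
		funcs->hpd_irq_setup(dev);
}

int main(void)
{
	struct display_funcs funcs = { .hpd_irq_setup = i965_style_hpd_setup };

	hpd_init(&funcs, NULL);	/* called once the driver is ready for HPD */
	return 0;
}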
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 186ee5c85b51..3b039f4268e3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -142,6 +142,7 @@
 #define VGA_MSR_CGA_MODE (1<<0)
 
 #define VGA_SR_INDEX 0x3c4
+#define SR01 1
 #define VGA_SR_DATA 0x3c5
 
 #define VGA_AR_INDEX 0x3c0
@@ -940,23 +941,6 @@
 #define DPLL_LOCK_VLV (1<<15)
 #define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
 
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
-
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
-
-/* Scratch pad debug 0 reg:
- */
 #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
 /*
  * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -1893,8 +1877,6 @@
 #define PFIT_SCALING_PILLAR (2 << 26)
 #define PFIT_SCALING_LETTER (3 << 26)
 #define PFIT_PGM_RATIOS 0x61234
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
 /* Pre-965 */
 #define PFIT_VERT_SCALE_SHIFT 20
 #define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -2668,11 +2650,11 @@
 #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
 #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
 #define PIPECONF_CXSR_DOWNCLOCK (1<<16)
-#define PIPECONF_BPP_MASK (0x000000e0)
-#define PIPECONF_BPP_8 (0<<5)
-#define PIPECONF_BPP_10 (1<<5)
-#define PIPECONF_BPP_6 (2<<5)
-#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
 #define PIPECONF_DITHER_EN (1<<4)
 #define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
 #define PIPECONF_DITHER_TYPE_SP (0<<2)
@@ -2716,11 +2698,6 @@
 #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
 #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
 #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
-#define PIPE_8BPC (0 << 5)
-#define PIPE_10BPC (1 << 5)
-#define PIPE_6BPC (2 << 5)
-#define PIPE_12BPC (3 << 5)
 
 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
 #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -3578,27 +3555,30 @@
 #define PORTD_PULSE_DURATION_6ms (2 << 18)
 #define PORTD_PULSE_DURATION_100ms (3 << 18)
 #define PORTD_PULSE_DURATION_MASK (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
 #define PORTC_HOTPLUG_ENABLE (1 << 12)
 #define PORTC_PULSE_DURATION_2ms (0)
 #define PORTC_PULSE_DURATION_4_5ms (1 << 10)
 #define PORTC_PULSE_DURATION_6ms (2 << 10)
 #define PORTC_PULSE_DURATION_100ms (3 << 10)
 #define PORTC_PULSE_DURATION_MASK (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
 #define PORTB_HOTPLUG_ENABLE (1 << 4)
 #define PORTB_PULSE_DURATION_2ms (0)
 #define PORTB_PULSE_DURATION_4_5ms (1 << 2)
 #define PORTB_PULSE_DURATION_6ms (2 << 2)
 #define PORTB_PULSE_DURATION_100ms (3 << 2)
 #define PORTB_PULSE_DURATION_MASK (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
 
 #define PCH_GPIOA 0xc5010
 #define PCH_GPIOB 0xc5014
@@ -3817,8 +3797,6 @@
 #define TRANS_FSYNC_DELAY_HB2 (1<<27)
 #define TRANS_FSYNC_DELAY_HB3 (2<<27)
 #define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
 #define TRANS_INTERLACE_MASK (7<<21)
 #define TRANS_PROGRESSIVE (0<<21)
 #define TRANS_INTERLACED (3<<21)
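
The hotplug status bits above widen from overlapping single-bit flags to proper two-bit fields with an explicit mask, so short and long pulses become distinct encodings of one field. A compilable decode sketch using the new port D values (the sample register value is invented):

#include <stdio.h>

#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)

int main(void)
{
	unsigned int stat = 0x00020000;	/* pretend PORT_HOTPLUG_STAT readout */

	switch (stat & PORTD_HOTPLUG_STATUS_MASK) {
	case PORTD_HOTPLUG_NO_DETECT:
		printf("port D: no pulse\n");
		break;
	case PORTD_HOTPLUG_SHORT_DETECT:
		printf("port D: short pulse\n");
		break;
	case PORTD_HOTPLUG_LONG_DETECT:
		printf("port D: long pulse\n");
		break;
	default:
		printf("port D: reserved encoding\n");
	}
	return 0;
}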
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7eb..71a5ebad14fb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
 
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
-	if (IS_HASWELL(dev))
+	if (HAS_DDI(dev))
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
 	else
 		crt->base.get_hw_state = intel_crt_get_hw_state;
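
Swapping IS_HASWELL() for HAS_DDI() here is a capability-flag cleanup: call sites ask about the feature, not the platform, so future DDI hardware needs no edits at each branch. A trivial sketch of the difference (device_info and the flag name are illustrative stand-ins):

#include <stdio.h>
#include <stdbool.h>

struct device_info {
	bool has_ddi;
};

#define HAS_DDI(info) ((info)->has_ddi)

int main(void)
{
	struct device_info haswell = { .has_ddi = true };
	struct device_info ivybridge = { .has_ddi = false };

	printf("haswell: %s state readout\n",
	       HAS_DDI(&haswell) ? "DDI" : "legacy CRT");
	printf("ivybridge: %s state readout\n",
	       HAS_DDI(&ivybridge) ? "DDI" : "legacy CRT");
	return 0;
}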
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4bad0f724019..2e904a5cd6cb 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+				      bool use_fdi_mode)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
 {
 	int port;
 
-	if (IS_HASWELL(dev)) {
-		for (port = PORT_A; port < PORT_E; port++)
-			intel_prepare_ddi_buffers(dev, port, false);
+	if (!HAS_DDI(dev))
+		return;
 
-		/* DDI E is the suggested one to work in FDI mode, so program is as such by
-		 * default. It will have to be re-programmed in case a digital DP output
-		 * will be detected on it
-		 */
-		intel_prepare_ddi_buffers(dev, PORT_E, true);
-	}
+	for (port = PORT_A; port < PORT_E; port++)
+		intel_prepare_ddi_buffers(dev, port, false);
+
+	/* DDI E is the suggested one to work in FDI mode, so program is as such
+	 * by default. It will have to be re-programmed in case a digital DP
+	 * output will be detected on it
+	 */
+	intel_prepare_ddi_buffers(dev, PORT_E, true);
 }
 
 static const long hsw_ddi_buf_ctl_values[] = {
@@ -1069,7 +1071,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	if (port == PORT_A)
 		cpu_transcoder = TRANSCODER_EDP;
 	else
-		cpu_transcoder = pipe;
+		cpu_transcoder = (enum transcoder) pipe;
 
 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
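
The intel_prepare_ddi() rewrite above is a guard-clause refactor: bail out early on hardware without DDI instead of nesting the whole body inside an if-block. A generic sketch of the shape (the names are invented):

#include <stdio.h>
#include <stdbool.h>

static void prepare(bool has_ddi)
{
	if (!has_ddi)
		return;	/* early exit keeps the body flat */

	for (int port = 0; port < 4; port++)
		printf("programming DP/HDMI buffers for port %c\n", 'A' + port);
	printf("programming port E for FDI\n");
}

int main(void)
{
	prepare(true);
	prepare(false);	/* no-op on hardware without DDI */
	return 0;
}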
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a9fb046b94a1..8c36a11a9a57 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
 
 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
 {
-	unsigned long flags;
-	u32 val = 0;
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
-	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 		DRM_ERROR("DPIO idle wait timed out\n");
-		goto out_unlock;
+		return 0;
 	}
 
 	I915_WRITE(DPIO_REG, reg);
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
 		   DPIO_BYTE);
 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 		DRM_ERROR("DPIO read wait timed out\n");
-		goto out_unlock;
+		return 0;
 	}
-	val = I915_READ(DPIO_DATA);
 
-out_unlock:
-	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
-	return val;
+	return I915_READ(DPIO_DATA);
 }
 
 static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
 			     u32 val)
 {
-	unsigned long flags;
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
-	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 		DRM_ERROR("DPIO idle wait timed out\n");
-		goto out_unlock;
+		return;
 	}
 
 	I915_WRITE(DPIO_DATA, val);
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
 		    DPIO_BYTE);
 	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
 		DRM_ERROR("DPIO write wait timed out\n");
-
-out_unlock:
-	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
 }
 
 static void vlv_init_dpio(struct drm_device *dev)
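The two accessors above stop taking dpio_lock themselves: the spin_lock_irqsave()/goto out_unlock pairs are dropped, early exits become plain returns, and a WARN_ON(!mutex_is_locked()) documents that the caller now owns the mutex-converted lock around a whole register sequence. A minimal sketch of that pattern, with assumed names and a dummy register read standing in for the DPIO protocol:

	#include <linux/mutex.h>
	#include <linux/io.h>

	struct example_priv {
		struct mutex lock;	/* guards multi-register sequences */
		void __iomem *mmio;
	};

	/* Low-level accessor: asserts the lock rather than taking it. */
	static u32 example_read(struct example_priv *priv, u32 reg)
	{
		WARN_ON(!mutex_is_locked(&priv->lock));
		return readl(priv->mmio + reg);
	}

	/* Caller brackets the whole sequence under one lock acquisition. */
	static void example_sequence(struct example_priv *priv)
	{
		mutex_lock(&priv->lock);
		(void)example_read(priv, 0x04);
		(void)example_read(priv, 0x08);
		mutex_unlock(&priv->lock);
	}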
@@ -472,61 +463,14 @@ static void vlv_init_dpio(struct drm_device *dev)
 	POSTING_READ(DPIO_CTL);
 }
 
-static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
-{
-	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
-	return 1;
-}
-
-static const struct dmi_system_id intel_dual_link_lvds[] = {
-	{
-		.callback = intel_dual_link_lvds_callback,
-		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
-		},
-	},
-	{ }	/* terminating entry */
-};
-
-static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
-			      unsigned int reg)
-{
-	unsigned int val;
-
-	/* use the module option value if specified */
-	if (i915_lvds_channel_mode > 0)
-		return i915_lvds_channel_mode == 2;
-
-	if (dmi_check_system(intel_dual_link_lvds))
-		return true;
-
-	if (dev_priv->lvds_val)
-		val = dev_priv->lvds_val;
-	else {
-		/* BIOS should set the proper LVDS register value at boot, but
-		 * in reality, it doesn't set the value when the lid is closed;
-		 * we need to check "the value to be set" in VBT when LVDS
-		 * register is uninitialized.
-		 */
-		val = I915_READ(reg);
-		if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
-			val = dev_priv->bios_lvds_val;
-		dev_priv->lvds_val = val;
-	}
-	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
-}
-
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 						 int refclk)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+		if (intel_is_dual_link_lvds(dev)) {
 			/* LVDS dual channel */
 			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -550,11 +494,10 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (is_dual_link_lvds(dev_priv, LVDS))
+		if (intel_is_dual_link_lvds(dev))
 			/* LVDS with dual channel */
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
@@ -686,19 +629,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
-	    (I915_READ(LVDS)) != 0) {
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 		/*
-		 * For LVDS, if the panel is on, just rely on its current
-		 * settings for dual-channel. We haven't figured out how to
-		 * reliably set up different single/dual channel state, if we
-		 * even can.
+		 * For LVDS just rely on its current settings for dual-channel.
+		 * We haven't figured out how to reliably set up different
+		 * single/dual channel state, if we even can.
 		 */
-		if (is_dual_link_lvds(dev_priv, LVDS))
+		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
@@ -751,7 +691,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	intel_clock_t clock;
 	int max_n;
 	bool found;
@@ -766,8 +705,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			lvds_reg = PCH_LVDS;
 		else
 			lvds_reg = LVDS;
-		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
+		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
 			clock.p2 = limit->p2.p2_slow;
@@ -1047,6 +985,51 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 	}
 }
 
+/*
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+				struct intel_digital_port *port)
+{
+	u32 bit;
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		switch(port->port) {
+		case PORT_B:
+			bit = SDE_PORTB_HOTPLUG;
+			break;
+		case PORT_C:
+			bit = SDE_PORTC_HOTPLUG;
+			break;
+		case PORT_D:
+			bit = SDE_PORTD_HOTPLUG;
+			break;
+		default:
+			return true;
+		}
+	} else {
+		switch(port->port) {
+		case PORT_B:
+			bit = SDE_PORTB_HOTPLUG_CPT;
+			break;
+		case PORT_C:
+			bit = SDE_PORTC_HOTPLUG_CPT;
+			break;
+		case PORT_D:
+			bit = SDE_PORTD_HOTPLUG_CPT;
+			break;
+		default:
+			return true;
+		}
+	}
+
+	return I915_READ(SDEISR) & bit;
+}
+
 static const char *state_string(bool enabled)
 {
 	return enabled ? "on" : "off";
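The new ibx_digital_port_connected() helper reduces hotplug detection on IBX/CPT PCH platforms to one read of the SDEISR live-status register against the per-port bit. A hypothetical caller might use it like this (the detect flow shown is an assumption for illustration, not the driver's actual detect path):

	/* Hypothetical use: bail out of a connector detect early when the
	 * live-status bit says nothing is plugged in. */
	static bool example_detect(struct drm_i915_private *dev_priv,
				   struct intel_digital_port *port)
	{
		if (!ibx_digital_port_connected(dev_priv, port))
			return false;	/* no sink present on this port */
		/* ...otherwise continue with the expensive probing... */
		return true;
	}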
@@ -1125,8 +1108,8 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
 
-	if (IS_HASWELL(dev_priv->dev)) {
-		/* On Haswell, DDI is used instead of FDI_TX_CTL */
+	if (HAS_DDI(dev_priv->dev)) {
+		/* DDI does not have a specific FDI_TX register */
 		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 		val = I915_READ(reg);
 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1170,7 +1153,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 		return;
 
 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
-	if (IS_HASWELL(dev_priv->dev))
+	if (HAS_DDI(dev_priv->dev))
 		return;
 
 	reg = FDI_TX_CTL(pipe);
@@ -1509,13 +1492,14 @@ static void
 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		enum intel_sbi_destination destination)
 {
-	unsigned long flags;
 	u32 tmp;
 
-	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
-		goto out_unlock;
+		return;
 	}
 
 	I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1530,24 +1514,21 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
 		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
-		goto out_unlock;
+		return;
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
 }
 
 static u32
 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 	       enum intel_sbi_destination destination)
 {
-	unsigned long flags;
 	u32 value = 0;
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
-	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
-		goto out_unlock;
+		return 0;
 	}
 
 	I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1561,14 +1542,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
 		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
-		goto out_unlock;
+		return 0;
 	}
 
-	value = I915_READ(SBI_DATA);
-
-out_unlock:
-	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
-	return value;
+	return I915_READ(SBI_DATA);
 }
 
 /**
@@ -1700,8 +1677,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 		 * make the BPC in transcoder be consistent with
 		 * that in pipeconf reg.
 		 */
-		val &= ~PIPE_BPC_MASK;
-		val |= pipeconf_val & PIPE_BPC_MASK;
+		val &= ~PIPECONF_BPC_MASK;
+		val |= pipeconf_val & PIPECONF_BPC_MASK;
 	}
 
 	val &= ~TRANS_INTERLACE_MASK;
@@ -1728,7 +1705,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	BUG_ON(dev_priv->info->gen < 5);
 
 	/* FDI must be feeding us bits for PCH ports */
-	assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
 
 	/* Workaround: set timing override bit. */
@@ -1816,11 +1793,11 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 {
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
-	enum transcoder pch_transcoder;
+	enum pipe pch_transcoder;
 	int reg;
 	u32 val;
 
-	if (IS_HASWELL(dev_priv->dev))
+	if (HAS_PCH_LPT(dev_priv->dev))
 		pch_transcoder = TRANSCODER_A;
 	else
 		pch_transcoder = pipe;
@@ -1836,7 +1813,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 		if (pch_port) {
 			/* if driving the PCH, we need FDI enabled */
 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
-			assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+			assert_fdi_tx_pll_enabled(dev_priv,
+						  (enum pipe) cpu_transcoder);
 		}
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
@@ -2350,43 +2328,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	return 0;
 }
 
-static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl &= ~DP_PLL_FREQ_MASK;
-
-	if (clock < 200000) {
-		u32 temp;
-		dpa_ctl |= DP_PLL_FREQ_160MHZ;
-		/* workaround for 160Mhz:
-		   1) program 0x4600c bits 15:0 = 0x8124
-		   2) program 0x46010 bit 0 = 1
-		   3) program 0x46034 bit 24 = 1
-		   4) program 0x64000 bit 14 = 1
-		   */
-		temp = I915_READ(0x4600c);
-		temp &= 0xffff0000;
-		I915_WRITE(0x4600c, temp | 0x8124);
-
-		temp = I915_READ(0x46010);
-		I915_WRITE(0x46010, temp | 1);
-
-		temp = I915_READ(0x46034);
-		I915_WRITE(0x46034, temp | (1 << 24));
-	} else {
-		dpa_ctl |= DP_PLL_FREQ_270MHZ;
-	}
-	I915_WRITE(DP_A, dpa_ctl);
-
-	POSTING_READ(DP_A);
-	udelay(500);
-}
-
 static void intel_fdi_normal_train(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2815,7 +2756,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 	temp = I915_READ(reg);
 	temp &= ~((0x7 << 19) | (0x7 << 16));
 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
 	POSTING_READ(reg);
@@ -2828,18 +2769,14 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 	POSTING_READ(reg);
 	udelay(200);
 
-	/* On Haswell, the PLL configuration for ports and pipes is handled
-	 * separately, as part of DDI setup */
-	if (!IS_HASWELL(dev)) {
-		/* Enable CPU FDI TX PLL, always on for Ironlake */
-		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
-		if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+	/* Enable CPU FDI TX PLL, always on for Ironlake */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
 
-			POSTING_READ(reg);
-			udelay(100);
-		}
+		POSTING_READ(reg);
+		udelay(100);
 	}
 }
 
@@ -2889,7 +2826,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	temp &= ~(0x7 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
 
 	POSTING_READ(reg);
@@ -2918,7 +2855,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	}
 	/* BPC in FDI rx is consistent with that in PIPECONF */
 	temp &= ~(0x07 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
 	I915_WRITE(reg, temp);
 
 	POSTING_READ(reg);
@@ -2992,6 +2929,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
+	mutex_lock(&dev_priv->dpio_lock);
+
 	/* It is necessary to ungate the pixclk gate prior to programming
 	 * the divisors, and gate it back when it is done.
 	 */
@@ -3066,6 +3005,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 	udelay(24);
 
 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+	mutex_unlock(&dev_priv->dpio_lock);
 }
 
 /*
@@ -3146,7 +3087,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	if (HAS_PCH_CPT(dev) &&
 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
 		reg = TRANS_DP_CTL(pipe);
 		temp = I915_READ(reg);
 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3623,7 +3564,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
 
 	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
 	 * start using it. */
-	intel_crtc->cpu_transcoder = intel_crtc->pipe;
+	intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
 
 	intel_ddi_put_crtc_pll(crtc);
 }
@@ -4012,16 +3953,8 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 	return 133000;
 }
 
-struct fdi_m_n {
-	u32 tu;
-	u32 gmch_m;
-	u32 gmch_n;
-	u32 link_m;
-	u32 link_n;
-};
-
 static void
-fdi_reduce_ratio(u32 *num, u32 *den)
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
 {
 	while (*num > 0xffffff || *den > 0xffffff) {
 		*num >>= 1;
@@ -4029,20 +3962,18 @@ fdi_reduce_ratio(u32 *num, u32 *den)
 	}
 }
 
-static void
-ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
-		     int link_clock, struct fdi_m_n *m_n)
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+		       int pixel_clock, int link_clock,
+		       struct intel_link_m_n *m_n)
 {
-	m_n->tu = 64;		/* default size */
-
-	/* BUG_ON(pixel_clock > INT_MAX / 36); */
+	m_n->tu = 64;
 	m_n->gmch_m = bits_per_pixel * pixel_clock;
 	m_n->gmch_n = link_clock * nlanes * 8;
-	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-
+	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
 	m_n->link_m = pixel_clock;
 	m_n->link_n = link_clock;
-	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
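intel_link_compute_m_n() expresses the pixel-clock/link-clock ratio as the M/N pairs the data and link registers expect; numerator and denominator must both fit in 24 bits, hence the shift-halving loop. The standalone program below reproduces the same arithmetic outside the driver; the sample numbers (24 bpp, 4 lanes, a 148.5 MHz pixel clock on a 270 MHz link) are illustrative.

	#include <stdint.h>
	#include <stdio.h>

	/* Same reduction as intel_reduce_ratio(): halve until both values
	 * fit in 24 bits. */
	static void reduce_ratio(uint32_t *num, uint32_t *den)
	{
		while (*num > 0xffffff || *den > 0xffffff) {
			*num >>= 1;
			*den >>= 1;
		}
	}

	int main(void)
	{
		uint32_t gmch_m = 24 * 148500;		/* bits_per_pixel * pixel_clock */
		uint32_t gmch_n = 270000 * 4 * 8;	/* link_clock * nlanes * 8 */

		reduce_ratio(&gmch_m, &gmch_n);
		printf("data M/N = %u/%u\n", gmch_m, gmch_n);
		return 0;
	}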
@@ -4289,51 +4220,6 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
-			      struct drm_display_mode *adjusted_mode)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	u32 temp;
-
-	temp = I915_READ(LVDS);
-	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-	if (pipe == 1) {
-		temp |= LVDS_PIPEB_SELECT;
-	} else {
-		temp &= ~LVDS_PIPEB_SELECT;
-	}
-	/* set the corresponsding LVDS_BORDER bit */
-	temp |= dev_priv->lvds_border_bits;
-	/* Set the B0-B3 data pairs corresponding to whether we're going to
-	 * set the DPLLs for dual-channel mode or not.
-	 */
-	if (clock->p2 == 7)
-		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-	else
-		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
-	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-	 * appropriately here, but we need to look more thoroughly into how
-	 * panels behave in the two modes.
-	 */
-	/* set the dithering flag on LVDS as needed */
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (dev_priv->lvds_dither)
-			temp |= LVDS_ENABLE_DITHER;
-		else
-			temp &= ~LVDS_ENABLE_DITHER;
-	}
-	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-		temp |= LVDS_HSYNC_POLARITY;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-		temp |= LVDS_VSYNC_POLARITY;
-	I915_WRITE(LVDS, temp);
-}
-
 static void vlv_update_pll(struct drm_crtc *crtc,
 			   struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode,
@@ -4349,6 +4235,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	bool is_sdvo;
 	u32 temp;
 
+	mutex_lock(&dev_priv->dpio_lock);
+
 	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
 		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
@@ -4432,6 +4320,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 		temp |= (1 << 21);
 		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
 	}
+
+	mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4443,6 +4333,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	u32 dpll;
 	bool is_sdvo;
@@ -4511,12 +4402,9 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 	POSTING_READ(DPLL(pipe));
 	udelay(150);
 
-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
-	 * This is an exception to the general rule that mode_set doesn't turn
-	 * things on.
-	 */
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		intel_update_lvds(crtc, clock, adjusted_mode);
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -4555,6 +4443,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	u32 dpll;
 
@@ -4588,12 +4477,9 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	POSTING_READ(DPLL(pipe));
 	udelay(150);
 
-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
-	 * This is an exception to the general rule that mode_set doesn't turn
-	 * things on.
-	 */
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		intel_update_lvds(crtc, clock, adjusted_mode);
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
 
 	I915_WRITE(DPLL(pipe), dpll);
 
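In both i9xx_update_pll() and i8xx_update_pll(), the open-coded "LVDS pins on before the DPLL" exception is replaced by a generic encoder hook: any encoder that must power up before the PLL supplies pre_pll_enable, and the CRTC code simply walks its encoders. Schematically (the struct layout below is an assumption for illustration; only the hook itself comes from the diff):

	/* Sketch of the hook: a NULL pointer means the encoder has no
	 * pre-PLL work to do. */
	struct example_encoder {
		void (*pre_pll_enable)(struct example_encoder *encoder);
	};

	static void example_enable_pll(struct example_encoder **encoders, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (encoders[i]->pre_pll_enable)
				encoders[i]->pre_pll_enable(encoders[i]);
		/* ...now it is safe to write the DPLL enable bit... */
	}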
@@ -4783,10 +4669,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	/* default to 8bpc */
-	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+	pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
 	if (is_dp) {
 		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-			pipeconf |= PIPECONF_BPP_6 |
+			pipeconf |= PIPECONF_6BPC |
 				    PIPECONF_DITHER_EN |
 				    PIPECONF_DITHER_TYPE_SP;
 		}
@@ -4794,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
 	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
 		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-			pipeconf |= PIPECONF_BPP_6 |
+			pipeconf |= PIPECONF_6BPC |
 				    PIPECONF_ENABLE |
 				    I965_PIPECONF_ACTIVE;
 		}
@@ -5177,19 +5063,19 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 
 	val = I915_READ(PIPECONF(pipe));
 
-	val &= ~PIPE_BPC_MASK;
+	val &= ~PIPECONF_BPC_MASK;
 	switch (intel_crtc->bpp) {
 	case 18:
-		val |= PIPE_6BPC;
+		val |= PIPECONF_6BPC;
 		break;
 	case 24:
-		val |= PIPE_8BPC;
+		val |= PIPECONF_8BPC;
 		break;
 	case 30:
-		val |= PIPE_10BPC;
+		val |= PIPECONF_10BPC;
 		break;
 	case 36:
-		val |= PIPE_12BPC;
+		val |= PIPECONF_12BPC;
 		break;
 	default:
 		/* Case prevented by intel_choose_pipe_bpp_dither. */
@@ -5400,7 +5286,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 	struct intel_encoder *intel_encoder, *edp_encoder = NULL;
-	struct fdi_m_n m_n = {0};
+	struct intel_link_m_n m_n = {0};
 	int target_clock, pixel_multiplier, lane, link_bw;
 	bool is_dp = false, is_cpu_edp = false;
 
@@ -5452,8 +5338,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
 
 	if (pixel_multiplier > 1)
 		link_bw *= pixel_multiplier;
-	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
-			     &m_n);
+	intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
 
 	I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
 	I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@ -5506,7 +5391,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	if (is_lvds) {
 		if ((intel_panel_use_ssc(dev_priv) &&
 		     dev_priv->lvds_ssc_freq == 100) ||
-		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+		    intel_is_dual_link_lvds(dev))
 			factor = 25;
 	} else if (is_sdvo && is_tv)
 		factor = 20;
@@ -5581,7 +5466,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	bool ok, has_reduced_clock = false;
 	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
 	struct intel_encoder *encoder;
-	u32 temp;
 	int ret;
 	bool dither, fdi_config_ok;
 
@@ -5645,54 +5529,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	} else
 		intel_put_pch_pll(intel_crtc);
 
-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
-	 * This is an exception to the general rule that mode_set doesn't turn
-	 * things on.
-	 */
-	if (is_lvds) {
-		temp = I915_READ(PCH_LVDS);
-		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-		if (HAS_PCH_CPT(dev)) {
-			temp &= ~PORT_TRANS_SEL_MASK;
-			temp |= PORT_TRANS_SEL_CPT(pipe);
-		} else {
-			if (pipe == 1)
-				temp |= LVDS_PIPEB_SELECT;
-			else
-				temp &= ~LVDS_PIPEB_SELECT;
-		}
-
-		/* set the corresponsding LVDS_BORDER bit */
-		temp |= dev_priv->lvds_border_bits;
-		/* Set the B0-B3 data pairs corresponding to whether we're going to
-		 * set the DPLLs for dual-channel mode or not.
-		 */
-		if (clock.p2 == 7)
-			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-		else
-			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
-		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-		 * appropriately here, but we need to look more thoroughly into how
-		 * panels behave in the two modes.
-		 */
-		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-			temp |= LVDS_HSYNC_POLARITY;
-		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-			temp |= LVDS_VSYNC_POLARITY;
-		I915_WRITE(PCH_LVDS, temp);
-	}
-
-	if (is_dp && !is_cpu_edp) {
+	if (is_dp && !is_cpu_edp)
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	} else {
-		/* For non-DP output, clear any trans DP clock recovery setting.*/
-		I915_WRITE(TRANSDATA_M1(pipe), 0);
-		I915_WRITE(TRANSDATA_N1(pipe), 0);
-		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
-		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
-	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
 
 	if (intel_crtc->pch_pll) {
 		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
@@ -5727,9 +5569,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
 	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
-	if (is_cpu_edp)
-		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
 	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
 	intel_wait_for_vblank(dev, pipe);
@@ -5759,20 +5598,13 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	int num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	u32 dpll = 0, fp = 0, fp2 = 0;
-	bool ok, has_reduced_clock = false;
-	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+	bool is_dp = false, is_cpu_edp = false;
 	struct intel_encoder *encoder;
-	u32 temp;
 	int ret;
 	bool dither;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
 			break;
@@ -5806,143 +5638,26 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
 		return -EINVAL;
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-		ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
-					     &has_reduced_clock,
-					     &reduced_clock);
-		if (!ok) {
-			DRM_ERROR("Couldn't find PLL settings for mode!\n");
-			return -EINVAL;
-		}
-	}
-
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
 	/* determine panel color depth */
 	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
 					      adjusted_mode);
-	if (is_lvds && dev_priv->lvds_dither)
-		dither = true;
 
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-		if (has_reduced_clock)
-			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-			      reduced_clock.m2;
-
-		dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
-					     fp);
-
-		/* CPU eDP is the only output that doesn't need a PCH PLL of its
-		 * own on pre-Haswell/LPT generation */
-		if (!is_cpu_edp) {
-			struct intel_pch_pll *pll;
-
-			pll = intel_get_pch_pll(intel_crtc, dpll, fp);
-			if (pll == NULL) {
-				DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
-						 pipe);
-				return -EINVAL;
-			}
-		} else
-			intel_put_pch_pll(intel_crtc);
-
-		/* The LVDS pin pair needs to be on before the DPLLs are
-		 * enabled. This is an exception to the general rule that
-		 * mode_set doesn't turn things on.
-		 */
-		if (is_lvds) {
-			temp = I915_READ(PCH_LVDS);
-			temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-			if (HAS_PCH_CPT(dev)) {
-				temp &= ~PORT_TRANS_SEL_MASK;
-				temp |= PORT_TRANS_SEL_CPT(pipe);
-			} else {
-				if (pipe == 1)
-					temp |= LVDS_PIPEB_SELECT;
-				else
-					temp &= ~LVDS_PIPEB_SELECT;
-			}
-
-			/* set the corresponsding LVDS_BORDER bit */
-			temp |= dev_priv->lvds_border_bits;
-			/* Set the B0-B3 data pairs corresponding to whether
-			 * we're going to set the DPLLs for dual-channel mode or
-			 * not.
-			 */
-			if (clock.p2 == 7)
-				temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-			else
-				temp &= ~(LVDS_B0B3_POWER_UP |
-					  LVDS_CLKB_POWER_UP);
-
-			/* It would be nice to set 24 vs 18-bit mode
-			 * (LVDS_A3_POWER_UP) appropriately here, but we need to
-			 * look more thoroughly into how panels behave in the
-			 * two modes.
-			 */
-			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-			if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-				temp |= LVDS_HSYNC_POLARITY;
-			if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-				temp |= LVDS_VSYNC_POLARITY;
-			I915_WRITE(PCH_LVDS, temp);
-		}
-	}
-
-	if (is_dp && !is_cpu_edp) {
+	if (is_dp && !is_cpu_edp)
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	} else {
-		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-			/* For non-DP output, clear any trans DP clock recovery
-			 * setting.*/
-			I915_WRITE(TRANSDATA_M1(pipe), 0);
-			I915_WRITE(TRANSDATA_N1(pipe), 0);
-			I915_WRITE(TRANSDPLINK_M1(pipe), 0);
-			I915_WRITE(TRANSDPLINK_N1(pipe), 0);
-		}
-	}
 
 	intel_crtc->lowfreq_avail = false;
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-		if (intel_crtc->pch_pll) {
-			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-
-			/* Wait for the clocks to stabilize. */
-			POSTING_READ(intel_crtc->pch_pll->pll_reg);
-			udelay(150);
-
-			/* The pixel multiplier can only be updated once the
-			 * DPLL is enabled and the clocks are stable.
-			 *
-			 * So write it again.
-			 */
-			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-		}
-
-		if (intel_crtc->pch_pll) {
-			if (is_lvds && has_reduced_clock && i915_powersave) {
-				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
-				intel_crtc->lowfreq_avail = true;
-			} else {
-				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
-			}
-		}
-	}
 
 	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
 	if (!is_dp || is_cpu_edp)
 		ironlake_set_m_n(crtc, mode, adjusted_mode);
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		if (is_cpu_edp)
-			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
 	haswell_set_pipeconf(crtc, adjusted_mode, dither);
 
 	/* Set up the display plane register */
@@ -6759,7 +6474,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 		return false;
 	}
 
-	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -7109,8 +6824,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	obj = work->old_fb_obj;
 
-	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj->pending_flip.counter);
 	wake_up(&dev_priv->pending_flip_queue);
 
 	queue_work(dev_priv->wq, &work->work);
@@ -7474,10 +7187,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->enable_stall_check = true;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
@@ -7494,7 +7203,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
-	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
@@ -7904,16 +7612,21 @@ intel_modeset_check_state(struct drm_device *dev)
 	}
 }
 
-bool intel_set_mode(struct drm_crtc *crtc,
+int intel_set_mode(struct drm_crtc *crtc,
 		   struct drm_display_mode *mode,
 		   int x, int y, struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
 	struct intel_crtc *intel_crtc;
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
-	bool ret = true;
+	int ret = 0;
+
+	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+	if (!saved_mode)
+		return -ENOMEM;
+	saved_hwmode = saved_mode + 1;
 
 	intel_modeset_affected_pipes(crtc, &modeset_pipes,
 				     &prepare_pipes, &disable_pipes);
@@ -7924,8 +7637,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 		intel_crtc_disable(&intel_crtc->base);
 
-	saved_hwmode = crtc->hwmode;
-	saved_mode = crtc->mode;
+	*saved_hwmode = crtc->hwmode;
+	*saved_mode = crtc->mode;
 
 	/* Hack: Because we don't (yet) support global modeset on multiple
 	 * crtcs, we don't keep track of the new mode for more than one crtc.
@@ -7936,7 +7649,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	if (modeset_pipes) {
 		adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
 		if (IS_ERR(adjusted_mode)) {
-			return false;
+			ret = PTR_ERR(adjusted_mode);
+			goto out;
 		}
 	}
 
@@ -7962,11 +7676,11 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	 * on the DPLL.
 	 */
 	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-		ret = !intel_crtc_mode_set(&intel_crtc->base,
+		ret = intel_crtc_mode_set(&intel_crtc->base,
 					  mode, adjusted_mode,
 					  x, y, fb);
-		if (!ret)
+		if (ret)
 			goto done;
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7987,16 +7701,23 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	/* FIXME: add subpixel order */
 done:
 	drm_mode_destroy(dev, adjusted_mode);
-	if (!ret && crtc->enabled) {
-		crtc->hwmode = saved_hwmode;
-		crtc->mode = saved_mode;
+	if (ret && crtc->enabled) {
+		crtc->hwmode = *saved_hwmode;
+		crtc->mode = *saved_mode;
 	} else {
 		intel_modeset_check_state(dev);
 	}
 
+out:
+	kfree(saved_mode);
 	return ret;
 }
 
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
 #undef for_each_intel_crtc_masked
 
 static void intel_set_config_free(struct intel_set_config *config)
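intel_set_mode() now returns 0 or a negative errno instead of a bool, so every failure carries a real reason upward; the two saved drm_display_mode copies also move from the stack to one kmalloc'd pair, since the structs are fairly large for a kernel stack frame. Callers are expected to follow the usual kernel convention, roughly as in this schematic (the wrapper function is hypothetical):

	/* Schematic caller only: 0 means success, negative errno on failure. */
	static int example_apply_mode(struct drm_crtc *crtc,
				      struct drm_display_mode *mode,
				      int x, int y, struct drm_framebuffer *fb)
	{
		int ret = intel_set_mode(crtc, mode, x, y, fb);

		if (ret)
			DRM_ERROR("mode set failed on [CRTC:%d], err = %d\n",
				  crtc->base.id, ret);
		return ret;
	}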
@@ -8262,11 +7983,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 		drm_mode_debug_printmodeline(set->mode);
 	}
 
-		if (!intel_set_mode(set->crtc, set->mode,
-				    set->x, set->y, set->fb)) {
-			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
-				  set->crtc->base.id);
-			ret = -EINVAL;
+		ret = intel_set_mode(set->crtc, set->mode,
+				     set->x, set->y, set->fb);
+		if (ret) {
+			DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+				  set->crtc->base.id, ret);
 			goto fail;
 		}
 	} else if (config->fb_changed) {
@@ -8283,8 +8004,8 @@ fail:
 
 	/* Try to restore the config */
 	if (config->mode_changed &&
-	    !intel_set_mode(save_set.crtc, save_set.mode,
-			    save_set.x, save_set.y, save_set.fb))
+	    intel_set_mode(save_set.crtc, save_set.mode,
+			   save_set.x, save_set.y, save_set.fb))
 		DRM_ERROR("failed to restore config after modeset failure\n");
 
 out_config:
@@ -8303,7 +8024,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
 
 static void intel_cpu_pll_init(struct drm_device *dev)
 {
-	if (IS_HASWELL(dev))
+	if (HAS_DDI(dev))
 		intel_ddi_pll_init(dev);
 }
 
@@ -8439,11 +8160,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 		I915_WRITE(PFIT_CONTROL, 0);
 	}
 
-	if (!(IS_HASWELL(dev) &&
-	      (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+	if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
 		intel_crt_init(dev);
 
-	if (IS_HASWELL(dev)) {
+	if (HAS_DDI(dev)) {
 		int found;
 
 		/* Haswell uses DDI functions to detect digital outputs */
@@ -8686,7 +8406,7 @@ static void intel_init_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* We always want a DPMS function */
-	if (IS_HASWELL(dev)) {
+	if (HAS_DDI(dev)) {
 		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -8748,8 +8468,7 @@ static void intel_init_display(struct drm_device *dev)
 		} else if (IS_HASWELL(dev)) {
 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
 			dev_priv->display.write_eld = haswell_write_eld;
-		} else
-			dev_priv->display.update_wm = NULL;
+		}
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
 	}
@@ -8982,6 +8701,9 @@ void intel_modeset_init(struct drm_device *dev)
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
+
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
 }
 
 static void
@@ -9192,7 +8914,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 
-	if (IS_HASWELL(dev)) {
+	if (HAS_DDI(dev)) {
 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
 		if (tmp & TRANS_DDI_FUNC_ENABLE) {
@@ -9233,7 +8955,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 			      crtc->active ? "enabled" : "disabled");
 	}
 
-	if (IS_HASWELL(dev))
+	if (HAS_DDI(dev))
 		intel_ddi_setup_hw_pll_state(dev);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9284,9 +9006,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
 	if (force_restore) {
 		for_each_pipe(pipe) {
-			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-			intel_set_mode(&crtc->base, &crtc->base.mode,
-				       crtc->base.x, crtc->base.y, crtc->base.fb);
+			intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
 		}
 
 		i915_redisable_vga(dev);
@@ -9350,6 +9070,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	flush_scheduled_work();
 
 	drm_mode_config_cleanup(dev);
+
+	intel_cleanup_overlay(dev);
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0..5f12eb2d0fb5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	return max_link_bw;
 }
 
-static int
-intel_dp_link_clock(uint8_t link_bw)
-{
-	if (link_bw == DP_LINK_BW_2_7)
-		return 270000;
-	else
-		return 162000;
-}
-
 /*
  * The units on the numbers in the next two are... bizarre.  Examples will
  * make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
 			  struct drm_display_mode *mode,
 			  bool adjust_mode)
 {
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+	int max_link_clock =
+		drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
 	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
 	int max_rate, mode_rate;
 
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 	}
 }
 
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+	uint32_t status;
+	bool done;
+
+	if (IS_HASWELL(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_A:
+			ch_ctl = DPA_AUX_CH_CTL;
+			break;
+		case PORT_B:
+			ch_ctl = PCH_DPB_AUX_CH_CTL;
+			break;
+		case PORT_C:
+			ch_ctl = PCH_DPC_AUX_CH_CTL;
+			break;
+		case PORT_D:
+			ch_ctl = PCH_DPD_AUX_CH_CTL;
+			break;
+		default:
+			BUG();
+		}
+	}
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+	if (has_aux_irq)
+		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+	else
+		done = wait_for_atomic(C, 10) == 0;
+	if (!done)
+		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+			  has_aux_irq);
+#undef C
+
+	return status;
+}
+
 static int
 intel_dp_aux_ch(struct intel_dp *intel_dp,
 		uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
-	int i;
-	int recv_bytes;
+	int i, ret, recv_bytes;
 	uint32_t status;
 	uint32_t aux_clock_divider;
 	int try, precharge;
+	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+	/* dp aux is extremely sensitive to irq latency, hence request the
+	 * lowest possible wakeup latency and so prevent the cpu from going into
+	 * deep sleep states.
+	 */
+	pm_qos_update_request(&dev_priv->pm_qos, 0);
 
 	if (IS_HASWELL(dev)) {
 		switch (intel_dig_port->port) {
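The pm_qos request added above pins the acceptable CPU wakeup latency to 0 for the duration of the AUX transfer, since deep C-state exit latency can otherwise disturb the tightly timed transaction, and every exit path restores PM_QOS_DEFAULT_VALUE (hence the goto out convention in the hunks that follow). The bracketing pattern, reduced to its skeleton (dev_priv->pm_qos is the request object this series adds; the transfer body is elided):

	#include <linux/pm_qos.h>

	/* Skeleton of the latency bracket used in intel_dp_aux_ch(). */
	static int example_latency_critical_xfer(struct drm_i915_private *dev_priv)
	{
		int ret;

		pm_qos_update_request(&dev_priv->pm_qos, 0);	/* no deep sleep */
		ret = 0;	/* ...perform the timing-sensitive I/O here... */
		pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
		return ret;
	}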
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_HASWELL(dev))
+		if (HAS_DDI(dev))
 			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
 		else if (IS_VALLEYVIEW(dev))
 			aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
 	/* Try to wait for any previous AUX channel activity */
 	for (try = 0; try < 3; try++) {
-		status = I915_READ(ch_ctl);
+		status = I915_READ_NOTRACE(ch_ctl);
 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
 			break;
 		msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
408 if (try == 3) { 448 if (try == 3) {
409 WARN(1, "dp_aux_ch not started status 0x%08x\n", 449 WARN(1, "dp_aux_ch not started status 0x%08x\n",
410 I915_READ(ch_ctl)); 450 I915_READ(ch_ctl));
411 return -EBUSY; 451 ret = -EBUSY;
452 goto out;
412 } 453 }
413 454
414 /* Must try at least 3 times according to DP spec */ 455 /* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
421 /* Send the command and wait for it to complete */ 462 /* Send the command and wait for it to complete */
422 I915_WRITE(ch_ctl, 463 I915_WRITE(ch_ctl,
423 DP_AUX_CH_CTL_SEND_BUSY | 464 DP_AUX_CH_CTL_SEND_BUSY |
465 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
424 DP_AUX_CH_CTL_TIME_OUT_400us | 466 DP_AUX_CH_CTL_TIME_OUT_400us |
425 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 467 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
426 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 468 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
428 DP_AUX_CH_CTL_DONE | 470 DP_AUX_CH_CTL_DONE |
429 DP_AUX_CH_CTL_TIME_OUT_ERROR | 471 DP_AUX_CH_CTL_TIME_OUT_ERROR |
430 DP_AUX_CH_CTL_RECEIVE_ERROR); 472 DP_AUX_CH_CTL_RECEIVE_ERROR);
431 for (;;) { 473
432 status = I915_READ(ch_ctl); 474 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
433 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
434 break;
435 udelay(100);
436 }
437 475
438 /* Clear done status and any errors */ 476 /* Clear done status and any errors */
439 I915_WRITE(ch_ctl, 477 I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
451 489
452 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 490 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
453 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 491 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
454 return -EBUSY; 492 ret = -EBUSY;
493 goto out;
455 } 494 }
456 495
457 /* Check for timeout or receive error. 496 /* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
459 */ 498 */
460 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 499 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
461 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 500 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
462 return -EIO; 501 ret = -EIO;
502 goto out;
463 } 503 }
464 504
465 /* Timeouts occur when the device isn't connected, so they're 505 /* Timeouts occur when the device isn't connected, so they're
466 * "normal" -- don't fill the kernel log with these */ 506 * "normal" -- don't fill the kernel log with these */
467 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 507 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
468 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 508 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
469 return -ETIMEDOUT; 509 ret = -ETIMEDOUT;
510 goto out;
470 } 511 }
471 512
472 /* Unload any bytes sent back from the other side */ 513 /* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
479 unpack_aux(I915_READ(ch_data + i), 520 unpack_aux(I915_READ(ch_data + i),
480 recv + i, recv_bytes - i); 521 recv + i, recv_bytes - i);
481 522
482 return recv_bytes; 523 ret = recv_bytes;
524out:
525 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
526
527 return ret;
483} 528}
484 529
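Note how every failure path in intel_dp_aux_ch() now jumps to the out: label, so the PM QoS request raised at entry is dropped again no matter how the transfer ends. The same single-exit shape in miniature, with a toy qos_set() standing in for pm_qos_update_request():

#include <errno.h>
#include <stdio.h>

static int qos_level = -1;              /* -1: default latency allowed */

static void qos_set(int level)
{
    qos_level = level;
}

static int aux_transfer(int fail_step)
{
    int ret;

    qos_set(0);                         /* forbid deep sleep while active */

    if (fail_step == 1) {
        ret = -EBUSY;                   /* channel never went idle */
        goto out;
    }
    if (fail_step == 2) {
        ret = -ETIMEDOUT;               /* sink not connected */
        goto out;
    }
    ret = 16;                           /* bytes received */
out:
    qos_set(-1);                        /* restored on every path */
    return ret;
}

int main(void)
{
    printf("%d\n", aux_transfer(0));
    printf("%d\n", aux_transfer(1));
    printf("%d\n", aux_transfer(2));
    return 0;
}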
485/* Write data to the aux channel in native mode */ 530/* Write data to the aux channel in native mode */
@@ -722,12 +767,15 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
722 767
723 for (clock = 0; clock <= max_clock; clock++) { 768 for (clock = 0; clock <= max_clock; clock++) {
724 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 769 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
725 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 770 int link_bw_clock =
771 drm_dp_bw_code_to_link_rate(bws[clock]);
772 int link_avail = intel_dp_max_data_rate(link_bw_clock,
773 lane_count);
726 774
727 if (mode_rate <= link_avail) { 775 if (mode_rate <= link_avail) {
728 intel_dp->link_bw = bws[clock]; 776 intel_dp->link_bw = bws[clock];
729 intel_dp->lane_count = lane_count; 777 intel_dp->lane_count = lane_count;
730 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 778 adjusted_mode->clock = link_bw_clock;
731 DRM_DEBUG_KMS("DP link bw %02x lane " 779 DRM_DEBUG_KMS("DP link bw %02x lane "
732 "count %d clock %d bpp %d\n", 780 "count %d clock %d bpp %d\n",
733 intel_dp->link_bw, intel_dp->lane_count, 781 intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +790,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
742 return false; 790 return false;
743} 791}
744 792
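intel_dp_mode_fixup() now takes the link rate from the shared drm_dp_bw_code_to_link_rate() helper (bandwidth code times 27000 kHz) instead of the driver-local intel_dp_link_clock(). The fitting loop itself is a plain bandwidth budget; the sketch below uses the usual 8b/10b accounting and may round differently from the driver's own helpers:

#include <stdio.h>

/* drm_dp_bw_code_to_link_rate(): bandwidth code * 27000 kHz,
 * so 0x06 -> 162000 and 0x0a -> 270000. */
static int bw_code_to_link_rate(int code)
{
    return code * 27000;
}

/* 8b/10b: each 10-bit symbol carries 8 data bits per lane. */
static int max_data_rate(int link_rate_khz, int lanes)
{
    return link_rate_khz * lanes * 8 / 10;
}

int main(void)
{
    static const int bws[] = { 0x06, 0x0a };
    int mode_rate = 148500 * 24 / 10;   /* 1080p60 kHz * 24bpp, same unit */

    for (int clock = 0; clock < 2; clock++) {
        for (int lanes = 1; lanes <= 4; lanes <<= 1) {
            int avail = max_data_rate(bw_code_to_link_rate(bws[clock]), lanes);

            if (mode_rate <= avail) {
                printf("bw %02x x%d fits: %d <= %d\n",
                       bws[clock], lanes, mode_rate, avail);
                return 0;
            }
        }
    }
    puts("mode exceeds every link configuration");
    return 1;
}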
745struct intel_dp_m_n {
746 uint32_t tu;
747 uint32_t gmch_m;
748 uint32_t gmch_n;
749 uint32_t link_m;
750 uint32_t link_n;
751};
752
753static void
754intel_reduce_ratio(uint32_t *num, uint32_t *den)
755{
756 while (*num > 0xffffff || *den > 0xffffff) {
757 *num >>= 1;
758 *den >>= 1;
759 }
760}
761
762static void
763intel_dp_compute_m_n(int bpp,
764 int nlanes,
765 int pixel_clock,
766 int link_clock,
767 struct intel_dp_m_n *m_n)
768{
769 m_n->tu = 64;
770 m_n->gmch_m = (pixel_clock * bpp) >> 3;
771 m_n->gmch_n = link_clock * nlanes;
772 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
773 m_n->link_m = pixel_clock;
774 m_n->link_n = link_clock;
775 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
776}
777
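The removed intel_dp_compute_m_n() reappears in shared code as intel_link_compute_m_n(), so other output paths can reuse it. The arithmetic itself is unchanged and small enough to run standalone:

#include <stdint.h>
#include <stdio.h>

static void reduce_ratio(uint32_t *num, uint32_t *den)
{
    /* Halve both terms until they fit the 24-bit M/N registers. */
    while (*num > 0xffffff || *den > 0xffffff) {
        *num >>= 1;
        *den >>= 1;
    }
}

static void compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock,
                        uint32_t *gmch_m, uint32_t *gmch_n,
                        uint32_t *link_m, uint32_t *link_n)
{
    *gmch_m = (pixel_clock * bpp) >> 3;     /* bytes of pixel data */
    *gmch_n = link_clock * nlanes;          /* link capacity */
    reduce_ratio(gmch_m, gmch_n);

    *link_m = pixel_clock;
    *link_n = link_clock;
    reduce_ratio(link_m, link_n);
}

int main(void)
{
    uint32_t gm, gn, lm, ln;

    compute_m_n(24, 4, 148500, 270000, &gm, &gn, &lm, &ln);
    printf("gmch %u/%u link %u/%u\n", gm, gn, lm, ln);
    return 0;
}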
778void 793void
779intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 794intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
780 struct drm_display_mode *adjusted_mode) 795 struct drm_display_mode *adjusted_mode)
@@ -785,7 +800,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
785 struct drm_i915_private *dev_priv = dev->dev_private; 800 struct drm_i915_private *dev_priv = dev->dev_private;
786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
787 int lane_count = 4; 802 int lane_count = 4;
788 struct intel_dp_m_n m_n; 803 struct intel_link_m_n m_n;
789 int pipe = intel_crtc->pipe; 804 int pipe = intel_crtc->pipe;
790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 805 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
791 806
@@ -808,8 +823,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
808 * the number of bytes_per_pixel post-LUT, which we always 823 * the number of bytes_per_pixel post-LUT, which we always
809 * set up for 8-bits of R/G/B, or 3 bytes total. 824 * set up for 8-bits of R/G/B, or 3 bytes total.
810 */ 825 */
811 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 826 intel_link_compute_m_n(intel_crtc->bpp, lane_count,
812 mode->clock, adjusted_mode->clock, &m_n); 827 mode->clock, adjusted_mode->clock, &m_n);
813 828
814 if (IS_HASWELL(dev)) { 829 if (IS_HASWELL(dev)) {
815 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 830 I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +866,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
851 } 866 }
852} 867}
853 868
869static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
870{
871 struct drm_device *dev = crtc->dev;
872 struct drm_i915_private *dev_priv = dev->dev_private;
873 u32 dpa_ctl;
874
875 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
876 dpa_ctl = I915_READ(DP_A);
877 dpa_ctl &= ~DP_PLL_FREQ_MASK;
878
879 if (clock < 200000) {
880 /* For a long time we've carried around a ILK-DevA w/a for the
881 * 160MHz clock. If we're really unlucky, it's still required.
882 */
883 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
884 dpa_ctl |= DP_PLL_FREQ_160MHZ;
885 } else {
886 dpa_ctl |= DP_PLL_FREQ_270MHZ;
887 }
888
889 I915_WRITE(DP_A, dpa_ctl);
890
891 POSTING_READ(DP_A);
892 udelay(500);
893}
894
854static void 895static void
855intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 896intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
856 struct drm_display_mode *adjusted_mode) 897 struct drm_display_mode *adjusted_mode)
@@ -950,6 +991,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
950 } else { 991 } else {
951 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 992 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
952 } 993 }
994
995 if (is_cpu_edp(intel_dp))
996 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
953} 997}
954 998
955#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 999#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1543,7 +1587,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
1543} 1587}
1544 1588
1545static uint32_t 1589static uint32_t
1546intel_dp_signal_levels(uint8_t train_set) 1590intel_gen4_signal_levels(uint8_t train_set)
1547{ 1591{
1548 uint32_t signal_levels = 0; 1592 uint32_t signal_levels = 0;
1549 1593
@@ -1641,7 +1685,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
1641 1685
1642/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1686/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1643static uint32_t 1687static uint32_t
1644intel_dp_signal_levels_hsw(uint8_t train_set) 1688intel_hsw_signal_levels(uint8_t train_set)
1645{ 1689{
1646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1690 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1647 DP_TRAIN_PRE_EMPHASIS_MASK); 1691 DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1717,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
1673 } 1717 }
1674} 1718}
1675 1719
1720/* Properly updates "DP" with the correct signal levels. */
1721static void
1722intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1723{
1724 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1725 struct drm_device *dev = intel_dig_port->base.base.dev;
1726 uint32_t signal_levels, mask;
1727 uint8_t train_set = intel_dp->train_set[0];
1728
1729 if (IS_HASWELL(dev)) {
1730 signal_levels = intel_hsw_signal_levels(train_set);
1731 mask = DDI_BUF_EMP_MASK;
1732 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1733 signal_levels = intel_gen7_edp_signal_levels(train_set);
1734 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
1735 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1736 signal_levels = intel_gen6_edp_signal_levels(train_set);
1737 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
1738 } else {
1739 signal_levels = intel_gen4_signal_levels(train_set);
1740 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
1741 }
1742
1743 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
1744
1745 *DP = (*DP & ~mask) | signal_levels;
1746}
1747
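intel_dp_set_signal_levels() folds the four per-platform ladders into one place that returns both the level bits and the mask they occupy, so a caller can no longer combine levels from one table with the mask of another. A sketch of that pattern with invented level/mask values (the real tables are the intel_*_signal_levels() helpers above):

#include <stdint.h>
#include <stdio.h>

struct drive { uint32_t levels, mask; };

static struct drive pick_levels(int gen, int is_cpu_edp, uint8_t train_set)
{
    /* Level encodings here are illustrative, not the hardware's. */
    if (gen >= 7 && is_cpu_edp)
        return (struct drive){ (uint32_t)train_set << 24, 0x3fu << 24 };
    if (gen == 6 && is_cpu_edp)
        return (struct drive){ (uint32_t)train_set << 22, 0x3fu << 22 };
    return (struct drive){ (uint32_t)train_set << 25, 0x7fu << 25 };
}

static uint32_t apply(uint32_t DP, struct drive d)
{
    return (DP & ~d.mask) | d.levels;   /* clear the field, then set it */
}

int main(void)
{
    uint32_t DP = 0x80000000;

    DP = apply(DP, pick_levels(7, 1, 0x09));
    printf("DP = 0x%08x\n", DP);
    return 0;
}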
1676static bool 1748static bool
1677intel_dp_set_link_train(struct intel_dp *intel_dp, 1749intel_dp_set_link_train(struct intel_dp *intel_dp,
1678 uint32_t dp_reg_value, 1750 uint32_t dp_reg_value,
@@ -1791,7 +1863,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1791 int voltage_tries, loop_tries; 1863 int voltage_tries, loop_tries;
1792 uint32_t DP = intel_dp->DP; 1864 uint32_t DP = intel_dp->DP;
1793 1865
1794 if (IS_HASWELL(dev)) 1866 if (HAS_DDI(dev))
1795 intel_ddi_prepare_link_retrain(encoder); 1867 intel_ddi_prepare_link_retrain(encoder);
1796 1868
1797 /* Write the link configuration data */ 1869 /* Write the link configuration data */
@@ -1809,24 +1881,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1809 for (;;) { 1881 for (;;) {
1810 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1882 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1811 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1883 uint8_t link_status[DP_LINK_STATUS_SIZE];
1812 uint32_t signal_levels; 1884
1813 1885 intel_dp_set_signal_levels(intel_dp, &DP);
1814 if (IS_HASWELL(dev)) {
1815 signal_levels = intel_dp_signal_levels_hsw(
1816 intel_dp->train_set[0]);
1817 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1818 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1819 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1820 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1821 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1822 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1823 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1824 } else {
1825 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1826 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1827 }
1828 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
1829 signal_levels);
1830 1886
1831 /* Set training pattern 1 */ 1887 /* Set training pattern 1 */
1832 if (!intel_dp_set_link_train(intel_dp, DP, 1888 if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1938,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1882void 1938void
1883intel_dp_complete_link_train(struct intel_dp *intel_dp) 1939intel_dp_complete_link_train(struct intel_dp *intel_dp)
1884{ 1940{
1885 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1886 bool channel_eq = false; 1941 bool channel_eq = false;
1887 int tries, cr_tries; 1942 int tries, cr_tries;
1888 uint32_t DP = intel_dp->DP; 1943 uint32_t DP = intel_dp->DP;
@@ -1892,8 +1947,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1892 cr_tries = 0; 1947 cr_tries = 0;
1893 channel_eq = false; 1948 channel_eq = false;
1894 for (;;) { 1949 for (;;) {
1895 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1896 uint32_t signal_levels;
1897 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1950 uint8_t link_status[DP_LINK_STATUS_SIZE];
1898 1951
1899 if (cr_tries > 5) { 1952 if (cr_tries > 5) {
@@ -1902,19 +1955,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1902 break; 1955 break;
1903 } 1956 }
1904 1957
1905 if (IS_HASWELL(dev)) { 1958 intel_dp_set_signal_levels(intel_dp, &DP);
1906 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1907 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1908 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1909 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1910 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1911 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1912 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1913 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1914 } else {
1915 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1916 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1917 }
1918 1959
1919 /* channel eq pattern */ 1960 /* channel eq pattern */
1920 if (!intel_dp_set_link_train(intel_dp, DP, 1961 if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2005,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1964 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2005 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1965 struct drm_device *dev = intel_dig_port->base.base.dev; 2006 struct drm_device *dev = intel_dig_port->base.base.dev;
1966 struct drm_i915_private *dev_priv = dev->dev_private; 2007 struct drm_i915_private *dev_priv = dev->dev_private;
2008 struct intel_crtc *intel_crtc =
2009 to_intel_crtc(intel_dig_port->base.base.crtc);
1967 uint32_t DP = intel_dp->DP; 2010 uint32_t DP = intel_dp->DP;
1968 2011
1969 /* 2012 /*
@@ -1981,7 +2024,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1981 * intel_ddi_prepare_link_retrain will take care of redoing the link 2024 * intel_ddi_prepare_link_retrain will take care of redoing the link
1982 * train. 2025 * train.
1983 */ 2026 */
1984 if (IS_HASWELL(dev)) 2027 if (HAS_DDI(dev))
1985 return; 2028 return;
1986 2029
1987 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 2030 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2041,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1998 } 2041 }
1999 POSTING_READ(intel_dp->output_reg); 2042 POSTING_READ(intel_dp->output_reg);
2000 2043
2001 msleep(17); 2044 /* We don't really know why we're doing this */
2045 intel_wait_for_vblank(dev, intel_crtc->pipe);
2002 2046
2003 if (HAS_PCH_IBX(dev) && 2047 if (HAS_PCH_IBX(dev) &&
2004 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 2048 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2062,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2018 /* Changes to enable or select take place the vblank 2062 /* Changes to enable or select take place the vblank
2019 * after being written. 2063 * after being written.
2020 */ 2064 */
2021 if (crtc == NULL) { 2065 if (WARN_ON(crtc == NULL)) {
2022 /* We can arrive here never having been attached 2066 /* We should never try to disable a port without a crtc
2023 * to a CRTC, for instance, due to inheriting 2067 * attached. For paranoia keep the code around for a
2024 * random state from the BIOS. 2068 * bit. */
2025 *
2026 * If the pipe is not running, play safe and
2027 * wait for the clocks to stabilise before
2028 * continuing.
2029 */
2030 POSTING_READ(intel_dp->output_reg); 2069 POSTING_READ(intel_dp->output_reg);
2031 msleep(50); 2070 msleep(50);
2032 } else 2071 } else
2033 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 2072 intel_wait_for_vblank(dev, intel_crtc->pipe);
2034 } 2073 }
2035 2074
2036 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 2075 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2081,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2042static bool 2081static bool
2043intel_dp_get_dpcd(struct intel_dp *intel_dp) 2082intel_dp_get_dpcd(struct intel_dp *intel_dp)
2044{ 2083{
2084 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2085
2045 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2086 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2046 sizeof(intel_dp->dpcd)) == 0) 2087 sizeof(intel_dp->dpcd)) == 0)
2047 return false; /* aux transfer failed */ 2088 return false; /* aux transfer failed */
2048 2089
2090 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2091 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2092 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2093
2049 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2094 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2050 return false; /* DPCD not present */ 2095 return false; /* DPCD not present */
2051 2096
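The DPCD hex dump moves from intel_dp_detect() into intel_dp_get_dpcd(), so every successful read gets logged. The buffer is sized at three characters per byte because hex_dump_to_buffer() emits roughly "xx " per octet; an approximate userspace equivalent:

#include <stdint.h>
#include <stdio.h>

static void dump_hex(const uint8_t *buf, size_t len, char *out, size_t outlen)
{
    size_t pos = 0;

    for (size_t i = 0; i < len && pos + 4 <= outlen; i++)
        pos += snprintf(out + pos, outlen - pos, "%02x ", buf[i]);
}

int main(void)
{
    uint8_t dpcd[15] = { 0x11, 0x0a, 0x84 };    /* illustrative values */
    char line[sizeof(dpcd) * 3 + 1];            /* "xx " per byte + NUL */

    dump_hex(dpcd, sizeof(dpcd), line, sizeof(line));
    printf("DPCD: %s\n", line);
    return 0;
}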
@@ -2206,6 +2251,8 @@ static enum drm_connector_status
2206ironlake_dp_detect(struct intel_dp *intel_dp) 2251ironlake_dp_detect(struct intel_dp *intel_dp)
2207{ 2252{
2208 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2253 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2254 struct drm_i915_private *dev_priv = dev->dev_private;
2255 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2209 enum drm_connector_status status; 2256 enum drm_connector_status status;
2210 2257
2211 /* Can't disconnect eDP, but you can close the lid... */ 2258 /* Can't disconnect eDP, but you can close the lid... */
@@ -2216,6 +2263,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
2216 return status; 2263 return status;
2217 } 2264 }
2218 2265
2266 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2267 return connector_status_disconnected;
2268
2219 return intel_dp_detect_dpcd(intel_dp); 2269 return intel_dp_detect_dpcd(intel_dp);
2220} 2270}
2221 2271
@@ -2290,13 +2340,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2290 return intel_ddc_get_modes(connector, adapter); 2340 return intel_ddc_get_modes(connector, adapter);
2291} 2341}
2292 2342
2293
2294/**
2295 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
2296 *
2297 * \return true if DP port is connected.
2298 * \return false if DP port is disconnected.
2299 */
2300static enum drm_connector_status 2343static enum drm_connector_status
2301intel_dp_detect(struct drm_connector *connector, bool force) 2344intel_dp_detect(struct drm_connector *connector, bool force)
2302{ 2345{
@@ -2306,7 +2349,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2306 struct drm_device *dev = connector->dev; 2349 struct drm_device *dev = connector->dev;
2307 enum drm_connector_status status; 2350 enum drm_connector_status status;
2308 struct edid *edid = NULL; 2351 struct edid *edid = NULL;
2309 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2310 2352
2311 intel_dp->has_audio = false; 2353 intel_dp->has_audio = false;
2312 2354
@@ -2315,10 +2357,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2315 else 2357 else
2316 status = g4x_dp_detect(intel_dp); 2358 status = g4x_dp_detect(intel_dp);
2317 2359
2318 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2319 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2320 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2321
2322 if (status != connector_status_connected) 2360 if (status != connector_status_connected)
2323 return status; 2361 return status;
2324 2362
@@ -2445,11 +2483,8 @@ intel_dp_set_property(struct drm_connector *connector,
2445 return -EINVAL; 2483 return -EINVAL;
2446 2484
2447done: 2485done:
2448 if (intel_encoder->base.crtc) { 2486 if (intel_encoder->base.crtc)
2449 struct drm_crtc *crtc = intel_encoder->base.crtc; 2487 intel_crtc_restore_mode(intel_encoder->base.crtc);
2450 intel_set_mode(crtc, &crtc->mode,
2451 crtc->x, crtc->y, crtc->fb);
2452 }
2453 2488
2454 return 0; 2489 return 0;
2455} 2490}
@@ -2742,7 +2777,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2742 intel_connector_attach_encoder(intel_connector, intel_encoder); 2777 intel_connector_attach_encoder(intel_connector, intel_encoder);
2743 drm_sysfs_connector_add(connector); 2778 drm_sysfs_connector_add(connector);
2744 2779
2745 if (IS_HASWELL(dev)) 2780 if (HAS_DDI(dev))
2746 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 2781 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2747 else 2782 else
2748 intel_connector->get_hw_state = intel_connector_get_hw_state; 2783 intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0d..54a034c82061 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,7 @@ struct intel_encoder {
153 bool cloneable; 153 bool cloneable;
154 bool connectors_active; 154 bool connectors_active;
155 void (*hot_plug)(struct intel_encoder *); 155 void (*hot_plug)(struct intel_encoder *);
156 void (*pre_pll_enable)(struct intel_encoder *);
156 void (*pre_enable)(struct intel_encoder *); 157 void (*pre_enable)(struct intel_encoder *);
157 void (*enable)(struct intel_encoder *); 158 void (*enable)(struct intel_encoder *);
158 void (*disable)(struct intel_encoder *); 159 void (*disable)(struct intel_encoder *);
@@ -443,6 +444,7 @@ extern void intel_mark_idle(struct drm_device *dev);
443extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); 444extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
444extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); 445extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
445extern bool intel_lvds_init(struct drm_device *dev); 446extern bool intel_lvds_init(struct drm_device *dev);
447extern bool intel_is_dual_link_lvds(struct drm_device *dev);
446extern void intel_dp_init(struct drm_device *dev, int output_reg, 448extern void intel_dp_init(struct drm_device *dev, int output_reg,
447 enum port port); 449 enum port port);
448extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 450extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,9 +504,10 @@ struct intel_set_config {
502 bool mode_changed; 504 bool mode_changed;
503}; 505};
504 506
505extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 507extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
506 int x, int y, struct drm_framebuffer *old_fb); 508 int x, int y, struct drm_framebuffer *old_fb);
507extern void intel_modeset_disable(struct drm_device *dev); 509extern void intel_modeset_disable(struct drm_device *dev);
510extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
508extern void intel_crtc_load_lut(struct drm_crtc *crtc); 511extern void intel_crtc_load_lut(struct drm_crtc *crtc);
509extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 512extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
510extern void intel_encoder_noop(struct drm_encoder *encoder); 513extern void intel_encoder_noop(struct drm_encoder *encoder);
@@ -546,6 +549,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
546 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 549 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
547} 550}
548 551
552bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
553 struct intel_digital_port *port);
554
549extern void intel_connector_attach_encoder(struct intel_connector *connector, 555extern void intel_connector_attach_encoder(struct intel_connector *connector,
550 struct intel_encoder *encoder); 556 struct intel_encoder *encoder);
551extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 557extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +595,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
589 struct drm_mode_fb_cmd2 *mode_cmd, 595 struct drm_mode_fb_cmd2 *mode_cmd,
590 struct drm_i915_gem_object *obj); 596 struct drm_i915_gem_object *obj);
591extern int intel_fbdev_init(struct drm_device *dev); 597extern int intel_fbdev_init(struct drm_device *dev);
598extern void intel_fbdev_initial_config(struct drm_device *dev);
592extern void intel_fbdev_fini(struct drm_device *dev); 599extern void intel_fbdev_fini(struct drm_device *dev);
593extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 600extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
594extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 601extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4ee..71d55801c0d9 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -83,7 +83,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
83 83
84 size = mode_cmd.pitches[0] * mode_cmd.height; 84 size = mode_cmd.pitches[0] * mode_cmd.height;
85 size = ALIGN(size, PAGE_SIZE); 85 size = ALIGN(size, PAGE_SIZE);
86 obj = i915_gem_alloc_object(dev, size); 86 obj = i915_gem_object_create_stolen(dev, size);
87 if (obj == NULL)
88 obj = i915_gem_alloc_object(dev, size);
87 if (!obj) { 89 if (!obj) {
88 DRM_ERROR("failed to allocate framebuffer\n"); 90 DRM_ERROR("failed to allocate framebuffer\n");
89 ret = -ENOMEM; 91 ret = -ENOMEM;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
153 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 155 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
154 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 156 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
155 157
158 /* If the object is shmemfs backed, it will have given us zeroed pages.
159 * If the object is stolen however, it will be full of whatever
160 * garbage was left in there.
161 */
162 if (ifbdev->ifb.obj->stolen)
163 memset_io(info->screen_base, 0, info->screen_size);
164
156 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 165 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
157 166
158 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 167 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
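Two things change for the fbcon framebuffer: allocation tries stolen memory first and falls back to shmem, and a stolen buffer must be scrubbed because it keeps whatever the BIOS drew last. The combined shape, with hypothetical alloc_stolen()/alloc_shmem() standing in for the i915 allocators:

#include <stdlib.h>
#include <string.h>

struct buf {
    void *ptr;
    size_t size;
    int stolen;                         /* 1 if carved from stolen memory */
};

static struct buf *alloc_stolen(size_t n)
{
    (void)n;
    return NULL;                        /* pretend stolen is exhausted */
}

static struct buf *alloc_shmem(size_t n)
{
    struct buf *b = malloc(sizeof(*b));

    if (!b)
        return NULL;
    b->ptr = calloc(1, n);              /* shmem pages arrive zeroed */
    b->size = n;
    b->stolen = 0;
    if (!b->ptr) {
        free(b);
        return NULL;
    }
    return b;
}

static struct buf *alloc_fb(size_t size)
{
    struct buf *obj = alloc_stolen(size);   /* preferred when available */

    if (!obj)
        obj = alloc_shmem(size);
    if (obj && obj->stolen)
        memset(obj->ptr, 0, obj->size);     /* scrub BIOS leftovers */
    return obj;
}

int main(void)
{
    return alloc_fb(4096) ? 0 : 1;
}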
@@ -241,10 +250,18 @@ int intel_fbdev_init(struct drm_device *dev)
241 } 250 }
242 251
243 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 252 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
244 drm_fb_helper_initial_config(&ifbdev->helper, 32); 253
245 return 0; 254 return 0;
246} 255}
247 256
257void intel_fbdev_initial_config(struct drm_device *dev)
258{
259 drm_i915_private_t *dev_priv = dev->dev_private;
260
261 /* Due to peculiar init order wrt to hpd handling this is separate. */
262 drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
263}
264
248void intel_fbdev_fini(struct drm_device *dev) 265void intel_fbdev_fini(struct drm_device *dev)
249{ 266{
250 drm_i915_private_t *dev_priv = dev->dev_private; 267 drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d93..6387f9b0df99 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
48 struct drm_i915_private *dev_priv = dev->dev_private; 48 struct drm_i915_private *dev_priv = dev->dev_private;
49 uint32_t enabled_bits; 49 uint32_t enabled_bits;
50 50
51 enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 51 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
52 52
53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, 53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
54 "HDMI port enabled, expecting disabled\n"); 54 "HDMI port enabled, expecting disabled\n");
@@ -793,16 +793,21 @@ static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
793static enum drm_connector_status 793static enum drm_connector_status
794intel_hdmi_detect(struct drm_connector *connector, bool force) 794intel_hdmi_detect(struct drm_connector *connector, bool force)
795{ 795{
796 struct drm_device *dev = connector->dev;
796 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 797 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
797 struct intel_digital_port *intel_dig_port = 798 struct intel_digital_port *intel_dig_port =
798 hdmi_to_dig_port(intel_hdmi); 799 hdmi_to_dig_port(intel_hdmi);
799 struct intel_encoder *intel_encoder = &intel_dig_port->base; 800 struct intel_encoder *intel_encoder = &intel_dig_port->base;
800 struct drm_i915_private *dev_priv = connector->dev->dev_private; 801 struct drm_i915_private *dev_priv = dev->dev_private;
801 struct edid *edid; 802 struct edid *edid;
802 enum drm_connector_status status = connector_status_disconnected; 803 enum drm_connector_status status = connector_status_disconnected;
803 804
804 if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi)) 805
806 if (IS_G4X(dev) && !g4x_hdmi_connected(intel_hdmi))
805 return status; 807 return status;
808 else if (HAS_PCH_SPLIT(dev) &&
809 !ibx_digital_port_connected(dev_priv, intel_dig_port))
810 return status;
806 811
807 intel_hdmi->has_hdmi_sink = false; 812 intel_hdmi->has_hdmi_sink = false;
808 intel_hdmi->has_audio = false; 813 intel_hdmi->has_audio = false;
@@ -912,11 +917,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
912 return -EINVAL; 917 return -EINVAL;
913 918
914done: 919done:
915 if (intel_dig_port->base.base.crtc) { 920 if (intel_dig_port->base.base.crtc)
916 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 921 intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
917 intel_set_mode(crtc, &crtc->mode,
918 crtc->x, crtc->y, crtc->fb);
919 }
920 922
921 return 0; 923 return 0;
922} 924}
@@ -1013,7 +1015,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1013 intel_hdmi->set_infoframes = cpt_set_infoframes; 1015 intel_hdmi->set_infoframes = cpt_set_infoframes;
1014 } 1016 }
1015 1017
1016 if (IS_HASWELL(dev)) 1018 if (HAS_DDI(dev))
1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1019 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
1018 else 1020 else
1019 intel_connector->get_hw_state = intel_connector_get_hw_state; 1021 intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b812..7f0904170963 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
63{ 63{
64 struct drm_i915_private *dev_priv = dev->dev_private; 64 struct drm_i915_private *dev_priv = dev->dev_private;
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
66} 67}
67 68
68static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 69static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
202 algo->data = bus; 203 algo->data = bus;
203} 204}
204 205
206#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
207static int
208gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
209 u32 gmbus2_status,
210 u32 gmbus4_irq_en)
211{
212 int i;
213 int reg_offset = dev_priv->gpio_mmio_base;
214 u32 gmbus2 = 0;
215 DEFINE_WAIT(wait);
216
217 /* Important: The hw handles only the first bit, so set only one! Since
218 * we also need to check for NAKs besides the hw ready/idle signal, we
219 * need to wake up periodically and check that ourselves. */
220 I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
221
222 for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
223 prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
224 TASK_UNINTERRUPTIBLE);
225
226 gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
227 if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
228 break;
229
230 schedule_timeout(1);
231 }
232 finish_wait(&dev_priv->gmbus_wait_queue, &wait);
233
234 I915_WRITE(GMBUS4 + reg_offset, 0);
235
236 if (gmbus2 & GMBUS_SATOER)
237 return -ENXIO;
238 if (gmbus2 & gmbus2_status)
239 return 0;
240 return -ETIMEDOUT;
241}
242
243static int
244gmbus_wait_idle(struct drm_i915_private *dev_priv)
245{
246 int ret;
247 int reg_offset = dev_priv->gpio_mmio_base;
248
249#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
250
251 if (!HAS_GMBUS_IRQ(dev_priv->dev))
252 return wait_for(C, 10);
253
254 /* Important: The hw handles only the first bit, so set only one! */
255 I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
256
257 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
258
259 I915_WRITE(GMBUS4 + reg_offset, 0);
260
261 if (ret)
262 return 0;
263 else
264 return -ETIMEDOUT;
265#undef C
266}
267
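gmbus_wait_hw_status() and gmbus_wait_idle() replace the old wait_for() polling with sleeps on gmbus_wait_queue, woken from the new GMBUS interrupt. A userspace analogue of the wait_event_timeout() call in gmbus_wait_idle(), built on a condition variable; like the kernel primitive it rechecks the predicate after every wakeup. (Compile with -pthread; with no signaller it simply times out, mirroring the -ETIMEDOUT path.)

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool bus_idle;                   /* the "C" predicate */

static int wait_idle_ms(int ms)
{
    struct timespec dl;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &dl);
    dl.tv_nsec += (long)ms * 1000000;
    dl.tv_sec += dl.tv_nsec / 1000000000;
    dl.tv_nsec %= 1000000000;

    pthread_mutex_lock(&lock);
    while (!bus_idle && err == 0)
        err = pthread_cond_timedwait(&cond, &lock, &dl);
    pthread_mutex_unlock(&lock);

    return bus_idle ? 0 : -1;           /* the driver returns -ETIMEDOUT */
}

int main(void)
{
    printf("wait_idle_ms(10) = %d\n", wait_idle_ms(10));
    return 0;
}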
205static int 268static int
206gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 269gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
207 u32 gmbus1_index) 270 u32 gmbus1_index)
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
219 while (len) { 282 while (len) {
220 int ret; 283 int ret;
221 u32 val, loop = 0; 284 u32 val, loop = 0;
222 u32 gmbus2;
223 285
224 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 286 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
225 (GMBUS_SATOER | GMBUS_HW_RDY), 287 GMBUS_HW_RDY_EN);
226 50);
227 if (ret) 288 if (ret)
228 return -ETIMEDOUT; 289 return ret;
229 if (gmbus2 & GMBUS_SATOER)
230 return -ENXIO;
231 290
232 val = I915_READ(GMBUS3 + reg_offset); 291 val = I915_READ(GMBUS3 + reg_offset);
233 do { 292 do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
261 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); 320 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
262 while (len) { 321 while (len) {
263 int ret; 322 int ret;
264 u32 gmbus2;
265 323
266 val = loop = 0; 324 val = loop = 0;
267 do { 325 do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
270 328
271 I915_WRITE(GMBUS3 + reg_offset, val); 329 I915_WRITE(GMBUS3 + reg_offset, val);
272 330
273 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 331 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
274 (GMBUS_SATOER | GMBUS_HW_RDY), 332 GMBUS_HW_RDY_EN);
275 50);
276 if (ret) 333 if (ret)
277 return -ETIMEDOUT; 334 return ret;
278 if (gmbus2 & GMBUS_SATOER)
279 return -ENXIO;
280 } 335 }
281 return 0; 336 return 0;
282} 337}
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
345 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 400 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
346 401
347 for (i = 0; i < num; i++) { 402 for (i = 0; i < num; i++) {
348 u32 gmbus2;
349
350 if (gmbus_is_index_read(msgs, i, num)) { 403 if (gmbus_is_index_read(msgs, i, num)) {
351 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); 404 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
352 i += 1; /* set i to the index of the read xfer */ 405 i += 1; /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
361 if (ret == -ENXIO) 414 if (ret == -ENXIO)
362 goto clear_err; 415 goto clear_err;
363 416
364 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 417 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
365 (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 418 GMBUS_HW_WAIT_EN);
366 50); 419 if (ret == -ENXIO)
420 goto clear_err;
367 if (ret) 421 if (ret)
368 goto timeout; 422 goto timeout;
369 if (gmbus2 & GMBUS_SATOER)
370 goto clear_err;
371 } 423 }
372 424
373 /* Generate a STOP condition on the bus. Note that gmbus can't generate 425 /* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
380 * We will re-enable it at the start of the next xfer, 432 * We will re-enable it at the start of the next xfer,
381 * till then let it sleep. 433 * till then let it sleep.
382 */ 434 */
383 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 435 if (gmbus_wait_idle(dev_priv)) {
384 10)) {
385 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", 436 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
386 adapter->name); 437 adapter->name);
387 ret = -ETIMEDOUT; 438 ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
405 * it's slow responding and only answers on the 2nd retry. 456 * it's slow responding and only answers on the 2nd retry.
406 */ 457 */
407 ret = -ENXIO; 458 ret = -ENXIO;
408 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 459 if (gmbus_wait_idle(dev_priv)) {
409 10)) {
410 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", 460 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
411 adapter->name); 461 adapter->name);
412 ret = -ETIMEDOUT; 462 ret = -ETIMEDOUT;
@@ -469,6 +519,7 @@ int intel_setup_gmbus(struct drm_device *dev)
469 dev_priv->gpio_mmio_base = 0; 519 dev_priv->gpio_mmio_base = 0;
470 520
471 mutex_init(&dev_priv->gmbus_mutex); 521 mutex_init(&dev_priv->gmbus_mutex);
522 init_waitqueue_head(&dev_priv->gmbus_wait_queue);
472 523
473 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 524 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
474 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 525 struct intel_gmbus *bus = &dev_priv->gmbus[i];
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..8c61876dbe95 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -52,6 +52,8 @@ struct intel_lvds_encoder {
52 u32 pfit_control; 52 u32 pfit_control;
53 u32 pfit_pgm_ratios; 53 u32 pfit_pgm_ratios;
54 bool pfit_dirty; 54 bool pfit_dirty;
55 bool is_dual_link;
56 u32 reg;
55 57
56 struct intel_lvds_connector *attached_connector; 58 struct intel_lvds_connector *attached_connector;
57}; 59};
@@ -71,15 +73,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
71{ 73{
72 struct drm_device *dev = encoder->base.dev; 74 struct drm_device *dev = encoder->base.dev;
73 struct drm_i915_private *dev_priv = dev->dev_private; 75 struct drm_i915_private *dev_priv = dev->dev_private;
74 u32 lvds_reg, tmp; 76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
75 77 u32 tmp;
76 if (HAS_PCH_SPLIT(dev)) {
77 lvds_reg = PCH_LVDS;
78 } else {
79 lvds_reg = LVDS;
80 }
81 78
82 tmp = I915_READ(lvds_reg); 79 tmp = I915_READ(lvds_encoder->reg);
83 80
84 if (!(tmp & LVDS_PORT_EN)) 81 if (!(tmp & LVDS_PORT_EN))
85 return false; 82 return false;
@@ -92,6 +89,68 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
92 return true; 89 return true;
93} 90}
94 91
92/* The LVDS pin pair needs to be on before the DPLLs are enabled.
93 * This is an exception to the general rule that mode_set doesn't turn
94 * things on.
95 */
96static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
97{
98 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
99 struct drm_device *dev = encoder->base.dev;
100 struct drm_i915_private *dev_priv = dev->dev_private;
101 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
102 struct drm_display_mode *fixed_mode =
103 lvds_encoder->attached_connector->base.panel.fixed_mode;
104 int pipe = intel_crtc->pipe;
105 u32 temp;
106
107 temp = I915_READ(lvds_encoder->reg);
108 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
109
110 if (HAS_PCH_CPT(dev)) {
111 temp &= ~PORT_TRANS_SEL_MASK;
112 temp |= PORT_TRANS_SEL_CPT(pipe);
113 } else {
114 if (pipe == 1) {
115 temp |= LVDS_PIPEB_SELECT;
116 } else {
117 temp &= ~LVDS_PIPEB_SELECT;
118 }
119 }
120
121 /* set the corresponding LVDS_BORDER bit */
122 temp |= dev_priv->lvds_border_bits;
123 /* Set the B0-B3 data pairs corresponding to whether we're going to
124 * set the DPLLs for dual-channel mode or not.
125 */
126 if (lvds_encoder->is_dual_link)
127 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
128 else
129 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
130
131 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
132 * appropriately here, but we need to look more thoroughly into how
133 * panels behave in the two modes.
134 */
135
136 /* Set the dithering flag on LVDS as needed, note that there is no
137 * special lvds dither control bit on pch-split platforms, dithering is
138 * only controlled through the PIPECONF reg. */
139 if (INTEL_INFO(dev)->gen == 4) {
140 if (dev_priv->lvds_dither)
141 temp |= LVDS_ENABLE_DITHER;
142 else
143 temp &= ~LVDS_ENABLE_DITHER;
144 }
145 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
146 if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
147 temp |= LVDS_HSYNC_POLARITY;
148 if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
149 temp |= LVDS_VSYNC_POLARITY;
150
151 I915_WRITE(lvds_encoder->reg, temp);
152}
153
95/** 154/**
96 * Sets the power state for the panel. 155 * Sets the power state for the panel.
97 */ 156 */
@@ -101,19 +160,17 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 160 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 161 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
103 struct drm_i915_private *dev_priv = dev->dev_private; 162 struct drm_i915_private *dev_priv = dev->dev_private;
104 u32 ctl_reg, lvds_reg, stat_reg; 163 u32 ctl_reg, stat_reg;
105 164
106 if (HAS_PCH_SPLIT(dev)) { 165 if (HAS_PCH_SPLIT(dev)) {
107 ctl_reg = PCH_PP_CONTROL; 166 ctl_reg = PCH_PP_CONTROL;
108 lvds_reg = PCH_LVDS;
109 stat_reg = PCH_PP_STATUS; 167 stat_reg = PCH_PP_STATUS;
110 } else { 168 } else {
111 ctl_reg = PP_CONTROL; 169 ctl_reg = PP_CONTROL;
112 lvds_reg = LVDS;
113 stat_reg = PP_STATUS; 170 stat_reg = PP_STATUS;
114 } 171 }
115 172
116 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 173 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
117 174
118 if (lvds_encoder->pfit_dirty) { 175 if (lvds_encoder->pfit_dirty) {
119 /* 176 /*
@@ -132,7 +189,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
132 } 189 }
133 190
134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 191 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
135 POSTING_READ(lvds_reg); 192 POSTING_READ(lvds_encoder->reg);
136 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 193 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
137 DRM_ERROR("timed out waiting for panel to power on\n"); 194 DRM_ERROR("timed out waiting for panel to power on\n");
138 195
@@ -144,15 +201,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
144 struct drm_device *dev = encoder->base.dev; 201 struct drm_device *dev = encoder->base.dev;
145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 202 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
146 struct drm_i915_private *dev_priv = dev->dev_private; 203 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 ctl_reg, lvds_reg, stat_reg; 204 u32 ctl_reg, stat_reg;
148 205
149 if (HAS_PCH_SPLIT(dev)) { 206 if (HAS_PCH_SPLIT(dev)) {
150 ctl_reg = PCH_PP_CONTROL; 207 ctl_reg = PCH_PP_CONTROL;
151 lvds_reg = PCH_LVDS;
152 stat_reg = PCH_PP_STATUS; 208 stat_reg = PCH_PP_STATUS;
153 } else { 209 } else {
154 ctl_reg = PP_CONTROL; 210 ctl_reg = PP_CONTROL;
155 lvds_reg = LVDS;
156 stat_reg = PP_STATUS; 211 stat_reg = PP_STATUS;
157 } 212 }
158 213
@@ -167,8 +222,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
167 lvds_encoder->pfit_dirty = true; 222 lvds_encoder->pfit_dirty = true;
168 } 223 }
169 224
170 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 225 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
171 POSTING_READ(lvds_reg); 226 POSTING_READ(lvds_encoder->reg);
172} 227}
173 228
174static int intel_lvds_mode_valid(struct drm_connector *connector, 229static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -591,8 +646,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
591 * If the CRTC is enabled, the display will be changed 646 * If the CRTC is enabled, the display will be changed
592 * according to the new panel fitting mode. 647 * according to the new panel fitting mode.
593 */ 648 */
594 intel_set_mode(crtc, &crtc->mode, 649 intel_crtc_restore_mode(crtc);
595 crtc->x, crtc->y, crtc->fb);
596 } 650 }
597 } 651 }
598 652
@@ -903,6 +957,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
903 return false; 957 return false;
904} 958}
905 959
960static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
961{
962 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
963 return 1;
964}
965
966static const struct dmi_system_id intel_dual_link_lvds[] = {
967 {
968 .callback = intel_dual_link_lvds_callback,
969 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
970 .matches = {
971 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
972 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
973 },
974 },
975 { } /* terminating entry */
976};
977
978bool intel_is_dual_link_lvds(struct drm_device *dev)
979{
980 struct intel_encoder *encoder;
981 struct intel_lvds_encoder *lvds_encoder;
982
983 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
984 base.head) {
985 if (encoder->type == INTEL_OUTPUT_LVDS) {
986 lvds_encoder = to_lvds_encoder(&encoder->base);
987
988 return lvds_encoder->is_dual_link;
989 }
990 }
991
992 return false;
993}
994
995static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
996{
997 struct drm_device *dev = lvds_encoder->base.base.dev;
998 unsigned int val;
999 struct drm_i915_private *dev_priv = dev->dev_private;
1000
1001 /* use the module option value if specified */
1002 if (i915_lvds_channel_mode > 0)
1003 return i915_lvds_channel_mode == 2;
1004
1005 if (dmi_check_system(intel_dual_link_lvds))
1006 return true;
1007
1008 /* BIOS should set the proper LVDS register value at boot, but
1009 * in reality, it doesn't set the value when the lid is closed;
1010 * we need to check "the value to be set" in VBT when LVDS
1011 * register is uninitialized.
1012 */
1013 val = I915_READ(lvds_encoder->reg);
1014 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
1015 val = dev_priv->bios_lvds_val;
1016
1017 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
1018}
1019
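compute_is_dual_link_lvds() is a three-level precedence: an explicit i915_lvds_channel_mode module parameter wins, then the DMI quirk table, then whichever of the live LVDS register or the saved VBT value looks initialized. The decision logic, with placeholder register bits (the real ones live in i915_reg.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout, not the real i915_reg.h values. */
#define PIPE_MASK        (3u << 29)
#define DETECTED         (1u << 1)
#define CLKB_POWER_MASK  (3u << 4)
#define CLKB_POWER_UP    (3u << 4)

static bool is_dual_link(int module_opt, bool dmi_quirk,
                         uint32_t lvds_reg, uint32_t bios_lvds_val)
{
    if (module_opt > 0)                 /* user forced 1 or 2 channels */
        return module_opt == 2;

    if (dmi_quirk)                      /* known dual-link machine */
        return true;

    /* If the BIOS never wrote the register (lid closed at boot),
     * fall back to the value saved from the VBT. */
    uint32_t val = lvds_reg;
    if (!(val & ~(PIPE_MASK | DETECTED)))
        val = bios_lvds_val;

    return (val & CLKB_POWER_MASK) == CLKB_POWER_UP;
}

int main(void)
{
    printf("%d\n", is_dual_link(0, false, 0, CLKB_POWER_UP));
    return 0;
}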
906static bool intel_lvds_supported(struct drm_device *dev) 1020static bool intel_lvds_supported(struct drm_device *dev)
907{ 1021{
908 /* With the introduction of the PCH we gained a dedicated 1022 /* With the introduction of the PCH we gained a dedicated
@@ -988,6 +1102,7 @@ bool intel_lvds_init(struct drm_device *dev)
988 DRM_MODE_ENCODER_LVDS); 1102 DRM_MODE_ENCODER_LVDS);
989 1103
990 intel_encoder->enable = intel_enable_lvds; 1104 intel_encoder->enable = intel_enable_lvds;
1105 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
991 intel_encoder->disable = intel_disable_lvds; 1106 intel_encoder->disable = intel_disable_lvds;
992 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 1107 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
993 intel_connector->get_hw_state = intel_connector_get_hw_state; 1108 intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1009,6 +1124,12 @@ bool intel_lvds_init(struct drm_device *dev)
1009 connector->interlace_allowed = false; 1124 connector->interlace_allowed = false;
1010 connector->doublescan_allowed = false; 1125 connector->doublescan_allowed = false;
1011 1126
1127 if (HAS_PCH_SPLIT(dev)) {
1128 lvds_encoder->reg = PCH_LVDS;
1129 } else {
1130 lvds_encoder->reg = LVDS;
1131 }
1132
1012 /* create the scaling mode property */ 1133 /* create the scaling mode property */
1013 drm_mode_create_scaling_mode_property(dev); 1134 drm_mode_create_scaling_mode_property(dev);
1014 drm_object_attach_property(&connector->base, 1135 drm_object_attach_property(&connector->base,
@@ -1109,6 +1230,10 @@ bool intel_lvds_init(struct drm_device *dev)
1109 goto failed; 1230 goto failed;
1110 1231
1111out: 1232out:
1233 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
1234 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1235 lvds_encoder->is_dual_link ? "dual" : "single");
1236
1112 /* 1237 /*
1113 * Unlock registers and just 1238 * Unlock registers and just
1114 * leave them unlocked 1239 * leave them unlocked
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adce..49249bb97485 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,7 +28,6 @@
28#include <linux/fb.h> 28#include <linux/fb.h>
29#include <drm/drm_edid.h> 29#include <drm/drm_edid.h>
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include <drm/drm_edid.h>
32#include "intel_drv.h" 31#include "intel_drv.h"
33#include "i915_drv.h" 32#include "i915_drv.h"
34 33
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a0..fabe0acf808d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
1333 1333
1334 overlay->dev = dev; 1334 overlay->dev = dev;
1335 1335
1336 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); 1336 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
1337 if (!reg_bo) 1337 if (reg_bo == NULL)
1338 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1339 if (reg_bo == NULL)
1338 goto out_free; 1340 goto out_free;
1339 overlay->reg_bo = reg_bo; 1341 overlay->reg_bo = reg_bo;
1340 1342
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6f54ffab3ba..5a8a72c5a89d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -440,12 +440,6 @@ void intel_update_fbc(struct drm_device *dev)
440 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 440 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
441 goto out_disable; 441 goto out_disable;
442 } 442 }
443 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
444 DRM_DEBUG_KMS("framebuffer too large, disabling "
445 "compression\n");
446 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
447 goto out_disable;
448 }
449 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 443 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
450 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 444 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
451 DRM_DEBUG_KMS("mode incompatible with compression, " 445 DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -479,6 +473,14 @@ void intel_update_fbc(struct drm_device *dev)
479 if (in_dbg_master()) 473 if (in_dbg_master())
480 goto out_disable; 474 goto out_disable;
481 475
476 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
477 DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
478 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
479 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
480 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
481 goto out_disable;
482 }
483
482 /* If the scanout has not changed, don't modify the FBC settings. 484 /* If the scanout has not changed, don't modify the FBC settings.
483 * Note that we make the fundamental assumption that the fb->obj 485 * Note that we make the fundamental assumption that the fb->obj
484 * cannot be unpinned (and have its GTT offset and fence revoked) 486 * cannot be unpinned (and have its GTT offset and fence revoked)
@@ -526,6 +528,7 @@ out_disable:
526 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 528 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
527 intel_disable_fbc(dev); 529 intel_disable_fbc(dev);
528 } 530 }
531 i915_gem_stolen_cleanup_compression(dev);
529} 532}
530 533
531static void i915_pineview_get_mem_freq(struct drm_device *dev) 534static void i915_pineview_get_mem_freq(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae253e04c391..59e02691baf3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -601,6 +601,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
601 return 0; 601 return 0;
602} 602}
603 603
604static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
605 u32 seqno)
606{
607 struct drm_i915_private *dev_priv = dev->dev_private;
608 return dev_priv->last_seqno < seqno;
609}
610
604/** 611/**
605 * intel_ring_sync - sync the waiter to the signaller on seqno 612 * intel_ring_sync - sync the waiter to the signaller on seqno
606 * 613 *
@@ -631,11 +638,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
631 if (ret) 638 if (ret)
632 return ret; 639 return ret;
633 640
634 intel_ring_emit(waiter, 641 /* If seqno wrap happened, omit the wait with no-ops */
635 dw1 | signaller->semaphore_register[waiter->id]); 642 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
636 intel_ring_emit(waiter, seqno); 643 intel_ring_emit(waiter,
637 intel_ring_emit(waiter, 0); 644 dw1 |
638 intel_ring_emit(waiter, MI_NOOP); 645 signaller->semaphore_register[waiter->id]);
646 intel_ring_emit(waiter, seqno);
647 intel_ring_emit(waiter, 0);
648 intel_ring_emit(waiter, MI_NOOP);
649 } else {
650 intel_ring_emit(waiter, MI_NOOP);
651 intel_ring_emit(waiter, MI_NOOP);
652 intel_ring_emit(waiter, MI_NOOP);
653 intel_ring_emit(waiter, MI_NOOP);
654 }
639 intel_ring_advance(waiter); 655 intel_ring_advance(waiter);
640 656
641 return 0; 657 return 0;
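Because intel_ring_begin() has already reserved four dwords by the time gen6_ring_sync() runs, the wrap case cannot simply emit nothing; it pads the slot with MI_NOOPs (encoded as 0 on this hardware) so the packet keeps its fixed size. The shape, on a plain array:

#include <stdint.h>
#include <stdio.h>

#define MI_NOOP 0u                      /* i915 encodes the no-op as 0 */

static void emit_sync(uint32_t *ring, unsigned int *tail, int do_wait,
                      uint32_t wait_cmd, uint32_t seqno)
{
    if (do_wait) {
        ring[(*tail)++] = wait_cmd;
        ring[(*tail)++] = seqno;
        ring[(*tail)++] = 0;
        ring[(*tail)++] = MI_NOOP;
    } else {
        /* Same number of dwords, so the reservation still adds up. */
        for (int i = 0; i < 4; i++)
            ring[(*tail)++] = MI_NOOP;
    }
}

int main(void)
{
    uint32_t ring[8] = { 0 };
    unsigned int tail = 0;

    emit_sync(ring, &tail, 0, 0x0e000000, 42);  /* arbitrary command dword */
    printf("tail = %u\n", tail);                /* always 4 */
    return 0;
}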
@@ -716,6 +732,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
716 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 732 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
717} 733}
718 734
735static void
736ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
737{
738 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
739}
740
719static u32 741static u32
720pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 742pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
721{ 743{
@@ -723,6 +745,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return pc->cpu_page[0];
 }
 
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
+}
+
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -1152,7 +1181,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		return ret;
 	}
 
-	obj = i915_gem_alloc_object(dev, ring->size);
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
@@ -1190,6 +1223,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
+	intel_ring_init_seqno(ring, dev_priv->last_seqno);
+
 	return 0;
 
 err_unmap:
@@ -1398,11 +1433,31 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+			      int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	ring->space -= bytes;
+	return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int n = 4*num_dwords;
 	int ret;
 
 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
@@ -1414,20 +1469,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (unlikely(ring->tail + n > ring->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-	}
+	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
 
-	if (unlikely(ring->space < n)) {
-		ret = ring_wait_for_space(ring, n);
-		if (unlikely(ret))
-			return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
 	}
 
-	ring->space -= n;
-	return 0;
+	ring->set_seqno(ring, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1592,6 +1648,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1602,6 +1659,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1612,6 +1670,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	if (IS_GEN2(dev)) {
 		ring->irq_get = i8xx_ring_get_irq;
 		ring->irq_put = i8xx_ring_put_irq;
@@ -1683,6 +1742,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	if (IS_GEN2(dev)) {
 		ring->irq_get = i8xx_ring_get_irq;
 		ring->irq_put = i8xx_ring_put_irq;
@@ -1743,6 +1803,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
@@ -1758,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = i9xx_add_request;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
@@ -1787,6 +1849,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->flush = blt_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
 	ring->irq_get = gen6_ring_get_irq;
 	ring->irq_put = gen6_ring_put_irq;
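
The wrap handling above leans on dev_priv->last_seqno being the newest seqno handed out: after a wrap the counter restarts low, so a stale wait target compares numerically *above* it and would never be signalled; the semaphore wait is therefore replaced by four no-ops. A minimal userspace sketch of that comparison (the harness and the standalone variable are hypothetical stand-ins, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for dev_priv->last_seqno: the most recently
 * allocated seqno. */
static uint32_t last_seqno;

/* Mirrors i915_gem_has_seqno_wrapped(): a wait target above the newest
 * allocated seqno can only predate a wrap, so waiting on it would
 * stall forever. */
static bool seqno_wrapped(uint32_t wait_seqno)
{
	return last_seqno < wait_seqno;
}

int main(void)
{
	last_seqno = 5;	/* counter restarted after a wrap */
	printf("%d\n", seqno_wrapped(0xfffffff0));	/* 1: emit no-ops */
	printf("%d\n", seqno_wrapped(3));		/* 0: wait normally */
	return 0;
}
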
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd05725..d66208c2c48b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring_buffer {
 	 */
 	u32		(*get_seqno)(struct intel_ring_buffer *ring,
 				     bool lazy_coherency);
+	void		(*set_seqno)(struct intel_ring_buffer *ring,
+				     u32 seqno);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length,
 					       unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 	return ring->status_page.page_addr[reg];
 }
 
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+			int reg, u32 value)
+{
+	ring->status_page.page_addr[reg] = value;
+}
+
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 }
 void intel_ring_advance(struct intel_ring_buffer *ring);
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
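The new set_seqno hook is the write-side twin of get_seqno: for the common backends both just touch the I915_GEM_HWS_INDEX dword of the status page, as the ring_get_seqno/ring_set_seqno pair above shows. A stripped-down, runnable model of that vtable pattern (struct, constant, and harness here are all hypothetical simplifications):

#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX 0x20	/* stand-in for I915_GEM_HWS_INDEX */

struct ring {
	uint32_t status_page[64];	/* stand-in for the pinned status page */
	uint32_t (*get_seqno)(struct ring *ring);
	void (*set_seqno)(struct ring *ring, uint32_t seqno);
};

static uint32_t ring_get_seqno(struct ring *ring)
{
	return ring->status_page[HWS_INDEX];
}

static void ring_set_seqno(struct ring *ring, uint32_t seqno)
{
	ring->status_page[HWS_INDEX] = seqno;
}

int main(void)
{
	struct ring ring = {
		.get_seqno = ring_get_seqno,
		.set_seqno = ring_set_seqno,
	};

	/* Roughly what intel_ring_init_seqno() boils down to for this
	 * backend: pre-seed the status page so waiters see a consistent
	 * value before any request completes. */
	ring.set_seqno(&ring, 0x700);
	printf("0x%x\n", ring.get_seqno(&ring));
	return 0;
}
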
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36d..153377bed66a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1997,11 +1997,8 @@ set_value:
 
 
 done:
-	if (intel_sdvo->base.base.crtc) {
-		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-		intel_set_mode(crtc, &crtc->mode,
-			       crtc->x, crtc->y, crtc->fb);
-	}
+	if (intel_sdvo->base.base.crtc)
+		intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
 
 	return 0;
 #undef CHECK_PROPERTY
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c1278..984a113c5d13 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 	}
 
 	if (changed && crtc)
-		intel_set_mode(crtc, &crtc->mode,
-			       crtc->x, crtc->y, crtc->fb);
+		intel_crtc_restore_mode(crtc);
 out:
 	return ret;
 }
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0f4a366f6fa6..9b991f91d81b 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
 	return mm->hole_stack.next;
 }
+
+static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	BUG_ON(!hole_node->hole_follows);
+	return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return list_entry(hole_node->node_list.next,
+			  struct drm_mm_node, node_list)->start;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return __drm_mm_hole_node_end(hole_node);
+}
+
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
@@ -99,9 +122,26 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 	     entry != NULL; entry = next, \
 		next = entry ? list_entry(entry->node_list.next, \
 			struct drm_mm_node, node_list) : NULL) \
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					       unsigned long start,
+					       unsigned long size,
+					       bool atomic);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
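
The drm_mm_for_each_hole macro above hides its bookkeeping in the loop condition: the comma operator assigns hole_start and hole_end, then the ternary yields 1/0 to continue or stop, so the loop body sees both bounds with no extra statements. A self-contained miniature of that trick over a plain array (every name here is hypothetical; real drm_mm walks the hole_stack list instead of filtering on a flag):

#include <stdio.h>

/* Each allocated block may be followed by a hole before the next block.
 * A sentinel entry marks the end of the managed range. */
struct node {
	unsigned long start;
	unsigned long size;
	int hole_follows;	/* hole between this node and the next? */
};

/* The macro's trick in miniature: the for-condition uses the comma
 * operator to set hole_start/hole_end *before* evaluating to 1/0. */
#define for_each_hole(i, nodes, n, hole_start, hole_end)	\
	for (i = 0; (i) < (n) ?					\
	     (hole_start = (nodes)[i].start + (nodes)[i].size,	\
	      hole_end = (nodes)[i + 1].start, 1) : 0;		\
	     (i)++)						\
		if ((nodes)[i].hole_follows)

int main(void)
{
	struct node nodes[] = {
		{ 0,     4096, 1 },	/* hole: [4096, 8192) */
		{ 8192,  4096, 0 },	/* packed against the next node */
		{ 12288, 0,    0 },	/* sentinel marking the end */
	};
	unsigned long hole_start, hole_end;
	int i;

	for_each_hole(i, nodes, 2, hole_start, hole_end)
		printf("hole at [%lu, %lu)\n", hole_start, hole_end);
	return 0;
}
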
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 6eb76a1f11ab..3e3a166a2690 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -18,8 +18,6 @@ struct intel_gtt {
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
 	struct page *scratch_page;
-	/* for ppgtt PDE access */
-	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */
 	phys_addr_t gma_bus_addr;
 } *intel_gtt_get(void);