author     Dave Airlie <airlied@redhat.com>   2012-05-11 12:41:58 -0400
committer  Dave Airlie <airlied@redhat.com>   2012-05-11 12:42:41 -0400
commit     218c872bf8285af7aaa50f1f83312020e05451bf (patch)
tree       2d26aac19eeb5bab7c3c6a81ba67a955cef7578f
parent     b06d66be3b0b198ee30bd9f779874ae7115570a0 (diff)
parent     5e13a0c5ec05d382b488a691dfb8af015b1dea1e (diff)
Merge tag 'drm-intel-next-2012-05-06-merged' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next
Daniel says:

Highlights:
- sparse fixes from Ben.
- tons of little cleanups from Chris all over: tiling_changed clarification, deferred_free list removal, ...
- fix up irq handler on gen2 & gen3 + related cleanups from Chris
- prep work for wait_rendering_timeout from Ben with some nice refactorings
- first set of infoframe fixes from Paulo for doubleclocked CEA modes
- improve pch pll handling from Jesse and Chris
- gpu hangman, this also contains the reset fix for gen4
- rps sanity check from Chris - this papers over issues when the gpu fails to clock up on snb/ivb, and it is shockingly easy to hit. The code prints a big WARN backtrace and restores the hw to a sane state. The real fix is still in the works.

Atm I'm aware of 2 regressions in -next:
- One of the gmbus patches (not gmbus itself) regressed lvds detection on a MacbookPro. I've analyzed the bug already and I think I know what's going on; the patch is awaiting test feedback.
- Just today QA reported that DP on ilk regressed. That bug is fresh off the press and still awaiting detailed logfiles and the bisect result. The only thing that's clear atm is that -fixes works and -next doesn't.
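The gpu hangman work in this series adds an i915_ring_stop debugfs file (see the i915_debugfs.c hunks below) that takes a hex mask of rings to stop and stores it in dev_priv->stop_rings. A minimal userspace sketch for exercising it, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (both assumptions, not part of this patch):

/* Sketch only: write a ring mask to i915_ring_stop and read it back.
 * The debugfs path and minor number are assumptions; needs root and
 * CONFIG_DEBUG_FS. Error handling kept minimal. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_ring_stop";
	char buf[20];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "0x1\n");	/* bit 0 assumed to correspond to the render ring */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("stop_rings = %s", buf);	/* kernel prints "0x%08x\n" */
	fclose(f);
	return 0;
}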
-rw-r--r--  drivers/gpu/drm/i915/Makefile                |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |  270
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              | 1076
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |  132
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  122
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |  434
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c        |   16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c        |   42
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |    9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c          |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c       |  202
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c       |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              | 1134
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |   24
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c          |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         |  413
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c              |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |   24
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c            |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c        |   67
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c         |  209
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c              |  675
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      |  170
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |   15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c            |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c          |   29
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c              |    5
27 files changed, 2881 insertions(+), 2256 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b65c06f1a021..8b8bbc70f86b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,6 +11,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
+	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8db38617f4a..950f72a0d729 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
-	DEFERRED_FREE_LIST,
 };
 
 static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case PINNED_LIST:
-		seq_printf(m, "Pinned:\n");
-		head = &dev_priv->mm.pinned_list;
-		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
-	case DEFERRED_FREE_LIST:
-		seq_printf(m, "Deferred free:\n");
-		head = &dev_priv->mm.deferred_free_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.pinned_list, mm_list);
-	seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.inactive_list, mm_list);
 	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-	seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (list == PINNED_LIST && obj->pin_count == 0)
+			continue;
+
 		seq_printf(m, " ");
 		describe_obj(m, obj);
 		seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 	return 0;
 }
 
-
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
 			   ring->name, ring->get_seqno(ring));
-		seq_printf(m, "Waiter sequence (%s): %d\n",
-			   ring->name, ring->waiting_seqno);
-		seq_printf(m, "IRQ sequence (%s): %d\n",
-			   ring->name, ring->irq_seqno);
 	}
 }
 
@@ -602,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	if (!ring->obj) {
-		seq_printf(m, "No ringbuffer setup\n");
-	} else {
-		const u8 __iomem *virt = ring->virtual_start;
-		uint32_t off;
-
-		for (off = 0; off < ring->size; off += 4) {
-			uint32_t *ptr = (uint32_t *)(virt + off);
-			seq_printf(m, "%08x : %08x\n", off, *ptr);
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int ret;
-
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	if (ring->size == 0)
-		return 0;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	seq_printf(m, "Ring %s:\n", ring->name);
-	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
-	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
-	seq_printf(m, " Size : %08x\n", ring->size);
-	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
-	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
-		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
-		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
-	}
-	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
-	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static const char *ring_str(int ring)
 {
 	switch (ring) {
@@ -766,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
 			   error->semaphore_mboxes[ring][1]);
 	}
 	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
 	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 }
 
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_error_state *error;
-	unsigned long flags;
+	struct drm_i915_error_state *error = error_priv->error;
 	int i, j, page, offset, elt;
 
-	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	if (!dev_priv->first_error) {
+	if (!error) {
 		seq_printf(m, "no error state collected\n");
-		goto out;
+		return 0;
 	}
 
-	error = dev_priv->first_error;
 
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
+	seq_printf(m, "IER: 0x%08x\n", error->ier);
 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -867,12 +788,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	if (error->display)
 		intel_display_print_error_state(m, dev, error->display);
 
-out:
+	return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	mutex_lock(&dev->struct_mutex);
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_error_state_file_priv *error_priv;
+	unsigned long flags;
+
+	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+	if (!error_priv)
+		return -ENOMEM;
+
+	error_priv->dev = dev;
+
+	spin_lock_irqsave(&dev_priv->error_lock, flags);
+	error_priv->error = dev_priv->first_error;
+	if (error_priv->error)
+		kref_get(&error_priv->error->ref);
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
-	return 0;
+	return single_open(file, i915_error_state, error_priv);
 }
 
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+
+	if (error_priv->error)
+		kref_put(&error_priv->error->ref, i915_error_state_free);
+	kfree(error_priv);
+
+	return single_release(inode, file);
+}
+
+static const struct file_operations i915_error_state_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_error_state_open,
+	.read = seq_read,
+	.write = i915_error_state_write,
+	.llseek = default_llseek,
+	.release = i915_error_state_release,
+};
+
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1356,17 +1336,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
 	int ret;
 
+	if (data == NULL)
+		return -ENOMEM;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
-	if (opregion->header)
-		seq_write(m, opregion->header, OPREGION_SIZE);
+	if (opregion->header) {
+		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+		seq_write(m, data, OPREGION_SIZE);
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
+out:
+	kfree(data);
 	return 0;
 }
 
@@ -1659,6 +1647,65 @@ static const struct file_operations i915_wedged_fops = {
 };
 
 static ssize_t
+i915_ring_stop_read(struct file *filp,
+		    char __user *ubuf,
+		    size_t max,
+		    loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int len;
+
+	len = snprintf(buf, sizeof(buf),
+		       "0x%08x\n", dev_priv->stop_rings);
+
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+		     const char __user *ubuf,
+		     size_t cnt,
+		     loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 0;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+	mutex_lock(&dev->struct_mutex);
+	dev_priv->stop_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_ring_stop_read,
+	.write = i915_ring_stop_write,
+	.llseek = default_llseek,
+};
+
+static ssize_t
 i915_max_freq_read(struct file *filp,
 		   char __user *ubuf,
 		   size_t max,
@@ -1900,11 +1947,10 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
+	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
-	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1913,13 +1959,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
-	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
-	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
-	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
-	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
-	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1965,6 +2004,17 @@ int i915_debugfs_init(struct drm_minor *minor)
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_ring_stop",
+				  &i915_ring_stop_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_error_state",
+				  &i915_error_state_fops);
+	if (ret)
+		return ret;
 
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
@@ -1983,6 +2033,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
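The error-state rework above turns i915_error_state into a read/write debugfs file: opening it takes a reference on the captured drm_i915_error_state, reading dumps it through the seq_file, and writing anything to it calls i915_destroy_error_state() to drop the capture. A minimal sketch of clearing a stale capture from userspace, assuming the same /sys/kernel/debug/dri/0 path as the earlier example (an assumption, not part of this patch):

/* Sketch only: clear a captured GPU error state.
 * Path and minor number are assumptions; needs root and CONFIG_DEBUG_FS. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_error_state", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* i915_error_state_write() ignores the payload and just resets. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}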
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 068958cdd555..006ea473b57d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,17 +36,63 @@
36#include "i915_drm.h" 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "i915_trace.h" 38#include "i915_trace.h"
39#include "../../../platform/x86/intel_ips.h"
40#include <linux/pci.h> 39#include <linux/pci.h>
41#include <linux/vgaarb.h> 40#include <linux/vgaarb.h>
42#include <linux/acpi.h> 41#include <linux/acpi.h>
43#include <linux/pnp.h> 42#include <linux/pnp.h>
44#include <linux/vga_switcheroo.h> 43#include <linux/vga_switcheroo.h>
45#include <linux/slab.h> 44#include <linux/slab.h>
46#include <linux/module.h>
47#include <acpi/video.h> 45#include <acpi/video.h>
48#include <asm/pat.h> 46#include <asm/pat.h>
49 47
48#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
49
50#define BEGIN_LP_RING(n) \
51 intel_ring_begin(LP_RING(dev_priv), (n))
52
53#define OUT_RING(x) \
54 intel_ring_emit(LP_RING(dev_priv), x)
55
56#define ADVANCE_LP_RING() \
57 intel_ring_advance(LP_RING(dev_priv))
58
59/**
60 * Lock test for when it's just for synchronization of ring access.
61 *
62 * In that case, we don't need to do it when GEM is initialized as nobody else
63 * has access to the ring.
64 */
65#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
66 if (LP_RING(dev->dev_private)->obj == NULL) \
67 LOCK_TEST_WITH_RETURN(dev, file); \
68} while (0)
69
70static inline u32
71intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
72{
73 if (I915_NEED_GFX_HWS(dev_priv->dev))
74 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
75 else
76 return intel_read_status_page(LP_RING(dev_priv), reg);
77}
78
79#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
80#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
81#define I915_BREADCRUMB_INDEX 0x21
82
83void i915_update_dri1_breadcrumb(struct drm_device *dev)
84{
85 drm_i915_private_t *dev_priv = dev->dev_private;
86 struct drm_i915_master_private *master_priv;
87
88 if (dev->primary->master) {
89 master_priv = dev->primary->master->driver_priv;
90 if (master_priv->sarea_priv)
91 master_priv->sarea_priv->last_dispatch =
92 READ_BREADCRUMB(dev_priv);
93 }
94}
95
50static void i915_write_hws_pga(struct drm_device *dev) 96static void i915_write_hws_pga(struct drm_device *dev)
51{ 97{
52 drm_i915_private_t *dev_priv = dev->dev_private; 98 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -100,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
100 146
101 if (ring->status_page.gfx_addr) { 147 if (ring->status_page.gfx_addr) {
102 ring->status_page.gfx_addr = 0; 148 ring->status_page.gfx_addr = 0;
103 drm_core_ioremapfree(&dev_priv->hws_map, dev); 149 iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
104 } 150 }
105 151
106 /* Need to rewrite hardware status page */ 152 /* Need to rewrite hardware status page */
@@ -198,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
198 244
199 /* Allow hardware batchbuffers unless told otherwise. 245 /* Allow hardware batchbuffers unless told otherwise.
200 */ 246 */
201 dev_priv->allow_batchbuffer = 1; 247 dev_priv->dri1.allow_batchbuffer = 1;
202 248
203 return 0; 249 return 0;
204} 250}
@@ -210,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
210 256
211 DRM_DEBUG_DRIVER("%s\n", __func__); 257 DRM_DEBUG_DRIVER("%s\n", __func__);
212 258
213 if (ring->map.handle == NULL) { 259 if (ring->virtual_start == NULL) {
214 DRM_ERROR("can not ioremap virtual address for" 260 DRM_ERROR("can not ioremap virtual address for"
215 " ring buffer\n"); 261 " ring buffer\n");
216 return -ENOMEM; 262 return -ENOMEM;
@@ -239,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
239 drm_i915_init_t *init = data; 285 drm_i915_init_t *init = data;
240 int retcode = 0; 286 int retcode = 0;
241 287
288 if (drm_core_check_feature(dev, DRIVER_MODESET))
289 return -ENODEV;
290
242 switch (init->func) { 291 switch (init->func) {
243 case I915_INIT_DMA: 292 case I915_INIT_DMA:
244 retcode = i915_initialize(dev, init); 293 retcode = i915_initialize(dev, init);
@@ -581,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
581{ 630{
582 int ret; 631 int ret;
583 632
633 if (drm_core_check_feature(dev, DRIVER_MODESET))
634 return -ENODEV;
635
584 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 636 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
585 637
586 mutex_lock(&dev->struct_mutex); 638 mutex_lock(&dev->struct_mutex);
@@ -601,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
601 int ret; 653 int ret;
602 struct drm_clip_rect *cliprects = NULL; 654 struct drm_clip_rect *cliprects = NULL;
603 655
604 if (!dev_priv->allow_batchbuffer) { 656 if (drm_core_check_feature(dev, DRIVER_MODESET))
657 return -ENODEV;
658
659 if (!dev_priv->dri1.allow_batchbuffer) {
605 DRM_ERROR("Batchbuffer ioctl disabled\n"); 660 DRM_ERROR("Batchbuffer ioctl disabled\n");
606 return -EINVAL; 661 return -EINVAL;
607 } 662 }
@@ -658,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
658 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 713 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
659 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 714 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
660 715
716 if (drm_core_check_feature(dev, DRIVER_MODESET))
717 return -ENODEV;
718
661 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 719 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
662 720
663 if (cmdbuf->num_cliprects < 0) 721 if (cmdbuf->num_cliprects < 0)
@@ -709,11 +767,166 @@ fail_batch_free:
709 return ret; 767 return ret;
710} 768}
711 769
770static int i915_emit_irq(struct drm_device * dev)
771{
772 drm_i915_private_t *dev_priv = dev->dev_private;
773 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
774
775 i915_kernel_lost_context(dev);
776
777 DRM_DEBUG_DRIVER("\n");
778
779 dev_priv->counter++;
780 if (dev_priv->counter > 0x7FFFFFFFUL)
781 dev_priv->counter = 1;
782 if (master_priv->sarea_priv)
783 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
784
785 if (BEGIN_LP_RING(4) == 0) {
786 OUT_RING(MI_STORE_DWORD_INDEX);
787 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
788 OUT_RING(dev_priv->counter);
789 OUT_RING(MI_USER_INTERRUPT);
790 ADVANCE_LP_RING();
791 }
792
793 return dev_priv->counter;
794}
795
796static int i915_wait_irq(struct drm_device * dev, int irq_nr)
797{
798 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
799 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
800 int ret = 0;
801 struct intel_ring_buffer *ring = LP_RING(dev_priv);
802
803 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
804 READ_BREADCRUMB(dev_priv));
805
806 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
807 if (master_priv->sarea_priv)
808 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
809 return 0;
810 }
811
812 if (master_priv->sarea_priv)
813 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
814
815 if (ring->irq_get(ring)) {
816 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
817 READ_BREADCRUMB(dev_priv) >= irq_nr);
818 ring->irq_put(ring);
819 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
820 ret = -EBUSY;
821
822 if (ret == -EBUSY) {
823 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
824 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
825 }
826
827 return ret;
828}
829
830/* Needs the lock as it touches the ring.
831 */
832static int i915_irq_emit(struct drm_device *dev, void *data,
833 struct drm_file *file_priv)
834{
835 drm_i915_private_t *dev_priv = dev->dev_private;
836 drm_i915_irq_emit_t *emit = data;
837 int result;
838
839 if (drm_core_check_feature(dev, DRIVER_MODESET))
840 return -ENODEV;
841
842 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
843 DRM_ERROR("called with no initialization\n");
844 return -EINVAL;
845 }
846
847 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
848
849 mutex_lock(&dev->struct_mutex);
850 result = i915_emit_irq(dev);
851 mutex_unlock(&dev->struct_mutex);
852
853 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
854 DRM_ERROR("copy_to_user\n");
855 return -EFAULT;
856 }
857
858 return 0;
859}
860
861/* Doesn't need the hardware lock.
862 */
863static int i915_irq_wait(struct drm_device *dev, void *data,
864 struct drm_file *file_priv)
865{
866 drm_i915_private_t *dev_priv = dev->dev_private;
867 drm_i915_irq_wait_t *irqwait = data;
868
869 if (drm_core_check_feature(dev, DRIVER_MODESET))
870 return -ENODEV;
871
872 if (!dev_priv) {
873 DRM_ERROR("called with no initialization\n");
874 return -EINVAL;
875 }
876
877 return i915_wait_irq(dev, irqwait->irq_seq);
878}
879
880static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
881 struct drm_file *file_priv)
882{
883 drm_i915_private_t *dev_priv = dev->dev_private;
884 drm_i915_vblank_pipe_t *pipe = data;
885
886 if (drm_core_check_feature(dev, DRIVER_MODESET))
887 return -ENODEV;
888
889 if (!dev_priv) {
890 DRM_ERROR("called with no initialization\n");
891 return -EINVAL;
892 }
893
894 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
895
896 return 0;
897}
898
899/**
900 * Schedule buffer swap at given vertical blank.
901 */
902static int i915_vblank_swap(struct drm_device *dev, void *data,
903 struct drm_file *file_priv)
904{
905 /* The delayed swap mechanism was fundamentally racy, and has been
906 * removed. The model was that the client requested a delayed flip/swap
907 * from the kernel, then waited for vblank before continuing to perform
908 * rendering. The problem was that the kernel might wake the client
909 * up before it dispatched the vblank swap (since the lock has to be
910 * held while touching the ringbuffer), in which case the client would
911 * clear and start the next frame before the swap occurred, and
912 * flicker would occur in addition to likely missing the vblank.
913 *
914 * In the absence of this ioctl, userland falls back to a correct path
915 * of waiting for a vblank, then dispatching the swap on its own.
916 * Context switching to userland and back is plenty fast enough for
917 * meeting the requirements of vblank swapping.
918 */
919 return -EINVAL;
920}
921
712static int i915_flip_bufs(struct drm_device *dev, void *data, 922static int i915_flip_bufs(struct drm_device *dev, void *data,
713 struct drm_file *file_priv) 923 struct drm_file *file_priv)
714{ 924{
715 int ret; 925 int ret;
716 926
927 if (drm_core_check_feature(dev, DRIVER_MODESET))
928 return -ENODEV;
929
717 DRM_DEBUG_DRIVER("%s\n", __func__); 930 DRM_DEBUG_DRIVER("%s\n", __func__);
718 931
719 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 932 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -742,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
742 value = dev->pdev->irq ? 1 : 0; 955 value = dev->pdev->irq ? 1 : 0;
743 break; 956 break;
744 case I915_PARAM_ALLOW_BATCHBUFFER: 957 case I915_PARAM_ALLOW_BATCHBUFFER:
745 value = dev_priv->allow_batchbuffer ? 1 : 0; 958 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
746 break; 959 break;
747 case I915_PARAM_LAST_DISPATCH: 960 case I915_PARAM_LAST_DISPATCH:
748 value = READ_BREADCRUMB(dev_priv); 961 value = READ_BREADCRUMB(dev_priv);
@@ -751,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
751 value = dev->pci_device; 964 value = dev->pci_device;
752 break; 965 break;
753 case I915_PARAM_HAS_GEM: 966 case I915_PARAM_HAS_GEM:
754 value = dev_priv->has_gem; 967 value = 1;
755 break; 968 break;
756 case I915_PARAM_NUM_FENCES_AVAIL: 969 case I915_PARAM_NUM_FENCES_AVAIL:
757 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 970 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -764,7 +977,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
764 break; 977 break;
765 case I915_PARAM_HAS_EXECBUF2: 978 case I915_PARAM_HAS_EXECBUF2:
766 /* depends on GEM */ 979 /* depends on GEM */
767 value = dev_priv->has_gem; 980 value = 1;
768 break; 981 break;
769 case I915_PARAM_HAS_BSD: 982 case I915_PARAM_HAS_BSD:
770 value = HAS_BSD(dev); 983 value = HAS_BSD(dev);
@@ -822,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
822 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 1035 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
823 break; 1036 break;
824 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 1037 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
825 dev_priv->tex_lru_log_granularity = param->value;
826 break; 1038 break;
827 case I915_SETPARAM_ALLOW_BATCHBUFFER: 1039 case I915_SETPARAM_ALLOW_BATCHBUFFER:
828 dev_priv->allow_batchbuffer = param->value; 1040 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
829 break; 1041 break;
830 case I915_SETPARAM_NUM_USED_FENCES: 1042 case I915_SETPARAM_NUM_USED_FENCES:
831 if (param->value > dev_priv->num_fence_regs || 1043 if (param->value > dev_priv->num_fence_regs ||
@@ -850,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
850 drm_i915_hws_addr_t *hws = data; 1062 drm_i915_hws_addr_t *hws = data;
851 struct intel_ring_buffer *ring = LP_RING(dev_priv); 1063 struct intel_ring_buffer *ring = LP_RING(dev_priv);
852 1064
1065 if (drm_core_check_feature(dev, DRIVER_MODESET))
1066 return -ENODEV;
1067
853 if (!I915_NEED_GFX_HWS(dev)) 1068 if (!I915_NEED_GFX_HWS(dev))
854 return -EINVAL; 1069 return -EINVAL;
855 1070
@@ -867,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
867 1082
868 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1083 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
869 1084
870 dev_priv->hws_map.offset = dev->agp->base + hws->addr; 1085 dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
871 dev_priv->hws_map.size = 4*1024; 1086 4096);
872 dev_priv->hws_map.type = 0; 1087 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
873 dev_priv->hws_map.flags = 0;
874 dev_priv->hws_map.mtrr = 0;
875
876 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
877 if (dev_priv->hws_map.handle == NULL) {
878 i915_dma_cleanup(dev); 1088 i915_dma_cleanup(dev);
879 ring->status_page.gfx_addr = 0; 1089 ring->status_page.gfx_addr = 0;
880 DRM_ERROR("can not ioremap virtual address for" 1090 DRM_ERROR("can not ioremap virtual address for"
881 " G33 hw status page\n"); 1091 " G33 hw status page\n");
882 return -ENOMEM; 1092 return -ENOMEM;
883 } 1093 }
884 ring->status_page.page_addr = 1094
885 (void __force __iomem *)dev_priv->hws_map.handle; 1095 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
886 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
887 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 1096 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
888 1097
889 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 1098 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1019,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
1019 release_resource(&dev_priv->mch_res); 1228 release_resource(&dev_priv->mch_res);
1020} 1229}
1021 1230
1022#define PTE_ADDRESS_MASK 0xfffff000
1023#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1024#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
1025#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
1026#define PTE_MAPPING_TYPE_CACHED (3 << 1)
1027#define PTE_MAPPING_TYPE_MASK (3 << 1)
1028#define PTE_VALID (1 << 0)
1029
1030/**
1031 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
1032 * a physical one
1033 * @dev: drm device
1034 * @offset: address to translate
1035 *
1036 * Some chip functions require allocations from stolen space and need the
1037 * physical address of the memory in question.
1038 */
1039static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
1040{
1041 struct drm_i915_private *dev_priv = dev->dev_private;
1042 struct pci_dev *pdev = dev_priv->bridge_dev;
1043 u32 base;
1044
1045#if 0
1046 /* On the machines I have tested the Graphics Base of Stolen Memory
1047 * is unreliable, so compute the base by subtracting the stolen memory
1048 * from the Top of Low Usable DRAM which is where the BIOS places
1049 * the graphics stolen memory.
1050 */
1051 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1052 /* top 32bits are reserved = 0 */
1053 pci_read_config_dword(pdev, 0xA4, &base);
1054 } else {
1055 /* XXX presume 8xx is the same as i915 */
1056 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
1057 }
1058#else
1059 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1060 u16 val;
1061 pci_read_config_word(pdev, 0xb0, &val);
1062 base = val >> 4 << 20;
1063 } else {
1064 u8 val;
1065 pci_read_config_byte(pdev, 0x9c, &val);
1066 base = val >> 3 << 27;
1067 }
1068 base -= dev_priv->mm.gtt->stolen_size;
1069#endif
1070
1071 return base + offset;
1072}
1073
1074static void i915_warn_stolen(struct drm_device *dev)
1075{
1076 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
1077 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
1078}
1079
1080static void i915_setup_compression(struct drm_device *dev, int size)
1081{
1082 struct drm_i915_private *dev_priv = dev->dev_private;
1083 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1084 unsigned long cfb_base;
1085 unsigned long ll_base = 0;
1086
1087 /* Just in case the BIOS is doing something questionable. */
1088 intel_disable_fbc(dev);
1089
1090 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1091 if (compressed_fb)
1092 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1093 if (!compressed_fb)
1094 goto err;
1095
1096 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1097 if (!cfb_base)
1098 goto err_fb;
1099
1100 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1101 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1102 4096, 4096, 0);
1103 if (compressed_llb)
1104 compressed_llb = drm_mm_get_block(compressed_llb,
1105 4096, 4096);
1106 if (!compressed_llb)
1107 goto err_fb;
1108
1109 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1110 if (!ll_base)
1111 goto err_llb;
1112 }
1113
1114 dev_priv->cfb_size = size;
1115
1116 dev_priv->compressed_fb = compressed_fb;
1117 if (HAS_PCH_SPLIT(dev))
1118 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1119 else if (IS_GM45(dev)) {
1120 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1121 } else {
1122 I915_WRITE(FBC_CFB_BASE, cfb_base);
1123 I915_WRITE(FBC_LL_BASE, ll_base);
1124 dev_priv->compressed_llb = compressed_llb;
1125 }
1126
1127 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1128 cfb_base, ll_base, size >> 20);
1129 return;
1130
1131err_llb:
1132 drm_mm_put_block(compressed_llb);
1133err_fb:
1134 drm_mm_put_block(compressed_fb);
1135err:
1136 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1137 i915_warn_stolen(dev);
1138}
1139
1140static void i915_cleanup_compression(struct drm_device *dev)
1141{
1142 struct drm_i915_private *dev_priv = dev->dev_private;
1143
1144 drm_mm_put_block(dev_priv->compressed_fb);
1145 if (dev_priv->compressed_llb)
1146 drm_mm_put_block(dev_priv->compressed_llb);
1147}
1148
1149/* true = enable decode, false = disable decoder */ 1231/* true = enable decode, false = disable decoder */
1150static unsigned int i915_vga_set_decode(void *cookie, bool state) 1232static unsigned int i915_vga_set_decode(void *cookie, bool state)
1151{ 1233{
@@ -1189,88 +1271,6 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1189 return can_switch; 1271 return can_switch;
1190} 1272}
1191 1273
1192static bool
1193intel_enable_ppgtt(struct drm_device *dev)
1194{
1195 if (i915_enable_ppgtt >= 0)
1196 return i915_enable_ppgtt;
1197
1198#ifdef CONFIG_INTEL_IOMMU
1199 /* Disable ppgtt on SNB if VT-d is on. */
1200 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
1201 return false;
1202#endif
1203
1204 return true;
1205}
1206
1207static int i915_load_gem_init(struct drm_device *dev)
1208{
1209 struct drm_i915_private *dev_priv = dev->dev_private;
1210 unsigned long prealloc_size, gtt_size, mappable_size;
1211 int ret;
1212
1213 prealloc_size = dev_priv->mm.gtt->stolen_size;
1214 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1215 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1216
1217 /* Basic memrange allocator for stolen space */
1218 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1219
1220 mutex_lock(&dev->struct_mutex);
1221 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
1222 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1223 * aperture accordingly when using aliasing ppgtt. */
1224 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
1225
1226 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
1227
1228 ret = i915_gem_init_aliasing_ppgtt(dev);
1229 if (ret) {
1230 mutex_unlock(&dev->struct_mutex);
1231 return ret;
1232 }
1233 } else {
1234 /* Let GEM Manage all of the aperture.
1235 *
1236 * However, leave one page at the end still bound to the scratch
1237 * page. There are a number of places where the hardware
1238 * apparently prefetches past the end of the object, and we've
1239 * seen multiple hangs with the GPU head pointer stuck in a
1240 * batchbuffer bound at the last page of the aperture. One page
1241 * should be enough to keep any prefetching inside of the
1242 * aperture.
1243 */
1244 i915_gem_init_global_gtt(dev, 0, mappable_size,
1245 gtt_size);
1246 }
1247
1248 ret = i915_gem_init_hw(dev);
1249 mutex_unlock(&dev->struct_mutex);
1250 if (ret) {
1251 i915_gem_cleanup_aliasing_ppgtt(dev);
1252 return ret;
1253 }
1254
1255 /* Try to set up FBC with a reasonable compressed buffer size */
1256 if (I915_HAS_FBC(dev) && i915_powersave) {
1257 int cfb_size;
1258
1259 /* Leave 1M for line length buffer & misc. */
1260
1261 /* Try to get a 32M buffer... */
1262 if (prealloc_size > (36*1024*1024))
1263 cfb_size = 32*1024*1024;
1264 else /* fall back to 7/8 of the stolen space */
1265 cfb_size = prealloc_size * 7 / 8;
1266 i915_setup_compression(dev, cfb_size);
1267 }
1268
1269 /* Allow hardware batchbuffers unless told otherwise. */
1270 dev_priv->allow_batchbuffer = 1;
1271 return 0;
1272}
1273
1274static int i915_load_modeset_init(struct drm_device *dev) 1274static int i915_load_modeset_init(struct drm_device *dev)
1275{ 1275{
1276 struct drm_i915_private *dev_priv = dev->dev_private; 1276 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1300,15 +1300,18 @@ static int i915_load_modeset_init(struct drm_device *dev)
1300 if (ret) 1300 if (ret)
1301 goto cleanup_vga_client; 1301 goto cleanup_vga_client;
1302 1302
1303 /* IIR "flip pending" bit means done if this bit is set */ 1303 /* Initialise stolen first so that we may reserve preallocated
1304 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) 1304 * objects for the BIOS to KMS transition.
1305 dev_priv->flip_pending_is_done = true; 1305 */
1306 ret = i915_gem_init_stolen(dev);
1307 if (ret)
1308 goto cleanup_vga_switcheroo;
1306 1309
1307 intel_modeset_init(dev); 1310 intel_modeset_init(dev);
1308 1311
1309 ret = i915_load_gem_init(dev); 1312 ret = i915_gem_init(dev);
1310 if (ret) 1313 if (ret)
1311 goto cleanup_vga_switcheroo; 1314 goto cleanup_gem_stolen;
1312 1315
1313 intel_modeset_gem_init(dev); 1316 intel_modeset_gem_init(dev);
1314 1317
@@ -1338,6 +1341,8 @@ cleanup_gem:
1338 i915_gem_cleanup_ringbuffer(dev); 1341 i915_gem_cleanup_ringbuffer(dev);
1339 mutex_unlock(&dev->struct_mutex); 1342 mutex_unlock(&dev->struct_mutex);
1340 i915_gem_cleanup_aliasing_ppgtt(dev); 1343 i915_gem_cleanup_aliasing_ppgtt(dev);
1344cleanup_gem_stolen:
1345 i915_gem_cleanup_stolen(dev);
1341cleanup_vga_switcheroo: 1346cleanup_vga_switcheroo:
1342 vga_switcheroo_unregister_client(dev->pdev); 1347 vga_switcheroo_unregister_client(dev->pdev);
1343cleanup_vga_client: 1348cleanup_vga_client:
@@ -1370,575 +1375,6 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1370 master->driver_priv = NULL; 1375 master->driver_priv = NULL;
1371} 1376}
1372 1377
1373static void i915_pineview_get_mem_freq(struct drm_device *dev)
1374{
1375 drm_i915_private_t *dev_priv = dev->dev_private;
1376 u32 tmp;
1377
1378 tmp = I915_READ(CLKCFG);
1379
1380 switch (tmp & CLKCFG_FSB_MASK) {
1381 case CLKCFG_FSB_533:
1382 dev_priv->fsb_freq = 533; /* 133*4 */
1383 break;
1384 case CLKCFG_FSB_800:
1385 dev_priv->fsb_freq = 800; /* 200*4 */
1386 break;
1387 case CLKCFG_FSB_667:
1388 dev_priv->fsb_freq = 667; /* 167*4 */
1389 break;
1390 case CLKCFG_FSB_400:
1391 dev_priv->fsb_freq = 400; /* 100*4 */
1392 break;
1393 }
1394
1395 switch (tmp & CLKCFG_MEM_MASK) {
1396 case CLKCFG_MEM_533:
1397 dev_priv->mem_freq = 533;
1398 break;
1399 case CLKCFG_MEM_667:
1400 dev_priv->mem_freq = 667;
1401 break;
1402 case CLKCFG_MEM_800:
1403 dev_priv->mem_freq = 800;
1404 break;
1405 }
1406
1407 /* detect pineview DDR3 setting */
1408 tmp = I915_READ(CSHRDDR3CTL);
1409 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
1410}
1411
1412static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1413{
1414 drm_i915_private_t *dev_priv = dev->dev_private;
1415 u16 ddrpll, csipll;
1416
1417 ddrpll = I915_READ16(DDRMPLL1);
1418 csipll = I915_READ16(CSIPLL0);
1419
1420 switch (ddrpll & 0xff) {
1421 case 0xc:
1422 dev_priv->mem_freq = 800;
1423 break;
1424 case 0x10:
1425 dev_priv->mem_freq = 1066;
1426 break;
1427 case 0x14:
1428 dev_priv->mem_freq = 1333;
1429 break;
1430 case 0x18:
1431 dev_priv->mem_freq = 1600;
1432 break;
1433 default:
1434 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
1435 ddrpll & 0xff);
1436 dev_priv->mem_freq = 0;
1437 break;
1438 }
1439
1440 dev_priv->r_t = dev_priv->mem_freq;
1441
1442 switch (csipll & 0x3ff) {
1443 case 0x00c:
1444 dev_priv->fsb_freq = 3200;
1445 break;
1446 case 0x00e:
1447 dev_priv->fsb_freq = 3733;
1448 break;
1449 case 0x010:
1450 dev_priv->fsb_freq = 4266;
1451 break;
1452 case 0x012:
1453 dev_priv->fsb_freq = 4800;
1454 break;
1455 case 0x014:
1456 dev_priv->fsb_freq = 5333;
1457 break;
1458 case 0x016:
1459 dev_priv->fsb_freq = 5866;
1460 break;
1461 case 0x018:
1462 dev_priv->fsb_freq = 6400;
1463 break;
1464 default:
1465 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
1466 csipll & 0x3ff);
1467 dev_priv->fsb_freq = 0;
1468 break;
1469 }
1470
1471 if (dev_priv->fsb_freq == 3200) {
1472 dev_priv->c_m = 0;
1473 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
1474 dev_priv->c_m = 1;
1475 } else {
1476 dev_priv->c_m = 2;
1477 }
1478}
1479
1480static const struct cparams {
1481 u16 i;
1482 u16 t;
1483 u16 m;
1484 u16 c;
1485} cparams[] = {
1486 { 1, 1333, 301, 28664 },
1487 { 1, 1066, 294, 24460 },
1488 { 1, 800, 294, 25192 },
1489 { 0, 1333, 276, 27605 },
1490 { 0, 1066, 276, 27605 },
1491 { 0, 800, 231, 23784 },
1492};
1493
1494unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1495{
1496 u64 total_count, diff, ret;
1497 u32 count1, count2, count3, m = 0, c = 0;
1498 unsigned long now = jiffies_to_msecs(jiffies), diff1;
1499 int i;
1500
1501 diff1 = now - dev_priv->last_time1;
1502
1503 /* Prevent division-by-zero if we are asking too fast.
1504 * Also, we don't get interesting results if we are polling
1505 * faster than once in 10ms, so just return the saved value
1506 * in such cases.
1507 */
1508 if (diff1 <= 10)
1509 return dev_priv->chipset_power;
1510
1511 count1 = I915_READ(DMIEC);
1512 count2 = I915_READ(DDREC);
1513 count3 = I915_READ(CSIEC);
1514
1515 total_count = count1 + count2 + count3;
1516
1517 /* FIXME: handle per-counter overflow */
1518 if (total_count < dev_priv->last_count1) {
1519 diff = ~0UL - dev_priv->last_count1;
1520 diff += total_count;
1521 } else {
1522 diff = total_count - dev_priv->last_count1;
1523 }
1524
1525 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
1526 if (cparams[i].i == dev_priv->c_m &&
1527 cparams[i].t == dev_priv->r_t) {
1528 m = cparams[i].m;
1529 c = cparams[i].c;
1530 break;
1531 }
1532 }
1533
1534 diff = div_u64(diff, diff1);
1535 ret = ((m * diff) + c);
1536 ret = div_u64(ret, 10);
1537
1538 dev_priv->last_count1 = total_count;
1539 dev_priv->last_time1 = now;
1540
1541 dev_priv->chipset_power = ret;
1542
1543 return ret;
1544}
1545
1546unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1547{
1548 unsigned long m, x, b;
1549 u32 tsfs;
1550
1551 tsfs = I915_READ(TSFS);
1552
1553 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
1554 x = I915_READ8(TR1);
1555
1556 b = tsfs & TSFS_INTR_MASK;
1557
1558 return ((m * x) / 127) - b;
1559}
1560
1561static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1562{
1563 static const struct v_table {
1564 u16 vd; /* in .1 mil */
1565 u16 vm; /* in .1 mil */
1566 } v_table[] = {
1567 { 0, 0, },
1568 { 375, 0, },
1569 { 500, 0, },
1570 { 625, 0, },
1571 { 750, 0, },
1572 { 875, 0, },
1573 { 1000, 0, },
1574 { 1125, 0, },
1575 { 4125, 3000, },
1576 { 4125, 3000, },
1577 { 4125, 3000, },
1578 { 4125, 3000, },
1579 { 4125, 3000, },
1580 { 4125, 3000, },
1581 { 4125, 3000, },
1582 { 4125, 3000, },
1583 { 4125, 3000, },
1584 { 4125, 3000, },
1585 { 4125, 3000, },
1586 { 4125, 3000, },
1587 { 4125, 3000, },
1588 { 4125, 3000, },
1589 { 4125, 3000, },
1590 { 4125, 3000, },
1591 { 4125, 3000, },
1592 { 4125, 3000, },
1593 { 4125, 3000, },
1594 { 4125, 3000, },
1595 { 4125, 3000, },
1596 { 4125, 3000, },
1597 { 4125, 3000, },
1598 { 4125, 3000, },
1599 { 4250, 3125, },
1600 { 4375, 3250, },
1601 { 4500, 3375, },
1602 { 4625, 3500, },
1603 { 4750, 3625, },
1604 { 4875, 3750, },
1605 { 5000, 3875, },
1606 { 5125, 4000, },
1607 { 5250, 4125, },
1608 { 5375, 4250, },
1609 { 5500, 4375, },
1610 { 5625, 4500, },
1611 { 5750, 4625, },
1612 { 5875, 4750, },
1613 { 6000, 4875, },
1614 { 6125, 5000, },
1615 { 6250, 5125, },
1616 { 6375, 5250, },
1617 { 6500, 5375, },
1618 { 6625, 5500, },
1619 { 6750, 5625, },
1620 { 6875, 5750, },
1621 { 7000, 5875, },
1622 { 7125, 6000, },
1623 { 7250, 6125, },
1624 { 7375, 6250, },
1625 { 7500, 6375, },
1626 { 7625, 6500, },
1627 { 7750, 6625, },
1628 { 7875, 6750, },
1629 { 8000, 6875, },
1630 { 8125, 7000, },
1631 { 8250, 7125, },
1632 { 8375, 7250, },
1633 { 8500, 7375, },
1634 { 8625, 7500, },
1635 { 8750, 7625, },
1636 { 8875, 7750, },
1637 { 9000, 7875, },
1638 { 9125, 8000, },
1639 { 9250, 8125, },
1640 { 9375, 8250, },
1641 { 9500, 8375, },
1642 { 9625, 8500, },
1643 { 9750, 8625, },
1644 { 9875, 8750, },
1645 { 10000, 8875, },
1646 { 10125, 9000, },
1647 { 10250, 9125, },
1648 { 10375, 9250, },
1649 { 10500, 9375, },
1650 { 10625, 9500, },
1651 { 10750, 9625, },
1652 { 10875, 9750, },
1653 { 11000, 9875, },
1654 { 11125, 10000, },
1655 { 11250, 10125, },
1656 { 11375, 10250, },
1657 { 11500, 10375, },
1658 { 11625, 10500, },
1659 { 11750, 10625, },
1660 { 11875, 10750, },
1661 { 12000, 10875, },
1662 { 12125, 11000, },
1663 { 12250, 11125, },
1664 { 12375, 11250, },
1665 { 12500, 11375, },
1666 { 12625, 11500, },
1667 { 12750, 11625, },
1668 { 12875, 11750, },
1669 { 13000, 11875, },
1670 { 13125, 12000, },
1671 { 13250, 12125, },
1672 { 13375, 12250, },
1673 { 13500, 12375, },
1674 { 13625, 12500, },
1675 { 13750, 12625, },
1676 { 13875, 12750, },
1677 { 14000, 12875, },
1678 { 14125, 13000, },
1679 { 14250, 13125, },
1680 { 14375, 13250, },
1681 { 14500, 13375, },
1682 { 14625, 13500, },
1683 { 14750, 13625, },
1684 { 14875, 13750, },
1685 { 15000, 13875, },
1686 { 15125, 14000, },
1687 { 15250, 14125, },
1688 { 15375, 14250, },
1689 { 15500, 14375, },
1690 { 15625, 14500, },
1691 { 15750, 14625, },
1692 { 15875, 14750, },
1693 { 16000, 14875, },
1694 { 16125, 15000, },
1695 };
1696 if (dev_priv->info->is_mobile)
1697 return v_table[pxvid].vm;
1698 else
1699 return v_table[pxvid].vd;
1700}
1701
1702void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1703{
1704 struct timespec now, diff1;
1705 u64 diff;
1706 unsigned long diffms;
1707 u32 count;
1708
1709 if (dev_priv->info->gen != 5)
1710 return;
1711
1712 getrawmonotonic(&now);
1713 diff1 = timespec_sub(now, dev_priv->last_time2);
1714
1715 /* Don't divide by 0 */
1716 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
1717 if (!diffms)
1718 return;
1719
1720 count = I915_READ(GFXEC);
1721
1722 if (count < dev_priv->last_count2) {
1723 diff = ~0UL - dev_priv->last_count2;
1724 diff += count;
1725 } else {
1726 diff = count - dev_priv->last_count2;
1727 }
1728
1729 dev_priv->last_count2 = count;
1730 dev_priv->last_time2 = now;
1731
1732 /* More magic constants... */
1733 diff = diff * 1181;
1734 diff = div_u64(diff, diffms * 10);
1735 dev_priv->gfx_power = diff;
1736}
1737
1738unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
1739{
1740 unsigned long t, corr, state1, corr2, state2;
1741 u32 pxvid, ext_v;
1742
1743 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
1744 pxvid = (pxvid >> 24) & 0x7f;
1745 ext_v = pvid_to_extvid(dev_priv, pxvid);
1746
1747 state1 = ext_v;
1748
1749 t = i915_mch_val(dev_priv);
1750
1751 /* Revel in the empirically derived constants */
1752
1753 /* Correction factor in 1/100000 units */
1754 if (t > 80)
1755 corr = ((t * 2349) + 135940);
1756 else if (t >= 50)
1757 corr = ((t * 964) + 29317);
1758 else /* < 50 */
1759 corr = ((t * 301) + 1004);
1760
1761 corr = corr * ((150142 * state1) / 10000 - 78642);
1762 corr /= 100000;
1763 corr2 = (corr * dev_priv->corr);
1764
1765 state2 = (corr2 * state1) / 10000;
1766 state2 /= 100; /* convert to mW */
1767
1768 i915_update_gfx_val(dev_priv);
1769
1770 return dev_priv->gfx_power + state2;
1771}
1772
1773/* Global for IPS driver to get at the current i915 device */
1774static struct drm_i915_private *i915_mch_dev;
1775/*
1776 * Lock protecting IPS related data structures
1777 * - i915_mch_dev
1778 * - dev_priv->max_delay
1779 * - dev_priv->min_delay
1780 * - dev_priv->fmax
1781 * - dev_priv->gpu_busy
1782 */
1783static DEFINE_SPINLOCK(mchdev_lock);
1784
1785/**
1786 * i915_read_mch_val - return value for IPS use
1787 *
1788 * Calculate and return a value for the IPS driver to use when deciding whether
1789 * we have thermal and power headroom to increase CPU or GPU power budget.
1790 */
1791unsigned long i915_read_mch_val(void)
1792{
1793 struct drm_i915_private *dev_priv;
1794 unsigned long chipset_val, graphics_val, ret = 0;
1795
1796 spin_lock(&mchdev_lock);
1797 if (!i915_mch_dev)
1798 goto out_unlock;
1799 dev_priv = i915_mch_dev;
1800
1801 chipset_val = i915_chipset_val(dev_priv);
1802 graphics_val = i915_gfx_val(dev_priv);
1803
1804 ret = chipset_val + graphics_val;
1805
1806out_unlock:
1807 spin_unlock(&mchdev_lock);
1808
1809 return ret;
1810}
1811EXPORT_SYMBOL_GPL(i915_read_mch_val);
1812
1813/**
1814 * i915_gpu_raise - raise GPU frequency limit
1815 *
1816 * Raise the limit; IPS indicates we have thermal headroom.
1817 */
1818bool i915_gpu_raise(void)
1819{
1820 struct drm_i915_private *dev_priv;
1821 bool ret = true;
1822
1823 spin_lock(&mchdev_lock);
1824 if (!i915_mch_dev) {
1825 ret = false;
1826 goto out_unlock;
1827 }
1828 dev_priv = i915_mch_dev;
1829
1830 if (dev_priv->max_delay > dev_priv->fmax)
1831 dev_priv->max_delay--;
1832
1833out_unlock:
1834 spin_unlock(&mchdev_lock);
1835
1836 return ret;
1837}
1838EXPORT_SYMBOL_GPL(i915_gpu_raise);
1839
1840/**
1841 * i915_gpu_lower - lower GPU frequency limit
1842 *
1843 * IPS indicates we're close to a thermal limit, so throttle back the GPU
1844 * frequency maximum.
1845 */
1846bool i915_gpu_lower(void)
1847{
1848 struct drm_i915_private *dev_priv;
1849 bool ret = true;
1850
1851 spin_lock(&mchdev_lock);
1852 if (!i915_mch_dev) {
1853 ret = false;
1854 goto out_unlock;
1855 }
1856 dev_priv = i915_mch_dev;
1857
1858 if (dev_priv->max_delay < dev_priv->min_delay)
1859 dev_priv->max_delay++;
1860
1861out_unlock:
1862 spin_unlock(&mchdev_lock);
1863
1864 return ret;
1865}
1866EXPORT_SYMBOL_GPL(i915_gpu_lower);
1867
1868/**
1869 * i915_gpu_busy - indicate GPU business to IPS
1870 *
1871 * Tell the IPS driver whether or not the GPU is busy.
1872 */
1873bool i915_gpu_busy(void)
1874{
1875 struct drm_i915_private *dev_priv;
1876 bool ret = false;
1877
1878 spin_lock(&mchdev_lock);
1879 if (!i915_mch_dev)
1880 goto out_unlock;
1881 dev_priv = i915_mch_dev;
1882
1883 ret = dev_priv->busy;
1884
1885out_unlock:
1886 spin_unlock(&mchdev_lock);
1887
1888 return ret;
1889}
1890EXPORT_SYMBOL_GPL(i915_gpu_busy);
1891
1892/**
1893 * i915_gpu_turbo_disable - disable graphics turbo
1894 *
1895 * Disable graphics turbo by resetting the max frequency and setting the
1896 * current frequency to the default.
1897 */
1898bool i915_gpu_turbo_disable(void)
1899{
1900 struct drm_i915_private *dev_priv;
1901 bool ret = true;
1902
1903 spin_lock(&mchdev_lock);
1904 if (!i915_mch_dev) {
1905 ret = false;
1906 goto out_unlock;
1907 }
1908 dev_priv = i915_mch_dev;
1909
1910 dev_priv->max_delay = dev_priv->fstart;
1911
1912 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
1913 ret = false;
1914
1915out_unlock:
1916 spin_unlock(&mchdev_lock);
1917
1918 return ret;
1919}
1920EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
1921
1922/**
1923 * Tells the intel_ips driver that the i915 driver is now loaded, if
1924 * IPS got loaded first.
1925 *
1926 * This awkward dance is so that neither module has to depend on the
1927 * other in order for IPS to do the appropriate communication of
1928 * GPU turbo limits to i915.
1929 */
1930static void
1931ips_ping_for_i915_load(void)
1932{
1933 void (*link)(void);
1934
1935 link = symbol_get(ips_link_to_i915_driver);
1936 if (link) {
1937 link();
1938 symbol_put(ips_link_to_i915_driver);
1939 }
1940}
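For context (an illustration, not part of this patch): the symbol_get()/symbol_put() handshake above has a provider half in the intel_ips driver, which exports ips_link_to_i915_driver from drivers/platform/x86/intel_ips.c. A hedged sketch of that side, with an illustrative body:

	/* Called by i915 when it finishes loading after intel_ips did. */
	void ips_link_to_i915_driver(void)
	{
		/* e.g. note that i915 is now present so the IPS monitor
		 * thread retries i915_read_mch_val() and friends. */
	}
	EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);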
1941
1942static void 1378static void
1943i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, 1379i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1944 unsigned long size) 1380 unsigned long size)
@@ -2072,9 +1508,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2072 goto out_mtrrfree; 1508 goto out_mtrrfree;
2073 } 1509 }
2074 1510
2075 /* enable GEM by default */
2076 dev_priv->has_gem = 1;
2077
2078 intel_irq_init(dev); 1511 intel_irq_init(dev);
2079 1512
2080 /* Try to make sure MCHBAR is enabled before poking at it */ 1513 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -2094,11 +1527,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2094 goto out_gem_unload; 1527 goto out_gem_unload;
2095 } 1528 }
2096 1529
2097 if (IS_PINEVIEW(dev))
2098 i915_pineview_get_mem_freq(dev);
2099 else if (IS_GEN5(dev))
2100 i915_ironlake_get_mem_freq(dev);
2101
2102 /* On the 945G/GM, the chipset reports the MSI capability on the 1530 /* On the 945G/GM, the chipset reports the MSI capability on the
2103 * integrated graphics even though the support isn't actually there 1531 * integrated graphics even though the support isn't actually there
2104 * according to the published specs. It doesn't appear to function 1532 * according to the published specs. It doesn't appear to function
@@ -2151,14 +1579,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2151 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 1579 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2152 (unsigned long) dev); 1580 (unsigned long) dev);
2153 1581
2154 if (IS_GEN5(dev)) { 1582 if (IS_GEN5(dev))
2155 spin_lock(&mchdev_lock); 1583 intel_gpu_ips_init(dev_priv);
2156 i915_mch_dev = dev_priv;
2157 dev_priv->mchdev_lock = &mchdev_lock;
2158 spin_unlock(&mchdev_lock);
2159
2160 ips_ping_for_i915_load();
2161 }
2162 1584
2163 return 0; 1585 return 0;
2164 1586
@@ -2193,9 +1615,7 @@ int i915_driver_unload(struct drm_device *dev)
2193 struct drm_i915_private *dev_priv = dev->dev_private; 1615 struct drm_i915_private *dev_priv = dev->dev_private;
2194 int ret; 1616 int ret;
2195 1617
2196 spin_lock(&mchdev_lock); 1618 intel_gpu_ips_teardown();
2197 i915_mch_dev = NULL;
2198 spin_unlock(&mchdev_lock);
2199 1619
2200 i915_teardown_sysfs(dev); 1620 i915_teardown_sysfs(dev);
2201 1621
@@ -2203,9 +1623,10 @@ int i915_driver_unload(struct drm_device *dev)
2203 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1623 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2204 1624
2205 mutex_lock(&dev->struct_mutex); 1625 mutex_lock(&dev->struct_mutex);
2206 ret = i915_gpu_idle(dev, true); 1626 ret = i915_gpu_idle(dev);
2207 if (ret) 1627 if (ret)
2208 DRM_ERROR("failed to idle hardware: %d\n", ret); 1628 DRM_ERROR("failed to idle hardware: %d\n", ret);
1629 i915_gem_retire_requests(dev);
2209 mutex_unlock(&dev->struct_mutex); 1630 mutex_unlock(&dev->struct_mutex);
2210 1631
2211 /* Cancel the retire work handler, which should be idle now. */ 1632 /* Cancel the retire work handler, which should be idle now. */
@@ -2257,8 +1678,7 @@ int i915_driver_unload(struct drm_device *dev)
2257 i915_gem_cleanup_ringbuffer(dev); 1678 i915_gem_cleanup_ringbuffer(dev);
2258 mutex_unlock(&dev->struct_mutex); 1679 mutex_unlock(&dev->struct_mutex);
2259 i915_gem_cleanup_aliasing_ppgtt(dev); 1680 i915_gem_cleanup_aliasing_ppgtt(dev);
2260 if (I915_HAS_FBC(dev) && i915_powersave) 1681 i915_gem_cleanup_stolen(dev);
2261 i915_cleanup_compression(dev);
2262 drm_mm_takedown(&dev_priv->mm.stolen); 1682 drm_mm_takedown(&dev_priv->mm.stolen);
2263 1683
2264 intel_cleanup_overlay(dev); 1684 intel_cleanup_overlay(dev);
@@ -2351,7 +1771,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
2351 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1771 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2352 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 1772 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2353 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1773 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2354 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1774 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2355 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 1775 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
2356 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 1776 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2357 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1777 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3effcf71e1b1..77b7a50e2014 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -377,18 +377,23 @@ void intel_detect_pch(struct drm_device *dev)
377 377
378 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 378 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
379 dev_priv->pch_type = PCH_IBX; 379 dev_priv->pch_type = PCH_IBX;
380 dev_priv->num_pch_pll = 2;
380 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 381 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
381 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 382 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
382 dev_priv->pch_type = PCH_CPT; 383 dev_priv->pch_type = PCH_CPT;
384 dev_priv->num_pch_pll = 2;
383 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 385 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
384 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 386 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
385 /* PantherPoint is CPT compatible */ 387 /* PantherPoint is CPT compatible */
386 dev_priv->pch_type = PCH_CPT; 388 dev_priv->pch_type = PCH_CPT;
389 dev_priv->num_pch_pll = 2;
387 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 390 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
388 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 391 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
389 dev_priv->pch_type = PCH_LPT; 392 dev_priv->pch_type = PCH_LPT;
393 dev_priv->num_pch_pll = 0;
390 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 394 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
391 } 395 }
396 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
392 } 397 }
393 pci_dev_put(pch); 398 pci_dev_put(pch);
394 } 399 }
@@ -433,7 +438,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
433 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) 438 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
434 udelay(10); 439 udelay(10);
435 440
436 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); 441 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
437 POSTING_READ(FORCEWAKE_MT); 442 POSTING_READ(FORCEWAKE_MT);
438 443
439 count = 0; 444 count = 0;
@@ -475,7 +480,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
475 480
476void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 481void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
477{ 482{
478 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); 483 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
479 /* The below doubles as a POSTING_READ */ 484 /* The below doubles as a POSTING_READ */
480 gen6_gt_check_fifodbg(dev_priv); 485 gen6_gt_check_fifodbg(dev_priv);
481} 486}
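For context: FORCEWAKE_MT is a masked-bit register, where the upper 16 bits select which of the lower 16 bits a write actually modifies. The helpers introduced in this hunk expand roughly as follows (per i915_reg.h):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask bit(s) a and set them */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask bit(s) a and clear them */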
@@ -668,7 +673,7 @@ int i915_resume(struct drm_device *dev)
668 return 0; 673 return 0;
669} 674}
670 675
671static int i8xx_do_reset(struct drm_device *dev, u8 flags) 676static int i8xx_do_reset(struct drm_device *dev)
672{ 677{
673 struct drm_i915_private *dev_priv = dev->dev_private; 678 struct drm_i915_private *dev_priv = dev->dev_private;
674 679
@@ -702,11 +707,12 @@ static int i965_reset_complete(struct drm_device *dev)
702{ 707{
703 u8 gdrst; 708 u8 gdrst;
704 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 709 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
705 return gdrst & 0x1; 710 return (gdrst & GRDOM_RESET_ENABLE) == 0;
706} 711}
707 712
708static int i965_do_reset(struct drm_device *dev, u8 flags) 713static int i965_do_reset(struct drm_device *dev)
709{ 714{
715 int ret;
710 u8 gdrst; 716 u8 gdrst;
711 717
712 /* 718 /*
@@ -715,20 +721,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
715 * triggers the reset; when done, the hardware will clear it. 721 * triggers the reset; when done, the hardware will clear it.
716 */ 722 */
717 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 723 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
718 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); 724 pci_write_config_byte(dev->pdev, I965_GDRST,
725 gdrst | GRDOM_RENDER |
726 GRDOM_RESET_ENABLE);
727 ret = wait_for(i965_reset_complete(dev), 500);
728 if (ret)
729 return ret;
730
731 /* We can't reset render&media without also resetting display ... */
732 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
733 pci_write_config_byte(dev->pdev, I965_GDRST,
734 gdrst | GRDOM_MEDIA |
735 GRDOM_RESET_ENABLE);
719 736
720 return wait_for(i965_reset_complete(dev), 500); 737 return wait_for(i965_reset_complete(dev), 500);
721} 738}
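For context: wait_for() is i915's own polling helper from intel_drv.h rather than a core kernel primitive. Roughly, it behaves like the sketch below (simplified, sleeping poll only):

	/* Poll COND about once per millisecond for up to MS milliseconds;
	 * evaluates to 0 once COND becomes true, -ETIMEDOUT otherwise. */
	#define wait_for(COND, MS) ({						\
		unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
		int ret__ = 0;							\
		while (!(COND)) {						\
			if (time_after(jiffies, timeout__)) {			\
				ret__ = -ETIMEDOUT;				\
				break;						\
			}							\
			msleep(1);						\
		}								\
		ret__;								\
	})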
722 739
723static int ironlake_do_reset(struct drm_device *dev, u8 flags) 740static int ironlake_do_reset(struct drm_device *dev)
724{ 741{
725 struct drm_i915_private *dev_priv = dev->dev_private; 742 struct drm_i915_private *dev_priv = dev->dev_private;
726 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); 743 u32 gdrst;
727 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); 744 int ret;
745
746 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
747 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
748 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
749 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
750 if (ret)
751 return ret;
752
753 /* We can't reset render&media without also resetting display ... */
754 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
755 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
756 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
728 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 757 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
729} 758}
730 759
731static int gen6_do_reset(struct drm_device *dev, u8 flags) 760static int gen6_do_reset(struct drm_device *dev)
732{ 761{
733 struct drm_i915_private *dev_priv = dev->dev_private; 762 struct drm_i915_private *dev_priv = dev->dev_private;
734 int ret; 763 int ret;
@@ -763,10 +792,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
763 return ret; 792 return ret;
764} 793}
765 794
795static int intel_gpu_reset(struct drm_device *dev)
796{
797 struct drm_i915_private *dev_priv = dev->dev_private;
798 int ret = -ENODEV;
799
800 switch (INTEL_INFO(dev)->gen) {
801 case 7:
802 case 6:
803 ret = gen6_do_reset(dev);
804 break;
805 case 5:
806 ret = ironlake_do_reset(dev);
807 break;
808 case 4:
809 ret = i965_do_reset(dev);
810 break;
811 case 2:
812 ret = i8xx_do_reset(dev);
813 break;
814 }
815
816 /* Also reset the gpu hangman. */
817 if (dev_priv->stop_rings) {
818 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
819 dev_priv->stop_rings = 0;
820 if (ret == -ENODEV) {
821 DRM_ERROR("Reset not implemented, but ignoring "
822 "error for simulated gpu hangs\n");
823 ret = 0;
824 }
825 }
826
827 return ret;
828}
829
766/** 830/**
767 * i915_reset - reset chip after a hang 831 * i915_reset - reset chip after a hang
768 * @dev: drm device to reset 832 * @dev: drm device to reset
769 * @flags: reset domains
770 * 833 *
771 * Reset the chip. Useful if a hang is detected. Returns zero on successful 834 * Reset the chip. Useful if a hang is detected. Returns zero on successful
772 * reset or otherwise an error code. 835 * reset or otherwise an error code.
@@ -779,14 +842,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
779 * - re-init interrupt state 842 * - re-init interrupt state
780 * - re-init display 843 * - re-init display
781 */ 844 */
782int i915_reset(struct drm_device *dev, u8 flags) 845int i915_reset(struct drm_device *dev)
783{ 846{
784 drm_i915_private_t *dev_priv = dev->dev_private; 847 drm_i915_private_t *dev_priv = dev->dev_private;
785 /*
786 * We really should only reset the display subsystem if we actually
787 * need to
788 */
789 bool need_display = true;
790 int ret; 848 int ret;
791 849
792 if (!i915_try_reset) 850 if (!i915_try_reset)
@@ -795,26 +853,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
795 if (!mutex_trylock(&dev->struct_mutex)) 853 if (!mutex_trylock(&dev->struct_mutex))
796 return -EBUSY; 854 return -EBUSY;
797 855
856 dev_priv->stop_rings = 0;
857
798 i915_gem_reset(dev); 858 i915_gem_reset(dev);
799 859
800 ret = -ENODEV; 860 ret = -ENODEV;
801 if (get_seconds() - dev_priv->last_gpu_reset < 5) { 861 if (get_seconds() - dev_priv->last_gpu_reset < 5)
802 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 862 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
803 } else switch (INTEL_INFO(dev)->gen) { 863 else
804 case 7: 864 ret = intel_gpu_reset(dev);
805 case 6: 865
806 ret = gen6_do_reset(dev, flags);
807 break;
808 case 5:
809 ret = ironlake_do_reset(dev, flags);
810 break;
811 case 4:
812 ret = i965_do_reset(dev, flags);
813 break;
814 case 2:
815 ret = i8xx_do_reset(dev, flags);
816 break;
817 }
818 dev_priv->last_gpu_reset = get_seconds(); 866 dev_priv->last_gpu_reset = get_seconds();
819 if (ret) { 867 if (ret) {
820 DRM_ERROR("Failed to reset chip.\n"); 868 DRM_ERROR("Failed to reset chip.\n");
@@ -856,23 +904,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
856 intel_modeset_init_hw(dev); 904 intel_modeset_init_hw(dev);
857 905
858 drm_irq_uninstall(dev); 906 drm_irq_uninstall(dev);
859 drm_mode_config_reset(dev);
860 drm_irq_install(dev); 907 drm_irq_install(dev);
861 908 } else {
862 mutex_lock(&dev->struct_mutex); 909 mutex_unlock(&dev->struct_mutex);
863 }
864
865 mutex_unlock(&dev->struct_mutex);
866
867 /*
868 * Perform a full modeset as on later generations, e.g. Ironlake, we may
869 * need to retrain the display link and cannot just restore the register
870 * values.
871 */
872 if (need_display) {
873 mutex_lock(&dev->mode_config.mutex);
874 drm_helper_resume_force_mode(dev);
875 mutex_unlock(&dev->mode_config.mutex);
876 } 910 }
877 911
878 return 0; 912 return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 69e153956182..e03a4f80c5c9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -39,6 +39,7 @@
39#include <drm/intel-gtt.h> 39#include <drm/intel-gtt.h>
40#include <linux/backlight.h> 40#include <linux/backlight.h>
41#include <linux/intel-iommu.h> 41#include <linux/intel-iommu.h>
42#include <linux/kref.h>
42 43
43/* General customization: 44/* General customization:
44 */ 45 */
@@ -78,6 +79,16 @@ enum port {
78 79
79#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 80#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
80 81
82struct intel_pch_pll {
83 int refcount; /* count of number of CRTCs sharing this PLL */
84 int active; /* count of number of active CRTCs (i.e. DPMS on) */
85 bool on; /* is the PLL actually active? Disabled during modeset */
86 int pll_reg;
87 int fp0_reg;
88 int fp1_reg;
89};
90#define I915_NUM_PLLS 2
91
81/* Interface history: 92/* Interface history:
82 * 93 *
83 * 1.1: Original. 94 * 1.1: Original.
@@ -122,11 +133,11 @@ struct opregion_asle;
122struct drm_i915_private; 133struct drm_i915_private;
123 134
124struct intel_opregion { 135struct intel_opregion {
125 struct opregion_header *header; 136 struct opregion_header __iomem *header;
126 struct opregion_acpi *acpi; 137 struct opregion_acpi __iomem *acpi;
127 struct opregion_swsci *swsci; 138 struct opregion_swsci __iomem *swsci;
128 struct opregion_asle *asle; 139 struct opregion_asle __iomem *asle;
129 void *vbt; 140 void __iomem *vbt;
130 u32 __iomem *lid_state; 141 u32 __iomem *lid_state;
131}; 142};
132#define OPREGION_SIZE (8*1024) 143#define OPREGION_SIZE (8*1024)
@@ -161,8 +172,11 @@ struct sdvo_device_mapping {
161struct intel_display_error_state; 172struct intel_display_error_state;
162 173
163struct drm_i915_error_state { 174struct drm_i915_error_state {
175 struct kref ref;
164 u32 eir; 176 u32 eir;
165 u32 pgtbl_er; 177 u32 pgtbl_er;
178 u32 ier;
179 bool waiting[I915_NUM_RINGS];
166 u32 pipestat[I915_MAX_PIPES]; 180 u32 pipestat[I915_MAX_PIPES];
167 u32 tail[I915_NUM_RINGS]; 181 u32 tail[I915_NUM_RINGS];
168 u32 head[I915_NUM_RINGS]; 182 u32 head[I915_NUM_RINGS];
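For context: the kref added to drm_i915_error_state follows the standard kernel refcounting pattern, paired with the i915_error_state_free() release callback declared later in this patch. A minimal sketch, locking omitted:

	kref_init(&error->ref);				/* initial reference, held via dev_priv->first_error */
	kref_get(&error->ref);				/* e.g. a debugfs reader takes its own reference */
	kref_put(&error->ref, i915_error_state_free);	/* the final put invokes the release callback */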
@@ -228,11 +242,13 @@ struct drm_i915_display_funcs {
228 void (*update_wm)(struct drm_device *dev); 242 void (*update_wm)(struct drm_device *dev);
229 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 243 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
230 uint32_t sprite_width, int pixel_size); 244 uint32_t sprite_width, int pixel_size);
245 void (*sanitize_pm)(struct drm_device *dev);
231 int (*crtc_mode_set)(struct drm_crtc *crtc, 246 int (*crtc_mode_set)(struct drm_crtc *crtc,
232 struct drm_display_mode *mode, 247 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode, 248 struct drm_display_mode *adjusted_mode,
234 int x, int y, 249 int x, int y,
235 struct drm_framebuffer *old_fb); 250 struct drm_framebuffer *old_fb);
251 void (*off)(struct drm_crtc *crtc);
236 void (*write_eld)(struct drm_connector *connector, 252 void (*write_eld)(struct drm_connector *connector,
237 struct drm_crtc *crtc); 253 struct drm_crtc *crtc);
238 void (*fdi_link_train)(struct drm_crtc *crtc); 254 void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -328,7 +344,6 @@ typedef struct drm_i915_private {
328 344
329 const struct intel_device_info *info; 345 const struct intel_device_info *info;
330 346
331 int has_gem;
332 int relative_constants_mode; 347 int relative_constants_mode;
333 348
334 void __iomem *regs; 349 void __iomem *regs;
@@ -357,7 +372,6 @@ typedef struct drm_i915_private {
357 372
358 drm_dma_handle_t *status_page_dmah; 373 drm_dma_handle_t *status_page_dmah;
359 uint32_t counter; 374 uint32_t counter;
360 drm_local_map_t hws_map;
361 struct drm_i915_gem_object *pwrctx; 375 struct drm_i915_gem_object *pwrctx;
362 struct drm_i915_gem_object *renderctx; 376 struct drm_i915_gem_object *renderctx;
363 377
@@ -386,11 +400,9 @@ typedef struct drm_i915_private {
386 u32 hotplug_supported_mask; 400 u32 hotplug_supported_mask;
387 struct work_struct hotplug_work; 401 struct work_struct hotplug_work;
388 402
389 int tex_lru_log_granularity;
390 int allow_batchbuffer;
391 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 403 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
392 int vblank_pipe;
393 int num_pipe; 404 int num_pipe;
405 int num_pch_pll;
394 406
395 /* For hangcheck timer */ 407 /* For hangcheck timer */
396#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 408#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -402,6 +414,8 @@ typedef struct drm_i915_private {
402 uint32_t last_instdone; 414 uint32_t last_instdone;
403 uint32_t last_instdone1; 415 uint32_t last_instdone1;
404 416
417 unsigned int stop_rings;
418
405 unsigned long cfb_size; 419 unsigned long cfb_size;
406 unsigned int cfb_fb; 420 unsigned int cfb_fb;
407 enum plane cfb_plane; 421 enum plane cfb_plane;
@@ -453,6 +467,7 @@ typedef struct drm_i915_private {
453 unsigned int fsb_freq, mem_freq, is_ddr3; 467 unsigned int fsb_freq, mem_freq, is_ddr3;
454 468
455 spinlock_t error_lock; 469 spinlock_t error_lock;
470 /* Protected by dev->error_lock. */
456 struct drm_i915_error_state *first_error; 471 struct drm_i915_error_state *first_error;
457 struct work_struct error_work; 472 struct work_struct error_work;
458 struct completion error_completion; 473 struct completion error_completion;
@@ -677,24 +692,10 @@ typedef struct drm_i915_private {
677 */ 692 */
678 struct list_head inactive_list; 693 struct list_head inactive_list;
679 694
680 /**
681 * LRU list of objects which are not in the ringbuffer but
682 * are still pinned in the GTT.
683 */
684 struct list_head pinned_list;
685
686 /** LRU list of objects with fence regs on them. */ 695 /** LRU list of objects with fence regs on them. */
687 struct list_head fence_list; 696 struct list_head fence_list;
688 697
689 /** 698 /**
690 * List of objects currently pending being freed.
691 *
692 * These objects are no longer in use, but due to a signal
693 * we were prevented from freeing them at the appointed time.
694 */
695 struct list_head deferred_free_list;
696
697 /**
698 * We leave the user IRQ off as much as possible, 699 * We leave the user IRQ off as much as possible,
699 * but this means that requests will finish and never 700 * but this means that requests will finish and never
700 * be retired once the system goes idle. Set a timer to 701 * be retired once the system goes idle. Set a timer to
@@ -742,6 +743,16 @@ typedef struct drm_i915_private {
742 size_t object_memory; 743 size_t object_memory;
743 u32 object_count; 744 u32 object_count;
744 } mm; 745 } mm;
746
747 /* Old dri1 support infrastructure, beware the dragons ya fools entering
748 * here! */
749 struct {
750 unsigned allow_batchbuffer : 1;
751 u32 __iomem *gfx_hws_cpu_addr;
752 } dri1;
753
754 /* Kernel Modesetting */
755
745 struct sdvo_device_mapping sdvo_mappings[2]; 756 struct sdvo_device_mapping sdvo_mappings[2];
746 /* indicate whether the LVDS_BORDER should be enabled or not */ 757 /* indicate whether the LVDS_BORDER should be enabled or not */
747 unsigned int lvds_border_bits; 758 unsigned int lvds_border_bits;
@@ -751,7 +762,8 @@ typedef struct drm_i915_private {
751 struct drm_crtc *plane_to_crtc_mapping[3]; 762 struct drm_crtc *plane_to_crtc_mapping[3];
752 struct drm_crtc *pipe_to_crtc_mapping[3]; 763 struct drm_crtc *pipe_to_crtc_mapping[3];
753 wait_queue_head_t pending_flip_queue; 764 wait_queue_head_t pending_flip_queue;
754 bool flip_pending_is_done; 765
766 struct intel_pch_pll pch_plls[I915_NUM_PLLS];
755 767
756 /* Reclocking support */ 768 /* Reclocking support */
757 bool render_reclock_avail; 769 bool render_reclock_avail;
@@ -869,7 +881,14 @@ struct drm_i915_gem_object {
869 * Current tiling mode for the object. 881 * Current tiling mode for the object.
870 */ 882 */
871 unsigned int tiling_mode:2; 883 unsigned int tiling_mode:2;
872 unsigned int tiling_changed:1; 884 /**
885 * Whether the tiling parameters for the currently associated fence
886 * register have changed. Note that for the purposes of tracking
887 * tiling changes we also treat the unfenced register, the register
888 * slot that the object occupies whilst it executes a fenced
889 * command (such as BLT on gen2/3), as a "fence".
890 */
891 unsigned int fence_dirty:1;
873 892
874 /** How many users have pinned this object in GTT space. The following 893 /** How many users have pinned this object in GTT space. The following
875 * users can each hold at most one reference: pwrite/pread, pin_ioctl 894 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -1116,6 +1135,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
1116extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 1135extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1117 1136
1118 /* i915_dma.c */ 1137 /* i915_dma.c */
1138void i915_update_dri1_breadcrumb(struct drm_device *dev);
1119extern void i915_kernel_lost_context(struct drm_device * dev); 1139extern void i915_kernel_lost_context(struct drm_device * dev);
1120extern int i915_driver_load(struct drm_device *, unsigned long flags); 1140extern int i915_driver_load(struct drm_device *, unsigned long flags);
1121extern int i915_driver_unload(struct drm_device *); 1141extern int i915_driver_unload(struct drm_device *);
@@ -1133,7 +1153,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1133extern int i915_emit_box(struct drm_device *dev, 1153extern int i915_emit_box(struct drm_device *dev,
1134 struct drm_clip_rect *box, 1154 struct drm_clip_rect *box,
1135 int DR1, int DR4); 1155 int DR1, int DR4);
1136extern int i915_reset(struct drm_device *dev, u8 flags); 1156extern int i915_reset(struct drm_device *dev);
1137extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 1157extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1138extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1158extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1139extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1159extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1143,19 +1163,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1143/* i915_irq.c */ 1163/* i915_irq.c */
1144void i915_hangcheck_elapsed(unsigned long data); 1164void i915_hangcheck_elapsed(unsigned long data);
1145void i915_handle_error(struct drm_device *dev, bool wedged); 1165void i915_handle_error(struct drm_device *dev, bool wedged);
1146extern int i915_irq_emit(struct drm_device *dev, void *data,
1147 struct drm_file *file_priv);
1148extern int i915_irq_wait(struct drm_device *dev, void *data,
1149 struct drm_file *file_priv);
1150 1166
1151extern void intel_irq_init(struct drm_device *dev); 1167extern void intel_irq_init(struct drm_device *dev);
1152 1168
1153extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1169void i915_error_state_free(struct kref *error_ref);
1154 struct drm_file *file_priv);
1155extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1156 struct drm_file *file_priv);
1157extern int i915_vblank_swap(struct drm_device *dev, void *data,
1158 struct drm_file *file_priv);
1159 1170
1160void 1171void
1161i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1172i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1287,18 +1298,18 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1287 uint32_t read_domains, 1298 uint32_t read_domains,
1288 uint32_t write_domain); 1299 uint32_t write_domain);
1289int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1300int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1301int __must_check i915_gem_init(struct drm_device *dev);
1290int __must_check i915_gem_init_hw(struct drm_device *dev); 1302int __must_check i915_gem_init_hw(struct drm_device *dev);
1291void i915_gem_init_swizzling(struct drm_device *dev); 1303void i915_gem_init_swizzling(struct drm_device *dev);
1292void i915_gem_init_ppgtt(struct drm_device *dev); 1304void i915_gem_init_ppgtt(struct drm_device *dev);
1293void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1305void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1294int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire); 1306int __must_check i915_gpu_idle(struct drm_device *dev);
1295int __must_check i915_gem_idle(struct drm_device *dev); 1307int __must_check i915_gem_idle(struct drm_device *dev);
1296int __must_check i915_add_request(struct intel_ring_buffer *ring, 1308int __must_check i915_add_request(struct intel_ring_buffer *ring,
1297 struct drm_file *file, 1309 struct drm_file *file,
1298 struct drm_i915_gem_request *request); 1310 struct drm_i915_gem_request *request);
1299int __must_check i915_wait_request(struct intel_ring_buffer *ring, 1311int __must_check i915_wait_request(struct intel_ring_buffer *ring,
1300 uint32_t seqno, 1312 uint32_t seqno);
1301 bool do_retire);
1302int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1313int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1303int __must_check 1314int __must_check
1304i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1315i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1349,10 +1360,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
1349/* i915_gem_evict.c */ 1360/* i915_gem_evict.c */
1350int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1361int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1351 unsigned alignment, bool mappable); 1362 unsigned alignment, bool mappable);
1352int __must_check i915_gem_evict_everything(struct drm_device *dev, 1363int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
1353 bool purgeable_only); 1364
1354int __must_check i915_gem_evict_inactive(struct drm_device *dev, 1365/* i915_gem_stolen.c */
1355 bool purgeable_only); 1366int i915_gem_init_stolen(struct drm_device *dev);
1367void i915_gem_cleanup_stolen(struct drm_device *dev);
1356 1368
1357/* i915_gem_tiling.c */ 1369/* i915_gem_tiling.c */
1358void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1370void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1467,28 +1479,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
1467 struct intel_display_error_state *error); 1479 struct intel_display_error_state *error);
1468#endif 1480#endif
1469 1481
1470#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
1471
1472#define BEGIN_LP_RING(n) \
1473 intel_ring_begin(LP_RING(dev_priv), (n))
1474
1475#define OUT_RING(x) \
1476 intel_ring_emit(LP_RING(dev_priv), x)
1477
1478#define ADVANCE_LP_RING() \
1479 intel_ring_advance(LP_RING(dev_priv))
1480
1481/**
1482 * Lock test for when it's just for synchronization of ring access.
1483 *
1484 * In that case, we don't need to do it when GEM is initialized as nobody else
1485 * has access to the ring.
1486 */
1487#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
1488 if (LP_RING(dev->dev_private)->obj == NULL) \
1489 LOCK_TEST_WITH_RETURN(dev, file); \
1490} while (0)
1491
1492/* On SNB platform, before reading ring registers forcewake bit 1482/* On SNB platform, before reading ring registers forcewake bit
1493 * must be set to prevent GT core from power down and stale values being 1483 * must be set to prevent GT core from power down and stale values being
1494 * returned. 1484 * returned.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd87937e921f..44a5f241b1a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 46 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 47 struct drm_i915_gem_pwrite *args,
48 struct drm_file *file); 48 struct drm_file *file);
49static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
50 49
51static void i915_gem_write_fence(struct drm_device *dev, int reg, 50static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj); 51 struct drm_i915_gem_object *obj);
@@ -66,7 +65,7 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
66 /* As we do not have an associated fence register, we will force 65 /* As we do not have an associated fence register, we will force
67 * a tiling change if we ever need to acquire one. 66 * a tiling change if we ever need to acquire one.
68 */ 67 */
69 obj->tiling_changed = false; 68 obj->fence_dirty = false;
70 obj->fence_reg = I915_FENCE_REG_NONE; 69 obj->fence_reg = I915_FENCE_REG_NONE;
71} 70}
72 71
@@ -132,7 +131,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
132static inline bool 131static inline bool
133i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 132i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
134{ 133{
135 return obj->gtt_space && !obj->active && obj->pin_count == 0; 134 return !obj->active;
136} 135}
137 136
138int 137int
@@ -141,6 +140,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
141{ 140{
142 struct drm_i915_gem_init *args = data; 141 struct drm_i915_gem_init *args = data;
143 142
143 if (drm_core_check_feature(dev, DRIVER_MODESET))
144 return -ENODEV;
145
144 if (args->gtt_start >= args->gtt_end || 146 if (args->gtt_start >= args->gtt_end ||
145 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) 147 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
146 return -EINVAL; 148 return -EINVAL;
@@ -166,13 +168,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
166 struct drm_i915_gem_object *obj; 168 struct drm_i915_gem_object *obj;
167 size_t pinned; 169 size_t pinned;
168 170
169 if (!(dev->driver->driver_features & DRIVER_GEM))
170 return -ENODEV;
171
172 pinned = 0; 171 pinned = 0;
173 mutex_lock(&dev->struct_mutex); 172 mutex_lock(&dev->struct_mutex);
174 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 173 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
175 pinned += obj->gtt_space->size; 174 if (obj->pin_count)
175 pinned += obj->gtt_space->size;
176 mutex_unlock(&dev->struct_mutex); 176 mutex_unlock(&dev->struct_mutex);
177 177
178 args->aper_size = dev_priv->mm.gtt_total; 178 args->aper_size = dev_priv->mm.gtt_total;
@@ -243,6 +243,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
243 struct drm_file *file) 243 struct drm_file *file)
244{ 244{
245 struct drm_i915_gem_create *args = data; 245 struct drm_i915_gem_create *args = data;
246
246 return i915_gem_create(file, dev, 247 return i915_gem_create(file, dev,
247 args->size, &args->handle); 248 args->size, &args->handle);
248} 249}
@@ -282,8 +283,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
282} 283}
283 284
284static inline int 285static inline int
285__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset, 286__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
286 const char *cpu_vaddr, 287 const char __user *cpu_vaddr,
287 int length) 288 int length)
288{ 289{
289 int ret, cpu_offset = 0; 290 int ret, cpu_offset = 0;
@@ -558,11 +559,14 @@ fast_user_write(struct io_mapping *mapping,
558 char __user *user_data, 559 char __user *user_data,
559 int length) 560 int length)
560{ 561{
561 char *vaddr_atomic; 562 void __iomem *vaddr_atomic;
563 void *vaddr;
562 unsigned long unwritten; 564 unsigned long unwritten;
563 565
564 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 566 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
565 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, 567 /* We can use the cpu mem copy function because this is X86. */
568 vaddr = (void __force*)vaddr_atomic + page_offset;
569 unwritten = __copy_from_user_inatomic_nocache(vaddr,
566 user_data, length); 570 user_data, length);
567 io_mapping_unmap_atomic(vaddr_atomic); 571 io_mapping_unmap_atomic(vaddr_atomic);
568 return unwritten; 572 return unwritten;
@@ -925,9 +929,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
925 uint32_t write_domain = args->write_domain; 929 uint32_t write_domain = args->write_domain;
926 int ret; 930 int ret;
927 931
928 if (!(dev->driver->driver_features & DRIVER_GEM))
929 return -ENODEV;
930
931 /* Only handle setting domains to types used by the CPU. */ 932 /* Only handle setting domains to types used by the CPU. */
932 if (write_domain & I915_GEM_GPU_DOMAINS) 933 if (write_domain & I915_GEM_GPU_DOMAINS)
933 return -EINVAL; 934 return -EINVAL;
@@ -981,9 +982,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
981 struct drm_i915_gem_object *obj; 982 struct drm_i915_gem_object *obj;
982 int ret = 0; 983 int ret = 0;
983 984
984 if (!(dev->driver->driver_features & DRIVER_GEM))
985 return -ENODEV;
986
987 ret = i915_mutex_lock_interruptible(dev); 985 ret = i915_mutex_lock_interruptible(dev);
988 if (ret) 986 if (ret)
989 return ret; 987 return ret;
@@ -1019,9 +1017,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1019 struct drm_gem_object *obj; 1017 struct drm_gem_object *obj;
1020 unsigned long addr; 1018 unsigned long addr;
1021 1019
1022 if (!(dev->driver->driver_features & DRIVER_GEM))
1023 return -ENODEV;
1024
1025 obj = drm_gem_object_lookup(dev, file, args->handle); 1020 obj = drm_gem_object_lookup(dev, file, args->handle);
1026 if (obj == NULL) 1021 if (obj == NULL)
1027 return -ENOENT; 1022 return -ENOENT;
@@ -1247,9 +1242,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
1247 struct drm_i915_gem_object *obj; 1242 struct drm_i915_gem_object *obj;
1248 int ret; 1243 int ret;
1249 1244
1250 if (!(dev->driver->driver_features & DRIVER_GEM))
1251 return -ENODEV;
1252
1253 ret = i915_mutex_lock_interruptible(dev); 1245 ret = i915_mutex_lock_interruptible(dev);
1254 if (ret) 1246 if (ret)
1255 return ret; 1247 return ret;
@@ -1307,9 +1299,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1307{ 1299{
1308 struct drm_i915_gem_mmap_gtt *args = data; 1300 struct drm_i915_gem_mmap_gtt *args = data;
1309 1301
1310 if (!(dev->driver->driver_features & DRIVER_GEM))
1311 return -ENODEV;
1312
1313 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1302 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1314} 1303}
1315 1304
@@ -1450,10 +1439,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1450 struct drm_device *dev = obj->base.dev; 1439 struct drm_device *dev = obj->base.dev;
1451 struct drm_i915_private *dev_priv = dev->dev_private; 1440 struct drm_i915_private *dev_priv = dev->dev_private;
1452 1441
1453 if (obj->pin_count != 0) 1442 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1454 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1455 else
1456 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1457 1443
1458 BUG_ON(!list_empty(&obj->gpu_write_list)); 1444 BUG_ON(!list_empty(&obj->gpu_write_list));
1459 BUG_ON(!obj->active); 1445 BUG_ON(!obj->active);
@@ -1779,20 +1765,6 @@ i915_gem_retire_requests(struct drm_device *dev)
1779 drm_i915_private_t *dev_priv = dev->dev_private; 1765 drm_i915_private_t *dev_priv = dev->dev_private;
1780 int i; 1766 int i;
1781 1767
1782 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1783 struct drm_i915_gem_object *obj, *next;
1784
1785 /* We must be careful that during unbind() we do not
1786 * accidentally infinitely recurse into retire requests.
1787 * Currently:
1788 * retire -> free -> unbind -> wait -> retire_ring
1789 */
1790 list_for_each_entry_safe(obj, next,
1791 &dev_priv->mm.deferred_free_list,
1792 mm_list)
1793 i915_gem_free_object_tail(obj);
1794 }
1795
1796 for (i = 0; i < I915_NUM_RINGS; i++) 1768 for (i = 0; i < I915_NUM_RINGS; i++)
1797 i915_gem_retire_requests_ring(&dev_priv->ring[i]); 1769 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1798} 1770}
@@ -1845,20 +1817,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
1845 mutex_unlock(&dev->struct_mutex); 1817 mutex_unlock(&dev->struct_mutex);
1846} 1818}
1847 1819
1848/** 1820static int
1849 * Waits for a sequence number to be signaled, and cleans up the 1821i915_gem_check_wedge(struct drm_i915_private *dev_priv)
1850 * request and object lists appropriately for that event.
1851 */
1852int
1853i915_wait_request(struct intel_ring_buffer *ring,
1854 uint32_t seqno,
1855 bool do_retire)
1856{ 1822{
1857 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1823 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1858 u32 ier;
1859 int ret = 0;
1860
1861 BUG_ON(seqno == 0);
1862 1824
1863 if (atomic_read(&dev_priv->mm.wedged)) { 1825 if (atomic_read(&dev_priv->mm.wedged)) {
1864 struct completion *x = &dev_priv->error_completion; 1826 struct completion *x = &dev_priv->error_completion;
@@ -1873,6 +1835,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
1873 return recovery_complete ? -EIO : -EAGAIN; 1835 return recovery_complete ? -EIO : -EAGAIN;
1874 } 1836 }
1875 1837
1838 return 0;
1839}
1840
1841/*
1842 * Compare seqno against outstanding lazy request. Emit a request if they are
1843 * equal.
1844 */
1845static int
1846i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1847{
1848 int ret = 0;
1849
1850 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1851
1876 if (seqno == ring->outstanding_lazy_request) { 1852 if (seqno == ring->outstanding_lazy_request) {
1877 struct drm_i915_gem_request *request; 1853 struct drm_i915_gem_request *request;
1878 1854
@@ -1886,56 +1862,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
1886 return ret; 1862 return ret;
1887 } 1863 }
1888 1864
1889 seqno = request->seqno; 1865 BUG_ON(seqno != request->seqno);
1890 } 1866 }
1891 1867
1892 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 1868 return ret;
1893 if (HAS_PCH_SPLIT(ring->dev)) 1869}
1894 ier = I915_READ(DEIER) | I915_READ(GTIER); 1870
1895 else if (IS_VALLEYVIEW(ring->dev)) 1871static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1896 ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1872 bool interruptible)
1897 else 1873{
1898 ier = I915_READ(IER); 1874 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1899 if (!ier) { 1875 int ret = 0;
1900 DRM_ERROR("something (likely vbetool) disabled "
1901 "interrupts, re-enabling\n");
1902 ring->dev->driver->irq_preinstall(ring->dev);
1903 ring->dev->driver->irq_postinstall(ring->dev);
1904 }
1905 1876
1906 trace_i915_gem_request_wait_begin(ring, seqno); 1877 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1878 return 0;
1907 1879
1908 ring->waiting_seqno = seqno; 1880 trace_i915_gem_request_wait_begin(ring, seqno);
1909 if (ring->irq_get(ring)) { 1881 if (WARN_ON(!ring->irq_get(ring)))
1910 if (dev_priv->mm.interruptible) 1882 return -ENODEV;
1911 ret = wait_event_interruptible(ring->irq_queue,
1912 i915_seqno_passed(ring->get_seqno(ring), seqno)
1913 || atomic_read(&dev_priv->mm.wedged));
1914 else
1915 wait_event(ring->irq_queue,
1916 i915_seqno_passed(ring->get_seqno(ring), seqno)
1917 || atomic_read(&dev_priv->mm.wedged));
1918 1883
1919 ring->irq_put(ring); 1884#define EXIT_COND \
1920 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring), 1885 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1921 seqno) || 1886 atomic_read(&dev_priv->mm.wedged))
1922 atomic_read(&dev_priv->mm.wedged), 3000))
1923 ret = -EBUSY;
1924 ring->waiting_seqno = 0;
1925 1887
1926 trace_i915_gem_request_wait_end(ring, seqno); 1888 if (interruptible)
1927 } 1889 ret = wait_event_interruptible(ring->irq_queue,
1890 EXIT_COND);
1891 else
1892 wait_event(ring->irq_queue, EXIT_COND);
1893
1894 ring->irq_put(ring);
1895 trace_i915_gem_request_wait_end(ring, seqno);
1896#undef EXIT_COND
1897
1898 return ret;
1899}
1900
1901/**
1902 * Waits for a sequence number to be signaled, and cleans up the
1903 * request and object lists appropriately for that event.
1904 */
1905int
1906i915_wait_request(struct intel_ring_buffer *ring,
1907 uint32_t seqno)
1908{
1909 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1910 int ret = 0;
1911
1912 BUG_ON(seqno == 0);
1913
1914 ret = i915_gem_check_wedge(dev_priv);
1915 if (ret)
1916 return ret;
1917
1918 ret = i915_gem_check_olr(ring, seqno);
1919 if (ret)
1920 return ret;
1921
1922 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
1928 if (atomic_read(&dev_priv->mm.wedged)) 1923 if (atomic_read(&dev_priv->mm.wedged))
1929 ret = -EAGAIN; 1924 ret = -EAGAIN;
1930 1925
1931 /* Directly dispatch request retiring. While we have the work queue
1932 * to handle this, the waiter on a request often wants an associated
1933 * buffer to have made it to the inactive list, and we would need
1934 * a separate wait queue to handle that.
1935 */
1936 if (ret == 0 && do_retire)
1937 i915_gem_retire_requests_ring(ring);
1938
1939 return ret; 1926 return ret;
1940} 1927}
1941 1928
@@ -1957,10 +1944,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
1957 * it. 1944 * it.
1958 */ 1945 */
1959 if (obj->active) { 1946 if (obj->active) {
1960 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno, 1947 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
1961 true);
1962 if (ret) 1948 if (ret)
1963 return ret; 1949 return ret;
1950 i915_gem_retire_requests_ring(obj->ring);
1964 } 1951 }
1965 1952
1966 return 0; 1953 return 0;
@@ -1998,22 +1985,9 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
1998 if (seqno <= from->sync_seqno[idx]) 1985 if (seqno <= from->sync_seqno[idx])
1999 return 0; 1986 return 0;
2000 1987
2001 if (seqno == from->outstanding_lazy_request) { 1988 ret = i915_gem_check_olr(obj->ring, seqno);
2002 struct drm_i915_gem_request *request; 1989 if (ret)
2003 1990 return ret;
2004 request = kzalloc(sizeof(*request), GFP_KERNEL);
2005 if (request == NULL)
2006 return -ENOMEM;
2007
2008 ret = i915_add_request(from, NULL, request);
2009 if (ret) {
2010 kfree(request);
2011 return ret;
2012 }
2013
2014 seqno = request->seqno;
2015 }
2016
2017 1991
2018 ret = to->sync_to(to, from, seqno); 1992 ret = to->sync_to(to, from, seqno);
2019 if (!ret) 1993 if (!ret)
@@ -2064,7 +2038,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2064 } 2038 }
2065 2039
2066 ret = i915_gem_object_finish_gpu(obj); 2040 ret = i915_gem_object_finish_gpu(obj);
2067 if (ret == -ERESTARTSYS) 2041 if (ret)
2068 return ret; 2042 return ret;
2069 /* Continue on if we fail due to EIO, the GPU is hung so we 2043 /* Continue on if we fail due to EIO, the GPU is hung so we
2070 * should be safe and we need to cleanup or else we might 2044 * should be safe and we need to cleanup or else we might
@@ -2091,7 +2065,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2091 2065
2092 /* release the fence reg _after_ flushing */ 2066 /* release the fence reg _after_ flushing */
2093 ret = i915_gem_object_put_fence(obj); 2067 ret = i915_gem_object_put_fence(obj);
2094 if (ret == -ERESTARTSYS) 2068 if (ret)
2095 return ret; 2069 return ret;
2096 2070
2097 trace_i915_gem_object_unbind(obj); 2071 trace_i915_gem_object_unbind(obj);
@@ -2143,7 +2117,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
2143 return 0; 2117 return 0;
2144} 2118}
2145 2119
2146static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire) 2120static int i915_ring_idle(struct intel_ring_buffer *ring)
2147{ 2121{
2148 int ret; 2122 int ret;
2149 2123
@@ -2157,18 +2131,17 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
2157 return ret; 2131 return ret;
2158 } 2132 }
2159 2133
2160 return i915_wait_request(ring, i915_gem_next_request_seqno(ring), 2134 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2161 do_retire);
2162} 2135}
2163 2136
2164int i915_gpu_idle(struct drm_device *dev, bool do_retire) 2137int i915_gpu_idle(struct drm_device *dev)
2165{ 2138{
2166 drm_i915_private_t *dev_priv = dev->dev_private; 2139 drm_i915_private_t *dev_priv = dev->dev_private;
2167 int ret, i; 2140 int ret, i;
2168 2141
2169 /* Flush everything onto the inactive list. */ 2142 /* Flush everything onto the inactive list. */
2170 for (i = 0; i < I915_NUM_RINGS; i++) { 2143 for (i = 0; i < I915_NUM_RINGS; i++) {
2171 ret = i915_ring_idle(&dev_priv->ring[i], do_retire); 2144 ret = i915_ring_idle(&dev_priv->ring[i]);
2172 if (ret) 2145 if (ret)
2173 return ret; 2146 return ret;
2174 } 2147 }
@@ -2357,9 +2330,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2357 } 2330 }
2358 2331
2359 if (obj->last_fenced_seqno) { 2332 if (obj->last_fenced_seqno) {
2360 ret = i915_wait_request(obj->ring, 2333 ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
2361 obj->last_fenced_seqno,
2362 false);
2363 if (ret) 2334 if (ret)
2364 return ret; 2335 return ret;
2365 2336
@@ -2454,7 +2425,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2454 /* Have we updated the tiling parameters upon the object and so 2425 /* Have we updated the tiling parameters upon the object and so
2455 * will need to serialise the write to the associated fence register? 2426 * will need to serialise the write to the associated fence register?
2456 */ 2427 */
2457 if (obj->tiling_changed) { 2428 if (obj->fence_dirty) {
2458 ret = i915_gem_object_flush_fence(obj); 2429 ret = i915_gem_object_flush_fence(obj);
2459 if (ret) 2430 if (ret)
2460 return ret; 2431 return ret;
@@ -2463,7 +2434,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2463 /* Just update our place in the LRU if our fence is getting reused. */ 2434 /* Just update our place in the LRU if our fence is getting reused. */
2464 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2435 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2465 reg = &dev_priv->fence_regs[obj->fence_reg]; 2436 reg = &dev_priv->fence_regs[obj->fence_reg];
2466 if (!obj->tiling_changed) { 2437 if (!obj->fence_dirty) {
2467 list_move_tail(&reg->lru_list, 2438 list_move_tail(&reg->lru_list,
2468 &dev_priv->mm.fence_list); 2439 &dev_priv->mm.fence_list);
2469 return 0; 2440 return 0;
@@ -2486,7 +2457,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2486 return 0; 2457 return 0;
2487 2458
2488 i915_gem_object_update_fence(obj, reg, enable); 2459 i915_gem_object_update_fence(obj, reg, enable);
2489 obj->tiling_changed = false; 2460 obj->fence_dirty = false;
2490 2461
2491 return 0; 2462 return 0;
2492} 2463}
@@ -2732,6 +2703,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2732int 2703int
2733i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 2704i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2734{ 2705{
2706 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2735 uint32_t old_write_domain, old_read_domains; 2707 uint32_t old_write_domain, old_read_domains;
2736 int ret; 2708 int ret;
2737 2709
@@ -2772,6 +2744,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2772 old_read_domains, 2744 old_read_domains,
2773 old_write_domain); 2745 old_write_domain);
2774 2746
2747 /* And bump the LRU for this access */
2748 if (i915_gem_object_is_inactive(obj))
2749 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2750
2775 return 0; 2751 return 0;
2776} 2752}
2777 2753
@@ -3020,28 +2996,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3020 if (seqno == 0) 2996 if (seqno == 0)
3021 return 0; 2997 return 0;
3022 2998
3023 ret = 0; 2999 ret = __wait_seqno(ring, seqno, true);
3024 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3025 /* And wait for the seqno passing without holding any locks and
3026 * causing extra latency for others. This is safe as the irq
3027 * generation is designed to be run atomically and so is
3028 * lockless.
3029 */
3030 if (ring->irq_get(ring)) {
3031 ret = wait_event_interruptible(ring->irq_queue,
3032 i915_seqno_passed(ring->get_seqno(ring), seqno)
3033 || atomic_read(&dev_priv->mm.wedged));
3034 ring->irq_put(ring);
3035
3036 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3037 ret = -EIO;
3038 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3039 seqno) ||
3040 atomic_read(&dev_priv->mm.wedged), 3000)) {
3041 ret = -EBUSY;
3042 }
3043 }
3044
3045 if (ret == 0) 3000 if (ret == 0)
3046 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3001 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3047 3002
@@ -3053,12 +3008,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3053 uint32_t alignment, 3008 uint32_t alignment,
3054 bool map_and_fenceable) 3009 bool map_and_fenceable)
3055{ 3010{
3056 struct drm_device *dev = obj->base.dev;
3057 struct drm_i915_private *dev_priv = dev->dev_private;
3058 int ret; 3011 int ret;
3059 3012
3060 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 3013 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3061 WARN_ON(i915_verify_lists(dev));
3062 3014
3063 if (obj->gtt_space != NULL) { 3015 if (obj->gtt_space != NULL) {
3064 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3016 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3086,34 +3038,20 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3086 if (!obj->has_global_gtt_mapping && map_and_fenceable) 3038 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3087 i915_gem_gtt_bind_object(obj, obj->cache_level); 3039 i915_gem_gtt_bind_object(obj, obj->cache_level);
3088 3040
3089 if (obj->pin_count++ == 0) { 3041 obj->pin_count++;
3090 if (!obj->active)
3091 list_move_tail(&obj->mm_list,
3092 &dev_priv->mm.pinned_list);
3093 }
3094 obj->pin_mappable |= map_and_fenceable; 3042 obj->pin_mappable |= map_and_fenceable;
3095 3043
3096 WARN_ON(i915_verify_lists(dev));
3097 return 0; 3044 return 0;
3098} 3045}
3099 3046
3100void 3047void
3101i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3048i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3102{ 3049{
3103 struct drm_device *dev = obj->base.dev;
3104 drm_i915_private_t *dev_priv = dev->dev_private;
3105
3106 WARN_ON(i915_verify_lists(dev));
3107 BUG_ON(obj->pin_count == 0); 3050 BUG_ON(obj->pin_count == 0);
3108 BUG_ON(obj->gtt_space == NULL); 3051 BUG_ON(obj->gtt_space == NULL);
3109 3052
3110 if (--obj->pin_count == 0) { 3053 if (--obj->pin_count == 0)
3111 if (!obj->active)
3112 list_move_tail(&obj->mm_list,
3113 &dev_priv->mm.inactive_list);
3114 obj->pin_mappable = false; 3054 obj->pin_mappable = false;
3115 }
3116 WARN_ON(i915_verify_lists(dev));
3117} 3055}
3118 3056
3119int 3057int
@@ -3237,20 +3175,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3237 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3175 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3238 ret = i915_gem_flush_ring(obj->ring, 3176 ret = i915_gem_flush_ring(obj->ring,
3239 0, obj->base.write_domain); 3177 0, obj->base.write_domain);
3240 } else if (obj->ring->outstanding_lazy_request == 3178 } else {
3241 obj->last_rendering_seqno) { 3179 ret = i915_gem_check_olr(obj->ring,
3242 struct drm_i915_gem_request *request; 3180 obj->last_rendering_seqno);
3243
3244 /* This ring is not being cleared by active usage,
3245 * so emit a request to do so.
3246 */
3247 request = kzalloc(sizeof(*request), GFP_KERNEL);
3248 if (request) {
3249 ret = i915_add_request(obj->ring, NULL, request);
3250 if (ret)
3251 kfree(request);
3252 } else
3253 ret = -ENOMEM;
3254 } 3181 }
3255 3182
3256 /* Update the active list for the hardware's current position. 3183 /* Update the active list for the hardware's current position.
@@ -3386,21 +3313,29 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3386 return 0; 3313 return 0;
3387} 3314}
3388 3315
3389static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) 3316void i915_gem_free_object(struct drm_gem_object *gem_obj)
3390{ 3317{
3318 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3391 struct drm_device *dev = obj->base.dev; 3319 struct drm_device *dev = obj->base.dev;
3392 drm_i915_private_t *dev_priv = dev->dev_private; 3320 drm_i915_private_t *dev_priv = dev->dev_private;
3393 int ret;
3394
3395 ret = i915_gem_object_unbind(obj);
3396 if (ret == -ERESTARTSYS) {
3397 list_move(&obj->mm_list,
3398 &dev_priv->mm.deferred_free_list);
3399 return;
3400 }
3401 3321
3402 trace_i915_gem_object_destroy(obj); 3322 trace_i915_gem_object_destroy(obj);
3403 3323
3324 if (obj->phys_obj)
3325 i915_gem_detach_phys_object(dev, obj);
3326
3327 obj->pin_count = 0;
3328 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3329 bool was_interruptible;
3330
3331 was_interruptible = dev_priv->mm.interruptible;
3332 dev_priv->mm.interruptible = false;
3333
3334 WARN_ON(i915_gem_object_unbind(obj));
3335
3336 dev_priv->mm.interruptible = was_interruptible;
3337 }
3338
3404 if (obj->base.map_list.map) 3339 if (obj->base.map_list.map)
3405 drm_gem_free_mmap_offset(&obj->base); 3340 drm_gem_free_mmap_offset(&obj->base);
3406 3341
@@ -3411,20 +3346,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3411 kfree(obj); 3346 kfree(obj);
3412} 3347}
3413 3348
3414void i915_gem_free_object(struct drm_gem_object *gem_obj)
3415{
3416 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3417 struct drm_device *dev = obj->base.dev;
3418
3419 while (obj->pin_count > 0)
3420 i915_gem_object_unpin(obj);
3421
3422 if (obj->phys_obj)
3423 i915_gem_detach_phys_object(dev, obj);
3424
3425 i915_gem_free_object_tail(obj);
3426}
3427
3428int 3349int
3429i915_gem_idle(struct drm_device *dev) 3350i915_gem_idle(struct drm_device *dev)
3430{ 3351{
@@ -3438,20 +3359,16 @@ i915_gem_idle(struct drm_device *dev)
3438 return 0; 3359 return 0;
3439 } 3360 }
3440 3361
3441 ret = i915_gpu_idle(dev, true); 3362 ret = i915_gpu_idle(dev);
3442 if (ret) { 3363 if (ret) {
3443 mutex_unlock(&dev->struct_mutex); 3364 mutex_unlock(&dev->struct_mutex);
3444 return ret; 3365 return ret;
3445 } 3366 }
3367 i915_gem_retire_requests(dev);
3446 3368
3447 /* Under UMS, be paranoid and evict. */ 3369 /* Under UMS, be paranoid and evict. */
3448 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 3370 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3449 ret = i915_gem_evict_inactive(dev, false); 3371 i915_gem_evict_everything(dev, false);
3450 if (ret) {
3451 mutex_unlock(&dev->struct_mutex);
3452 return ret;
3453 }
3454 }
3455 3372
3456 i915_gem_reset_fences(dev); 3373 i915_gem_reset_fences(dev);
3457 3374
@@ -3489,9 +3406,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
3489 3406
3490 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 3407 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3491 if (IS_GEN6(dev)) 3408 if (IS_GEN6(dev))
3492 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB)); 3409 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3493 else 3410 else
3494 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3411 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3495} 3412}
3496 3413
3497void i915_gem_init_ppgtt(struct drm_device *dev) 3414void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3540,7 +3457,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
3540 ecochk = I915_READ(GAM_ECOCHK); 3457 ecochk = I915_READ(GAM_ECOCHK);
3541 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | 3458 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3542 ECOCHK_PPGTT_CACHE64B); 3459 ECOCHK_PPGTT_CACHE64B);
3543 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); 3460 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3544 } else if (INTEL_INFO(dev)->gen >= 7) { 3461 } else if (INTEL_INFO(dev)->gen >= 7) {
3545 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); 3462 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3546 /* GFX_MODE is per-ring on gen7+ */ 3463 /* GFX_MODE is per-ring on gen7+ */
@@ -3551,7 +3468,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
3551 3468
3552 if (INTEL_INFO(dev)->gen >= 7) 3469 if (INTEL_INFO(dev)->gen >= 7)
3553 I915_WRITE(RING_MODE_GEN7(ring), 3470 I915_WRITE(RING_MODE_GEN7(ring),
3554 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); 3471 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3555 3472
3556 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 3473 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3557 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); 3474 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3595,6 +3512,71 @@ cleanup_render_ring:
3595 return ret; 3512 return ret;
3596} 3513}
3597 3514
3515static bool
3516intel_enable_ppgtt(struct drm_device *dev)
3517{
3518 if (i915_enable_ppgtt >= 0)
3519 return i915_enable_ppgtt;
3520
3521#ifdef CONFIG_INTEL_IOMMU
3522 /* Disable ppgtt on SNB if VT-d is on. */
3523 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3524 return false;
3525#endif
3526
3527 return true;
3528}
3529
3530int i915_gem_init(struct drm_device *dev)
3531{
3532 struct drm_i915_private *dev_priv = dev->dev_private;
3533 unsigned long gtt_size, mappable_size;
3534 int ret;
3535
3536 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3537 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3538
3539 mutex_lock(&dev->struct_mutex);
3540 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3541 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3542 * aperture accordingly when using aliasing ppgtt. */
3543 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3544
3545 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3546
3547 ret = i915_gem_init_aliasing_ppgtt(dev);
3548 if (ret) {
3549 mutex_unlock(&dev->struct_mutex);
3550 return ret;
3551 }
3552 } else {
3553 /* Let GEM Manage all of the aperture.
3554 *
3555 * However, leave one page at the end still bound to the scratch
3556 * page. There are a number of places where the hardware
3557 * apparently prefetches past the end of the object, and we've
3558 * seen multiple hangs with the GPU head pointer stuck in a
3559 * batchbuffer bound at the last page of the aperture. One page
3560 * should be enough to keep any prefetching inside of the
3561 * aperture.
3562 */
3563 i915_gem_init_global_gtt(dev, 0, mappable_size,
3564 gtt_size);
3565 }
3566
3567 ret = i915_gem_init_hw(dev);
3568 mutex_unlock(&dev->struct_mutex);
3569 if (ret) {
3570 i915_gem_cleanup_aliasing_ppgtt(dev);
3571 return ret;
3572 }
3573
3574 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3575 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3576 dev_priv->dri1.allow_batchbuffer = 1;
3577 return 0;
3578}
3579
3598void 3580void
3599i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3581i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3600{ 3582{
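
The new i915_gem_init() above shrinks the GEM-managed GTT range by I915_PPGTT_PD_ENTRIES*PAGE_SIZE when aliasing PPGTT is enabled, because the PPGTT page-directory entries are carved out of global GTT PTEs. A quick worked example of that arithmetic follows; the values 512 page-directory entries and 4 KiB pages are assumptions of this sketch (check I915_PPGTT_PD_ENTRIES and PAGE_SIZE in the tree), not something stated in the hunk.

	#include <stdio.h>

	int main(void)
	{
		/* Assumed values, typical for gen6 aliasing PPGTT. */
		unsigned long pd_entries = 512;
		unsigned long page_size  = 4096;
		unsigned long gtt_size   = 2UL << 30;	/* e.g. a 2 GiB global GTT */

		unsigned long reserved = pd_entries * page_size;	/* 2 MiB */
		printf("PPGTT page directory reserves %lu KiB of GTT space\n",
		       reserved >> 10);
		printf("GEM-managed range shrinks from %lu MiB to %lu MiB\n",
		       gtt_size >> 20, (gtt_size - reserved) >> 20);
		return 0;
	}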
@@ -3694,9 +3676,7 @@ i915_gem_load(struct drm_device *dev)
3694 INIT_LIST_HEAD(&dev_priv->mm.active_list); 3676 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3695 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 3677 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3696 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 3678 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3697 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3698 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3679 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3699 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3700 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3680 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3701 for (i = 0; i < I915_NUM_RINGS; i++) 3681 for (i = 0; i < I915_NUM_RINGS; i++)
3702 init_ring_lists(&dev_priv->ring[i]); 3682 init_ring_lists(&dev_priv->ring[i]);
@@ -3708,12 +3688,8 @@ i915_gem_load(struct drm_device *dev)
3708 3688
3709 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3689 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3710 if (IS_GEN3(dev)) { 3690 if (IS_GEN3(dev)) {
3711 u32 tmp = I915_READ(MI_ARB_STATE); 3691 I915_WRITE(MI_ARB_STATE,
3712 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { 3692 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3713 /* arb state is a masked write, so set bit + bit in mask */
3714 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3715 I915_WRITE(MI_ARB_STATE, tmp);
3716 }
3717 } 3693 }
3718 3694
3719 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 3695 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
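
Several hunks in this series (MI_ARB_STATE just above, ARB_MODE and GFX_MODE/RING_MODE earlier, INSTPM later in i915_irq.c) replace open-coded "bit | bit << 16" writes with _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). These registers only latch a bit when the matching bit in the upper 16-bit mask half is also written, which is exactly what the removed MI_ARB_STATE code spelled out by hand. A small standalone demonstration of the two forms; the macro names mirror the i915 helpers and the register bit value below is made up for illustration.

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as the i915 helpers: set the mask bit plus the value bit. */
	#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
	#define MASKED_BIT_DISABLE(a) ((a) << 16)

	int main(void)
	{
		uint32_t bit = 1 << 2;	/* hypothetical register bit */

		printf("enable : 0x%08x\n", MASKED_BIT_ENABLE(bit));	/* 0x00040004 */
		printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit));	/* 0x00040000 */
		return 0;
	}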
@@ -4016,7 +3992,7 @@ rescan:
4016 * This has a dramatic impact to reduce the number of 3992 * This has a dramatic impact to reduce the number of
4017 * OOM-killer events whilst running the GPU aggressively. 3993 * OOM-killer events whilst running the GPU aggressively.
4018 */ 3994 */
4019 if (i915_gpu_idle(dev, true) == 0) 3995 if (i915_gpu_idle(dev) == 0)
4020 goto rescan; 3996 goto rescan;
4021 } 3997 }
4022 mutex_unlock(&dev->struct_mutex); 3998 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d6..a4f6aaabca99 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
114 } 114 }
115 } 115 }
116 116
117 list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
118 if (obj->base.dev != dev ||
119 !atomic_read(&obj->base.refcount.refcount)) {
120 DRM_ERROR("freed pinned %p\n", obj);
121 err++;
122 break;
123 } else if (!obj->pin_count || obj->active ||
124 (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
125 DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
126 obj,
127 obj->pin_count, obj->active,
128 obj->base.write_domain);
129 err++;
130 }
131 }
132
133 return warned = err; 117 return warned = err;
134} 118}
135#endif /* WATCH_INACTIVE */ 119#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b2..3bcf0451d07c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
35static bool 35static bool
36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
37{ 37{
38 if (obj->pin_count)
39 return false;
40
38 list_add(&obj->exec_list, unwind); 41 list_add(&obj->exec_list, unwind);
39 return drm_mm_scan_add_block(obj->gtt_space); 42 return drm_mm_scan_add_block(obj->gtt_space);
40} 43}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
90 /* Now merge in the soon-to-be-expired objects... */ 93 /* Now merge in the soon-to-be-expired objects... */
91 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 94 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
92 /* Does the object require an outstanding flush? */ 95 /* Does the object require an outstanding flush? */
93 if (obj->base.write_domain || obj->pin_count) 96 if (obj->base.write_domain)
94 continue; 97 continue;
95 98
96 if (mark_free(obj, &unwind_list)) 99 if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
99 102
100 /* Finally add anything with a pending flush (in order of retirement) */ 103 /* Finally add anything with a pending flush (in order of retirement) */
101 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { 104 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
102 if (obj->pin_count)
103 continue;
104
105 if (mark_free(obj, &unwind_list)) 105 if (mark_free(obj, &unwind_list))
106 goto found; 106 goto found;
107 } 107 }
108 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 108 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
109 if (!obj->base.write_domain || obj->pin_count) 109 if (!obj->base.write_domain)
110 continue; 110 continue;
111 111
112 if (mark_free(obj, &unwind_list)) 112 if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
166i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) 166i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
167{ 167{
168 drm_i915_private_t *dev_priv = dev->dev_private; 168 drm_i915_private_t *dev_priv = dev->dev_private;
169 int ret; 169 struct drm_i915_gem_object *obj, *next;
170 bool lists_empty; 170 bool lists_empty;
171 int ret,i;
171 172
172 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 173 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
173 list_empty(&dev_priv->mm.flushing_list) && 174 list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,31 +178,30 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
177 178
178 trace_i915_gem_evict_everything(dev, purgeable_only); 179 trace_i915_gem_evict_everything(dev, purgeable_only);
179 180
180 /* Flush everything (on to the inactive lists) and evict */ 181 ret = i915_gpu_idle(dev);
181 ret = i915_gpu_idle(dev, true);
182 if (ret) 182 if (ret)
183 return ret; 183 return ret;
184 184
185 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 185 /* The gpu_idle will flush everything in the write domain to the
186 * active list. Then we must move everything off the active list
187 * with retire requests.
188 */
189 for (i = 0; i < I915_NUM_RINGS; i++)
190 if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
191 return -EBUSY;
186 192
187 return i915_gem_evict_inactive(dev, purgeable_only); 193 i915_gem_retire_requests(dev);
188}
189 194
190/** Unbinds all inactive objects. */ 195 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
191int
192i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
193{
194 drm_i915_private_t *dev_priv = dev->dev_private;
195 struct drm_i915_gem_object *obj, *next;
196 196
197 /* Having flushed everything, unbind() should never raise an error */
197 list_for_each_entry_safe(obj, next, 198 list_for_each_entry_safe(obj, next,
198 &dev_priv->mm.inactive_list, mm_list) { 199 &dev_priv->mm.inactive_list, mm_list) {
199 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { 200 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
200 int ret = i915_gem_object_unbind(obj); 201 if (obj->pin_count == 0)
201 if (ret) 202 WARN_ON(i915_gem_object_unbind(obj));
202 return ret;
203 } 203 }
204 } 204 }
205 205
206 return 0; 206 return ret;
207} 207}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c77bfa9ad340..206b9bbe6979 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1116,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1116 return -EINVAL; 1116 return -EINVAL;
1117 } 1117 }
1118 1118
1119 if (INTEL_INFO(dev)->gen >= 5) {
1120 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1121 return -EINVAL;
1122 }
1123
1119 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { 1124 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1120 DRM_DEBUG("execbuf with %u cliprects\n", 1125 DRM_DEBUG("execbuf with %u cliprects\n",
1121 args->num_cliprects); 1126 args->num_cliprects);
1122 return -EINVAL; 1127 return -EINVAL;
1123 } 1128 }
1129
1124 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1130 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1125 GFP_KERNEL); 1131 GFP_KERNEL);
1126 if (cliprects == NULL) { 1132 if (cliprects == NULL) {
@@ -1225,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1225 * so every billion or so execbuffers, we need to stall 1231 * so every billion or so execbuffers, we need to stall
1226 * the GPU in order to reset the counters. 1232 * the GPU in order to reset the counters.
1227 */ 1233 */
1228 ret = i915_gpu_idle(dev, true); 1234 ret = i915_gpu_idle(dev);
1229 if (ret) 1235 if (ret)
1230 goto err; 1236 goto err;
1237 i915_gem_retire_requests(dev);
1231 1238
1232 BUG_ON(ring->sync_seqno[i]); 1239 BUG_ON(ring->sync_seqno[i]);
1233 } 1240 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 25c8bf9d1d4e..29d573c27b35 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -317,7 +317,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
317 317
318 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { 318 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
319 dev_priv->mm.interruptible = false; 319 dev_priv->mm.interruptible = false;
320 if (i915_gpu_idle(dev_priv->dev, false)) { 320 if (i915_gpu_idle(dev_priv->dev)) {
321 DRM_ERROR("Couldn't idle GPU\n"); 321 DRM_ERROR("Couldn't idle GPU\n");
322 /* Wait a bit, in hopes it avoids the hang */ 322 /* Wait a bit, in hopes it avoids the hang */
323 udelay(10); 323 udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 000000000000..ada2e90a2a60
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34/*
35 * The BIOS typically reserves some of the system's memory for the exclusive
36 * use of the integrated graphics. This memory is no longer available for
37 * use by the OS and so the user finds that his system has less memory
38 * available than he put in. We refer to this memory as stolen.
39 *
40 * The BIOS will allocate its framebuffer from the stolen memory. Our
41 * goal is try to reuse that object for our own fbcon which must always
42 * be available for panics. Anything else we can reuse the stolen memory
43 * for is a boon.
44 */
45
46#define PTE_ADDRESS_MASK 0xfffff000
47#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
48#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
49#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
50#define PTE_MAPPING_TYPE_CACHED (3 << 1)
51#define PTE_MAPPING_TYPE_MASK (3 << 1)
52#define PTE_VALID (1 << 0)
53
54/**
55 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
56 * a physical one
57 * @dev: drm device
58 * @offset: address to translate
59 *
60 * Some chip functions require allocations from stolen space and need the
61 * physical address of the memory in question.
62 */
63static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
64{
65 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct pci_dev *pdev = dev_priv->bridge_dev;
67 u32 base;
68
69#if 0
70 /* On the machines I have tested the Graphics Base of Stolen Memory
71 * is unreliable, so compute the base by subtracting the stolen memory
72 * from the Top of Low Usable DRAM which is where the BIOS places
73 * the graphics stolen memory.
74 */
75 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
76 /* top 32bits are reserved = 0 */
77 pci_read_config_dword(pdev, 0xA4, &base);
78 } else {
79 /* XXX presume 8xx is the same as i915 */
80 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
81 }
82#else
83 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
84 u16 val;
85 pci_read_config_word(pdev, 0xb0, &val);
86 base = val >> 4 << 20;
87 } else {
88 u8 val;
89 pci_read_config_byte(pdev, 0x9c, &val);
90 base = val >> 3 << 27;
91 }
92 base -= dev_priv->mm.gtt->stolen_size;
93#endif
94
95 return base + offset;
96}
97
98static void i915_warn_stolen(struct drm_device *dev)
99{
100 DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
101 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
102}
103
104static void i915_setup_compression(struct drm_device *dev, int size)
105{
106 struct drm_i915_private *dev_priv = dev->dev_private;
107 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
108 unsigned long cfb_base;
109 unsigned long ll_base = 0;
110
111 /* Just in case the BIOS is doing something questionable. */
112 intel_disable_fbc(dev);
113
114 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
115 if (compressed_fb)
116 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
117 if (!compressed_fb)
118 goto err;
119
120 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
121 if (!cfb_base)
122 goto err_fb;
123
124 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
125 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
126 4096, 4096, 0);
127 if (compressed_llb)
128 compressed_llb = drm_mm_get_block(compressed_llb,
129 4096, 4096);
130 if (!compressed_llb)
131 goto err_fb;
132
133 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
134 if (!ll_base)
135 goto err_llb;
136 }
137
138 dev_priv->cfb_size = size;
139
140 dev_priv->compressed_fb = compressed_fb;
141 if (HAS_PCH_SPLIT(dev))
142 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
143 else if (IS_GM45(dev)) {
144 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
145 } else {
146 I915_WRITE(FBC_CFB_BASE, cfb_base);
147 I915_WRITE(FBC_LL_BASE, ll_base);
148 dev_priv->compressed_llb = compressed_llb;
149 }
150
151 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
152 cfb_base, ll_base, size >> 20);
153 return;
154
155err_llb:
156 drm_mm_put_block(compressed_llb);
157err_fb:
158 drm_mm_put_block(compressed_fb);
159err:
160 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
161 i915_warn_stolen(dev);
162}
163
164static void i915_cleanup_compression(struct drm_device *dev)
165{
166 struct drm_i915_private *dev_priv = dev->dev_private;
167
168 drm_mm_put_block(dev_priv->compressed_fb);
169 if (dev_priv->compressed_llb)
170 drm_mm_put_block(dev_priv->compressed_llb);
171}
172
173void i915_gem_cleanup_stolen(struct drm_device *dev)
174{
175 if (I915_HAS_FBC(dev) && i915_powersave)
176 i915_cleanup_compression(dev);
177}
178
179int i915_gem_init_stolen(struct drm_device *dev)
180{
181 struct drm_i915_private *dev_priv = dev->dev_private;
182 unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
183
184 /* Basic memrange allocator for stolen space */
185 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
186
187 /* Try to set up FBC with a reasonable compressed buffer size */
188 if (I915_HAS_FBC(dev) && i915_powersave) {
189 int cfb_size;
190
191 /* Leave 1M for line length buffer & misc. */
192
193 /* Try to get a 32M buffer... */
194 if (prealloc_size > (36*1024*1024))
195 cfb_size = 32*1024*1024;
196 else /* fall back to 7/8 of the stolen space */
197 cfb_size = prealloc_size * 7 / 8;
198 i915_setup_compression(dev, cfb_size);
199 }
200
201 return 0;
202}
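
The new i915_gem_stolen.c above derives the physical base of stolen memory from a bridge config register and then subtracts the stolen size, and it sizes the FBC compressed buffer as either 32 MiB or 7/8 of stolen space. Both calculations are easy to sanity-check in isolation; the config-register value and stolen size below are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		/* i915_stolen_to_phys(), G33/gen4+ path:
		 * base = (val >> 4) << 20, then subtract the stolen size. */
		unsigned short val = 0x0800;		/* hypothetical config word at 0xb0 */
		unsigned long stolen_size = 32UL << 20;	/* e.g. 32 MiB stolen */
		unsigned long base = ((unsigned long)val >> 4) << 20;	/* 128 MiB */
		base -= stolen_size;			/* stolen starts at 96 MiB */
		printf("stolen base 0x%08lx, offset 0x1000 -> phys 0x%08lx\n",
		       base, base + 0x1000);

		/* i915_gem_init_stolen(): compressed framebuffer sizing. */
		unsigned long prealloc = stolen_size;
		unsigned long cfb_size = prealloc > 36UL * 1024 * 1024 ?
					 32UL * 1024 * 1024 : prealloc * 7 / 8;
		printf("cfb size: %lu MiB\n", cfb_size >> 20);	/* 28 MiB here */
		return 0;
	}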
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a9306665987..b964df51cec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
354 /* We need to rebind the object if its current allocation 354 /* We need to rebind the object if its current allocation
355 * no longer meets the alignment restrictions for its new 355 * no longer meets the alignment restrictions for its new
356 * tiling mode. Otherwise we can just leave it alone, but 356 * tiling mode. Otherwise we can just leave it alone, but
357 * need to ensure that any fence register is cleared. 357 * need to ensure that any fence register is updated before
358 * the next fenced (either through the GTT or by the BLT unit
359 * on older GPUs) access.
360 *
361 * After updating the tiling parameters, we then flag whether
362 * we need to update an associated fence register. Note this
363 * has to also include the unfenced register the GPU uses
364 * whilst executing a fenced command for an untiled object.
358 */ 365 */
359 i915_gem_release_mmap(obj);
360 366
361 obj->map_and_fenceable = 367 obj->map_and_fenceable =
362 obj->gtt_space == NULL || 368 obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
374 } 380 }
375 381
376 if (ret == 0) { 382 if (ret == 0) {
377 obj->tiling_changed = true; 383 obj->fence_dirty =
384 obj->fenced_gpu_access ||
385 obj->fence_reg != I915_FENCE_REG_NONE;
386
378 obj->tiling_mode = args->tiling_mode; 387 obj->tiling_mode = args->tiling_mode;
379 obj->stride = args->stride; 388 obj->stride = args->stride;
389
390 /* Force the fence to be reacquired for GTT access */
391 i915_gem_release_mmap(obj);
380 } 392 }
381 } 393 }
382 /* we have to maintain this existing ABI... */ 394 /* we have to maintain this existing ABI... */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ab023ca73b45..b4999b5288e8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,35 +37,6 @@
37#include "i915_trace.h" 37#include "i915_trace.h"
38#include "intel_drv.h" 38#include "intel_drv.h"
39 39
40#define MAX_NOPID ((u32)~0)
41
42/**
43 * Interrupts that are always left unmasked.
44 *
45 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
46 * we leave them always unmasked in IMR and then control enabling them through
47 * PIPESTAT alone.
48 */
49#define I915_INTERRUPT_ENABLE_FIX \
50 (I915_ASLE_INTERRUPT | \
51 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
52 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
53 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
54 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
55 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
56
57/** Interrupts that we mask and unmask at runtime. */
58#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
59
60#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
61 PIPE_VBLANK_INTERRUPT_STATUS)
62
63#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
64 PIPE_VBLANK_INTERRUPT_ENABLE)
65
66#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
67 DRM_I915_VBLANK_PIPE_B)
68
69/* For display hotplug interrupt */ 40/* For display hotplug interrupt */
70static void 41static void
71ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 42ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -360,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
360 struct intel_ring_buffer *ring) 331 struct intel_ring_buffer *ring)
361{ 332{
362 struct drm_i915_private *dev_priv = dev->dev_private; 333 struct drm_i915_private *dev_priv = dev->dev_private;
363 u32 seqno;
364 334
365 if (ring->obj == NULL) 335 if (ring->obj == NULL)
366 return; 336 return;
367 337
368 seqno = ring->get_seqno(ring); 338 trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
369 trace_i915_gem_request_complete(ring, seqno);
370 339
371 ring->irq_seqno = seqno;
372 wake_up_all(&ring->irq_queue); 340 wake_up_all(&ring->irq_queue);
373 if (i915_enable_hangcheck) { 341 if (i915_enable_hangcheck) {
374 dev_priv->hangcheck_count = 0; 342 dev_priv->hangcheck_count = 0;
@@ -541,17 +509,13 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
541 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { 509 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
542 drm_handle_vblank(dev, 0); 510 drm_handle_vblank(dev, 0);
543 vblank++; 511 vblank++;
544 if (!dev_priv->flip_pending_is_done) { 512 intel_finish_page_flip(dev, 0);
545 intel_finish_page_flip(dev, 0);
546 }
547 } 513 }
548 514
549 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { 515 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
550 drm_handle_vblank(dev, 1); 516 drm_handle_vblank(dev, 1);
551 vblank++; 517 vblank++;
552 if (!dev_priv->flip_pending_is_done) { 518 intel_finish_page_flip(dev, 0);
553 intel_finish_page_flip(dev, 0);
554 }
555 } 519 }
556 520
557 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 521 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
@@ -618,7 +582,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
618 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 582 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
619 int ret = IRQ_NONE; 583 int ret = IRQ_NONE;
620 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 584 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
621 struct drm_i915_master_private *master_priv;
622 585
623 atomic_inc(&dev_priv->irq_received); 586 atomic_inc(&dev_priv->irq_received);
624 587
@@ -637,13 +600,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
637 600
638 ret = IRQ_HANDLED; 601 ret = IRQ_HANDLED;
639 602
640 if (dev->primary->master) {
641 master_priv = dev->primary->master->driver_priv;
642 if (master_priv->sarea_priv)
643 master_priv->sarea_priv->last_dispatch =
644 READ_BREADCRUMB(dev_priv);
645 }
646
647 snb_gt_irq_handler(dev, dev_priv, gt_iir); 603 snb_gt_irq_handler(dev, dev_priv, gt_iir);
648 604
649 if (de_iir & DE_GSE_IVB) 605 if (de_iir & DE_GSE_IVB)
@@ -659,12 +615,20 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
659 intel_finish_page_flip_plane(dev, 1); 615 intel_finish_page_flip_plane(dev, 1);
660 } 616 }
661 617
618 if (de_iir & DE_PLANEC_FLIP_DONE_IVB) {
619 intel_prepare_page_flip(dev, 2);
620 intel_finish_page_flip_plane(dev, 2);
621 }
622
662 if (de_iir & DE_PIPEA_VBLANK_IVB) 623 if (de_iir & DE_PIPEA_VBLANK_IVB)
663 drm_handle_vblank(dev, 0); 624 drm_handle_vblank(dev, 0);
664 625
665 if (de_iir & DE_PIPEB_VBLANK_IVB) 626 if (de_iir & DE_PIPEB_VBLANK_IVB)
666 drm_handle_vblank(dev, 1); 627 drm_handle_vblank(dev, 1);
667 628
629 if (de_iir & DE_PIPEC_VBLANK_IVB)
630 drm_handle_vblank(dev, 2);
631
668 /* check event from PCH */ 632 /* check event from PCH */
669 if (de_iir & DE_PCH_EVENT_IVB) { 633 if (de_iir & DE_PCH_EVENT_IVB) {
670 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 634 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
@@ -705,7 +669,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
705 int ret = IRQ_NONE; 669 int ret = IRQ_NONE;
706 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 670 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
707 u32 hotplug_mask; 671 u32 hotplug_mask;
708 struct drm_i915_master_private *master_priv;
709 672
710 atomic_inc(&dev_priv->irq_received); 673 atomic_inc(&dev_priv->irq_received);
711 674
@@ -730,13 +693,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
730 693
731 ret = IRQ_HANDLED; 694 ret = IRQ_HANDLED;
732 695
733 if (dev->primary->master) {
734 master_priv = dev->primary->master->driver_priv;
735 if (master_priv->sarea_priv)
736 master_priv->sarea_priv->last_dispatch =
737 READ_BREADCRUMB(dev_priv);
738 }
739
740 if (IS_GEN5(dev)) 696 if (IS_GEN5(dev))
741 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 697 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
742 else 698 else
@@ -810,7 +766,7 @@ static void i915_error_work_func(struct work_struct *work)
810 if (atomic_read(&dev_priv->mm.wedged)) { 766 if (atomic_read(&dev_priv->mm.wedged)) {
811 DRM_DEBUG_DRIVER("resetting chip\n"); 767 DRM_DEBUG_DRIVER("resetting chip\n");
812 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 768 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
813 if (!i915_reset(dev, GRDOM_RENDER)) { 769 if (!i915_reset(dev)) {
814 atomic_set(&dev_priv->mm.wedged, 0); 770 atomic_set(&dev_priv->mm.wedged, 0);
815 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 771 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
816 } 772 }
@@ -902,10 +858,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
902 kfree(obj); 858 kfree(obj);
903} 859}
904 860
905static void 861void
906i915_error_state_free(struct drm_device *dev, 862i915_error_state_free(struct kref *error_ref)
907 struct drm_i915_error_state *error)
908{ 863{
864 struct drm_i915_error_state *error = container_of(error_ref,
865 typeof(*error), ref);
909 int i; 866 int i;
910 867
911 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 868 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -918,37 +875,56 @@ i915_error_state_free(struct drm_device *dev,
918 kfree(error->overlay); 875 kfree(error->overlay);
919 kfree(error); 876 kfree(error);
920} 877}
921 878static void capture_bo(struct drm_i915_error_buffer *err,
922static u32 capture_bo_list(struct drm_i915_error_buffer *err, 879 struct drm_i915_gem_object *obj)
923 int count, 880{
924 struct list_head *head) 881 err->size = obj->base.size;
882 err->name = obj->base.name;
883 err->seqno = obj->last_rendering_seqno;
884 err->gtt_offset = obj->gtt_offset;
885 err->read_domains = obj->base.read_domains;
886 err->write_domain = obj->base.write_domain;
887 err->fence_reg = obj->fence_reg;
888 err->pinned = 0;
889 if (obj->pin_count > 0)
890 err->pinned = 1;
891 if (obj->user_pin_count > 0)
892 err->pinned = -1;
893 err->tiling = obj->tiling_mode;
894 err->dirty = obj->dirty;
895 err->purgeable = obj->madv != I915_MADV_WILLNEED;
896 err->ring = obj->ring ? obj->ring->id : -1;
897 err->cache_level = obj->cache_level;
898}
899
900static u32 capture_active_bo(struct drm_i915_error_buffer *err,
901 int count, struct list_head *head)
925{ 902{
926 struct drm_i915_gem_object *obj; 903 struct drm_i915_gem_object *obj;
927 int i = 0; 904 int i = 0;
928 905
929 list_for_each_entry(obj, head, mm_list) { 906 list_for_each_entry(obj, head, mm_list) {
930 err->size = obj->base.size; 907 capture_bo(err++, obj);
931 err->name = obj->base.name;
932 err->seqno = obj->last_rendering_seqno;
933 err->gtt_offset = obj->gtt_offset;
934 err->read_domains = obj->base.read_domains;
935 err->write_domain = obj->base.write_domain;
936 err->fence_reg = obj->fence_reg;
937 err->pinned = 0;
938 if (obj->pin_count > 0)
939 err->pinned = 1;
940 if (obj->user_pin_count > 0)
941 err->pinned = -1;
942 err->tiling = obj->tiling_mode;
943 err->dirty = obj->dirty;
944 err->purgeable = obj->madv != I915_MADV_WILLNEED;
945 err->ring = obj->ring ? obj->ring->id : -1;
946 err->cache_level = obj->cache_level;
947
948 if (++i == count) 908 if (++i == count)
949 break; 909 break;
910 }
911
912 return i;
913}
914
915static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
916 int count, struct list_head *head)
917{
918 struct drm_i915_gem_object *obj;
919 int i = 0;
920
921 list_for_each_entry(obj, head, gtt_list) {
922 if (obj->pin_count == 0)
923 continue;
950 924
951 err++; 925 capture_bo(err++, obj);
926 if (++i == count)
927 break;
952 } 928 }
953 929
954 return i; 930 return i;
@@ -1045,6 +1021,7 @@ static void i915_record_ring_state(struct drm_device *dev,
1045 error->instdone[ring->id] = I915_READ(INSTDONE); 1021 error->instdone[ring->id] = I915_READ(INSTDONE);
1046 } 1022 }
1047 1023
1024 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1048 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1025 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1049 error->seqno[ring->id] = ring->get_seqno(ring); 1026 error->seqno[ring->id] = ring->get_seqno(ring);
1050 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1027 error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -1134,8 +1111,19 @@ static void i915_capture_error_state(struct drm_device *dev)
1134 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1111 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1135 dev->primary->index); 1112 dev->primary->index);
1136 1113
1114 kref_init(&error->ref);
1137 error->eir = I915_READ(EIR); 1115 error->eir = I915_READ(EIR);
1138 error->pgtbl_er = I915_READ(PGTBL_ER); 1116 error->pgtbl_er = I915_READ(PGTBL_ER);
1117
1118 if (HAS_PCH_SPLIT(dev))
1119 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1120 else if (IS_VALLEYVIEW(dev))
1121 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1122 else if (IS_GEN2(dev))
1123 error->ier = I915_READ16(IER);
1124 else
1125 error->ier = I915_READ(IER);
1126
1139 for_each_pipe(pipe) 1127 for_each_pipe(pipe)
1140 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1128 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1141 1129
@@ -1155,8 +1143,9 @@ static void i915_capture_error_state(struct drm_device *dev)
1155 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1143 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1156 i++; 1144 i++;
1157 error->active_bo_count = i; 1145 error->active_bo_count = i;
1158 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 1146 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
1159 i++; 1147 if (obj->pin_count)
1148 i++;
1160 error->pinned_bo_count = i - error->active_bo_count; 1149 error->pinned_bo_count = i - error->active_bo_count;
1161 1150
1162 error->active_bo = NULL; 1151 error->active_bo = NULL;
@@ -1171,15 +1160,15 @@ static void i915_capture_error_state(struct drm_device *dev)
1171 1160
1172 if (error->active_bo) 1161 if (error->active_bo)
1173 error->active_bo_count = 1162 error->active_bo_count =
1174 capture_bo_list(error->active_bo, 1163 capture_active_bo(error->active_bo,
1175 error->active_bo_count, 1164 error->active_bo_count,
1176 &dev_priv->mm.active_list); 1165 &dev_priv->mm.active_list);
1177 1166
1178 if (error->pinned_bo) 1167 if (error->pinned_bo)
1179 error->pinned_bo_count = 1168 error->pinned_bo_count =
1180 capture_bo_list(error->pinned_bo, 1169 capture_pinned_bo(error->pinned_bo,
1181 error->pinned_bo_count, 1170 error->pinned_bo_count,
1182 &dev_priv->mm.pinned_list); 1171 &dev_priv->mm.gtt_list);
1183 1172
1184 do_gettimeofday(&error->time); 1173 do_gettimeofday(&error->time);
1185 1174
@@ -1194,7 +1183,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1194 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1183 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1195 1184
1196 if (error) 1185 if (error)
1197 i915_error_state_free(dev, error); 1186 i915_error_state_free(&error->ref);
1198} 1187}
1199 1188
1200void i915_destroy_error_state(struct drm_device *dev) 1189void i915_destroy_error_state(struct drm_device *dev)
@@ -1209,7 +1198,7 @@ void i915_destroy_error_state(struct drm_device *dev)
1209 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1198 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1210 1199
1211 if (error) 1200 if (error)
1212 i915_error_state_free(dev, error); 1201 kref_put(&error->ref, i915_error_state_free);
1213} 1202}
1214#else 1203#else
1215#define i915_capture_error_state(x) 1204#define i915_capture_error_state(x)
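
The error-state hunks above move struct drm_i915_error_state to kref-based lifetime management: i915_capture_error_state() now does kref_init() on a fresh record and the last kref_put() invokes i915_error_state_free(). A minimal sketch of the resulting usage pattern for a consumer such as the debugfs dumper follows; take_error_ref() is a hypothetical name for illustration, and the assumption that the capture path still stores its record in dev_priv->first_error (as it did before this patch) belongs to the sketch, not the diff.

	/* Sketch only: pin the refcounted error state before dumping it. */
	static struct drm_i915_error_state *take_error_ref(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		struct drm_i915_error_state *error;
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->error_lock, flags);
		error = dev_priv->first_error;	/* assumed field, see note above */
		if (error)
			kref_get(&error->ref);
		spin_unlock_irqrestore(&dev_priv->error_lock, flags);

		return error;
	}

	/* ...use the snapshot without the lock held, then drop the reference: */
	/*	kref_put(&error->ref, i915_error_state_free);			*/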
@@ -1385,248 +1374,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1385 } 1374 }
1386} 1375}
1387 1376
1388static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1389{
1390 struct drm_device *dev = (struct drm_device *) arg;
1391 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1392 struct drm_i915_master_private *master_priv;
1393 u32 iir, new_iir;
1394 u32 pipe_stats[I915_MAX_PIPES];
1395 u32 vblank_status;
1396 int vblank = 0;
1397 unsigned long irqflags;
1398 int irq_received;
1399 int ret = IRQ_NONE, pipe;
1400 bool blc_event = false;
1401
1402 atomic_inc(&dev_priv->irq_received);
1403
1404 iir = I915_READ(IIR);
1405
1406 if (INTEL_INFO(dev)->gen >= 4)
1407 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
1408 else
1409 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
1410
1411 for (;;) {
1412 irq_received = iir != 0;
1413
1414 /* Can't rely on pipestat interrupt bit in iir as it might
1415 * have been cleared after the pipestat interrupt was received.
1416 * It doesn't set the bit in iir again, but it still produces
1417 * interrupts (for non-MSI).
1418 */
1419 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1420 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1421 i915_handle_error(dev, false);
1422
1423 for_each_pipe(pipe) {
1424 int reg = PIPESTAT(pipe);
1425 pipe_stats[pipe] = I915_READ(reg);
1426
1427 /*
1428 * Clear the PIPE*STAT regs before the IIR
1429 */
1430 if (pipe_stats[pipe] & 0x8000ffff) {
1431 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1432 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1433 pipe_name(pipe));
1434 I915_WRITE(reg, pipe_stats[pipe]);
1435 irq_received = 1;
1436 }
1437 }
1438 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1439
1440 if (!irq_received)
1441 break;
1442
1443 ret = IRQ_HANDLED;
1444
1445 /* Consume port. Then clear IIR or we'll miss events */
1446 if ((I915_HAS_HOTPLUG(dev)) &&
1447 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1448 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1449
1450 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1451 hotplug_status);
1452 if (hotplug_status & dev_priv->hotplug_supported_mask)
1453 queue_work(dev_priv->wq,
1454 &dev_priv->hotplug_work);
1455
1456 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1457 I915_READ(PORT_HOTPLUG_STAT);
1458 }
1459
1460 I915_WRITE(IIR, iir);
1461 new_iir = I915_READ(IIR); /* Flush posted writes */
1462
1463 if (dev->primary->master) {
1464 master_priv = dev->primary->master->driver_priv;
1465 if (master_priv->sarea_priv)
1466 master_priv->sarea_priv->last_dispatch =
1467 READ_BREADCRUMB(dev_priv);
1468 }
1469
1470 if (iir & I915_USER_INTERRUPT)
1471 notify_ring(dev, &dev_priv->ring[RCS]);
1472 if (iir & I915_BSD_USER_INTERRUPT)
1473 notify_ring(dev, &dev_priv->ring[VCS]);
1474
1475 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1476 intel_prepare_page_flip(dev, 0);
1477 if (dev_priv->flip_pending_is_done)
1478 intel_finish_page_flip_plane(dev, 0);
1479 }
1480
1481 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1482 intel_prepare_page_flip(dev, 1);
1483 if (dev_priv->flip_pending_is_done)
1484 intel_finish_page_flip_plane(dev, 1);
1485 }
1486
1487 for_each_pipe(pipe) {
1488 if (pipe_stats[pipe] & vblank_status &&
1489 drm_handle_vblank(dev, pipe)) {
1490 vblank++;
1491 if (!dev_priv->flip_pending_is_done) {
1492 i915_pageflip_stall_check(dev, pipe);
1493 intel_finish_page_flip(dev, pipe);
1494 }
1495 }
1496
1497 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1498 blc_event = true;
1499 }
1500
1501
1502 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1503 intel_opregion_asle_intr(dev);
1504
1505 /* With MSI, interrupts are only generated when iir
1506 * transitions from zero to nonzero. If another bit got
1507 * set while we were handling the existing iir bits, then
1508 * we would never get another interrupt.
1509 *
1510 * This is fine on non-MSI as well, as if we hit this path
1511 * we avoid exiting the interrupt handler only to generate
1512 * another one.
1513 *
1514 * Note that for MSI this could cause a stray interrupt report
1515 * if an interrupt landed in the time between writing IIR and
1516 * the posting read. This should be rare enough to never
1517 * trigger the 99% of 100,000 interrupts test for disabling
1518 * stray interrupts.
1519 */
1520 iir = new_iir;
1521 }
1522
1523 return ret;
1524}
1525
1526static int i915_emit_irq(struct drm_device * dev)
1527{
1528 drm_i915_private_t *dev_priv = dev->dev_private;
1529 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1530
1531 i915_kernel_lost_context(dev);
1532
1533 DRM_DEBUG_DRIVER("\n");
1534
1535 dev_priv->counter++;
1536 if (dev_priv->counter > 0x7FFFFFFFUL)
1537 dev_priv->counter = 1;
1538 if (master_priv->sarea_priv)
1539 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1540
1541 if (BEGIN_LP_RING(4) == 0) {
1542 OUT_RING(MI_STORE_DWORD_INDEX);
1543 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1544 OUT_RING(dev_priv->counter);
1545 OUT_RING(MI_USER_INTERRUPT);
1546 ADVANCE_LP_RING();
1547 }
1548
1549 return dev_priv->counter;
1550}
1551
1552static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1553{
1554 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1555 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1556 int ret = 0;
1557 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1558
1559 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1560 READ_BREADCRUMB(dev_priv));
1561
1562 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
1563 if (master_priv->sarea_priv)
1564 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1565 return 0;
1566 }
1567
1568 if (master_priv->sarea_priv)
1569 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1570
1571 if (ring->irq_get(ring)) {
1572 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1573 READ_BREADCRUMB(dev_priv) >= irq_nr);
1574 ring->irq_put(ring);
1575 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
1576 ret = -EBUSY;
1577
1578 if (ret == -EBUSY) {
1579 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1580 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
1581 }
1582
1583 return ret;
1584}
1585
1586/* Needs the lock as it touches the ring.
1587 */
1588int i915_irq_emit(struct drm_device *dev, void *data,
1589 struct drm_file *file_priv)
1590{
1591 drm_i915_private_t *dev_priv = dev->dev_private;
1592 drm_i915_irq_emit_t *emit = data;
1593 int result;
1594
1595 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1596 DRM_ERROR("called with no initialization\n");
1597 return -EINVAL;
1598 }
1599
1600 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
1601
1602 mutex_lock(&dev->struct_mutex);
1603 result = i915_emit_irq(dev);
1604 mutex_unlock(&dev->struct_mutex);
1605
1606 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1607 DRM_ERROR("copy_to_user\n");
1608 return -EFAULT;
1609 }
1610
1611 return 0;
1612}
1613
1614/* Doesn't need the hardware lock.
1615 */
1616int i915_irq_wait(struct drm_device *dev, void *data,
1617 struct drm_file *file_priv)
1618{
1619 drm_i915_private_t *dev_priv = dev->dev_private;
1620 drm_i915_irq_wait_t *irqwait = data;
1621
1622 if (!dev_priv) {
1623 DRM_ERROR("called with no initialization\n");
1624 return -EINVAL;
1625 }
1626
1627 return i915_wait_irq(dev, irqwait->irq_seq);
1628}
1629
1630/* Called from drm generic code, passed 'crtc' which 1377/* Called from drm generic code, passed 'crtc' which
1631 * we use as a pipe index 1378 * we use as a pipe index
1632 */ 1379 */
@@ -1648,7 +1395,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
1648 1395
1649 /* maintain vblank delivery even in deep C-states */ 1396 /* maintain vblank delivery even in deep C-states */
1650 if (dev_priv->info->gen == 3) 1397 if (dev_priv->info->gen == 3)
1651 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); 1398 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1652 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1399 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1653 1400
1654 return 0; 1401 return 0;
@@ -1679,8 +1426,8 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1679 return -EINVAL; 1426 return -EINVAL;
1680 1427
1681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1428 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1682 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1429 ironlake_enable_display_irq(dev_priv,
1683 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1430 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1431 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1685 1432
1686 return 0; 1433 return 0;
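
ivybridge_enable_vblank() above (and its disable counterpart below) replaces the pipe A/B ternary with DE_PIPEA_VBLANK_IVB << (5 * pipe), which is what lets the pipe C support added elsewhere in this patch reuse the same path: the per-pipe IVB display interrupt bits sit 5 bits apart. A tiny check of that indexing; the assumption that the pipe A bit is bit 0 is this sketch's, not taken from the diff.

	#include <stdio.h>

	int main(void)
	{
		unsigned int de_pipea_vblank_ivb = 1 << 0;	/* assumed bit position */
		int pipe;

		/* prints 0x00000001, 0x00000020, 0x00000400 for pipes A, B, C */
		for (pipe = 0; pipe < 3; pipe++)
			printf("pipe %c vblank bit: 0x%08x\n",
			       'A' + pipe, de_pipea_vblank_ivb << (5 * pipe));
		return 0;
	}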
@@ -1722,8 +1469,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
1722 1469
1723 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1470 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1724 if (dev_priv->info->gen == 3) 1471 if (dev_priv->info->gen == 3)
1725 I915_WRITE(INSTPM, 1472 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1726 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1727 1473
1728 i915_disable_pipestat(dev_priv, pipe, 1474 i915_disable_pipestat(dev_priv, pipe,
1729 PIPE_VBLANK_INTERRUPT_ENABLE | 1475 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1748,8 +1494,8 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1748 unsigned long irqflags; 1494 unsigned long irqflags;
1749 1495
1750 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1496 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1751 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1497 ironlake_disable_display_irq(dev_priv,
1752 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1498 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1753 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1499 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1754} 1500}
1755 1501
@@ -1774,61 +1520,6 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1774 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1520 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1775} 1521}
1776 1522
1777
1778/* Set the vblank monitor pipe
1779 */
1780int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1781 struct drm_file *file_priv)
1782{
1783 drm_i915_private_t *dev_priv = dev->dev_private;
1784
1785 if (!dev_priv) {
1786 DRM_ERROR("called with no initialization\n");
1787 return -EINVAL;
1788 }
1789
1790 return 0;
1791}
1792
1793int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1794 struct drm_file *file_priv)
1795{
1796 drm_i915_private_t *dev_priv = dev->dev_private;
1797 drm_i915_vblank_pipe_t *pipe = data;
1798
1799 if (!dev_priv) {
1800 DRM_ERROR("called with no initialization\n");
1801 return -EINVAL;
1802 }
1803
1804 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1805
1806 return 0;
1807}
1808
1809/**
1810 * Schedule buffer swap at given vertical blank.
1811 */
1812int i915_vblank_swap(struct drm_device *dev, void *data,
1813 struct drm_file *file_priv)
1814{
1815 /* The delayed swap mechanism was fundamentally racy, and has been
1816 * removed. The model was that the client requested a delayed flip/swap
1817 * from the kernel, then waited for vblank before continuing to perform
1818 * rendering. The problem was that the kernel might wake the client
1819 * up before it dispatched the vblank swap (since the lock has to be
1820 * held while touching the ringbuffer), in which case the client would
1821 * clear and start the next frame before the swap occurred, and
1822 * flicker would occur in addition to likely missing the vblank.
1823 *
1824 * In the absence of this ioctl, userland falls back to a correct path
1825 * of waiting for a vblank, then dispatching the swap on its own.
1826 * Context switching to userland and back is plenty fast enough for
1827 * meeting the requirements of vblank swapping.
1828 */
1829 return -EINVAL;
1830}
1831
1832static u32 1523static u32
1833ring_last_seqno(struct intel_ring_buffer *ring) 1524ring_last_seqno(struct intel_ring_buffer *ring)
1834{ 1525{
@@ -1838,14 +1529,17 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1838 1529
1839static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1530static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1840{ 1531{
1532 /* We don't check whether the ring even exists before calling this
1533 * function. Hence check whether it's initialized. */
1534 if (ring->obj == NULL)
1535 return true;
1536
1841 if (list_empty(&ring->request_list) || 1537 if (list_empty(&ring->request_list) ||
1842 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1538 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1843 /* Issue a wake-up to catch stuck h/w. */ 1539 /* Issue a wake-up to catch stuck h/w. */
1844 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { 1540 if (waitqueue_active(&ring->irq_queue)) {
1845 DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", 1541 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1846 ring->name, 1542 ring->name);
1847 ring->waiting_seqno,
1848 ring->get_seqno(ring));
1849 wake_up_all(&ring->irq_queue); 1543 wake_up_all(&ring->irq_queue);
1850 *err = true; 1544 *err = true;
1851 } 1545 }
@@ -1973,10 +1667,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1973 1667
1974 atomic_set(&dev_priv->irq_received, 0); 1668 atomic_set(&dev_priv->irq_received, 0);
1975 1669
1976 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1977 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1978 if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1979 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1980 1670
1981 I915_WRITE(HWSTAM, 0xeffe); 1671 I915_WRITE(HWSTAM, 0xeffe);
1982 1672
@@ -2004,9 +1694,6 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2004 1694
2005 atomic_set(&dev_priv->irq_received, 0); 1695 atomic_set(&dev_priv->irq_received, 0);
2006 1696
2007 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2008 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2009
2010 /* VLV magic */ 1697 /* VLV magic */
2011 I915_WRITE(VLV_IMR, 0); 1698 I915_WRITE(VLV_IMR, 0);
2012 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1699 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2061,13 +1748,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
2061 u32 render_irqs; 1748 u32 render_irqs;
2062 u32 hotplug_mask; 1749 u32 hotplug_mask;
2063 1750
2064 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
2065 if (HAS_BSD(dev))
2066 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
2067 if (HAS_BLT(dev))
2068 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
2069
2070 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2071 dev_priv->irq_mask = ~display_mask; 1751 dev_priv->irq_mask = ~display_mask;
2072 1752
2073 /* should always can generate irq */ 1753 /* should always can generate irq */
@@ -2130,26 +1810,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2130{ 1810{
2131 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1811 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2132 /* enable kind of interrupts always enabled */ 1812 /* enable kind of interrupts always enabled */
2133 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 1813 u32 display_mask =
2134 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | 1814 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2135 DE_PLANEB_FLIP_DONE_IVB; 1815 DE_PLANEC_FLIP_DONE_IVB |
1816 DE_PLANEB_FLIP_DONE_IVB |
1817 DE_PLANEA_FLIP_DONE_IVB;
2136 u32 render_irqs; 1818 u32 render_irqs;
2137 u32 hotplug_mask; 1819 u32 hotplug_mask;
2138 1820
2139 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
2140 if (HAS_BSD(dev))
2141 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
2142 if (HAS_BLT(dev))
2143 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
2144
2145 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2146 dev_priv->irq_mask = ~display_mask; 1821 dev_priv->irq_mask = ~display_mask;
2147 1822
2148 /* should always can generate irq */ 1823 /* should always can generate irq */
2149 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1824 I915_WRITE(DEIIR, I915_READ(DEIIR));
2150 I915_WRITE(DEIMR, dev_priv->irq_mask); 1825 I915_WRITE(DEIMR, dev_priv->irq_mask);
2151 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | 1826 I915_WRITE(DEIER,
2152 DE_PIPEB_VBLANK_IVB); 1827 display_mask |
1828 DE_PIPEC_VBLANK_IVB |
1829 DE_PIPEB_VBLANK_IVB |
1830 DE_PIPEA_VBLANK_IVB);
2153 POSTING_READ(DEIER); 1831 POSTING_READ(DEIER);
2154 1832
2155 dev_priv->gt_irq_mask = ~0; 1833 dev_priv->gt_irq_mask = ~0;
@@ -2192,16 +1870,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2192 1870
2193 dev_priv->irq_mask = ~enable_mask; 1871 dev_priv->irq_mask = ~enable_mask;
2194 1872
2195
2196 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
2197 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
2198 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
2199
2200 dev_priv->pipestat[0] = 0; 1873 dev_priv->pipestat[0] = 0;
2201 dev_priv->pipestat[1] = 0; 1874 dev_priv->pipestat[1] = 0;
2202 1875
2203 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2204
2205 /* Hack for broken MSIs on VLV */ 1876 /* Hack for broken MSIs on VLV */
2206 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 1877 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2207 pci_read_config_word(dev->pdev, 0x98, &msid); 1878 pci_read_config_word(dev->pdev, 0x98, &msid);
@@ -2268,15 +1939,413 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2268 return 0; 1939 return 0;
2269} 1940}
2270 1941
2271static void i915_driver_irq_preinstall(struct drm_device * dev) 1942static void valleyview_irq_uninstall(struct drm_device *dev)
1943{
1944 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1945 int pipe;
1946
1947 if (!dev_priv)
1948 return;
1949
1950 for_each_pipe(pipe)
1951 I915_WRITE(PIPESTAT(pipe), 0xffff);
1952
1953 I915_WRITE(HWSTAM, 0xffffffff);
1954 I915_WRITE(PORT_HOTPLUG_EN, 0);
1955 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1956 for_each_pipe(pipe)
1957 I915_WRITE(PIPESTAT(pipe), 0xffff);
1958 I915_WRITE(VLV_IIR, 0xffffffff);
1959 I915_WRITE(VLV_IMR, 0xffffffff);
1960 I915_WRITE(VLV_IER, 0x0);
1961 POSTING_READ(VLV_IER);
1962}
1963
1964static void ironlake_irq_uninstall(struct drm_device *dev)
1965{
1966 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1967
1968 if (!dev_priv)
1969 return;
1970
1971 I915_WRITE(HWSTAM, 0xffffffff);
1972
1973 I915_WRITE(DEIMR, 0xffffffff);
1974 I915_WRITE(DEIER, 0x0);
1975 I915_WRITE(DEIIR, I915_READ(DEIIR));
1976
1977 I915_WRITE(GTIMR, 0xffffffff);
1978 I915_WRITE(GTIER, 0x0);
1979 I915_WRITE(GTIIR, I915_READ(GTIIR));
1980
1981 I915_WRITE(SDEIMR, 0xffffffff);
1982 I915_WRITE(SDEIER, 0x0);
1983 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1984}
1985
1986static void i8xx_irq_preinstall(struct drm_device * dev)
2272{ 1987{
2273 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1988 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2274 int pipe; 1989 int pipe;
2275 1990
2276 atomic_set(&dev_priv->irq_received, 0); 1991 atomic_set(&dev_priv->irq_received, 0);
2277 1992
2278 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1993 for_each_pipe(pipe)
2279 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1994 I915_WRITE(PIPESTAT(pipe), 0);
1995 I915_WRITE16(IMR, 0xffff);
1996 I915_WRITE16(IER, 0x0);
1997 POSTING_READ16(IER);
1998}
1999
2000static int i8xx_irq_postinstall(struct drm_device *dev)
2001{
2002 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2003
2004 dev_priv->pipestat[0] = 0;
2005 dev_priv->pipestat[1] = 0;
2006
2007 I915_WRITE16(EMR,
2008 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2009
2010 /* Unmask the interrupts that we always want on. */
2011 dev_priv->irq_mask =
2012 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2013 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2014 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2015 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2016 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2017 I915_WRITE16(IMR, dev_priv->irq_mask);
2018
2019 I915_WRITE16(IER,
2020 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2021 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2022 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2023 I915_USER_INTERRUPT);
2024 POSTING_READ16(IER);
2025
2026 return 0;
2027}
2028
2029static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2030{
2031 struct drm_device *dev = (struct drm_device *) arg;
2032 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2033 u16 iir, new_iir;
2034 u32 pipe_stats[2];
2035 unsigned long irqflags;
2036 int irq_received;
2037 int pipe;
2038 u16 flip_mask =
2039 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2040 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2041
2042 atomic_inc(&dev_priv->irq_received);
2043
2044 iir = I915_READ16(IIR);
2045 if (iir == 0)
2046 return IRQ_NONE;
2047
2048 while (iir & ~flip_mask) {
2049 /* Can't rely on pipestat interrupt bit in iir as it might
2050 * have been cleared after the pipestat interrupt was received.
2051 * It doesn't set the bit in iir again, but it still produces
2052 * interrupts (for non-MSI).
2053 */
2054 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2055 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2056 i915_handle_error(dev, false);
2057
2058 for_each_pipe(pipe) {
2059 int reg = PIPESTAT(pipe);
2060 pipe_stats[pipe] = I915_READ(reg);
2061
2062 /*
2063 * Clear the PIPE*STAT regs before the IIR
2064 */
2065 if (pipe_stats[pipe] & 0x8000ffff) {
2066 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2067 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2068 pipe_name(pipe));
2069 I915_WRITE(reg, pipe_stats[pipe]);
2070 irq_received = 1;
2071 }
2072 }
2073 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2074
2075 I915_WRITE16(IIR, iir & ~flip_mask);
2076 new_iir = I915_READ16(IIR); /* Flush posted writes */
2077
2078 i915_update_dri1_breadcrumb(dev);
2079
2080 if (iir & I915_USER_INTERRUPT)
2081 notify_ring(dev, &dev_priv->ring[RCS]);
2082
2083 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2084 drm_handle_vblank(dev, 0)) {
2085 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2086 intel_prepare_page_flip(dev, 0);
2087 intel_finish_page_flip(dev, 0);
2088 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2089 }
2090 }
2091
2092 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2093 drm_handle_vblank(dev, 1)) {
2094 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2095 intel_prepare_page_flip(dev, 1);
2096 intel_finish_page_flip(dev, 1);
2097 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2098 }
2099 }
2100
2101 iir = new_iir;
2102 }
2103
2104 return IRQ_HANDLED;
2105}
2106
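
The i8xx handler above (and the i915/i965 handlers that follow) share the same loop shape: latch IIR, clear and record PIPESTAT under irq_lock, acknowledge the serviced IIR bits (keeping flip-pending bits set until the corresponding flip completes), then re-read IIR and loop while unhandled bits remain. The re-read matters with MSI, where an interrupt only fires on IIR's zero-to-nonzero transition. A stripped-down skeleton of that loop, using the names from the patch:

	u16 iir = I915_READ16(IIR);

	while (iir & ~flip_mask) {
		/* ... latch and clear PIPESTAT, report GPU errors ... */

		I915_WRITE16(IIR, iir & ~flip_mask);	/* ack the handled bits      */
		new_iir = I915_READ16(IIR);		/* flush, pick up new bits   */

		/* ... notify rings, finish page flips, drop flip_mask bits ... */

		iir = new_iir;
	}
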
2107static void i8xx_irq_uninstall(struct drm_device * dev)
2108{
2109 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2110 int pipe;
2111
2112 for_each_pipe(pipe) {
2113 /* Clear enable bits; then clear status bits */
2114 I915_WRITE(PIPESTAT(pipe), 0);
2115 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2116 }
2117 I915_WRITE16(IMR, 0xffff);
2118 I915_WRITE16(IER, 0x0);
2119 I915_WRITE16(IIR, I915_READ16(IIR));
2120}
2121
2122static void i915_irq_preinstall(struct drm_device * dev)
2123{
2124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2125 int pipe;
2126
2127 atomic_set(&dev_priv->irq_received, 0);
2128
2129 if (I915_HAS_HOTPLUG(dev)) {
2130 I915_WRITE(PORT_HOTPLUG_EN, 0);
2131 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2132 }
2133
2134 I915_WRITE16(HWSTAM, 0xeffe);
2135 for_each_pipe(pipe)
2136 I915_WRITE(PIPESTAT(pipe), 0);
2137 I915_WRITE(IMR, 0xffffffff);
2138 I915_WRITE(IER, 0x0);
2139 POSTING_READ(IER);
2140}
2141
2142static int i915_irq_postinstall(struct drm_device *dev)
2143{
2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2145 u32 enable_mask;
2146
2147 dev_priv->pipestat[0] = 0;
2148 dev_priv->pipestat[1] = 0;
2149
2150 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2151
2152 /* Unmask the interrupts that we always want on. */
2153 dev_priv->irq_mask =
2154 ~(I915_ASLE_INTERRUPT |
2155 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2156 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2157 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2158 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2159 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2160
2161 enable_mask =
2162 I915_ASLE_INTERRUPT |
2163 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2164 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2165 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2166 I915_USER_INTERRUPT;
2167
2168 if (I915_HAS_HOTPLUG(dev)) {
2169 /* Enable in IER... */
2170 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2171 /* and unmask in IMR */
2172 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2173 }
2174
2175 I915_WRITE(IMR, dev_priv->irq_mask);
2176 I915_WRITE(IER, enable_mask);
2177 POSTING_READ(IER);
2178
2179 if (I915_HAS_HOTPLUG(dev)) {
2180 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2181
2182 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2183 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2184 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2185 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2186 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2187 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2188 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2189 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2190 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2191 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2192 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2193 hotplug_en |= CRT_HOTPLUG_INT_EN;
2194 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2195 }
2196
2197 /* Ignore TV since it's buggy */
2198
2199 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2200 }
2201
2202 intel_opregion_enable_asle(dev);
2203
2204 return 0;
2205}
2206
2207static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2208{
2209 struct drm_device *dev = (struct drm_device *) arg;
2210 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2211 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2212 unsigned long irqflags;
2213 u32 flip_mask =
2214 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2215 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2216 u32 flip[2] = {
2217 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2218 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2219 };
2220 int pipe, ret = IRQ_NONE;
2221
2222 atomic_inc(&dev_priv->irq_received);
2223
2224 iir = I915_READ(IIR);
2225 do {
2226 bool irq_received = (iir & ~flip_mask) != 0;
2227 bool blc_event = false;
2228
2229 /* Can't rely on pipestat interrupt bit in iir as it might
2230 * have been cleared after the pipestat interrupt was received.
2231 * It doesn't set the bit in iir again, but it still produces
2232 * interrupts (for non-MSI).
2233 */
2234 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2235 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2236 i915_handle_error(dev, false);
2237
2238 for_each_pipe(pipe) {
2239 int reg = PIPESTAT(pipe);
2240 pipe_stats[pipe] = I915_READ(reg);
2241
2242 /* Clear the PIPE*STAT regs before the IIR */
2243 if (pipe_stats[pipe] & 0x8000ffff) {
2244 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2245 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2246 pipe_name(pipe));
2247 I915_WRITE(reg, pipe_stats[pipe]);
2248 irq_received = true;
2249 }
2250 }
2251 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2252
2253 if (!irq_received)
2254 break;
2255
2256 /* Consume port. Then clear IIR or we'll miss events */
2257 if ((I915_HAS_HOTPLUG(dev)) &&
2258 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2259 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2260
2261 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2262 hotplug_status);
2263 if (hotplug_status & dev_priv->hotplug_supported_mask)
2264 queue_work(dev_priv->wq,
2265 &dev_priv->hotplug_work);
2266
2267 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2268 POSTING_READ(PORT_HOTPLUG_STAT);
2269 }
2270
2271 I915_WRITE(IIR, iir & ~flip_mask);
2272 new_iir = I915_READ(IIR); /* Flush posted writes */
2273
2274 if (iir & I915_USER_INTERRUPT)
2275 notify_ring(dev, &dev_priv->ring[RCS]);
2276
2277 for_each_pipe(pipe) {
2278 int plane = pipe;
2279 if (IS_MOBILE(dev))
2280 plane = !plane;
2281 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2282 drm_handle_vblank(dev, pipe)) {
2283 if (iir & flip[plane]) {
2284 intel_prepare_page_flip(dev, plane);
2285 intel_finish_page_flip(dev, pipe);
2286 flip_mask &= ~flip[plane];
2287 }
2288 }
2289
2290 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2291 blc_event = true;
2292 }
2293
2294 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2295 intel_opregion_asle_intr(dev);
2296
2297 /* With MSI, interrupts are only generated when iir
2298 * transitions from zero to nonzero. If another bit got
2299 * set while we were handling the existing iir bits, then
2300 * we would never get another interrupt.
2301 *
2302 * This is fine on non-MSI as well, as if we hit this path
2303 * we avoid exiting the interrupt handler only to generate
2304 * another one.
2305 *
2306 * Note that for MSI this could cause a stray interrupt report
2307 * if an interrupt landed in the time between writing IIR and
2308 * the posting read. This should be rare enough to never
2309 * trigger the 99% of 100,000 interrupts test for disabling
2310 * stray interrupts.
2311 */
2312 ret = IRQ_HANDLED;
2313 iir = new_iir;
2314 } while (iir & ~flip_mask);
2315
2316 i915_update_dri1_breadcrumb(dev);
2317
2318 return ret;
2319}
2320
2321static void i915_irq_uninstall(struct drm_device * dev)
2322{
2323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2324 int pipe;
2325
2326 if (I915_HAS_HOTPLUG(dev)) {
2327 I915_WRITE(PORT_HOTPLUG_EN, 0);
2328 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2329 }
2330
2331 I915_WRITE16(HWSTAM, 0xffff);
2332 for_each_pipe(pipe) {
2333 /* Clear enable bits; then clear status bits */
2334 I915_WRITE(PIPESTAT(pipe), 0);
2335 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2336 }
2337 I915_WRITE(IMR, 0xffffffff);
2338 I915_WRITE(IER, 0x0);
2339
2340 I915_WRITE(IIR, I915_READ(IIR));
2341}
2342
2343static void i965_irq_preinstall(struct drm_device * dev)
2344{
2345 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2346 int pipe;
2347
2348 atomic_set(&dev_priv->irq_received, 0);
2280 2349
2281 if (I915_HAS_HOTPLUG(dev)) { 2350 if (I915_HAS_HOTPLUG(dev)) {
2282 I915_WRITE(PORT_HOTPLUG_EN, 0); 2351 I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2291,20 +2360,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
2291 POSTING_READ(IER); 2360 POSTING_READ(IER);
2292} 2361}
2293 2362
2294/* 2363static int i965_irq_postinstall(struct drm_device *dev)
2295 * Must be called after intel_modeset_init or hotplug interrupts won't be
2296 * enabled correctly.
2297 */
2298static int i915_driver_irq_postinstall(struct drm_device *dev)
2299{ 2364{
2300 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2365 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2301 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 2366 u32 enable_mask;
2302 u32 error_mask; 2367 u32 error_mask;
2303 2368
2304 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2305
2306 /* Unmask the interrupts that we always want on. */ 2369 /* Unmask the interrupts that we always want on. */
2307 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; 2370 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2371 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2372 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2373 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2374 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2375 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2376
2377 enable_mask = ~dev_priv->irq_mask;
2378 enable_mask |= I915_USER_INTERRUPT;
2379
2380 if (IS_G4X(dev))
2381 enable_mask |= I915_BSD_USER_INTERRUPT;
2308 2382
2309 dev_priv->pipestat[0] = 0; 2383 dev_priv->pipestat[0] = 0;
2310 dev_priv->pipestat[1] = 0; 2384 dev_priv->pipestat[1] = 0;
@@ -2371,55 +2445,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
2371 return 0; 2445 return 0;
2372} 2446}
2373 2447
2374static void valleyview_irq_uninstall(struct drm_device *dev) 2448static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2375{ 2449{
2450 struct drm_device *dev = (struct drm_device *) arg;
2376 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2451 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2377 int pipe; 2452 u32 iir, new_iir;
2453 u32 pipe_stats[I915_MAX_PIPES];
2454 unsigned long irqflags;
2455 int irq_received;
2456 int ret = IRQ_NONE, pipe;
2378 2457
2379 if (!dev_priv) 2458 atomic_inc(&dev_priv->irq_received);
2380 return;
2381 2459
2382 dev_priv->vblank_pipe = 0; 2460 iir = I915_READ(IIR);
2383 2461
2384 for_each_pipe(pipe) 2462 for (;;) {
2385 I915_WRITE(PIPESTAT(pipe), 0xffff); 2463 bool blc_event = false;
2386 2464
2387 I915_WRITE(HWSTAM, 0xffffffff); 2465 irq_received = iir != 0;
2388 I915_WRITE(PORT_HOTPLUG_EN, 0);
2389 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2390 for_each_pipe(pipe)
2391 I915_WRITE(PIPESTAT(pipe), 0xffff);
2392 I915_WRITE(VLV_IIR, 0xffffffff);
2393 I915_WRITE(VLV_IMR, 0xffffffff);
2394 I915_WRITE(VLV_IER, 0x0);
2395 POSTING_READ(VLV_IER);
2396}
2397 2466
2398static void ironlake_irq_uninstall(struct drm_device *dev) 2467 /* Can't rely on pipestat interrupt bit in iir as it might
2399{ 2468 * have been cleared after the pipestat interrupt was received.
2400 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2469 * It doesn't set the bit in iir again, but it still produces
2470 * interrupts (for non-MSI).
2471 */
2472 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2473 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2474 i915_handle_error(dev, false);
2401 2475
2402 if (!dev_priv) 2476 for_each_pipe(pipe) {
2403 return; 2477 int reg = PIPESTAT(pipe);
2478 pipe_stats[pipe] = I915_READ(reg);
2479
2480 /*
2481 * Clear the PIPE*STAT regs before the IIR
2482 */
2483 if (pipe_stats[pipe] & 0x8000ffff) {
2484 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2485 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2486 pipe_name(pipe));
2487 I915_WRITE(reg, pipe_stats[pipe]);
2488 irq_received = 1;
2489 }
2490 }
2491 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2404 2492
2405 dev_priv->vblank_pipe = 0; 2493 if (!irq_received)
2494 break;
2406 2495
2407 I915_WRITE(HWSTAM, 0xffffffff); 2496 ret = IRQ_HANDLED;
2408 2497
2409 I915_WRITE(DEIMR, 0xffffffff); 2498 /* Consume port. Then clear IIR or we'll miss events */
2410 I915_WRITE(DEIER, 0x0); 2499 if ((I915_HAS_HOTPLUG(dev)) &&
2411 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2500 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2501 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2412 2502
2413 I915_WRITE(GTIMR, 0xffffffff); 2503 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2414 I915_WRITE(GTIER, 0x0); 2504 hotplug_status);
2415 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2505 if (hotplug_status & dev_priv->hotplug_supported_mask)
2506 queue_work(dev_priv->wq,
2507 &dev_priv->hotplug_work);
2416 2508
2417 I915_WRITE(SDEIMR, 0xffffffff); 2509 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2418 I915_WRITE(SDEIER, 0x0); 2510 I915_READ(PORT_HOTPLUG_STAT);
2419 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2511 }
2512
2513 I915_WRITE(IIR, iir);
2514 new_iir = I915_READ(IIR); /* Flush posted writes */
2515
2516 if (iir & I915_USER_INTERRUPT)
2517 notify_ring(dev, &dev_priv->ring[RCS]);
2518 if (iir & I915_BSD_USER_INTERRUPT)
2519 notify_ring(dev, &dev_priv->ring[VCS]);
2520
2521 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2522 intel_prepare_page_flip(dev, 0);
2523
2524 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2525 intel_prepare_page_flip(dev, 1);
2526
2527 for_each_pipe(pipe) {
2528 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2529 drm_handle_vblank(dev, pipe)) {
2530 i915_pageflip_stall_check(dev, pipe);
2531 intel_finish_page_flip(dev, pipe);
2532 }
2533
2534 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2535 blc_event = true;
2536 }
2537
2538
2539 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2540 intel_opregion_asle_intr(dev);
2541
2542 /* With MSI, interrupts are only generated when iir
2543 * transitions from zero to nonzero. If another bit got
2544 * set while we were handling the existing iir bits, then
2545 * we would never get another interrupt.
2546 *
2547 * This is fine on non-MSI as well, as if we hit this path
2548 * we avoid exiting the interrupt handler only to generate
2549 * another one.
2550 *
2551 * Note that for MSI this could cause a stray interrupt report
2552 * if an interrupt landed in the time between writing IIR and
2553 * the posting read. This should be rare enough to never
2554 * trigger the 99% of 100,000 interrupts test for disabling
2555 * stray interrupts.
2556 */
2557 iir = new_iir;
2558 }
2559
2560 i915_update_dri1_breadcrumb(dev);
2561
2562 return ret;
2420} 2563}
2421 2564
2422static void i915_driver_irq_uninstall(struct drm_device * dev) 2565static void i965_irq_uninstall(struct drm_device * dev)
2423{ 2566{
2424 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2425 int pipe; 2568 int pipe;
@@ -2427,8 +2570,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
2427 if (!dev_priv) 2570 if (!dev_priv)
2428 return; 2571 return;
2429 2572
2430 dev_priv->vblank_pipe = 0;
2431
2432 if (I915_HAS_HOTPLUG(dev)) { 2573 if (I915_HAS_HOTPLUG(dev)) {
2433 I915_WRITE(PORT_HOTPLUG_EN, 0); 2574 I915_WRITE(PORT_HOTPLUG_EN, 0);
2434 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2575 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2448,6 +2589,12 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
2448 2589
2449void intel_irq_init(struct drm_device *dev) 2590void intel_irq_init(struct drm_device *dev)
2450{ 2591{
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593
2594 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2595 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2596 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
2597
2451 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2598 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2452 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2599 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2453 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) || 2600 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
@@ -2485,10 +2632,25 @@ void intel_irq_init(struct drm_device *dev)
2485 dev->driver->enable_vblank = ironlake_enable_vblank; 2632 dev->driver->enable_vblank = ironlake_enable_vblank;
2486 dev->driver->disable_vblank = ironlake_disable_vblank; 2633 dev->driver->disable_vblank = ironlake_disable_vblank;
2487 } else { 2634 } else {
2488 dev->driver->irq_preinstall = i915_driver_irq_preinstall; 2635 if (INTEL_INFO(dev)->gen == 2) {
2489 dev->driver->irq_postinstall = i915_driver_irq_postinstall; 2636 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2490 dev->driver->irq_uninstall = i915_driver_irq_uninstall; 2637 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2491 dev->driver->irq_handler = i915_driver_irq_handler; 2638 dev->driver->irq_handler = i8xx_irq_handler;
2639 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2640 } else if (INTEL_INFO(dev)->gen == 3) {
2641 /* IIR "flip pending" means done if this bit is set */
2642 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
2643
2644 dev->driver->irq_preinstall = i915_irq_preinstall;
2645 dev->driver->irq_postinstall = i915_irq_postinstall;
2646 dev->driver->irq_uninstall = i915_irq_uninstall;
2647 dev->driver->irq_handler = i915_irq_handler;
2648 } else {
2649 dev->driver->irq_preinstall = i965_irq_preinstall;
2650 dev->driver->irq_postinstall = i965_irq_postinstall;
2651 dev->driver->irq_uninstall = i965_irq_uninstall;
2652 dev->driver->irq_handler = i965_irq_handler;
2653 }
2492 dev->driver->enable_vblank = i915_enable_vblank; 2654 dev->driver->enable_vblank = i915_enable_vblank;
2493 dev->driver->disable_vblank = i915_disable_vblank; 2655 dev->driver->disable_vblank = i915_disable_vblank;
2494 } 2656 }
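
For context on how the hooks selected above are consumed: the DRM core's IRQ install path (roughly, drm_irq_install()) calls irq_preinstall to quiesce the hardware, registers irq_handler with request_irq(), and then calls irq_postinstall to unmask and enable the sources; irq_uninstall runs on teardown. A simplified sketch of that sequence, not the core's exact code:

	dev->driver->irq_preinstall(dev);		/* mask everything off     */
	ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
			  IRQF_SHARED, dev->driver->name, dev);
	if (ret == 0)
		ret = dev->driver->irq_postinstall(dev);	/* enable sources  */
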
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5ac9837e49a5..10e71a9f8bd9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,6 +29,9 @@
29 29
30#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 30#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
31 31
32#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
33#define _MASKED_BIT_DISABLE(a) ((a) << 16)
34
32/* 35/*
33 * The Bridge device's PCI config space has information about the 36 * The Bridge device's PCI config space has information about the
34 * fb aperture size and the amount of pre-reserved memory. 37 * fb aperture size and the amount of pre-reserved memory.
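
The two _MASKED_BIT macros added here give a single definition for the "masked" register style used by several GT registers: the high 16 bits of a write select which of the low 16 bits actually change, so individual bits can be set or cleared without a read-modify-write. They replace the per-register GFX_MODE_ENABLE/DISABLE and ARB_MODE_ENABLE/DISABLE variants removed further down. Usage sketch (the disable line is taken from the intel_irq_init() hunk above; the enable line is illustrative only):

	/* high word = write-enable mask, low word = new bit values */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));	/* clear the bit */
	I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_FLIP_DONE));	/* set the bit   */
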
@@ -79,6 +82,7 @@
79#define GRDOM_FULL (0<<2) 82#define GRDOM_FULL (0<<2)
80#define GRDOM_RENDER (1<<2) 83#define GRDOM_RENDER (1<<2)
81#define GRDOM_MEDIA (3<<2) 84#define GRDOM_MEDIA (3<<2)
85#define GRDOM_RESET_ENABLE (1<<0)
82 86
83#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ 87#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
84#define GEN6_MBC_SNPCR_SHIFT 21 88#define GEN6_MBC_SNPCR_SHIFT 21
@@ -425,8 +429,6 @@
425#define ARB_MODE 0x04030 429#define ARB_MODE 0x04030
426#define ARB_MODE_SWIZZLE_SNB (1<<4) 430#define ARB_MODE_SWIZZLE_SNB (1<<4)
427#define ARB_MODE_SWIZZLE_IVB (1<<5) 431#define ARB_MODE_SWIZZLE_IVB (1<<5)
428#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
429#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
430#define RENDER_HWS_PGA_GEN7 (0x04080) 432#define RENDER_HWS_PGA_GEN7 (0x04080)
431#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 433#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
432#define DONE_REG 0x40b0 434#define DONE_REG 0x40b0
@@ -514,9 +516,6 @@
514#define GFX_PSMI_GRANULARITY (1<<10) 516#define GFX_PSMI_GRANULARITY (1<<10)
515#define GFX_PPGTT_ENABLE (1<<9) 517#define GFX_PPGTT_ENABLE (1<<9)
516 518
517#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
518#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
519
520#define SCPD0 0x0209c /* 915+ only */ 519#define SCPD0 0x0209c /* 915+ only */
521#define IER 0x020a0 520#define IER 0x020a0
522#define IIR 0x020a4 521#define IIR 0x020a4
@@ -572,7 +571,6 @@
572#define LM_BURST_LENGTH 0x00000700 571#define LM_BURST_LENGTH 0x00000700
573#define LM_FIFO_WATERMARK 0x0000001F 572#define LM_FIFO_WATERMARK 0x0000001F
574#define MI_ARB_STATE 0x020e4 /* 915+ only */ 573#define MI_ARB_STATE 0x020e4 /* 915+ only */
575#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
576 574
577/* Make render/texture TLB fetches lower priorty than associated data 575/* Make render/texture TLB fetches lower priorty than associated data
578 * fetches. This is not turned on by default 576 * fetches. This is not turned on by default
@@ -637,7 +635,6 @@
637#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 635#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
638 636
639#define CACHE_MODE_0 0x02120 /* 915+ only */ 637#define CACHE_MODE_0 0x02120 /* 915+ only */
640#define CM0_MASK_SHIFT 16
641#define CM0_IZ_OPT_DISABLE (1<<6) 638#define CM0_IZ_OPT_DISABLE (1<<6)
642#define CM0_ZR_OPT_DISABLE (1<<5) 639#define CM0_ZR_OPT_DISABLE (1<<5)
643#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) 640#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -3224,11 +3221,14 @@
3224#define DE_PCH_EVENT_IVB (1<<28) 3221#define DE_PCH_EVENT_IVB (1<<28)
3225#define DE_DP_A_HOTPLUG_IVB (1<<27) 3222#define DE_DP_A_HOTPLUG_IVB (1<<27)
3226#define DE_AUX_CHANNEL_A_IVB (1<<26) 3223#define DE_AUX_CHANNEL_A_IVB (1<<26)
3224#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
3225#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
3226#define DE_PIPEC_VBLANK_IVB (1<<10)
3227#define DE_SPRITEB_FLIP_DONE_IVB (1<<9) 3227#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
3228#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3229#define DE_PLANEB_FLIP_DONE_IVB (1<<8) 3228#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
3230#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3231#define DE_PIPEB_VBLANK_IVB (1<<5) 3229#define DE_PIPEB_VBLANK_IVB (1<<5)
3230#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3231#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3232#define DE_PIPEA_VBLANK_IVB (1<<0) 3232#define DE_PIPEA_VBLANK_IVB (1<<0)
3233 3233
3234#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 3234#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
@@ -3402,15 +3402,15 @@
3402 3402
3403#define _PCH_DPLL_A 0xc6014 3403#define _PCH_DPLL_A 0xc6014
3404#define _PCH_DPLL_B 0xc6018 3404#define _PCH_DPLL_B 0xc6018
3405#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) 3405#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
3406 3406
3407#define _PCH_FPA0 0xc6040 3407#define _PCH_FPA0 0xc6040
3408#define FP_CB_TUNE (0x3<<22) 3408#define FP_CB_TUNE (0x3<<22)
3409#define _PCH_FPA1 0xc6044 3409#define _PCH_FPA1 0xc6044
3410#define _PCH_FPB0 0xc6048 3410#define _PCH_FPB0 0xc6048
3411#define _PCH_FPB1 0xc604c 3411#define _PCH_FPB1 0xc604c
3412#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0) 3412#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
3413#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1) 3413#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
3414 3414
3415#define PCH_DPLL_TEST 0xc606c 3415#define PCH_DPLL_TEST 0xc606c
3416 3416
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0c3e3bf67c28..73a5c3c12fe0 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
40 return false; 40 return false;
41 41
42 if (HAS_PCH_SPLIT(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 dpll_reg = PCH_DPLL(pipe); 43 dpll_reg = _PCH_DPLL(pipe);
44 else 44 else
45 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 45 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
46 46
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8c239f2d6bcd..6b4139064f9c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -384,28 +384,6 @@ out_unlock:
384 return val; 384 return val;
385} 385}
386 386
387static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
388 u32 val)
389{
390 unsigned long flags;
391
392 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
393 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
394 DRM_ERROR("DPIO idle wait timed out\n");
395 goto out_unlock;
396 }
397
398 I915_WRITE(DPIO_DATA, val);
399 I915_WRITE(DPIO_REG, reg);
400 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
401 DPIO_BYTE);
402 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
403 DRM_ERROR("DPIO write wait timed out\n");
404
405out_unlock:
406 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
407}
408
409static void vlv_init_dpio(struct drm_device *dev) 387static void vlv_init_dpio(struct drm_device *dev)
410{ 388{
411 struct drm_i915_private *dev_priv = dev->dev_private; 389 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -911,26 +889,28 @@ static void assert_pll(struct drm_i915_private *dev_priv,
911 889
912/* For ILK+ */ 890/* For ILK+ */
913static void assert_pch_pll(struct drm_i915_private *dev_priv, 891static void assert_pch_pll(struct drm_i915_private *dev_priv,
914 enum pipe pipe, bool state) 892 struct intel_crtc *intel_crtc, bool state)
915{ 893{
916 int reg; 894 int reg;
917 u32 val; 895 u32 val;
918 bool cur_state; 896 bool cur_state;
919 897
898 if (!intel_crtc->pch_pll) {
899 WARN(1, "asserting PCH PLL enabled with no PLL\n");
900 return;
901 }
902
920 if (HAS_PCH_CPT(dev_priv->dev)) { 903 if (HAS_PCH_CPT(dev_priv->dev)) {
921 u32 pch_dpll; 904 u32 pch_dpll;
922 905
923 pch_dpll = I915_READ(PCH_DPLL_SEL); 906 pch_dpll = I915_READ(PCH_DPLL_SEL);
924 907
925 /* Make sure the selected PLL is enabled to the transcoder */ 908 /* Make sure the selected PLL is enabled to the transcoder */
926 WARN(!((pch_dpll >> (4 * pipe)) & 8), 909 WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
927 "transcoder %d PLL not enabled\n", pipe); 910 "transcoder %d PLL not enabled\n", intel_crtc->pipe);
928
929 /* Convert the transcoder pipe number to a pll pipe number */
930 pipe = (pch_dpll >> (4 * pipe)) & 1;
931 } 911 }
932 912
933 reg = PCH_DPLL(pipe); 913 reg = intel_crtc->pch_pll->pll_reg;
934 val = I915_READ(reg); 914 val = I915_READ(reg);
935 cur_state = !!(val & DPLL_VCO_ENABLE); 915 cur_state = !!(val & DPLL_VCO_ENABLE);
936 WARN(cur_state != state, 916 WARN(cur_state != state,
@@ -1306,60 +1286,79 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1306 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1286 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1307 * drives the transcoder clock. 1287 * drives the transcoder clock.
1308 */ 1288 */
1309static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, 1289static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1310 enum pipe pipe)
1311{ 1290{
1291 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1292 struct intel_pch_pll *pll = intel_crtc->pch_pll;
1312 int reg; 1293 int reg;
1313 u32 val; 1294 u32 val;
1314 1295
1315 if (pipe > 1)
1316 return;
1317
1318 /* PCH only available on ILK+ */ 1296 /* PCH only available on ILK+ */
1319 BUG_ON(dev_priv->info->gen < 5); 1297 BUG_ON(dev_priv->info->gen < 5);
1298 BUG_ON(pll == NULL);
1299 BUG_ON(pll->refcount == 0);
1300
1301 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1302 pll->pll_reg, pll->active, pll->on,
1303 intel_crtc->base.base.id);
1320 1304
1321 /* PCH refclock must be enabled first */ 1305 /* PCH refclock must be enabled first */
1322 assert_pch_refclk_enabled(dev_priv); 1306 assert_pch_refclk_enabled(dev_priv);
1323 1307
1324 reg = PCH_DPLL(pipe); 1308 if (pll->active++ && pll->on) {
1309 assert_pch_pll_enabled(dev_priv, intel_crtc);
1310 return;
1311 }
1312
1313 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1314
1315 reg = pll->pll_reg;
1325 val = I915_READ(reg); 1316 val = I915_READ(reg);
1326 val |= DPLL_VCO_ENABLE; 1317 val |= DPLL_VCO_ENABLE;
1327 I915_WRITE(reg, val); 1318 I915_WRITE(reg, val);
1328 POSTING_READ(reg); 1319 POSTING_READ(reg);
1329 udelay(200); 1320 udelay(200);
1321
1322 pll->on = true;
1330} 1323}
1331 1324
1332static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, 1325static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1333 enum pipe pipe)
1334{ 1326{
1327 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1328 struct intel_pch_pll *pll = intel_crtc->pch_pll;
1335 int reg; 1329 int reg;
1336 u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL, 1330 u32 val;
1337 pll_sel = TRANSC_DPLL_ENABLE;
1338
1339 if (pipe > 1)
1340 return;
1341 1331
1342 /* PCH only available on ILK+ */ 1332 /* PCH only available on ILK+ */
1343 BUG_ON(dev_priv->info->gen < 5); 1333 BUG_ON(dev_priv->info->gen < 5);
1334 if (pll == NULL)
1335 return;
1344 1336
1345 /* Make sure transcoder isn't still depending on us */ 1337 BUG_ON(pll->refcount == 0);
1346 assert_transcoder_disabled(dev_priv, pipe);
1347 1338
1348 if (pipe == 0) 1339 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1349 pll_sel |= TRANSC_DPLLA_SEL; 1340 pll->pll_reg, pll->active, pll->on,
1350 else if (pipe == 1) 1341 intel_crtc->base.base.id);
1351 pll_sel |= TRANSC_DPLLB_SEL;
1352 1342
1353 1343 BUG_ON(pll->active == 0);
1354 if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel) 1344 if (--pll->active) {
1345 assert_pch_pll_enabled(dev_priv, intel_crtc);
1355 return; 1346 return;
1347 }
1348
1349 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1350
1351 /* Make sure transcoder isn't still depending on us */
1352 assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1356 1353
1357 reg = PCH_DPLL(pipe); 1354 reg = pll->pll_reg;
1358 val = I915_READ(reg); 1355 val = I915_READ(reg);
1359 val &= ~DPLL_VCO_ENABLE; 1356 val &= ~DPLL_VCO_ENABLE;
1360 I915_WRITE(reg, val); 1357 I915_WRITE(reg, val);
1361 POSTING_READ(reg); 1358 POSTING_READ(reg);
1362 udelay(200); 1359 udelay(200);
1360
1361 pll->on = false;
1363} 1362}
1364 1363
1365static void intel_enable_transcoder(struct drm_i915_private *dev_priv, 1364static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1373,7 +1372,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1373 BUG_ON(dev_priv->info->gen < 5); 1372 BUG_ON(dev_priv->info->gen < 5);
1374 1373
1375 /* Make sure PCH DPLL is enabled */ 1374 /* Make sure PCH DPLL is enabled */
1376 assert_pch_pll_enabled(dev_priv, pipe); 1375 assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
1377 1376
1378 /* FDI must be feeding us bits for PCH ports */ 1377 /* FDI must be feeding us bits for PCH ports */
1379 assert_fdi_tx_enabled(dev_priv, pipe); 1378 assert_fdi_tx_enabled(dev_priv, pipe);
@@ -2507,26 +2506,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2507 udelay(100); 2506 udelay(100);
2508} 2507}
2509 2508
2510/*
2511 * When we disable a pipe, we need to clear any pending scanline wait events
2512 * to avoid hanging the ring, which we assume we are waiting on.
2513 */
2514static void intel_clear_scanline_wait(struct drm_device *dev)
2515{
2516 struct drm_i915_private *dev_priv = dev->dev_private;
2517 struct intel_ring_buffer *ring;
2518 u32 tmp;
2519
2520 if (IS_GEN2(dev))
2521 /* Can't break the hang on i8xx */
2522 return;
2523
2524 ring = LP_RING(dev_priv);
2525 tmp = I915_READ_CTL(ring);
2526 if (tmp & RING_WAIT)
2527 I915_WRITE_CTL(ring, tmp);
2528}
2529
2530static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2509static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2531{ 2510{
2532 struct drm_device *dev = crtc->dev; 2511 struct drm_device *dev = crtc->dev;
@@ -2578,29 +2557,36 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2578 struct drm_i915_private *dev_priv = dev->dev_private; 2557 struct drm_i915_private *dev_priv = dev->dev_private;
2579 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2558 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2580 int pipe = intel_crtc->pipe; 2559 int pipe = intel_crtc->pipe;
2581 u32 reg, temp, transc_sel; 2560 u32 reg, temp;
2582 2561
2583 /* For PCH output, training FDI link */ 2562 /* For PCH output, training FDI link */
2584 dev_priv->display.fdi_link_train(crtc); 2563 dev_priv->display.fdi_link_train(crtc);
2585 2564
2586 intel_enable_pch_pll(dev_priv, pipe); 2565 intel_enable_pch_pll(intel_crtc);
2587 2566
2588 if (HAS_PCH_CPT(dev)) { 2567 if (HAS_PCH_CPT(dev)) {
2589 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : 2568 u32 sel;
2590 TRANSC_DPLLB_SEL;
2591 2569
2592 /* Be sure PCH DPLL SEL is set */
2593 temp = I915_READ(PCH_DPLL_SEL); 2570 temp = I915_READ(PCH_DPLL_SEL);
2594 if (pipe == 0) { 2571 switch (pipe) {
2595 temp &= ~(TRANSA_DPLLB_SEL); 2572 default:
2596 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2573 case 0:
2597 } else if (pipe == 1) { 2574 temp |= TRANSA_DPLL_ENABLE;
2598 temp &= ~(TRANSB_DPLLB_SEL); 2575 sel = TRANSA_DPLLB_SEL;
2599 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2576 break;
2600 } else if (pipe == 2) { 2577 case 1:
2601 temp &= ~(TRANSC_DPLLB_SEL); 2578 temp |= TRANSB_DPLL_ENABLE;
2602 temp |= (TRANSC_DPLL_ENABLE | transc_sel); 2579 sel = TRANSB_DPLLB_SEL;
2580 break;
2581 case 2:
2582 temp |= TRANSC_DPLL_ENABLE;
2583 sel = TRANSC_DPLLB_SEL;
2584 break;
2603 } 2585 }
2586 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2587 temp |= sel;
2588 else
2589 temp &= ~sel;
2604 I915_WRITE(PCH_DPLL_SEL, temp); 2590 I915_WRITE(PCH_DPLL_SEL, temp);
2605 } 2591 }
2606 2592
@@ -2658,6 +2644,82 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2658 intel_enable_transcoder(dev_priv, pipe); 2644 intel_enable_transcoder(dev_priv, pipe);
2659} 2645}
2660 2646
2647static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2648{
2649 struct intel_pch_pll *pll = intel_crtc->pch_pll;
2650
2651 if (pll == NULL)
2652 return;
2653
2654 if (pll->refcount == 0) {
2655 WARN(1, "bad PCH PLL refcount\n");
2656 return;
2657 }
2658
2659 --pll->refcount;
2660 intel_crtc->pch_pll = NULL;
2661}
2662
2663static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2664{
2665 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2666 struct intel_pch_pll *pll;
2667 int i;
2668
2669 pll = intel_crtc->pch_pll;
2670 if (pll) {
2671 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2672 intel_crtc->base.base.id, pll->pll_reg);
2673 goto prepare;
2674 }
2675
2676 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2677 pll = &dev_priv->pch_plls[i];
2678
2679 /* Only want to check enabled timings first */
2680 if (pll->refcount == 0)
2681 continue;
2682
2683 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2684 fp == I915_READ(pll->fp0_reg)) {
2685 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2686 intel_crtc->base.base.id,
2687 pll->pll_reg, pll->refcount, pll->active);
2688
2689 goto found;
2690 }
2691 }
2692
2693 /* Ok no matching timings, maybe there's a free one? */
2694 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2695 pll = &dev_priv->pch_plls[i];
2696 if (pll->refcount == 0) {
2697 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2698 intel_crtc->base.base.id, pll->pll_reg);
2699 goto found;
2700 }
2701 }
2702
2703 return NULL;
2704
2705found:
2706 intel_crtc->pch_pll = pll;
2707 pll->refcount++;
2708 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2709prepare: /* separate function? */
2710 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
2711
2712 /* Wait for the clocks to stabilize before rewriting the regs */
2713 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2714 POSTING_READ(pll->pll_reg);
2715 udelay(150);
2716
2717 I915_WRITE(pll->fp0_reg, fp);
2718 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2719 pll->on = false;
2720 return pll;
2721}
2722
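
intel_get_pch_pll()/intel_put_pch_pll() replace the old fixed pipe-to-PLL mapping with a small shared pool: a CRTC first tries to reuse a PLL already programmed with the same dpll/fp values, otherwise it takes a free one; refcount counts owners while active/on track enables. Condensed from the ironlake_crtc_mode_set() hunk further down, the mode-set side looks roughly like this:

	if (!is_cpu_edp) {	/* CPU eDP is the only output with no PCH PLL */
		struct intel_pch_pll *pll = intel_get_pch_pll(intel_crtc, dpll, fp);
		if (pll == NULL)
			return -EINVAL;	/* no matching and no free PLL */
	} else
		intel_put_pch_pll(intel_crtc);

	/* later, at enable/disable time, refcounted per CRTC: */
	intel_enable_pch_pll(intel_crtc);	/* first user switches the VCO on */
	intel_disable_pch_pll(intel_crtc);	/* last user switches it back off */
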
2661void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 2723void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
2662{ 2724{
2663 struct drm_i915_private *dev_priv = dev->dev_private; 2725 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2802,8 +2864,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2802 } 2864 }
2803 2865
2804 /* disable PCH DPLL */ 2866 /* disable PCH DPLL */
2805 if (!intel_crtc->no_pll) 2867 intel_disable_pch_pll(intel_crtc);
2806 intel_disable_pch_pll(dev_priv, pipe);
2807 2868
2808 /* Switch from PCDclk to Rawclk */ 2869 /* Switch from PCDclk to Rawclk */
2809 reg = FDI_RX_CTL(pipe); 2870 reg = FDI_RX_CTL(pipe);
@@ -2831,7 +2892,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2831 2892
2832 mutex_lock(&dev->struct_mutex); 2893 mutex_lock(&dev->struct_mutex);
2833 intel_update_fbc(dev); 2894 intel_update_fbc(dev);
2834 intel_clear_scanline_wait(dev);
2835 mutex_unlock(&dev->struct_mutex); 2895 mutex_unlock(&dev->struct_mutex);
2836} 2896}
2837 2897
@@ -2859,6 +2919,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2859 } 2919 }
2860} 2920}
2861 2921
2922static void ironlake_crtc_off(struct drm_crtc *crtc)
2923{
2924 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2925 intel_put_pch_pll(intel_crtc);
2926}
2927
2862static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 2928static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2863{ 2929{
2864 if (!enable && intel_crtc->overlay) { 2930 if (!enable && intel_crtc->overlay) {
@@ -2930,7 +2996,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2930 intel_crtc->active = false; 2996 intel_crtc->active = false;
2931 intel_update_fbc(dev); 2997 intel_update_fbc(dev);
2932 intel_update_watermarks(dev); 2998 intel_update_watermarks(dev);
2933 intel_clear_scanline_wait(dev);
2934} 2999}
2935 3000
2936static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 3001static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2950,6 +3015,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2950 } 3015 }
2951} 3016}
2952 3017
3018static void i9xx_crtc_off(struct drm_crtc *crtc)
3019{
3020}
3021
2953/** 3022/**
2954 * Sets the power management mode of the pipe and plane. 3023 * Sets the power management mode of the pipe and plane.
2955 */ 3024 */
@@ -2997,8 +3066,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
2997{ 3066{
2998 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3067 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2999 struct drm_device *dev = crtc->dev; 3068 struct drm_device *dev = crtc->dev;
3069 struct drm_i915_private *dev_priv = dev->dev_private;
3000 3070
3001 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 3071 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3072 dev_priv->display.off(crtc);
3073
3002 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 3074 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3003 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 3075 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3004 3076
@@ -3822,7 +3894,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
3822 3894
3823 I915_WRITE(DSPCNTR(plane), dspcntr); 3895 I915_WRITE(DSPCNTR(plane), dspcntr);
3824 POSTING_READ(DSPCNTR(plane)); 3896 POSTING_READ(DSPCNTR(plane));
3825 intel_enable_plane(dev_priv, plane, pipe);
3826 3897
3827 ret = intel_pipe_set_base(crtc, x, y, old_fb); 3898 ret = intel_pipe_set_base(crtc, x, y, old_fb);
3828 3899
@@ -4241,29 +4312,18 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4241 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 4312 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4242 drm_mode_debug_printmodeline(mode); 4313 drm_mode_debug_printmodeline(mode);
4243 4314
4244 /* PCH eDP needs FDI, but CPU eDP does not */ 4315 /* CPU eDP is the only output that doesn't need a PCH PLL of its own */
4245 if (!intel_crtc->no_pll) { 4316 if (!is_cpu_edp) {
4246 if (!is_cpu_edp) { 4317 struct intel_pch_pll *pll;
4247 I915_WRITE(PCH_FP0(pipe), fp);
4248 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4249 4318
4250 POSTING_READ(PCH_DPLL(pipe)); 4319 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4251 udelay(150); 4320 if (pll == NULL) {
4252 } 4321 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4253 } else { 4322 pipe);
4254 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
4255 fp == I915_READ(PCH_FP0(0))) {
4256 intel_crtc->use_pll_a = true;
4257 DRM_DEBUG_KMS("using pipe a dpll\n");
4258 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
4259 fp == I915_READ(PCH_FP0(1))) {
4260 intel_crtc->use_pll_a = false;
4261 DRM_DEBUG_KMS("using pipe b dpll\n");
4262 } else {
4263 DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
4264 return -EINVAL; 4323 return -EINVAL;
4265 } 4324 }
4266 } 4325 } else
4326 intel_put_pch_pll(intel_crtc);
4267 4327
4268 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4328 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4269 * This is an exception to the general rule that mode_set doesn't turn 4329 * This is an exception to the general rule that mode_set doesn't turn
@@ -4320,11 +4380,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4320 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 4380 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4321 } 4381 }
4322 4382
4323 if (!intel_crtc->no_pll && (!edp_encoder || is_pch_edp)) { 4383 if (intel_crtc->pch_pll) {
4324 I915_WRITE(PCH_DPLL(pipe), dpll); 4384 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4325 4385
4326 /* Wait for the clocks to stabilize. */ 4386 /* Wait for the clocks to stabilize. */
4327 POSTING_READ(PCH_DPLL(pipe)); 4387 POSTING_READ(intel_crtc->pch_pll->pll_reg);
4328 udelay(150); 4388 udelay(150);
4329 4389
4330 /* The pixel multiplier can only be updated once the 4390 /* The pixel multiplier can only be updated once the
@@ -4332,20 +4392,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4332 * 4392 *
4333 * So write it again. 4393 * So write it again.
4334 */ 4394 */
4335 I915_WRITE(PCH_DPLL(pipe), dpll); 4395 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4336 } 4396 }
4337 4397
4338 intel_crtc->lowfreq_avail = false; 4398 intel_crtc->lowfreq_avail = false;
4339 if (!intel_crtc->no_pll) { 4399 if (intel_crtc->pch_pll) {
4340 if (is_lvds && has_reduced_clock && i915_powersave) { 4400 if (is_lvds && has_reduced_clock && i915_powersave) {
4341 I915_WRITE(PCH_FP1(pipe), fp2); 4401 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4342 intel_crtc->lowfreq_avail = true; 4402 intel_crtc->lowfreq_avail = true;
4343 if (HAS_PIPE_CXSR(dev)) { 4403 if (HAS_PIPE_CXSR(dev)) {
4344 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 4404 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4345 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 4405 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4346 } 4406 }
4347 } else { 4407 } else {
4348 I915_WRITE(PCH_FP1(pipe), fp); 4408 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
4349 if (HAS_PIPE_CXSR(dev)) { 4409 if (HAS_PIPE_CXSR(dev)) {
4350 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 4410 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4351 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 4411 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4777,9 +4837,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
4777 else 4837 else
4778 i9xx_update_cursor(crtc, base); 4838 i9xx_update_cursor(crtc, base);
4779 } 4839 }
4780
4781 if (visible)
4782 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
4783} 4840}
4784 4841
4785static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4842static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -5303,7 +5360,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5303 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 5360 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5304 5361
5305 drm_mode_set_name(mode); 5362 drm_mode_set_name(mode);
5306 drm_mode_set_crtcinfo(mode, 0);
5307 5363
5308 return mode; 5364 return mode;
5309} 5365}
@@ -5475,9 +5531,10 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5475 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 5531 if (!drm_core_check_feature(dev, DRIVER_MODESET))
5476 return; 5532 return;
5477 5533
5478 if (!dev_priv->busy) 5534 if (!dev_priv->busy) {
5535 intel_sanitize_pm(dev);
5479 dev_priv->busy = true; 5536 dev_priv->busy = true;
5480 else 5537 } else
5481 mod_timer(&dev_priv->idle_timer, jiffies + 5538 mod_timer(&dev_priv->idle_timer, jiffies +
5482 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 5539 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5483 5540
@@ -5653,16 +5710,17 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
5653 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5654 unsigned long offset; 5711 unsigned long offset;
5655 u32 flip_mask; 5712 u32 flip_mask;
5713 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5656 int ret; 5714 int ret;
5657 5715
5658 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5716 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5659 if (ret) 5717 if (ret)
5660 goto err; 5718 goto err;
5661 5719
5662 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5720 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5663 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5721 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
5664 5722
5665 ret = BEGIN_LP_RING(6); 5723 ret = intel_ring_begin(ring, 6);
5666 if (ret) 5724 if (ret)
5667 goto err_unpin; 5725 goto err_unpin;
5668 5726
@@ -5673,14 +5731,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
5673 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5731 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5674 else 5732 else
5675 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5733 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5676 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5734 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
5677 OUT_RING(MI_NOOP); 5735 intel_ring_emit(ring, MI_NOOP);
5678 OUT_RING(MI_DISPLAY_FLIP | 5736 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5679 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5737 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5680 OUT_RING(fb->pitches[0]); 5738 intel_ring_emit(ring, fb->pitches[0]);
5681 OUT_RING(obj->gtt_offset + offset); 5739 intel_ring_emit(ring, obj->gtt_offset + offset);
5682 OUT_RING(0); /* aux display base address, unused */ 5740 intel_ring_emit(ring, 0); /* aux display base address, unused */
5683 ADVANCE_LP_RING(); 5741 intel_ring_advance(ring);
5684 return 0; 5742 return 0;
5685 5743
5686err_unpin: 5744err_unpin:
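
This and the gen3/gen4/gen6 hunks that follow convert the page-flip emitters from the legacy LP_RING macros (BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING) to the explicit intel_ring_buffer API, so the render ring is named once and passed to the pin/fence helper. The per-generation payload differs, but the common shape is:

	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);	/* reserve space for 6 dwords */
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	/* ... pitch, base address, generation-specific dwords ... */
	intel_ring_advance(ring);		/* publish the commands */
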
@@ -5698,16 +5756,17 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
5698 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5756 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5699 unsigned long offset; 5757 unsigned long offset;
5700 u32 flip_mask; 5758 u32 flip_mask;
5759 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5701 int ret; 5760 int ret;
5702 5761
5703 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5762 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5704 if (ret) 5763 if (ret)
5705 goto err; 5764 goto err;
5706 5765
5707 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5766 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5708 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5767 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
5709 5768
5710 ret = BEGIN_LP_RING(6); 5769 ret = intel_ring_begin(ring, 6);
5711 if (ret) 5770 if (ret)
5712 goto err_unpin; 5771 goto err_unpin;
5713 5772
@@ -5715,15 +5774,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
5715 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5774 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5716 else 5775 else
5717 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5776 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5718 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5777 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
5719 OUT_RING(MI_NOOP); 5778 intel_ring_emit(ring, MI_NOOP);
5720 OUT_RING(MI_DISPLAY_FLIP_I915 | 5779 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
5721 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5780 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5722 OUT_RING(fb->pitches[0]); 5781 intel_ring_emit(ring, fb->pitches[0]);
5723 OUT_RING(obj->gtt_offset + offset); 5782 intel_ring_emit(ring, obj->gtt_offset + offset);
5724 OUT_RING(MI_NOOP); 5783 intel_ring_emit(ring, MI_NOOP);
5725 5784
5726 ADVANCE_LP_RING(); 5785 intel_ring_advance(ring);
5727 return 0; 5786 return 0;
5728 5787
5729err_unpin: 5788err_unpin:
@@ -5740,13 +5799,14 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
5740 struct drm_i915_private *dev_priv = dev->dev_private; 5799 struct drm_i915_private *dev_priv = dev->dev_private;
5741 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5742 uint32_t pf, pipesrc; 5801 uint32_t pf, pipesrc;
5802 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5743 int ret; 5803 int ret;
5744 5804
5745 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5805 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5746 if (ret) 5806 if (ret)
5747 goto err; 5807 goto err;
5748 5808
5749 ret = BEGIN_LP_RING(4); 5809 ret = intel_ring_begin(ring, 4);
5750 if (ret) 5810 if (ret)
5751 goto err_unpin; 5811 goto err_unpin;
5752 5812
@@ -5754,10 +5814,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
5754 * Display Registers (which do not change across a page-flip) 5814 * Display Registers (which do not change across a page-flip)
5755 * so we need only reprogram the base address. 5815 * so we need only reprogram the base address.
5756 */ 5816 */
5757 OUT_RING(MI_DISPLAY_FLIP | 5817 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5758 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5818 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5759 OUT_RING(fb->pitches[0]); 5819 intel_ring_emit(ring, fb->pitches[0]);
5760 OUT_RING(obj->gtt_offset | obj->tiling_mode); 5820 intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
5761 5821
5762 /* XXX Enabling the panel-fitter across page-flip is so far 5822 /* XXX Enabling the panel-fitter across page-flip is so far
5763 * untested on non-native modes, so ignore it for now. 5823 * untested on non-native modes, so ignore it for now.
@@ -5765,8 +5825,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
5765 */ 5825 */
5766 pf = 0; 5826 pf = 0;
5767 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5827 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
5768 OUT_RING(pf | pipesrc); 5828 intel_ring_emit(ring, pf | pipesrc);
5769 ADVANCE_LP_RING(); 5829 intel_ring_advance(ring);
5770 return 0; 5830 return 0;
5771 5831
5772err_unpin: 5832err_unpin:
@@ -5782,21 +5842,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
5782{ 5842{
5783 struct drm_i915_private *dev_priv = dev->dev_private; 5843 struct drm_i915_private *dev_priv = dev->dev_private;
5784 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5845 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5785 uint32_t pf, pipesrc; 5846 uint32_t pf, pipesrc;
5786 int ret; 5847 int ret;
5787 5848
5788 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5849 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5789 if (ret) 5850 if (ret)
5790 goto err; 5851 goto err;
5791 5852
5792 ret = BEGIN_LP_RING(4); 5853 ret = intel_ring_begin(ring, 4);
5793 if (ret) 5854 if (ret)
5794 goto err_unpin; 5855 goto err_unpin;
5795 5856
5796 OUT_RING(MI_DISPLAY_FLIP | 5857 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5797 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5858 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5798 OUT_RING(fb->pitches[0] | obj->tiling_mode); 5859 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5799 OUT_RING(obj->gtt_offset); 5860 intel_ring_emit(ring, obj->gtt_offset);
5800 5861
5801 /* Contrary to the suggestions in the documentation, 5862 /* Contrary to the suggestions in the documentation,
5802 * "Enable Panel Fitter" does not seem to be required when page 5863 * "Enable Panel Fitter" does not seem to be required when page
@@ -5806,8 +5867,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
5806 */ 5867 */
5807 pf = 0; 5868 pf = 0;
5808 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5869 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
5809 OUT_RING(pf | pipesrc); 5870 intel_ring_emit(ring, pf | pipesrc);
5810 ADVANCE_LP_RING(); 5871 intel_ring_advance(ring);
5811 return 0; 5872 return 0;
5812 5873
5813err_unpin: 5874err_unpin:
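All four flip emitters above (gen2 through gen6) now fetch the render ring once and drive it through the explicit ring API instead of the legacy LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros; the dwords written are unchanged, and the same ring pointer is handed to intel_pin_and_fence_fb_obj() in place of the LP_RING(dev_priv) lookup. A minimal sketch of the converted pattern, using the gen6 payload from the hunk above with the error paths trimmed:

    struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
    int ret;

    ret = intel_ring_begin(ring, 4);            /* reserve 4 dwords in the ring */
    if (ret)
        goto err_unpin;

    intel_ring_emit(ring, MI_DISPLAY_FLIP |
                    MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
    intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
    intel_ring_emit(ring, obj->gtt_offset);
    intel_ring_emit(ring, pf | pipesrc);
    intel_ring_advance(ring);                   /* hand the commands to the GPU */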
@@ -6027,6 +6088,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
6027 .page_flip = intel_crtc_page_flip, 6088 .page_flip = intel_crtc_page_flip,
6028}; 6089};
6029 6090
6091static void intel_pch_pll_init(struct drm_device *dev)
6092{
6093 drm_i915_private_t *dev_priv = dev->dev_private;
6094 int i;
6095
6096 if (dev_priv->num_pch_pll == 0) {
6097 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6098 return;
6099 }
6100
6101 for (i = 0; i < dev_priv->num_pch_pll; i++) {
6102 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6103 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6104 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6105 }
6106}
6107
6030static void intel_crtc_init(struct drm_device *dev, int pipe) 6108static void intel_crtc_init(struct drm_device *dev, int pipe)
6031{ 6109{
6032 drm_i915_private_t *dev_priv = dev->dev_private; 6110 drm_i915_private_t *dev_priv = dev->dev_private;
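The new intel_pch_pll_init() only records where each PCH PLL's control and divisor registers live; which CRTC ends up driving which PLL is tracked separately through the intel_crtc->pch_pll pointer added in intel_drv.h further down. Unrolled, the loop leaves the table in this state on hardware with two PCH PLLs:

    /* state after intel_pch_pll_init() when dev_priv->num_pch_pll == 2 */
    dev_priv->pch_plls[0].pll_reg = _PCH_DPLL(0);   /* DPLL control register */
    dev_priv->pch_plls[0].fp0_reg = _PCH_FP0(0);    /* divisor set 0 */
    dev_priv->pch_plls[0].fp1_reg = _PCH_FP1(0);    /* divisor set 1 */

    dev_priv->pch_plls[1].pll_reg = _PCH_DPLL(1);
    dev_priv->pch_plls[1].fp0_reg = _PCH_FP0(1);
    dev_priv->pch_plls[1].fp1_reg = _PCH_FP1(1);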
@@ -6064,8 +6142,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6064 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 6142 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6065 6143
6066 if (HAS_PCH_SPLIT(dev)) { 6144 if (HAS_PCH_SPLIT(dev)) {
6067 if (pipe == 2 && IS_IVYBRIDGE(dev))
6068 intel_crtc->no_pll = true;
6069 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6145 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6070 intel_helper_funcs.commit = ironlake_crtc_commit; 6146 intel_helper_funcs.commit = ironlake_crtc_commit;
6071 } else { 6147 } else {
@@ -6084,15 +6160,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6084int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 6160int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6085 struct drm_file *file) 6161 struct drm_file *file)
6086{ 6162{
6087 drm_i915_private_t *dev_priv = dev->dev_private;
6088 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 6163 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6089 struct drm_mode_object *drmmode_obj; 6164 struct drm_mode_object *drmmode_obj;
6090 struct intel_crtc *crtc; 6165 struct intel_crtc *crtc;
6091 6166
6092 if (!dev_priv) { 6167 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6093 DRM_ERROR("called with no initialization\n"); 6168 return -ENODEV;
6094 return -EINVAL;
6095 }
6096 6169
6097 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 6170 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6098 DRM_MODE_OBJECT_CRTC); 6171 DRM_MODE_OBJECT_CRTC);
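The ioctl's old hand-rolled dev_priv check is replaced by a feature test: a UMS caller is now rejected up front with -ENODEV rather than -EINVAL. The overlay ioctls later in this series drop their equivalent checks entirely, since the overlay is only set up under KMS in the first place. As a standalone guard the pattern is simply:

    /* KMS-only ioctl: bail out early when modesetting is not enabled */
    if (!drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;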
@@ -6348,10 +6421,12 @@ static void intel_init_display(struct drm_device *dev)
6348 if (HAS_PCH_SPLIT(dev)) { 6421 if (HAS_PCH_SPLIT(dev)) {
6349 dev_priv->display.dpms = ironlake_crtc_dpms; 6422 dev_priv->display.dpms = ironlake_crtc_dpms;
6350 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 6423 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6424 dev_priv->display.off = ironlake_crtc_off;
6351 dev_priv->display.update_plane = ironlake_update_plane; 6425 dev_priv->display.update_plane = ironlake_update_plane;
6352 } else { 6426 } else {
6353 dev_priv->display.dpms = i9xx_crtc_dpms; 6427 dev_priv->display.dpms = i9xx_crtc_dpms;
6354 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 6428 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6429 dev_priv->display.off = i9xx_crtc_off;
6355 dev_priv->display.update_plane = i9xx_update_plane; 6430 dev_priv->display.update_plane = i9xx_update_plane;
6356 } 6431 }
6357 6432
@@ -6614,6 +6689,8 @@ void intel_modeset_init(struct drm_device *dev)
6614 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 6689 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
6615 } 6690 }
6616 6691
6692 intel_pch_pll_init(dev);
6693
6617 /* Just disable it once at startup */ 6694 /* Just disable it once at startup */
6618 i915_disable_vga(dev); 6695 i915_disable_vga(dev);
6619 intel_setup_outputs(dev); 6696 intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 44cf32c8bcbf..a1a5ce71558a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
688 int lane_count, clock; 688 int lane_count, clock;
689 int max_lane_count = intel_dp_max_lane_count(intel_dp); 689 int max_lane_count = intel_dp_max_lane_count(intel_dp);
690 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 690 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
691 int bpp; 691 int bpp, mode_rate;
692 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 692 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
693 693
694 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 694 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
702 mode->clock = intel_dp->panel_fixed_mode->clock; 702 mode->clock = intel_dp->panel_fixed_mode->clock;
703 } 703 }
704 704
705 DRM_DEBUG_KMS("DP link computation with max lane count %i "
706 "max bw %02x pixel clock %iKHz\n",
707 max_lane_count, bws[max_clock], mode->clock);
708
705 if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode)) 709 if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
706 return false; 710 return false;
707 711
708 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 712 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
713 mode_rate = intel_dp_link_required(mode->clock, bpp);
709 714
710 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 715 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
711 for (clock = 0; clock <= max_clock; clock++) { 716 for (clock = 0; clock <= max_clock; clock++) {
712 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 717 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
713 718
714 if (intel_dp_link_required(mode->clock, bpp) 719 if (mode_rate <= link_avail) {
715 <= link_avail) {
716 intel_dp->link_bw = bws[clock]; 720 intel_dp->link_bw = bws[clock];
717 intel_dp->lane_count = lane_count; 721 intel_dp->lane_count = lane_count;
718 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 722 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
719 DRM_DEBUG_KMS("Display port link bw %02x lane " 723 DRM_DEBUG_KMS("DP link bw %02x lane "
720 "count %d clock %d\n", 724 "count %d clock %d bpp %d\n",
721 intel_dp->link_bw, intel_dp->lane_count, 725 intel_dp->link_bw, intel_dp->lane_count,
722 adjusted_mode->clock); 726 adjusted_mode->clock, bpp);
727 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
728 mode_rate, link_avail);
723 return true; 729 return true;
724 } 730 }
725 } 731 }
@@ -2439,6 +2445,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2439 } 2445 }
2440 2446
2441 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2447 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2448
2442 connector->interlace_allowed = true; 2449 connector->interlace_allowed = true;
2443 connector->doublescan_allowed = 0; 2450 connector->doublescan_allowed = 0;
2444 2451
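The DP link computation itself is unchanged; the rate the mode needs is now computed once, outside the lane/clock loops, and both the requirement and the chosen configuration get logged. Stripped of the debug output, the selection reads as a search for the cheapest link that still fits (the trailing failure return sits outside the hunk and is sketched here):

    mode_rate = intel_dp_link_required(mode->clock, bpp);   /* what the mode needs */

    for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
        for (clock = 0; clock <= max_clock; clock++) {
            int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]),
                                                    lane_count);

            if (mode_rate <= link_avail) {
                /* first, i.e. cheapest, configuration that fits wins */
                intel_dp->link_bw = bws[clock];
                intel_dp->lane_count = lane_count;
                adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
                return true;
            }
        }
    }
    return false;   /* the mode does not fit on this link */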
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7a7cae77f0ca..e5ee166e2faf 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -183,8 +183,8 @@ struct intel_crtc {
183 bool cursor_visible; 183 bool cursor_visible;
184 unsigned int bpp; 184 unsigned int bpp;
185 185
186 bool no_pll; /* tertiary pipe for IVB */ 186 /* We can share PLLs across outputs if the timings match */
187 bool use_pll_a; 187 struct intel_pch_pll *pch_pll;
188}; 188};
189 189
190struct intel_plane { 190struct intel_plane {
@@ -238,6 +238,8 @@ struct cxsr_latency {
238#define DIP_TYPE_AVI 0x82 238#define DIP_TYPE_AVI 0x82
239#define DIP_VERSION_AVI 0x2 239#define DIP_VERSION_AVI 0x2
240#define DIP_LEN_AVI 13 240#define DIP_LEN_AVI 13
241#define DIP_AVI_PR_1 0
242#define DIP_AVI_PR_2 1
241 243
242#define DIP_TYPE_SPD 0x83 244#define DIP_TYPE_SPD 0x83
243#define DIP_VERSION_SPD 0x1 245#define DIP_VERSION_SPD 0x1
@@ -271,8 +273,8 @@ struct dip_infoframe {
271 uint8_t ITC_EC_Q_SC; 273 uint8_t ITC_EC_Q_SC;
272 /* PB4 - VIC 6:0 */ 274 /* PB4 - VIC 6:0 */
273 uint8_t VIC; 275 uint8_t VIC;
274 /* PB5 - PR 3:0 */ 276 /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
275 uint8_t PR; 277 uint8_t YQ_CN_PR;
276 /* PB6 to PB13 */ 278 /* PB6 to PB13 */
277 uint16_t top_bar_end; 279 uint16_t top_bar_end;
278 uint16_t bottom_bar_start; 280 uint16_t bottom_bar_start;
@@ -346,6 +348,8 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
346extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, 348extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
347 enum plane plane); 349 enum plane plane);
348 350
351void intel_sanitize_pm(struct drm_device *dev);
352
349/* intel_panel.c */ 353/* intel_panel.c */
350extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 354extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
351 struct drm_display_mode *adjusted_mode); 355 struct drm_display_mode *adjusted_mode);
@@ -405,10 +409,6 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
405extern void ironlake_disable_rc6(struct drm_device *dev); 409extern void ironlake_disable_rc6(struct drm_device *dev);
406extern void ironlake_enable_drps(struct drm_device *dev); 410extern void ironlake_enable_drps(struct drm_device *dev);
407extern void ironlake_disable_drps(struct drm_device *dev); 411extern void ironlake_disable_drps(struct drm_device *dev);
408extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
409extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
410extern void gen6_disable_rps(struct drm_device *dev);
411extern void intel_init_emon(struct drm_device *dev);
412 412
413extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 413extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
414 struct drm_i915_gem_object *obj, 414 struct drm_i915_gem_object *obj,
@@ -466,5 +466,13 @@ extern void intel_init_pm(struct drm_device *dev);
466extern bool intel_fbc_enabled(struct drm_device *dev); 466extern bool intel_fbc_enabled(struct drm_device *dev);
467extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 467extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
468extern void intel_update_fbc(struct drm_device *dev); 468extern void intel_update_fbc(struct drm_device *dev);
469/* IPS */
470extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
471extern void intel_gpu_ips_teardown(void);
472
473extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
474extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
475extern void gen6_disable_rps(struct drm_device *dev);
476extern void intel_init_emon(struct drm_device *dev);
469 477
470#endif /* __INTEL_DRV_H__ */ 478#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1eef50d470d2..bf218753cbaf 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -220,7 +220,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
220 intel_hdmi->write_infoframe(encoder, frame); 220 intel_hdmi->write_infoframe(encoder, frame);
221} 221}
222 222
223static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) 223static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
224 struct drm_display_mode *adjusted_mode)
224{ 225{
225 struct dip_infoframe avi_if = { 226 struct dip_infoframe avi_if = {
226 .type = DIP_TYPE_AVI, 227 .type = DIP_TYPE_AVI,
@@ -228,6 +229,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
228 .len = DIP_LEN_AVI, 229 .len = DIP_LEN_AVI,
229 }; 230 };
230 231
232 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
233 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
234
231 intel_set_infoframe(encoder, &avi_if); 235 intel_set_infoframe(encoder, &avi_if);
232} 236}
233 237
@@ -290,7 +294,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
290 I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 294 I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
291 POSTING_READ(intel_hdmi->sdvox_reg); 295 POSTING_READ(intel_hdmi->sdvox_reg);
292 296
293 intel_hdmi_set_avi_infoframe(encoder); 297 intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
294 intel_hdmi_set_spd_infoframe(encoder); 298 intel_hdmi_set_spd_infoframe(encoder);
295} 299}
296 300
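Double-clocked CEA modes (the pixel-repeated 480i/576i family, flagged with DRM_MODE_FLAG_DBLCLK) must advertise their pixel repetition in the AVI infoframe, which is why mode_set now threads adjusted_mode into the helper and why PB5 grew from a bare PR field to YQ_CN_PR in intel_drv.h above. The added logic, annotated against the PB5 layout:

    /* PB5 layout (per the intel_drv.h hunk): YQ 7:6 | CN 5:4 | PR 3:0 */
    if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
        avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;   /* PR = 1: every pixel sent twice */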
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 34929aeca66b..18bd0af855dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -151,7 +151,7 @@ struct opregion_asle {
151static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 151static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
152{ 152{
153 struct drm_i915_private *dev_priv = dev->dev_private; 153 struct drm_i915_private *dev_priv = dev->dev_private;
154 struct opregion_asle *asle = dev_priv->opregion.asle; 154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
155 u32 max; 155 u32 max;
156 156
157 if (!(bclp & ASLE_BCLP_VALID)) 157 if (!(bclp & ASLE_BCLP_VALID))
@@ -163,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
163 163
164 max = intel_panel_get_max_backlight(dev); 164 max = intel_panel_get_max_backlight(dev);
165 intel_panel_set_backlight(dev, bclp * max / 255); 165 intel_panel_set_backlight(dev, bclp * max / 255);
166 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 166 iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
167 167
168 return 0; 168 return 0;
169} 169}
@@ -200,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
200void intel_opregion_asle_intr(struct drm_device *dev) 200void intel_opregion_asle_intr(struct drm_device *dev)
201{ 201{
202 struct drm_i915_private *dev_priv = dev->dev_private; 202 struct drm_i915_private *dev_priv = dev->dev_private;
203 struct opregion_asle *asle = dev_priv->opregion.asle; 203 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
204 u32 asle_stat = 0; 204 u32 asle_stat = 0;
205 u32 asle_req; 205 u32 asle_req;
206 206
207 if (!asle) 207 if (!asle)
208 return; 208 return;
209 209
210 asle_req = asle->aslc & ASLE_REQ_MSK; 210 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
211 211
212 if (!asle_req) { 212 if (!asle_req) {
213 DRM_DEBUG_DRIVER("non asle set request??\n"); 213 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -215,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
215 } 215 }
216 216
217 if (asle_req & ASLE_SET_ALS_ILLUM) 217 if (asle_req & ASLE_SET_ALS_ILLUM)
218 asle_stat |= asle_set_als_illum(dev, asle->alsi); 218 asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
219 219
220 if (asle_req & ASLE_SET_BACKLIGHT) 220 if (asle_req & ASLE_SET_BACKLIGHT)
221 asle_stat |= asle_set_backlight(dev, asle->bclp); 221 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
222 222
223 if (asle_req & ASLE_SET_PFIT) 223 if (asle_req & ASLE_SET_PFIT)
224 asle_stat |= asle_set_pfit(dev, asle->pfit); 224 asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
225 225
226 if (asle_req & ASLE_SET_PWM_FREQ) 226 if (asle_req & ASLE_SET_PWM_FREQ)
227 asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); 227 asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
228 228
229 asle->aslc = asle_stat; 229 iowrite32(asle_stat, &asle->aslc);
230} 230}
231 231
232void intel_opregion_gse_intr(struct drm_device *dev) 232void intel_opregion_gse_intr(struct drm_device *dev)
233{ 233{
234 struct drm_i915_private *dev_priv = dev->dev_private; 234 struct drm_i915_private *dev_priv = dev->dev_private;
235 struct opregion_asle *asle = dev_priv->opregion.asle; 235 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
236 u32 asle_stat = 0; 236 u32 asle_stat = 0;
237 u32 asle_req; 237 u32 asle_req;
238 238
239 if (!asle) 239 if (!asle)
240 return; 240 return;
241 241
242 asle_req = asle->aslc & ASLE_REQ_MSK; 242 asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
243 243
244 if (!asle_req) { 244 if (!asle_req) {
245 DRM_DEBUG_DRIVER("non asle set request??\n"); 245 DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -252,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
252 } 252 }
253 253
254 if (asle_req & ASLE_SET_BACKLIGHT) 254 if (asle_req & ASLE_SET_BACKLIGHT)
255 asle_stat |= asle_set_backlight(dev, asle->bclp); 255 asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
256 256
257 if (asle_req & ASLE_SET_PFIT) { 257 if (asle_req & ASLE_SET_PFIT) {
258 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 258 DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -264,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
264 asle_stat |= ASLE_PWM_FREQ_FAILED; 264 asle_stat |= ASLE_PWM_FREQ_FAILED;
265 } 265 }
266 266
267 asle->aslc = asle_stat; 267 iowrite32(asle_stat, &asle->aslc);
268} 268}
269#define ASLE_ALS_EN (1<<0) 269#define ASLE_ALS_EN (1<<0)
270#define ASLE_BLC_EN (1<<1) 270#define ASLE_BLC_EN (1<<1)
@@ -274,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
274void intel_opregion_enable_asle(struct drm_device *dev) 274void intel_opregion_enable_asle(struct drm_device *dev)
275{ 275{
276 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
277 struct opregion_asle *asle = dev_priv->opregion.asle; 277 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
278 278
279 if (asle) { 279 if (asle) {
280 if (IS_MOBILE(dev)) 280 if (IS_MOBILE(dev))
281 intel_enable_asle(dev); 281 intel_enable_asle(dev);
282 282
283 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 283 iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
284 ASLE_PFMB_EN; 284 ASLE_PFMB_EN,
285 asle->ardy = 1; 285 &asle->tche);
286 iowrite32(1, &asle->ardy);
286 } 287 }
287} 288}
288 289
@@ -300,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
300 Linux, these are handled by the dock, button and video drivers. 301 Linux, these are handled by the dock, button and video drivers.
301 */ 302 */
302 303
303 struct opregion_acpi *acpi; 304 struct opregion_acpi __iomem *acpi;
304 struct acpi_bus_event *event = data; 305 struct acpi_bus_event *event = data;
305 int ret = NOTIFY_OK; 306 int ret = NOTIFY_OK;
306 307
@@ -312,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
312 313
313 acpi = system_opregion->acpi; 314 acpi = system_opregion->acpi;
314 315
315 if (event->type == 0x80 && !(acpi->cevt & 0x1)) 316 if (event->type == 0x80 &&
317 (ioread32(&acpi->cevt) & 1) == 0)
316 ret = NOTIFY_BAD; 318 ret = NOTIFY_BAD;
317 319
318 acpi->csts = 0; 320 iowrite32(0, &acpi->csts);
319 321
320 return ret; 322 return ret;
321} 323}
@@ -339,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
339 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 341 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
340 unsigned long long device_id; 342 unsigned long long device_id;
341 acpi_status status; 343 acpi_status status;
344 u32 temp;
342 int i = 0; 345 int i = 0;
343 346
344 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 347 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -373,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
373 if (ACPI_SUCCESS(status)) { 376 if (ACPI_SUCCESS(status)) {
374 if (!device_id) 377 if (!device_id)
375 goto blind_set; 378 goto blind_set;
376 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); 379 iowrite32((u32)(device_id & 0x0f0f),
380 &opregion->acpi->didl[i]);
377 i++; 381 i++;
378 } 382 }
379 } 383 }
@@ -381,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
381end: 385end:
382 /* If fewer than 8 outputs, the list must be null terminated */ 386 /* If fewer than 8 outputs, the list must be null terminated */
383 if (i < 8) 387 if (i < 8)
384 opregion->acpi->didl[i] = 0; 388 iowrite32(0, &opregion->acpi->didl[i]);
385 return; 389 return;
386 390
387blind_set: 391blind_set:
@@ -415,7 +419,9 @@ blind_set:
415 output_type = ACPI_LVDS_OUTPUT; 419 output_type = ACPI_LVDS_OUTPUT;
416 break; 420 break;
417 } 421 }
418 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 422 temp = ioread32(&opregion->acpi->didl[i]);
423 iowrite32(temp | (1<<31) | output_type | i,
424 &opregion->acpi->didl[i]);
419 i++; 425 i++;
420 } 426 }
421 goto end; 427 goto end;
@@ -436,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
436 /* Notify BIOS we are ready to handle ACPI video ext notifs. 442 /* Notify BIOS we are ready to handle ACPI video ext notifs.
437 * Right now, all the events are handled by the ACPI video module. 443 * Right now, all the events are handled by the ACPI video module.
438 * We don't actually need to do anything with them. */ 444 * We don't actually need to do anything with them. */
439 opregion->acpi->csts = 0; 445 iowrite32(0, &opregion->acpi->csts);
440 opregion->acpi->drdy = 1; 446 iowrite32(1, &opregion->acpi->drdy);
441 447
442 system_opregion = opregion; 448 system_opregion = opregion;
443 register_acpi_notifier(&intel_opregion_notifier); 449 register_acpi_notifier(&intel_opregion_notifier);
@@ -456,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
456 return; 462 return;
457 463
458 if (opregion->acpi) { 464 if (opregion->acpi) {
459 opregion->acpi->drdy = 0; 465 iowrite32(0, &opregion->acpi->drdy);
460 466
461 system_opregion = NULL; 467 system_opregion = NULL;
462 unregister_acpi_notifier(&intel_opregion_notifier); 468 unregister_acpi_notifier(&intel_opregion_notifier);
@@ -476,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
476{ 482{
477 struct drm_i915_private *dev_priv = dev->dev_private; 483 struct drm_i915_private *dev_priv = dev->dev_private;
478 struct intel_opregion *opregion = &dev_priv->opregion; 484 struct intel_opregion *opregion = &dev_priv->opregion;
479 void *base; 485 void __iomem *base;
480 u32 asls, mboxes; 486 u32 asls, mboxes;
487 char buf[sizeof(OPREGION_SIGNATURE)];
481 int err = 0; 488 int err = 0;
482 489
483 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 490 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -491,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
491 if (!base) 498 if (!base)
492 return -ENOMEM; 499 return -ENOMEM;
493 500
494 if (memcmp(base, OPREGION_SIGNATURE, 16)) { 501 memcpy_fromio(buf, base, sizeof(buf));
502
503 if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
495 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 504 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
496 err = -EINVAL; 505 err = -EINVAL;
497 goto err_out; 506 goto err_out;
@@ -501,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
501 510
502 opregion->lid_state = base + ACPI_CLID; 511 opregion->lid_state = base + ACPI_CLID;
503 512
504 mboxes = opregion->header->mboxes; 513 mboxes = ioread32(&opregion->header->mboxes);
505 if (mboxes & MBOX_ACPI) { 514 if (mboxes & MBOX_ACPI) {
506 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 515 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
507 opregion->acpi = base + OPREGION_ACPI_OFFSET; 516 opregion->acpi = base + OPREGION_ACPI_OFFSET;
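The opregion is ioremapped BIOS memory, so these sparse fixes annotate every pointer into it with __iomem and route all accesses through ioread32()/iowrite32()/memcpy_fromio() rather than dereferencing the structs directly; the signature check likewise goes through a stack buffer first. Where the old code did a read-modify-write on the mapping, the converted form is:

    u32 temp;

    /* never |= an __iomem location directly: read, modify a local, write back */
    temp = ioread32(&opregion->acpi->didl[i]);
    iowrite32(temp | (1 << 31) | output_type | i, &opregion->acpi->didl[i]);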
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322fb..458743da3774 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
187 void (*flip_tail)(struct intel_overlay *); 187 void (*flip_tail)(struct intel_overlay *);
188}; 188};
189 189
190static struct overlay_registers * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 193 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
194 struct overlay_registers *regs; 194 struct overlay_registers __iomem *regs;
195 195
196 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
197 regs = overlay->reg_bo->phys_obj->handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
198 else 198 else
199 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, 199 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
200 overlay->reg_bo->gtt_offset); 200 overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
203} 203}
204 204
205static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 205static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
206 struct overlay_registers *regs) 206 struct overlay_registers __iomem *regs)
207{ 207{
208 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 208 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
209 io_mapping_unmap(regs); 209 io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215{ 215{
216 struct drm_device *dev = overlay->dev; 216 struct drm_device *dev = overlay->dev;
217 drm_i915_private_t *dev_priv = dev->dev_private; 217 drm_i915_private_t *dev_priv = dev->dev_private;
218 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
218 int ret; 219 int ret;
219 220
220 BUG_ON(overlay->last_flip_req); 221 BUG_ON(overlay->last_flip_req);
221 ret = i915_add_request(LP_RING(dev_priv), NULL, request); 222 ret = i915_add_request(ring, NULL, request);
222 if (ret) { 223 if (ret) {
223 kfree(request); 224 kfree(request);
224 return ret; 225 return ret;
225 } 226 }
226 overlay->last_flip_req = request->seqno; 227 overlay->last_flip_req = request->seqno;
227 overlay->flip_tail = tail; 228 overlay->flip_tail = tail;
228 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, 229 ret = i915_wait_request(ring, overlay->last_flip_req);
229 true);
230 if (ret) 230 if (ret)
231 return ret; 231 return ret;
232 i915_gem_retire_requests(dev);
232 233
233 overlay->last_flip_req = 0; 234 overlay->last_flip_req = 0;
234 return 0; 235 return 0;
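i915_wait_request() no longer takes a trailing bool, so callers that relied on it retiring requests now do so explicitly once the wait returns; both overlay wait paths in this file pick up the same pattern:

    ret = i915_wait_request(ring, overlay->last_flip_req);
    if (ret)
        return ret;
    i915_gem_retire_requests(dev);   /* retirement is no longer implied by the wait */

    overlay->last_flip_req = 0;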
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
262 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); 263 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
263 264
264 mode = drm_mode_duplicate(dev, &vesa_640x480); 265 mode = drm_mode_duplicate(dev, &vesa_640x480);
265 drm_mode_set_crtcinfo(mode, 0); 266
266 if (!drm_crtc_helper_set_mode(&crtc->base, mode, 267 if (!drm_crtc_helper_set_mode(&crtc->base, mode,
267 crtc->base.x, crtc->base.y, 268 crtc->base.x, crtc->base.y,
268 crtc->base.fb)) 269 crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
287{ 288{
288 struct drm_device *dev = overlay->dev; 289 struct drm_device *dev = overlay->dev;
289 struct drm_i915_private *dev_priv = dev->dev_private; 290 struct drm_i915_private *dev_priv = dev->dev_private;
291 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
290 struct drm_i915_gem_request *request; 292 struct drm_i915_gem_request *request;
291 int pipe_a_quirk = 0; 293 int pipe_a_quirk = 0;
292 int ret; 294 int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
306 goto out; 308 goto out;
307 } 309 }
308 310
309 ret = BEGIN_LP_RING(4); 311 ret = intel_ring_begin(ring, 4);
310 if (ret) { 312 if (ret) {
311 kfree(request); 313 kfree(request);
312 goto out; 314 goto out;
313 } 315 }
314 316
315 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 317 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
316 OUT_RING(overlay->flip_addr | OFC_UPDATE); 318 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
317 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 319 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
318 OUT_RING(MI_NOOP); 320 intel_ring_emit(ring, MI_NOOP);
319 ADVANCE_LP_RING(); 321 intel_ring_advance(ring);
320 322
321 ret = intel_overlay_do_wait_request(overlay, request, NULL); 323 ret = intel_overlay_do_wait_request(overlay, request, NULL);
322out: 324out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
332{ 334{
333 struct drm_device *dev = overlay->dev; 335 struct drm_device *dev = overlay->dev;
334 drm_i915_private_t *dev_priv = dev->dev_private; 336 drm_i915_private_t *dev_priv = dev->dev_private;
337 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
335 struct drm_i915_gem_request *request; 338 struct drm_i915_gem_request *request;
336 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
337 u32 tmp; 340 u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
351 if (tmp & (1 << 17)) 354 if (tmp & (1 << 17))
352 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 355 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
353 356
354 ret = BEGIN_LP_RING(2); 357 ret = intel_ring_begin(ring, 2);
355 if (ret) { 358 if (ret) {
356 kfree(request); 359 kfree(request);
357 return ret; 360 return ret;
358 } 361 }
359 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 362 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
360 OUT_RING(flip_addr); 363 intel_ring_emit(ring, flip_addr);
361 ADVANCE_LP_RING(); 364 intel_ring_advance(ring);
362 365
363 ret = i915_add_request(LP_RING(dev_priv), NULL, request); 366 ret = i915_add_request(ring, NULL, request);
364 if (ret) { 367 if (ret) {
365 kfree(request); 368 kfree(request);
366 return ret; 369 return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
401{ 404{
402 struct drm_device *dev = overlay->dev; 405 struct drm_device *dev = overlay->dev;
403 struct drm_i915_private *dev_priv = dev->dev_private; 406 struct drm_i915_private *dev_priv = dev->dev_private;
407 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
404 u32 flip_addr = overlay->flip_addr; 408 u32 flip_addr = overlay->flip_addr;
405 struct drm_i915_gem_request *request; 409 struct drm_i915_gem_request *request;
406 int ret; 410 int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
417 * of the hw. Do it in both cases */ 421 * of the hw. Do it in both cases */
418 flip_addr |= OFC_UPDATE; 422 flip_addr |= OFC_UPDATE;
419 423
420 ret = BEGIN_LP_RING(6); 424 ret = intel_ring_begin(ring, 6);
421 if (ret) { 425 if (ret) {
422 kfree(request); 426 kfree(request);
423 return ret; 427 return ret;
424 } 428 }
425 /* wait for overlay to go idle */ 429 /* wait for overlay to go idle */
426 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 430 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
427 OUT_RING(flip_addr); 431 intel_ring_emit(ring, flip_addr);
428 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 432 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
429 /* turn overlay off */ 433 /* turn overlay off */
430 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 434 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
431 OUT_RING(flip_addr); 435 intel_ring_emit(ring, flip_addr);
432 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 436 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
433 ADVANCE_LP_RING(); 437 intel_ring_advance(ring);
434 438
435 return intel_overlay_do_wait_request(overlay, request, 439 return intel_overlay_do_wait_request(overlay, request,
436 intel_overlay_off_tail); 440 intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
442{ 446{
443 struct drm_device *dev = overlay->dev; 447 struct drm_device *dev = overlay->dev;
444 drm_i915_private_t *dev_priv = dev->dev_private; 448 drm_i915_private_t *dev_priv = dev->dev_private;
449 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
445 int ret; 450 int ret;
446 451
447 if (overlay->last_flip_req == 0) 452 if (overlay->last_flip_req == 0)
448 return 0; 453 return 0;
449 454
450 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, 455 ret = i915_wait_request(ring, overlay->last_flip_req);
451 true);
452 if (ret) 456 if (ret)
453 return ret; 457 return ret;
458 i915_gem_retire_requests(dev);
454 459
455 if (overlay->flip_tail) 460 if (overlay->flip_tail)
456 overlay->flip_tail(overlay); 461 overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
467{ 472{
468 struct drm_device *dev = overlay->dev; 473 struct drm_device *dev = overlay->dev;
469 drm_i915_private_t *dev_priv = dev->dev_private; 474 drm_i915_private_t *dev_priv = dev->dev_private;
475 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
470 int ret; 476 int ret;
471 477
472 /* Only wait if there is actually an old frame to release to 478 /* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
483 if (request == NULL) 489 if (request == NULL)
484 return -ENOMEM; 490 return -ENOMEM;
485 491
486 ret = BEGIN_LP_RING(2); 492 ret = intel_ring_begin(ring, 2);
487 if (ret) { 493 if (ret) {
488 kfree(request); 494 kfree(request);
489 return ret; 495 return ret;
490 } 496 }
491 497
492 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 498 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
493 OUT_RING(MI_NOOP); 499 intel_ring_emit(ring, MI_NOOP);
494 ADVANCE_LP_RING(); 500 intel_ring_advance(ring);
495 501
496 ret = intel_overlay_do_wait_request(overlay, request, 502 ret = intel_overlay_do_wait_request(overlay, request,
497 intel_overlay_release_old_vid_tail); 503 intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
619 0x3000, 0x0800, 0x3000 625 0x3000, 0x0800, 0x3000
620}; 626};
621 627
622static void update_polyphase_filter(struct overlay_registers *regs) 628static void update_polyphase_filter(struct overlay_registers __iomem *regs)
623{ 629{
624 memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); 630 memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
625 memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs)); 631 memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
632 sizeof(uv_static_hcoeffs));
626} 633}
627 634
628static bool update_scaling_factors(struct intel_overlay *overlay, 635static bool update_scaling_factors(struct intel_overlay *overlay,
629 struct overlay_registers *regs, 636 struct overlay_registers __iomem *regs,
630 struct put_image_params *params) 637 struct put_image_params *params)
631{ 638{
632 /* fixed point with a 12 bit shift */ 639 /* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
665 overlay->old_xscale = xscale; 672 overlay->old_xscale = xscale;
666 overlay->old_yscale = yscale; 673 overlay->old_yscale = yscale;
667 674
668 regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | 675 iowrite32(((yscale & FRACT_MASK) << 20) |
669 ((xscale >> FP_SHIFT) << 16) | 676 ((xscale >> FP_SHIFT) << 16) |
670 ((xscale & FRACT_MASK) << 3)); 677 ((xscale & FRACT_MASK) << 3),
678 &regs->YRGBSCALE);
671 679
672 regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | 680 iowrite32(((yscale_UV & FRACT_MASK) << 20) |
673 ((xscale_UV >> FP_SHIFT) << 16) | 681 ((xscale_UV >> FP_SHIFT) << 16) |
674 ((xscale_UV & FRACT_MASK) << 3)); 682 ((xscale_UV & FRACT_MASK) << 3),
683 &regs->UVSCALE);
675 684
676 regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | 685 iowrite32((((yscale >> FP_SHIFT) << 16) |
677 ((yscale_UV >> FP_SHIFT) << 0))); 686 ((yscale_UV >> FP_SHIFT) << 0)),
687 &regs->UVSCALEV);
678 688
679 if (scale_changed) 689 if (scale_changed)
680 update_polyphase_filter(regs); 690 update_polyphase_filter(regs);
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
683} 693}
684 694
685static void update_colorkey(struct intel_overlay *overlay, 695static void update_colorkey(struct intel_overlay *overlay,
686 struct overlay_registers *regs) 696 struct overlay_registers __iomem *regs)
687{ 697{
688 u32 key = overlay->color_key; 698 u32 key = overlay->color_key;
689 699
690 switch (overlay->crtc->base.fb->bits_per_pixel) { 700 switch (overlay->crtc->base.fb->bits_per_pixel) {
691 case 8: 701 case 8:
692 regs->DCLRKV = 0; 702 iowrite32(0, &regs->DCLRKV);
693 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; 703 iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
694 break; 704 break;
695 705
696 case 16: 706 case 16:
697 if (overlay->crtc->base.fb->depth == 15) { 707 if (overlay->crtc->base.fb->depth == 15) {
698 regs->DCLRKV = RGB15_TO_COLORKEY(key); 708 iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
699 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; 709 iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
710 &regs->DCLRKM);
700 } else { 711 } else {
701 regs->DCLRKV = RGB16_TO_COLORKEY(key); 712 iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
702 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; 713 iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
714 &regs->DCLRKM);
703 } 715 }
704 break; 716 break;
705 717
706 case 24: 718 case 24:
707 case 32: 719 case 32:
708 regs->DCLRKV = key; 720 iowrite32(key, &regs->DCLRKV);
709 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; 721 iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
710 break; 722 break;
711 } 723 }
712} 724}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
761 struct put_image_params *params) 773 struct put_image_params *params)
762{ 774{
763 int ret, tmp_width; 775 int ret, tmp_width;
764 struct overlay_registers *regs; 776 struct overlay_registers __iomem *regs;
765 bool scale_changed = false; 777 bool scale_changed = false;
766 struct drm_device *dev = overlay->dev; 778 struct drm_device *dev = overlay->dev;
779 u32 swidth, swidthsw, sheight, ostride;
767 780
768 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 781 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
769 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 782 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
782 goto out_unpin; 795 goto out_unpin;
783 796
784 if (!overlay->active) { 797 if (!overlay->active) {
798 u32 oconfig;
785 regs = intel_overlay_map_regs(overlay); 799 regs = intel_overlay_map_regs(overlay);
786 if (!regs) { 800 if (!regs) {
787 ret = -ENOMEM; 801 ret = -ENOMEM;
788 goto out_unpin; 802 goto out_unpin;
789 } 803 }
790 regs->OCONFIG = OCONF_CC_OUT_8BIT; 804 oconfig = OCONF_CC_OUT_8BIT;
791 if (IS_GEN4(overlay->dev)) 805 if (IS_GEN4(overlay->dev))
792 regs->OCONFIG |= OCONF_CSC_MODE_BT709; 806 oconfig |= OCONF_CSC_MODE_BT709;
793 regs->OCONFIG |= overlay->crtc->pipe == 0 ? 807 oconfig |= overlay->crtc->pipe == 0 ?
794 OCONF_PIPE_A : OCONF_PIPE_B; 808 OCONF_PIPE_A : OCONF_PIPE_B;
809 iowrite32(oconfig, &regs->OCONFIG);
795 intel_overlay_unmap_regs(overlay, regs); 810 intel_overlay_unmap_regs(overlay, regs);
796 811
797 ret = intel_overlay_on(overlay); 812 ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
805 goto out_unpin; 820 goto out_unpin;
806 } 821 }
807 822
808 regs->DWINPOS = (params->dst_y << 16) | params->dst_x; 823 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
809 regs->DWINSZ = (params->dst_h << 16) | params->dst_w; 824 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
810 825
811 if (params->format & I915_OVERLAY_YUV_PACKED) 826 if (params->format & I915_OVERLAY_YUV_PACKED)
812 tmp_width = packed_width_bytes(params->format, params->src_w); 827 tmp_width = packed_width_bytes(params->format, params->src_w);
813 else 828 else
814 tmp_width = params->src_w; 829 tmp_width = params->src_w;
815 830
816 regs->SWIDTH = params->src_w; 831 swidth = params->src_w;
817 regs->SWIDTHSW = calc_swidthsw(overlay->dev, 832 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
818 params->offset_Y, tmp_width); 833 sheight = params->src_h;
819 regs->SHEIGHT = params->src_h; 834 iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
820 regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y; 835 ostride = params->stride_Y;
821 regs->OSTRIDE = params->stride_Y;
822 836
823 if (params->format & I915_OVERLAY_YUV_PLANAR) { 837 if (params->format & I915_OVERLAY_YUV_PLANAR) {
824 int uv_hscale = uv_hsubsampling(params->format); 838 int uv_hscale = uv_hsubsampling(params->format);
825 int uv_vscale = uv_vsubsampling(params->format); 839 int uv_vscale = uv_vsubsampling(params->format);
826 u32 tmp_U, tmp_V; 840 u32 tmp_U, tmp_V;
827 regs->SWIDTH |= (params->src_w/uv_hscale) << 16; 841 swidth |= (params->src_w/uv_hscale) << 16;
828 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 842 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
829 params->src_w/uv_hscale); 843 params->src_w/uv_hscale);
830 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 844 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
831 params->src_w/uv_hscale); 845 params->src_w/uv_hscale);
832 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; 846 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
833 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; 847 sheight |= (params->src_h/uv_vscale) << 16;
834 regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; 848 iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
835 regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; 849 iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
836 regs->OSTRIDE |= params->stride_UV << 16; 850 ostride |= params->stride_UV << 16;
837 } 851 }
838 852
853 iowrite32(swidth, &regs->SWIDTH);
854 iowrite32(swidthsw, &regs->SWIDTHSW);
855 iowrite32(sheight, &regs->SHEIGHT);
856 iowrite32(ostride, &regs->OSTRIDE);
857
839 scale_changed = update_scaling_factors(overlay, regs, params); 858 scale_changed = update_scaling_factors(overlay, regs, params);
840 859
841 update_colorkey(overlay, regs); 860 update_colorkey(overlay, regs);
842 861
843 regs->OCMD = overlay_cmd_reg(params); 862 iowrite32(overlay_cmd_reg(params), &regs->OCMD);
844 863
845 intel_overlay_unmap_regs(overlay, regs); 864 intel_overlay_unmap_regs(overlay, regs);
846 865
@@ -860,7 +879,7 @@ out_unpin:
860 879
861int intel_overlay_switch_off(struct intel_overlay *overlay) 880int intel_overlay_switch_off(struct intel_overlay *overlay)
862{ 881{
863 struct overlay_registers *regs; 882 struct overlay_registers __iomem *regs;
864 struct drm_device *dev = overlay->dev; 883 struct drm_device *dev = overlay->dev;
865 int ret; 884 int ret;
866 885
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
879 return ret; 898 return ret;
880 899
881 regs = intel_overlay_map_regs(overlay); 900 regs = intel_overlay_map_regs(overlay);
882 regs->OCMD = 0; 901 iowrite32(0, &regs->OCMD);
883 intel_overlay_unmap_regs(overlay, regs); 902 intel_overlay_unmap_regs(overlay, regs);
884 903
885 ret = intel_overlay_off(overlay); 904 ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1109 struct put_image_params *params; 1128 struct put_image_params *params;
1110 int ret; 1129 int ret;
1111 1130
1112 if (!dev_priv) { 1131 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1113 DRM_ERROR("called with no initialization\n");
1114 return -EINVAL;
1115 }
1116
1117 overlay = dev_priv->overlay; 1132 overlay = dev_priv->overlay;
1118 if (!overlay) { 1133 if (!overlay) {
1119 DRM_DEBUG("userspace bug: no overlay\n"); 1134 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
1250} 1265}
1251 1266
1252static void update_reg_attrs(struct intel_overlay *overlay, 1267static void update_reg_attrs(struct intel_overlay *overlay,
1253 struct overlay_registers *regs) 1268 struct overlay_registers __iomem *regs)
1254{ 1269{
1255 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff); 1270 iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
1256 regs->OCLRC1 = overlay->saturation; 1271 &regs->OCLRC0);
1272 iowrite32(overlay->saturation, &regs->OCLRC1);
1257} 1273}
1258 1274
1259static bool check_gamma_bounds(u32 gamma1, u32 gamma2) 1275static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1306 struct drm_intel_overlay_attrs *attrs = data; 1322 struct drm_intel_overlay_attrs *attrs = data;
1307 drm_i915_private_t *dev_priv = dev->dev_private; 1323 drm_i915_private_t *dev_priv = dev->dev_private;
1308 struct intel_overlay *overlay; 1324 struct intel_overlay *overlay;
1309 struct overlay_registers *regs; 1325 struct overlay_registers __iomem *regs;
1310 int ret; 1326 int ret;
1311 1327
1312 if (!dev_priv) { 1328 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1313 DRM_ERROR("called with no initialization\n");
1314 return -EINVAL;
1315 }
1316
1317 overlay = dev_priv->overlay; 1329 overlay = dev_priv->overlay;
1318 if (!overlay) { 1330 if (!overlay) {
1319 DRM_DEBUG("userspace bug: no overlay\n"); 1331 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
1396 drm_i915_private_t *dev_priv = dev->dev_private; 1408 drm_i915_private_t *dev_priv = dev->dev_private;
1397 struct intel_overlay *overlay; 1409 struct intel_overlay *overlay;
1398 struct drm_i915_gem_object *reg_bo; 1410 struct drm_i915_gem_object *reg_bo;
1399 struct overlay_registers *regs; 1411 struct overlay_registers __iomem *regs;
1400 int ret; 1412 int ret;
1401 1413
1402 if (!HAS_OVERLAY(dev)) 1414 if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
1451 if (!regs) 1463 if (!regs)
1452 goto out_unpin_bo; 1464 goto out_unpin_bo;
1453 1465
1454 memset(regs, 0, sizeof(struct overlay_registers)); 1466 memset_io(regs, 0, sizeof(struct overlay_registers));
1455 update_polyphase_filter(regs); 1467 update_polyphase_filter(regs);
1456 update_reg_attrs(overlay, regs); 1468 update_reg_attrs(overlay, regs);
1457 1469
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
1499 u32 isr; 1511 u32 isr;
1500}; 1512};
1501 1513
1502static struct overlay_registers * 1514static struct overlay_registers __iomem *
1503intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1515intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1504{ 1516{
1505 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 1517 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
1506 struct overlay_registers *regs; 1518 struct overlay_registers __iomem *regs;
1507 1519
1508 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1520 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1509 regs = overlay->reg_bo->phys_obj->handle->vaddr; 1521 /* Cast to make sparse happy, but it's wc memory anyway, so
1522 * equivalent to the wc io mapping on X86. */
1523 regs = (struct overlay_registers __iomem *)
1524 overlay->reg_bo->phys_obj->handle->vaddr;
1510 else 1525 else
1511 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1526 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1512 overlay->reg_bo->gtt_offset); 1527 overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1515} 1530}
1516 1531
1517static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1532static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1518 struct overlay_registers *regs) 1533 struct overlay_registers __iomem *regs)
1519{ 1534{
1520 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1535 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1521 io_mapping_unmap_atomic(regs); 1536 io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1540 error->dovsta = I915_READ(DOVSTA); 1555 error->dovsta = I915_READ(DOVSTA);
1541 error->isr = I915_READ(ISR); 1556 error->isr = I915_READ(ISR);
1542 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1557 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1543 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; 1558 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
1544 else 1559 else
1545 error->base = (long) overlay->reg_bo->gtt_offset; 1560 error->base = overlay->reg_bo->gtt_offset;
1546 1561
1547 regs = intel_overlay_map_regs_atomic(overlay); 1562 regs = intel_overlay_map_regs_atomic(overlay);
1548 if (!regs) 1563 if (!regs)
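The overlay register block gets the same __iomem treatment: regs points at either a write-combined mapping of the GTT or the physical-object handle, so every field update becomes an iowrite32()/memcpy_toio(), and values that used to be built up with |= on the mapping are now accumulated in locals and written once. Condensed from intel_overlay_do_put_image() above:

    u32 swidth, sheight, ostride;

    swidth  = params->src_w;
    sheight = params->src_h;
    ostride = params->stride_Y;

    if (params->format & I915_OVERLAY_YUV_PLANAR) {
        /* planar formats fold the chroma-plane parameters into the high halves */
        swidth  |= (params->src_w / uv_hscale) << 16;
        sheight |= (params->src_h / uv_vscale) << 16;
        ostride |= params->stride_UV << 16;
    }

    iowrite32(swidth,  &regs->SWIDTH);
    iowrite32(sheight, &regs->SHEIGHT);
    iowrite32(ostride, &regs->OSTRIDE);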
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 36940a390ef2..43892341079a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -28,6 +28,8 @@
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include "i915_drv.h" 29#include "i915_drv.h"
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h>
31 33
32/* FBC, or Frame Buffer Compression, is a technique employed to compress the 34/* FBC, or Frame Buffer Compression, is a technique employed to compress the
33 * framebuffer contents in-memory, aiming at reducing the required bandwidth 35 * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -524,6 +526,113 @@ out_disable:
524 } 526 }
525} 527}
526 528
529static void i915_pineview_get_mem_freq(struct drm_device *dev)
530{
531 drm_i915_private_t *dev_priv = dev->dev_private;
532 u32 tmp;
533
534 tmp = I915_READ(CLKCFG);
535
536 switch (tmp & CLKCFG_FSB_MASK) {
537 case CLKCFG_FSB_533:
538 dev_priv->fsb_freq = 533; /* 133*4 */
539 break;
540 case CLKCFG_FSB_800:
541 dev_priv->fsb_freq = 800; /* 200*4 */
542 break;
543 case CLKCFG_FSB_667:
544 dev_priv->fsb_freq = 667; /* 167*4 */
545 break;
546 case CLKCFG_FSB_400:
547 dev_priv->fsb_freq = 400; /* 100*4 */
548 break;
549 }
550
551 switch (tmp & CLKCFG_MEM_MASK) {
552 case CLKCFG_MEM_533:
553 dev_priv->mem_freq = 533;
554 break;
555 case CLKCFG_MEM_667:
556 dev_priv->mem_freq = 667;
557 break;
558 case CLKCFG_MEM_800:
559 dev_priv->mem_freq = 800;
560 break;
561 }
562
563 /* detect pineview DDR3 setting */
564 tmp = I915_READ(CSHRDDR3CTL);
565 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
566}
567
568static void i915_ironlake_get_mem_freq(struct drm_device *dev)
569{
570 drm_i915_private_t *dev_priv = dev->dev_private;
571 u16 ddrpll, csipll;
572
573 ddrpll = I915_READ16(DDRMPLL1);
574 csipll = I915_READ16(CSIPLL0);
575
576 switch (ddrpll & 0xff) {
577 case 0xc:
578 dev_priv->mem_freq = 800;
579 break;
580 case 0x10:
581 dev_priv->mem_freq = 1066;
582 break;
583 case 0x14:
584 dev_priv->mem_freq = 1333;
585 break;
586 case 0x18:
587 dev_priv->mem_freq = 1600;
588 break;
589 default:
590 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
591 ddrpll & 0xff);
592 dev_priv->mem_freq = 0;
593 break;
594 }
595
596 dev_priv->r_t = dev_priv->mem_freq;
597
598 switch (csipll & 0x3ff) {
599 case 0x00c:
600 dev_priv->fsb_freq = 3200;
601 break;
602 case 0x00e:
603 dev_priv->fsb_freq = 3733;
604 break;
605 case 0x010:
606 dev_priv->fsb_freq = 4266;
607 break;
608 case 0x012:
609 dev_priv->fsb_freq = 4800;
610 break;
611 case 0x014:
612 dev_priv->fsb_freq = 5333;
613 break;
614 case 0x016:
615 dev_priv->fsb_freq = 5866;
616 break;
617 case 0x018:
618 dev_priv->fsb_freq = 6400;
619 break;
620 default:
621 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
622 csipll & 0x3ff);
623 dev_priv->fsb_freq = 0;
624 break;
625 }
626
627 if (dev_priv->fsb_freq == 3200) {
628 dev_priv->c_m = 0;
629 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
630 dev_priv->c_m = 1;
631 } else {
632 dev_priv->c_m = 2;
633 }
634}
635
527static const struct cxsr_latency cxsr_latency_table[] = { 636static const struct cxsr_latency cxsr_latency_table[] = {
528 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 637 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
529 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 638 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
@@ -562,7 +671,7 @@ static const struct cxsr_latency cxsr_latency_table[] = {
562 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 671 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
563}; 672};
564 673
565const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, 674static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
566 int is_ddr3, 675 int is_ddr3,
567 int fsb, 676 int fsb,
568 int mem) 677 int mem)
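i915_pineview_get_mem_freq() and i915_ironlake_get_mem_freq() just decode the clock straps (CLKCFG plus the DDR3 bit in CSHRDDR3CTL on Pineview, DDRMPLL1/CSIPLL0 on Ironlake, with c_m bucketed from the FSB speed), and intel_get_cxsr_latency() becomes static now that all of its callers live in this file. Roughly how the detected values are consumed by the self-refresh watermark code; this is a sketch, and the first argument and the debug message are illustrative rather than lifted from this patch:

    const struct cxsr_latency *latency;

    latency = intel_get_cxsr_latency(!IS_MOBILE(dev), dev_priv->is_ddr3,
                                     dev_priv->fsb_freq, dev_priv->mem_freq);
    if (!latency) {
        /* unknown FSB/memory combination: no row in cxsr_latency_table */
        DRM_DEBUG_KMS("Unknown FSB/MEM, disabling CxSR\n");
        return;
    }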
@@ -2436,6 +2545,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
2436void ironlake_enable_rc6(struct drm_device *dev) 2545void ironlake_enable_rc6(struct drm_device *dev)
2437{ 2546{
2438 struct drm_i915_private *dev_priv = dev->dev_private; 2547 struct drm_i915_private *dev_priv = dev->dev_private;
2548 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2439 int ret; 2549 int ret;
2440 2550
2441 /* rc6 disabled by default due to repeated reports of hanging during 2551 /* rc6 disabled by default due to repeated reports of hanging during
@@ -2455,31 +2565,31 @@ void ironlake_enable_rc6(struct drm_device *dev)
2455 * GPU can automatically power down the render unit if given a page 2565 * GPU can automatically power down the render unit if given a page
2456 * to save state. 2566 * to save state.
2457 */ 2567 */
2458 ret = BEGIN_LP_RING(6); 2568 ret = intel_ring_begin(ring, 6);
2459 if (ret) { 2569 if (ret) {
2460 ironlake_teardown_rc6(dev); 2570 ironlake_teardown_rc6(dev);
2461 mutex_unlock(&dev->struct_mutex); 2571 mutex_unlock(&dev->struct_mutex);
2462 return; 2572 return;
2463 } 2573 }
2464 2574
2465 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 2575 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2466 OUT_RING(MI_SET_CONTEXT); 2576 intel_ring_emit(ring, MI_SET_CONTEXT);
2467 OUT_RING(dev_priv->renderctx->gtt_offset | 2577 intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
2468 MI_MM_SPACE_GTT | 2578 MI_MM_SPACE_GTT |
2469 MI_SAVE_EXT_STATE_EN | 2579 MI_SAVE_EXT_STATE_EN |
2470 MI_RESTORE_EXT_STATE_EN | 2580 MI_RESTORE_EXT_STATE_EN |
2471 MI_RESTORE_INHIBIT); 2581 MI_RESTORE_INHIBIT);
2472 OUT_RING(MI_SUSPEND_FLUSH); 2582 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
2473 OUT_RING(MI_NOOP); 2583 intel_ring_emit(ring, MI_NOOP);
2474 OUT_RING(MI_FLUSH); 2584 intel_ring_emit(ring, MI_FLUSH);
2475 ADVANCE_LP_RING(); 2585 intel_ring_advance(ring);
2476 2586
2477 /* 2587 /*
2478 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW 2588 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2479 * does an implicit flush, combined with MI_FLUSH above, it should be 2589 * does an implicit flush, combined with MI_FLUSH above, it should be
2480 * safe to assume that renderctx is valid 2590 * safe to assume that renderctx is valid
2481 */ 2591 */
2482 ret = intel_wait_ring_idle(LP_RING(dev_priv)); 2592 ret = intel_wait_ring_idle(ring);
2483 if (ret) { 2593 if (ret) {
2484 DRM_ERROR("failed to enable ironlake power savings\n"); 2594 DRM_ERROR("failed to enable ironlake power savings\n");
2485 ironlake_teardown_rc6(dev); 2595 ironlake_teardown_rc6(dev);
@@ -2507,6 +2617,485 @@ static unsigned long intel_pxfreq(u32 vidfreq)
2507 return freq; 2617 return freq;
2508} 2618}
2509 2619
2620static const struct cparams {
2621 u16 i;
2622 u16 t;
2623 u16 m;
2624 u16 c;
2625} cparams[] = {
2626 { 1, 1333, 301, 28664 },
2627 { 1, 1066, 294, 24460 },
2628 { 1, 800, 294, 25192 },
2629 { 0, 1333, 276, 27605 },
2630 { 0, 1066, 276, 27605 },
2631 { 0, 800, 231, 23784 },
2632};
2633
2634unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2635{
2636 u64 total_count, diff, ret;
2637 u32 count1, count2, count3, m = 0, c = 0;
2638 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2639 int i;
2640
2641 diff1 = now - dev_priv->last_time1;
2642
2643 /* Prevent division-by-zero if we are asking too fast.
2644 * Also, we don't get interesting results if we are polling
2645 * faster than once in 10ms, so just return the saved value
2646 * in such cases.
2647 */
2648 if (diff1 <= 10)
2649 return dev_priv->chipset_power;
2650
2651 count1 = I915_READ(DMIEC);
2652 count2 = I915_READ(DDREC);
2653 count3 = I915_READ(CSIEC);
2654
2655 total_count = count1 + count2 + count3;
2656
2657 /* FIXME: handle per-counter overflow */
2658 if (total_count < dev_priv->last_count1) {
2659 diff = ~0UL - dev_priv->last_count1;
2660 diff += total_count;
2661 } else {
2662 diff = total_count - dev_priv->last_count1;
2663 }
2664
2665 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2666 if (cparams[i].i == dev_priv->c_m &&
2667 cparams[i].t == dev_priv->r_t) {
2668 m = cparams[i].m;
2669 c = cparams[i].c;
2670 break;
2671 }
2672 }
2673
2674 diff = div_u64(diff, diff1);
2675 ret = ((m * diff) + c);
2676 ret = div_u64(ret, 10);
2677
2678 dev_priv->last_count1 = total_count;
2679 dev_priv->last_time1 = now;
2680
2681 dev_priv->chipset_power = ret;
2682
2683 return ret;
2684}
2685
2686unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2687{
2688 unsigned long m, x, b;
2689 u32 tsfs;
2690
2691 tsfs = I915_READ(TSFS);
2692
2693 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2694 x = I915_READ8(TR1);
2695
2696 b = tsfs & TSFS_INTR_MASK;
2697
2698 return ((m * x) / 127) - b;
2699}
2700
2701static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2702{
2703 static const struct v_table {
2704 u16 vd; /* in .1 mil */
2705 u16 vm; /* in .1 mil */
2706 } v_table[] = {
2707 { 0, 0, },
2708 { 375, 0, },
2709 { 500, 0, },
2710 { 625, 0, },
2711 { 750, 0, },
2712 { 875, 0, },
2713 { 1000, 0, },
2714 { 1125, 0, },
2715 { 4125, 3000, },
2716 { 4125, 3000, },
2717 { 4125, 3000, },
2718 { 4125, 3000, },
2719 { 4125, 3000, },
2720 { 4125, 3000, },
2721 { 4125, 3000, },
2722 { 4125, 3000, },
2723 { 4125, 3000, },
2724 { 4125, 3000, },
2725 { 4125, 3000, },
2726 { 4125, 3000, },
2727 { 4125, 3000, },
2728 { 4125, 3000, },
2729 { 4125, 3000, },
2730 { 4125, 3000, },
2731 { 4125, 3000, },
2732 { 4125, 3000, },
2733 { 4125, 3000, },
2734 { 4125, 3000, },
2735 { 4125, 3000, },
2736 { 4125, 3000, },
2737 { 4125, 3000, },
2738 { 4125, 3000, },
2739 { 4250, 3125, },
2740 { 4375, 3250, },
2741 { 4500, 3375, },
2742 { 4625, 3500, },
2743 { 4750, 3625, },
2744 { 4875, 3750, },
2745 { 5000, 3875, },
2746 { 5125, 4000, },
2747 { 5250, 4125, },
2748 { 5375, 4250, },
2749 { 5500, 4375, },
2750 { 5625, 4500, },
2751 { 5750, 4625, },
2752 { 5875, 4750, },
2753 { 6000, 4875, },
2754 { 6125, 5000, },
2755 { 6250, 5125, },
2756 { 6375, 5250, },
2757 { 6500, 5375, },
2758 { 6625, 5500, },
2759 { 6750, 5625, },
2760 { 6875, 5750, },
2761 { 7000, 5875, },
2762 { 7125, 6000, },
2763 { 7250, 6125, },
2764 { 7375, 6250, },
2765 { 7500, 6375, },
2766 { 7625, 6500, },
2767 { 7750, 6625, },
2768 { 7875, 6750, },
2769 { 8000, 6875, },
2770 { 8125, 7000, },
2771 { 8250, 7125, },
2772 { 8375, 7250, },
2773 { 8500, 7375, },
2774 { 8625, 7500, },
2775 { 8750, 7625, },
2776 { 8875, 7750, },
2777 { 9000, 7875, },
2778 { 9125, 8000, },
2779 { 9250, 8125, },
2780 { 9375, 8250, },
2781 { 9500, 8375, },
2782 { 9625, 8500, },
2783 { 9750, 8625, },
2784 { 9875, 8750, },
2785 { 10000, 8875, },
2786 { 10125, 9000, },
2787 { 10250, 9125, },
2788 { 10375, 9250, },
2789 { 10500, 9375, },
2790 { 10625, 9500, },
2791 { 10750, 9625, },
2792 { 10875, 9750, },
2793 { 11000, 9875, },
2794 { 11125, 10000, },
2795 { 11250, 10125, },
2796 { 11375, 10250, },
2797 { 11500, 10375, },
2798 { 11625, 10500, },
2799 { 11750, 10625, },
2800 { 11875, 10750, },
2801 { 12000, 10875, },
2802 { 12125, 11000, },
2803 { 12250, 11125, },
2804 { 12375, 11250, },
2805 { 12500, 11375, },
2806 { 12625, 11500, },
2807 { 12750, 11625, },
2808 { 12875, 11750, },
2809 { 13000, 11875, },
2810 { 13125, 12000, },
2811 { 13250, 12125, },
2812 { 13375, 12250, },
2813 { 13500, 12375, },
2814 { 13625, 12500, },
2815 { 13750, 12625, },
2816 { 13875, 12750, },
2817 { 14000, 12875, },
2818 { 14125, 13000, },
2819 { 14250, 13125, },
2820 { 14375, 13250, },
2821 { 14500, 13375, },
2822 { 14625, 13500, },
2823 { 14750, 13625, },
2824 { 14875, 13750, },
2825 { 15000, 13875, },
2826 { 15125, 14000, },
2827 { 15250, 14125, },
2828 { 15375, 14250, },
2829 { 15500, 14375, },
2830 { 15625, 14500, },
2831 { 15750, 14625, },
2832 { 15875, 14750, },
2833 { 16000, 14875, },
2834 { 16125, 15000, },
2835 };
2836 if (dev_priv->info->is_mobile)
2837 return v_table[pxvid].vm;
2838 else
2839 return v_table[pxvid].vd;
2840}
2841
2842void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2843{
2844 struct timespec now, diff1;
2845 u64 diff;
2846 unsigned long diffms;
2847 u32 count;
2848
2849 if (dev_priv->info->gen != 5)
2850 return;
2851
2852 getrawmonotonic(&now);
2853 diff1 = timespec_sub(now, dev_priv->last_time2);
2854
2855 /* Don't divide by 0 */
2856 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2857 if (!diffms)
2858 return;
2859
2860 count = I915_READ(GFXEC);
2861
2862 if (count < dev_priv->last_count2) {
2863 diff = ~0UL - dev_priv->last_count2;
2864 diff += count;
2865 } else {
2866 diff = count - dev_priv->last_count2;
2867 }
2868
2869 dev_priv->last_count2 = count;
2870 dev_priv->last_time2 = now;
2871
2872 /* More magic constants... */
2873 diff = diff * 1181;
2874 diff = div_u64(diff, diffms * 10);
2875 dev_priv->gfx_power = diff;
2876}
2877
2878unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2879{
2880 unsigned long t, corr, state1, corr2, state2;
2881 u32 pxvid, ext_v;
2882
2883 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
2884 pxvid = (pxvid >> 24) & 0x7f;
2885 ext_v = pvid_to_extvid(dev_priv, pxvid);
2886
2887 state1 = ext_v;
2888
2889 t = i915_mch_val(dev_priv);
2890
2891 /* Revel in the empirically derived constants */
2892
2893 /* Correction factor in 1/100000 units */
2894 if (t > 80)
2895 corr = ((t * 2349) + 135940);
2896 else if (t >= 50)
2897 corr = ((t * 964) + 29317);
2898 else /* < 50 */
2899 corr = ((t * 301) + 1004);
2900
2901 corr = corr * ((150142 * state1) / 10000 - 78642);
2902 corr /= 100000;
2903 corr2 = (corr * dev_priv->corr);
2904
2905 state2 = (corr2 * state1) / 10000;
2906 state2 /= 100; /* convert to mW */
2907
2908 i915_update_gfx_val(dev_priv);
2909
2910 return dev_priv->gfx_power + state2;
2911}
2912
2913/* Global for IPS driver to get at the current i915 device */
2914static struct drm_i915_private *i915_mch_dev;
2915/*
2916 * Lock protecting IPS related data structures
2917 * - i915_mch_dev
2918 * - dev_priv->max_delay
2919 * - dev_priv->min_delay
2920 * - dev_priv->fmax
2921 * - dev_priv->gpu_busy
2922 */
2923static DEFINE_SPINLOCK(mchdev_lock);
2924
2925/**
2926 * i915_read_mch_val - return value for IPS use
2927 *
2928 * Calculate and return a value for the IPS driver to use when deciding whether
2929 * we have thermal and power headroom to increase CPU or GPU power budget.
2930 */
2931unsigned long i915_read_mch_val(void)
2932{
2933 struct drm_i915_private *dev_priv;
2934 unsigned long chipset_val, graphics_val, ret = 0;
2935
2936 spin_lock(&mchdev_lock);
2937 if (!i915_mch_dev)
2938 goto out_unlock;
2939 dev_priv = i915_mch_dev;
2940
2941 chipset_val = i915_chipset_val(dev_priv);
2942 graphics_val = i915_gfx_val(dev_priv);
2943
2944 ret = chipset_val + graphics_val;
2945
2946out_unlock:
2947 spin_unlock(&mchdev_lock);
2948
2949 return ret;
2950}
2951EXPORT_SYMBOL_GPL(i915_read_mch_val);
2952
2953/**
2954 * i915_gpu_raise - raise GPU frequency limit
2955 *
2956 * Raise the limit; IPS indicates we have thermal headroom.
2957 */
2958bool i915_gpu_raise(void)
2959{
2960 struct drm_i915_private *dev_priv;
2961 bool ret = true;
2962
2963 spin_lock(&mchdev_lock);
2964 if (!i915_mch_dev) {
2965 ret = false;
2966 goto out_unlock;
2967 }
2968 dev_priv = i915_mch_dev;
2969
2970 if (dev_priv->max_delay > dev_priv->fmax)
2971 dev_priv->max_delay--;
2972
2973out_unlock:
2974 spin_unlock(&mchdev_lock);
2975
2976 return ret;
2977}
2978EXPORT_SYMBOL_GPL(i915_gpu_raise);
2979
2980/**
2981 * i915_gpu_lower - lower GPU frequency limit
2982 *
2983 * IPS indicates we're close to a thermal limit, so throttle back the GPU
2984 * frequency maximum.
2985 */
2986bool i915_gpu_lower(void)
2987{
2988 struct drm_i915_private *dev_priv;
2989 bool ret = true;
2990
2991 spin_lock(&mchdev_lock);
2992 if (!i915_mch_dev) {
2993 ret = false;
2994 goto out_unlock;
2995 }
2996 dev_priv = i915_mch_dev;
2997
2998 if (dev_priv->max_delay < dev_priv->min_delay)
2999 dev_priv->max_delay++;
3000
3001out_unlock:
3002 spin_unlock(&mchdev_lock);
3003
3004 return ret;
3005}
3006EXPORT_SYMBOL_GPL(i915_gpu_lower);
3007
3008/**
3009 * i915_gpu_busy - indicate GPU business to IPS
3010 *
3011 * Tell the IPS driver whether or not the GPU is busy.
3012 */
3013bool i915_gpu_busy(void)
3014{
3015 struct drm_i915_private *dev_priv;
3016 bool ret = false;
3017
3018 spin_lock(&mchdev_lock);
3019 if (!i915_mch_dev)
3020 goto out_unlock;
3021 dev_priv = i915_mch_dev;
3022
3023 ret = dev_priv->busy;
3024
3025out_unlock:
3026 spin_unlock(&mchdev_lock);
3027
3028 return ret;
3029}
3030EXPORT_SYMBOL_GPL(i915_gpu_busy);
3031
3032/**
3033 * i915_gpu_turbo_disable - disable graphics turbo
3034 *
3035 * Disable graphics turbo by resetting the max frequency and setting the
3036 * current frequency to the default.
3037 */
3038bool i915_gpu_turbo_disable(void)
3039{
3040 struct drm_i915_private *dev_priv;
3041 bool ret = true;
3042
3043 spin_lock(&mchdev_lock);
3044 if (!i915_mch_dev) {
3045 ret = false;
3046 goto out_unlock;
3047 }
3048 dev_priv = i915_mch_dev;
3049
3050 dev_priv->max_delay = dev_priv->fstart;
3051
3052 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
3053 ret = false;
3054
3055out_unlock:
3056 spin_unlock(&mchdev_lock);
3057
3058 return ret;
3059}
3060EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3061
3062/**
3063 * Tells the intel_ips driver that the i915 driver is now loaded, if
3064 * IPS got loaded first.
3065 *
3066 * This awkward dance is so that neither module has to depend on the
3067 * other in order for IPS to do the appropriate communication of
3068 * GPU turbo limits to i915.
3069 */
3070static void
3071ips_ping_for_i915_load(void)
3072{
3073 void (*link)(void);
3074
3075 link = symbol_get(ips_link_to_i915_driver);
3076 if (link) {
3077 link();
3078 symbol_put(ips_link_to_i915_driver);
3079 }
3080}
3081
3082void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3083{
3084 spin_lock(&mchdev_lock);
3085 i915_mch_dev = dev_priv;
3086 dev_priv->mchdev_lock = &mchdev_lock;
3087 spin_unlock(&mchdev_lock);
3088
3089 ips_ping_for_i915_load();
3090}
3091
3092void intel_gpu_ips_teardown(void)
3093{
3094 spin_lock(&mchdev_lock);
3095 i915_mch_dev = NULL;
3096 spin_unlock(&mchdev_lock);
3097}
3098
2510void intel_init_emon(struct drm_device *dev) 3099void intel_init_emon(struct drm_device *dev)
2511{ 3100{
2512 struct drm_i915_private *dev_priv = dev->dev_private; 3101 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2663,9 +3252,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
2663 I915_WRITE(WM2_LP_ILK, 0); 3252 I915_WRITE(WM2_LP_ILK, 0);
2664 I915_WRITE(WM1_LP_ILK, 0); 3253 I915_WRITE(WM1_LP_ILK, 0);
2665 3254
2666 /* clear masked bit */
2667 I915_WRITE(CACHE_MODE_0, 3255 I915_WRITE(CACHE_MODE_0,
2668 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); 3256 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
2669 3257
2670 I915_WRITE(GEN6_UCGCTL1, 3258 I915_WRITE(GEN6_UCGCTL1,
2671 I915_READ(GEN6_UCGCTL1) | 3259 I915_READ(GEN6_UCGCTL1) |
@@ -2776,6 +3364,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
2776 } 3364 }
2777 3365
2778 gen7_setup_fixed_func_scheduler(dev_priv); 3366 gen7_setup_fixed_func_scheduler(dev_priv);
3367
3368 /* WaDisable4x2SubspanOptimization */
3369 I915_WRITE(CACHE_MODE_1,
3370 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
2779} 3371}
2780 3372
2781static void valleyview_init_clock_gating(struct drm_device *dev) 3373static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -2821,9 +3413,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
2821 intel_flush_display_plane(dev_priv, pipe); 3413 intel_flush_display_plane(dev_priv, pipe);
2822 } 3414 }
2823 3415
2824 I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) | 3416 I915_WRITE(CACHE_MODE_1,
2825 (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) | 3417 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
2826 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2827} 3418}
2828 3419
2829static void g4x_init_clock_gating(struct drm_device *dev) 3420static void g4x_init_clock_gating(struct drm_device *dev)
@@ -2875,6 +3466,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
2875 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 3466 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
2876 DSTATE_DOT_CLOCK_GATING; 3467 DSTATE_DOT_CLOCK_GATING;
2877 I915_WRITE(D_STATE, dstate); 3468 I915_WRITE(D_STATE, dstate);
3469
3470 if (IS_PINEVIEW(dev))
3471 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
2878} 3472}
2879 3473
2880static void i85x_init_clock_gating(struct drm_device *dev) 3474static void i85x_init_clock_gating(struct drm_device *dev)
@@ -2931,6 +3525,41 @@ void intel_init_clock_gating(struct drm_device *dev)
2931 dev_priv->display.init_pch_clock_gating(dev); 3525 dev_priv->display.init_pch_clock_gating(dev);
2932} 3526}
2933 3527
3528static void gen6_sanitize_pm(struct drm_device *dev)
3529{
3530 struct drm_i915_private *dev_priv = dev->dev_private;
3531 u32 limits, delay, old;
3532
3533 gen6_gt_force_wake_get(dev_priv);
3534
3535 old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
3536 /* Make sure we continue to get interrupts
3537 * until we hit the minimum or maximum frequencies.
3538 */
3539 limits &= ~(0x3f << 16 | 0x3f << 24);
3540 delay = dev_priv->cur_delay;
3541 if (delay < dev_priv->max_delay)
3542 limits |= (dev_priv->max_delay & 0x3f) << 24;
3543 if (delay > dev_priv->min_delay)
3544 limits |= (dev_priv->min_delay & 0x3f) << 16;
3545
3546 if (old != limits) {
3547 DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
3548 limits, old);
3549 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3550 }
3551
3552 gen6_gt_force_wake_put(dev_priv);
3553}
3554
3555void intel_sanitize_pm(struct drm_device *dev)
3556{
3557 struct drm_i915_private *dev_priv = dev->dev_private;
3558
3559 if (dev_priv->display.sanitize_pm)
3560 dev_priv->display.sanitize_pm(dev);
3561}
3562
2934/* Set up chip specific power management-related functions */ 3563/* Set up chip specific power management-related functions */
2935void intel_init_pm(struct drm_device *dev) 3564void intel_init_pm(struct drm_device *dev)
2936{ 3565{
@@ -2953,6 +3582,12 @@ void intel_init_pm(struct drm_device *dev)
2953 /* 855GM needs testing */ 3582 /* 855GM needs testing */
2954 } 3583 }
2955 3584
3585 /* For cxsr */
3586 if (IS_PINEVIEW(dev))
3587 i915_pineview_get_mem_freq(dev);
3588 else if (IS_GEN5(dev))
3589 i915_ironlake_get_mem_freq(dev);
3590
2956 /* For FIFO watermark updates */ 3591 /* For FIFO watermark updates */
2957 if (HAS_PCH_SPLIT(dev)) { 3592 if (HAS_PCH_SPLIT(dev)) {
2958 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; 3593 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
@@ -3007,6 +3642,7 @@ void intel_init_pm(struct drm_device *dev)
3007 dev_priv->display.update_wm = NULL; 3642 dev_priv->display.update_wm = NULL;
3008 } 3643 }
3009 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 3644 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3645 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3010 } else if (IS_IVYBRIDGE(dev)) { 3646 } else if (IS_IVYBRIDGE(dev)) {
3011 /* FIXME: detect B0+ stepping and use auto training */ 3647 /* FIXME: detect B0+ stepping and use auto training */
3012 if (SNB_READ_WM0_LATENCY()) { 3648 if (SNB_READ_WM0_LATENCY()) {
@@ -3018,6 +3654,7 @@ void intel_init_pm(struct drm_device *dev)
3018 dev_priv->display.update_wm = NULL; 3654 dev_priv->display.update_wm = NULL;
3019 } 3655 }
3020 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 3656 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3657 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3021 } else 3658 } else
3022 dev_priv->display.update_wm = NULL; 3659 dev_priv->display.update_wm = NULL;
3023 } else if (IS_VALLEYVIEW(dev)) { 3660 } else if (IS_VALLEYVIEW(dev)) {
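A note on the masked-register writes introduced throughout the intel_pm.c hunks above: registers such as CACHE_MODE_0, CACHE_MODE_1, MI_MODE, INSTPM and ECOSKPD carry a write-enable mask in their upper 16 bits, so only bits whose mask bit is also written actually change. The open-coded "(bit << 16) | bit" idiom is replaced by helpers; a minimal sketch, assuming the i915_reg.h definitions of this period:

	/* upper halfword selects which low bits the write touches */
	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */

	/* e.g. clear the SNB LRA-eviction bit without disturbing the rest: */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
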
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b5ef7c145ee5..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -401,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
401 int ret = init_ring_common(ring); 401 int ret = init_ring_common(ring);
402 402
403 if (INTEL_INFO(dev)->gen > 3) { 403 if (INTEL_INFO(dev)->gen > 3) {
404 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 404 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
405 I915_WRITE(MI_MODE, mode);
406 if (IS_GEN7(dev)) 405 if (IS_GEN7(dev))
407 I915_WRITE(GFX_MODE_GEN7, 406 I915_WRITE(GFX_MODE_GEN7,
408 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 407 _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
409 GFX_MODE_ENABLE(GFX_REPLAY_MODE)); 408 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
410 } 409 }
411 410
412 if (INTEL_INFO(dev)->gen >= 5) { 411 if (INTEL_INFO(dev)->gen >= 5) {
@@ -415,19 +414,19 @@ static int init_render_ring(struct intel_ring_buffer *ring)
415 return ret; 414 return ret;
416 } 415 }
417 416
418 if (INTEL_INFO(dev)->gen >= 6) { 417 if (IS_GEN6(dev)) {
419 I915_WRITE(INSTPM,
420 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
421
422 /* From the Sandybridge PRM, volume 1 part 3, page 24: 418 /* From the Sandybridge PRM, volume 1 part 3, page 24:
423 * "If this bit is set, STCunit will have LRA as replacement 419 * "If this bit is set, STCunit will have LRA as replacement
424 * policy. [...] This bit must be reset. LRA replacement 420 * policy. [...] This bit must be reset. LRA replacement
425 * policy is not supported." 421 * policy is not supported."
426 */ 422 */
427 I915_WRITE(CACHE_MODE_0, 423 I915_WRITE(CACHE_MODE_0,
428 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); 424 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
429 } 425 }
430 426
427 if (INTEL_INFO(dev)->gen >= 6)
428 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
429
431 return ret; 430 return ret;
432} 431}
433 432
@@ -621,17 +620,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
621{ 620{
622 struct drm_device *dev = ring->dev; 621 struct drm_device *dev = ring->dev;
623 drm_i915_private_t *dev_priv = dev->dev_private; 622 drm_i915_private_t *dev_priv = dev->dev_private;
623 unsigned long flags;
624 624
625 if (!dev->irq_enabled) 625 if (!dev->irq_enabled)
626 return false; 626 return false;
627 627
628 spin_lock(&ring->irq_lock); 628 spin_lock_irqsave(&dev_priv->irq_lock, flags);
629 if (ring->irq_refcount++ == 0) { 629 if (ring->irq_refcount++ == 0) {
630 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 630 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
631 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 631 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
632 POSTING_READ(GTIMR); 632 POSTING_READ(GTIMR);
633 } 633 }
634 spin_unlock(&ring->irq_lock); 634 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
635 635
636 return true; 636 return true;
637} 637}
@@ -641,14 +641,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
641{ 641{
642 struct drm_device *dev = ring->dev; 642 struct drm_device *dev = ring->dev;
643 drm_i915_private_t *dev_priv = dev->dev_private; 643 drm_i915_private_t *dev_priv = dev->dev_private;
644 unsigned long flags;
644 645
645 spin_lock(&ring->irq_lock); 646 spin_lock_irqsave(&dev_priv->irq_lock, flags);
646 if (--ring->irq_refcount == 0) { 647 if (--ring->irq_refcount == 0) {
647 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 648 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
648 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 649 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
649 POSTING_READ(GTIMR); 650 POSTING_READ(GTIMR);
650 } 651 }
651 spin_unlock(&ring->irq_lock); 652 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
652} 653}
653 654
654static bool 655static bool
@@ -656,17 +657,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
656{ 657{
657 struct drm_device *dev = ring->dev; 658 struct drm_device *dev = ring->dev;
658 drm_i915_private_t *dev_priv = dev->dev_private; 659 drm_i915_private_t *dev_priv = dev->dev_private;
660 unsigned long flags;
659 661
660 if (!dev->irq_enabled) 662 if (!dev->irq_enabled)
661 return false; 663 return false;
662 664
663 spin_lock(&ring->irq_lock); 665 spin_lock_irqsave(&dev_priv->irq_lock, flags);
664 if (ring->irq_refcount++ == 0) { 666 if (ring->irq_refcount++ == 0) {
665 dev_priv->irq_mask &= ~ring->irq_enable_mask; 667 dev_priv->irq_mask &= ~ring->irq_enable_mask;
666 I915_WRITE(IMR, dev_priv->irq_mask); 668 I915_WRITE(IMR, dev_priv->irq_mask);
667 POSTING_READ(IMR); 669 POSTING_READ(IMR);
668 } 670 }
669 spin_unlock(&ring->irq_lock); 671 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
670 672
671 return true; 673 return true;
672} 674}
@@ -676,14 +678,52 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
676{ 678{
677 struct drm_device *dev = ring->dev; 679 struct drm_device *dev = ring->dev;
678 drm_i915_private_t *dev_priv = dev->dev_private; 680 drm_i915_private_t *dev_priv = dev->dev_private;
681 unsigned long flags;
679 682
680 spin_lock(&ring->irq_lock); 683 spin_lock_irqsave(&dev_priv->irq_lock, flags);
681 if (--ring->irq_refcount == 0) { 684 if (--ring->irq_refcount == 0) {
682 dev_priv->irq_mask |= ring->irq_enable_mask; 685 dev_priv->irq_mask |= ring->irq_enable_mask;
683 I915_WRITE(IMR, dev_priv->irq_mask); 686 I915_WRITE(IMR, dev_priv->irq_mask);
684 POSTING_READ(IMR); 687 POSTING_READ(IMR);
685 } 688 }
686 spin_unlock(&ring->irq_lock); 689 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
690}
691
692static bool
693i8xx_ring_get_irq(struct intel_ring_buffer *ring)
694{
695 struct drm_device *dev = ring->dev;
696 drm_i915_private_t *dev_priv = dev->dev_private;
697 unsigned long flags;
698
699 if (!dev->irq_enabled)
700 return false;
701
702 spin_lock_irqsave(&dev_priv->irq_lock, flags);
703 if (ring->irq_refcount++ == 0) {
704 dev_priv->irq_mask &= ~ring->irq_enable_mask;
705 I915_WRITE16(IMR, dev_priv->irq_mask);
706 POSTING_READ16(IMR);
707 }
708 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
709
710 return true;
711}
712
713static void
714i8xx_ring_put_irq(struct intel_ring_buffer *ring)
715{
716 struct drm_device *dev = ring->dev;
717 drm_i915_private_t *dev_priv = dev->dev_private;
718 unsigned long flags;
719
720 spin_lock_irqsave(&dev_priv->irq_lock, flags);
721 if (--ring->irq_refcount == 0) {
722 dev_priv->irq_mask |= ring->irq_enable_mask;
723 I915_WRITE16(IMR, dev_priv->irq_mask);
724 POSTING_READ16(IMR);
725 }
726 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
687} 727}
688 728
689void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 729void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -762,6 +802,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
762{ 802{
763 struct drm_device *dev = ring->dev; 803 struct drm_device *dev = ring->dev;
764 drm_i915_private_t *dev_priv = dev->dev_private; 804 drm_i915_private_t *dev_priv = dev->dev_private;
805 unsigned long flags;
765 806
766 if (!dev->irq_enabled) 807 if (!dev->irq_enabled)
767 return false; 808 return false;
@@ -771,14 +812,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
771 * blt/bsd rings on ivb. */ 812 * blt/bsd rings on ivb. */
772 gen6_gt_force_wake_get(dev_priv); 813 gen6_gt_force_wake_get(dev_priv);
773 814
774 spin_lock(&ring->irq_lock); 815 spin_lock_irqsave(&dev_priv->irq_lock, flags);
775 if (ring->irq_refcount++ == 0) { 816 if (ring->irq_refcount++ == 0) {
776 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 817 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
777 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 818 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
778 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 819 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
779 POSTING_READ(GTIMR); 820 POSTING_READ(GTIMR);
780 } 821 }
781 spin_unlock(&ring->irq_lock); 822 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
782 823
783 return true; 824 return true;
784} 825}
@@ -788,15 +829,16 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
788{ 829{
789 struct drm_device *dev = ring->dev; 830 struct drm_device *dev = ring->dev;
790 drm_i915_private_t *dev_priv = dev->dev_private; 831 drm_i915_private_t *dev_priv = dev->dev_private;
832 unsigned long flags;
791 833
792 spin_lock(&ring->irq_lock); 834 spin_lock_irqsave(&dev_priv->irq_lock, flags);
793 if (--ring->irq_refcount == 0) { 835 if (--ring->irq_refcount == 0) {
794 I915_WRITE_IMR(ring, ~0); 836 I915_WRITE_IMR(ring, ~0);
795 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 837 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
796 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 838 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
797 POSTING_READ(GTIMR); 839 POSTING_READ(GTIMR);
798 } 840 }
799 spin_unlock(&ring->irq_lock); 841 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
800 842
801 gen6_gt_force_wake_put(dev_priv); 843 gen6_gt_force_wake_put(dev_priv);
802} 844}
@@ -858,7 +900,6 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
858 900
859static void cleanup_status_page(struct intel_ring_buffer *ring) 901static void cleanup_status_page(struct intel_ring_buffer *ring)
860{ 902{
861 drm_i915_private_t *dev_priv = ring->dev->dev_private;
862 struct drm_i915_gem_object *obj; 903 struct drm_i915_gem_object *obj;
863 904
864 obj = ring->status_page.obj; 905 obj = ring->status_page.obj;
@@ -869,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
869 i915_gem_object_unpin(obj); 910 i915_gem_object_unpin(obj);
870 drm_gem_object_unreference(&obj->base); 911 drm_gem_object_unreference(&obj->base);
871 ring->status_page.obj = NULL; 912 ring->status_page.obj = NULL;
872
873 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
874} 913}
875 914
876static int init_status_page(struct intel_ring_buffer *ring) 915static int init_status_page(struct intel_ring_buffer *ring)
877{ 916{
878 struct drm_device *dev = ring->dev; 917 struct drm_device *dev = ring->dev;
879 drm_i915_private_t *dev_priv = dev->dev_private;
880 struct drm_i915_gem_object *obj; 918 struct drm_i915_gem_object *obj;
881 int ret; 919 int ret;
882 920
@@ -897,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
897 ring->status_page.gfx_addr = obj->gtt_offset; 935 ring->status_page.gfx_addr = obj->gtt_offset;
898 ring->status_page.page_addr = kmap(obj->pages[0]); 936 ring->status_page.page_addr = kmap(obj->pages[0]);
899 if (ring->status_page.page_addr == NULL) { 937 if (ring->status_page.page_addr == NULL) {
900 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
901 goto err_unpin; 938 goto err_unpin;
902 } 939 }
903 ring->status_page.obj = obj; 940 ring->status_page.obj = obj;
@@ -930,7 +967,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
930 ring->size = 32 * PAGE_SIZE; 967 ring->size = 32 * PAGE_SIZE;
931 968
932 init_waitqueue_head(&ring->irq_queue); 969 init_waitqueue_head(&ring->irq_queue);
933 spin_lock_init(&ring->irq_lock);
934 970
935 if (I915_NEED_GFX_HWS(dev)) { 971 if (I915_NEED_GFX_HWS(dev)) {
936 ret = init_status_page(ring); 972 ret = init_status_page(ring);
@@ -951,20 +987,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
951 if (ret) 987 if (ret)
952 goto err_unref; 988 goto err_unref;
953 989
954 ring->map.size = ring->size; 990 ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
955 ring->map.offset = dev->agp->base + obj->gtt_offset; 991 ring->size);
956 ring->map.type = 0; 992 if (ring->virtual_start == NULL) {
957 ring->map.flags = 0;
958 ring->map.mtrr = 0;
959
960 drm_core_ioremap_wc(&ring->map, dev);
961 if (ring->map.handle == NULL) {
962 DRM_ERROR("Failed to map ringbuffer.\n"); 993 DRM_ERROR("Failed to map ringbuffer.\n");
963 ret = -EINVAL; 994 ret = -EINVAL;
964 goto err_unpin; 995 goto err_unpin;
965 } 996 }
966 997
967 ring->virtual_start = ring->map.handle;
968 ret = ring->init(ring); 998 ret = ring->init(ring);
969 if (ret) 999 if (ret)
970 goto err_unmap; 1000 goto err_unmap;
@@ -980,7 +1010,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
980 return 0; 1010 return 0;
981 1011
982err_unmap: 1012err_unmap:
983 drm_core_ioremapfree(&ring->map, dev); 1013 iounmap(ring->virtual_start);
984err_unpin: 1014err_unpin:
985 i915_gem_object_unpin(obj); 1015 i915_gem_object_unpin(obj);
986err_unref: 1016err_unref:
@@ -1008,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1008 1038
1009 I915_WRITE_CTL(ring, 0); 1039 I915_WRITE_CTL(ring, 0);
1010 1040
1011 drm_core_ioremapfree(&ring->map, ring->dev); 1041 iounmap(ring->virtual_start);
1012 1042
1013 i915_gem_object_unpin(ring->obj); 1043 i915_gem_object_unpin(ring->obj);
1014 drm_gem_object_unreference(&ring->obj->base); 1044 drm_gem_object_unreference(&ring->obj->base);
@@ -1022,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1022 1052
1023static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) 1053static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1024{ 1054{
1025 unsigned int *virt; 1055 uint32_t __iomem *virt;
1026 int rem = ring->size - ring->tail; 1056 int rem = ring->size - ring->tail;
1027 1057
1028 if (ring->space < rem) { 1058 if (ring->space < rem) {
@@ -1031,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1031 return ret; 1061 return ret;
1032 } 1062 }
1033 1063
1034 virt = (unsigned int *)(ring->virtual_start + ring->tail); 1064 virt = ring->virtual_start + ring->tail;
1035 rem /= 8; 1065 rem /= 4;
1036 while (rem--) { 1066 while (rem--)
1037 *virt++ = MI_NOOP; 1067 iowrite32(MI_NOOP, virt++);
1038 *virt++ = MI_NOOP;
1039 }
1040 1068
1041 ring->tail = 0; 1069 ring->tail = 0;
1042 ring->space = ring_space(ring); 1070 ring->space = ring_space(ring);
@@ -1057,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1057 was_interruptible = dev_priv->mm.interruptible; 1085 was_interruptible = dev_priv->mm.interruptible;
1058 dev_priv->mm.interruptible = false; 1086 dev_priv->mm.interruptible = false;
1059 1087
1060 ret = i915_wait_request(ring, seqno, true); 1088 ret = i915_wait_request(ring, seqno);
1061 1089
1062 dev_priv->mm.interruptible = was_interruptible; 1090 dev_priv->mm.interruptible = was_interruptible;
1091 if (!ret)
1092 i915_gem_retire_requests_ring(ring);
1063 1093
1064 return ret; 1094 return ret;
1065} 1095}
@@ -1133,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1133 return ret; 1163 return ret;
1134 1164
1135 trace_i915_ring_wait_begin(ring); 1165 trace_i915_ring_wait_begin(ring);
1136 if (drm_core_check_feature(dev, DRIVER_GEM)) 1166 /* With GEM the hangcheck timer should kick us out of the loop,
1137 /* With GEM the hangcheck timer should kick us out of the loop, 1167 * leaving it early runs the risk of corrupting GEM state (due
1138 * leaving it early runs the risk of corrupting GEM state (due 1168 * to running on almost untested codepaths). But on resume
1139 * to running on almost untested codepaths). But on resume 1169 * timers don't work yet, so prevent a complete hang in that
1140 * timers don't work yet, so prevent a complete hang in that 1170 * case by choosing an insanely large timeout. */
1141 * case by choosing an insanely large timeout. */ 1171 end = jiffies + 60 * HZ;
1142 end = jiffies + 60 * HZ;
1143 else
1144 end = jiffies + 3 * HZ;
1145 1172
1146 do { 1173 do {
1147 ring->head = I915_READ_HEAD(ring); 1174 ring->head = I915_READ_HEAD(ring);
@@ -1193,7 +1220,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1193 1220
1194void intel_ring_advance(struct intel_ring_buffer *ring) 1221void intel_ring_advance(struct intel_ring_buffer *ring)
1195{ 1222{
1223 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1224
1196 ring->tail &= ring->size - 1; 1225 ring->tail &= ring->size - 1;
1226 if (dev_priv->stop_rings & intel_ring_flag(ring))
1227 return;
1197 ring->write_tail(ring, ring->tail); 1228 ring->write_tail(ring, ring->tail);
1198} 1229}
1199 1230
@@ -1318,8 +1349,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1318 else 1349 else
1319 ring->flush = gen4_render_ring_flush; 1350 ring->flush = gen4_render_ring_flush;
1320 ring->get_seqno = ring_get_seqno; 1351 ring->get_seqno = ring_get_seqno;
1321 ring->irq_get = i9xx_ring_get_irq; 1352 if (IS_GEN2(dev)) {
1322 ring->irq_put = i9xx_ring_put_irq; 1353 ring->irq_get = i8xx_ring_get_irq;
1354 ring->irq_put = i8xx_ring_put_irq;
1355 } else {
1356 ring->irq_get = i9xx_ring_get_irq;
1357 ring->irq_put = i9xx_ring_put_irq;
1358 }
1323 ring->irq_enable_mask = I915_USER_INTERRUPT; 1359 ring->irq_enable_mask = I915_USER_INTERRUPT;
1324 } 1360 }
1325 ring->write_tail = ring_write_tail; 1361 ring->write_tail = ring_write_tail;
@@ -1366,8 +1402,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1366 else 1402 else
1367 ring->flush = gen4_render_ring_flush; 1403 ring->flush = gen4_render_ring_flush;
1368 ring->get_seqno = ring_get_seqno; 1404 ring->get_seqno = ring_get_seqno;
1369 ring->irq_get = i9xx_ring_get_irq; 1405 if (IS_GEN2(dev)) {
1370 ring->irq_put = i9xx_ring_put_irq; 1406 ring->irq_get = i8xx_ring_get_irq;
1407 ring->irq_put = i8xx_ring_put_irq;
1408 } else {
1409 ring->irq_get = i9xx_ring_get_irq;
1410 ring->irq_put = i9xx_ring_put_irq;
1411 }
1371 ring->irq_enable_mask = I915_USER_INTERRUPT; 1412 ring->irq_enable_mask = I915_USER_INTERRUPT;
1372 ring->write_tail = ring_write_tail; 1413 ring->write_tail = ring_write_tail;
1373 if (INTEL_INFO(dev)->gen >= 4) 1414 if (INTEL_INFO(dev)->gen >= 4)
@@ -1392,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1392 if (IS_I830(ring->dev)) 1433 if (IS_I830(ring->dev))
1393 ring->effective_size -= 128; 1434 ring->effective_size -= 128;
1394 1435
1395 ring->map.offset = start; 1436 ring->virtual_start = ioremap_wc(start, size);
1396 ring->map.size = size; 1437 if (ring->virtual_start == NULL) {
1397 ring->map.type = 0;
1398 ring->map.flags = 0;
1399 ring->map.mtrr = 0;
1400
1401 drm_core_ioremap_wc(&ring->map, dev);
1402 if (ring->map.handle == NULL) {
1403 DRM_ERROR("can not ioremap virtual address for" 1438 DRM_ERROR("can not ioremap virtual address for"
1404 " ring buffer\n"); 1439 " ring buffer\n");
1405 return -ENOMEM; 1440 return -ENOMEM;
1406 } 1441 }
1407 1442
1408 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1409 return 0; 1443 return 0;
1410} 1444}
1411 1445
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 06a66adf69c2..baba75714578 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4struct intel_hw_status_page { 4struct intel_hw_status_page {
5 u32 __iomem *page_addr; 5 u32 *page_addr;
6 unsigned int gfx_addr; 6 unsigned int gfx_addr;
7 struct drm_i915_gem_object *obj; 7 struct drm_i915_gem_object *obj;
8}; 8};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
56 */ 56 */
57 u32 last_retired_head; 57 u32 last_retired_head;
58 58
59 spinlock_t irq_lock; 59 u32 irq_refcount; /* protected by dev_priv->irq_lock */
60 u32 irq_refcount;
61 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 60 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
62 u32 irq_seqno; /* last seq seem at irq time */
63 u32 trace_irq_seqno; 61 u32 trace_irq_seqno;
64 u32 waiting_seqno;
65 u32 sync_seqno[I915_NUM_RINGS-1]; 62 u32 sync_seqno[I915_NUM_RINGS-1];
66 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 63 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
67 void (*irq_put)(struct intel_ring_buffer *ring); 64 void (*irq_put)(struct intel_ring_buffer *ring);
@@ -118,7 +115,6 @@ struct intel_ring_buffer {
118 u32 outstanding_lazy_request; 115 u32 outstanding_lazy_request;
119 116
120 wait_queue_head_t irq_queue; 117 wait_queue_head_t irq_queue;
121 drm_local_map_t map;
122 118
123 void *private; 119 void *private;
124}; 120};
@@ -152,7 +148,9 @@ static inline u32
152intel_read_status_page(struct intel_ring_buffer *ring, 148intel_read_status_page(struct intel_ring_buffer *ring,
153 int reg) 149 int reg)
154{ 150{
155 return ioread32(ring->status_page.page_addr + reg); 151 /* Ensure that the compiler doesn't optimize away the load. */
152 barrier();
153 return ring->status_page.page_addr[reg];
156} 154}
157 155
158/** 156/**
@@ -170,10 +168,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
170 * 168 *
171 * The area from dword 0x20 to 0x3ff is available for driver usage. 169 * The area from dword 0x20 to 0x3ff is available for driver usage.
172 */ 170 */
173#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
174#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
175#define I915_GEM_HWS_INDEX 0x20 171#define I915_GEM_HWS_INDEX 0x20
176#define I915_BREADCRUMB_INDEX 0x21
177 172
178void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 173void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
179 174
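On the intel_read_status_page() change just above: the status page is ordinary system memory kmap()'d in init_status_page() (see the intel_ringbuffer.c hunks), not an I/O mapping, so the __iomem annotation and ioread32() accessor are dropped. The barrier() merely keeps the compiler from caching the load, since the GPU updates the page behind the driver's back. A short usage sketch, reading the breadcrumb seqno the ring writes at I915_GEM_HWS_INDEX:

	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
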
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3d9dfa57130b..7d3f238e8265 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -747,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
747 uint16_t h_sync_offset, v_sync_offset; 747 uint16_t h_sync_offset, v_sync_offset;
748 int mode_clock; 748 int mode_clock;
749 749
750 width = mode->crtc_hdisplay; 750 width = mode->hdisplay;
751 height = mode->crtc_vdisplay; 751 height = mode->vdisplay;
752 752
753 /* do some mode translations */ 753 /* do some mode translations */
754 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; 754 h_blank_len = mode->htotal - mode->hdisplay;
755 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 755 h_sync_len = mode->hsync_end - mode->hsync_start;
756 756
757 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; 757 v_blank_len = mode->vtotal - mode->vdisplay;
758 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; 758 v_sync_len = mode->vsync_end - mode->vsync_start;
759 759
760 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 760 h_sync_offset = mode->hsync_start - mode->hdisplay;
761 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 761 v_sync_offset = mode->vsync_start - mode->vdisplay;
762 762
763 mode_clock = mode->clock; 763 mode_clock = mode->clock;
764 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; 764 mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -1578,9 +1578,6 @@ end:
1578 intel_sdvo->sdvo_lvds_fixed_mode = 1578 intel_sdvo->sdvo_lvds_fixed_mode =
1579 drm_mode_duplicate(connector->dev, newmode); 1579 drm_mode_duplicate(connector->dev, newmode);
1580 1580
1581 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
1582 0);
1583
1584 intel_sdvo->is_lvds = true; 1581 intel_sdvo->is_lvds = true;
1585 break; 1582 break;
1586 } 1583 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index fbf03b996587..2a20fb0781d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
110 * when scaling is disabled. 110 * when scaling is disabled.
111 */ 111 */
112 if (crtc_w != src_w || crtc_h != src_h) { 112 if (crtc_w != src_w || crtc_h != src_h) {
113 dev_priv->sprite_scaling_enabled = true; 113 if (!dev_priv->sprite_scaling_enabled) {
114 intel_update_watermarks(dev); 114 dev_priv->sprite_scaling_enabled = true;
115 intel_wait_for_vblank(dev, pipe); 115 intel_update_watermarks(dev);
116 intel_wait_for_vblank(dev, pipe);
117 }
116 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 118 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
117 } else { 119 } else {
118 dev_priv->sprite_scaling_enabled = false; 120 if (dev_priv->sprite_scaling_enabled) {
119 /* potentially re-enable LP watermarks */ 121 dev_priv->sprite_scaling_enabled = false;
120 intel_update_watermarks(dev); 122 /* potentially re-enable LP watermarks */
123 intel_update_watermarks(dev);
124 }
121 } 125 }
122 126
123 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 127 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -151,6 +155,9 @@ ivb_disable_plane(struct drm_plane *plane)
151 /* Activate double buffered register update */ 155 /* Activate double buffered register update */
152 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 156 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
153 POSTING_READ(SPRSURF(pipe)); 157 POSTING_READ(SPRSURF(pipe));
158
159 dev_priv->sprite_scaling_enabled = false;
160 intel_update_watermarks(dev);
154} 161}
155 162
156static int 163static int
@@ -551,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
551 struct drm_file *file_priv) 558 struct drm_file *file_priv)
552{ 559{
553 struct drm_intel_sprite_colorkey *set = data; 560 struct drm_intel_sprite_colorkey *set = data;
554 struct drm_i915_private *dev_priv = dev->dev_private;
555 struct drm_mode_object *obj; 561 struct drm_mode_object *obj;
556 struct drm_plane *plane; 562 struct drm_plane *plane;
557 struct intel_plane *intel_plane; 563 struct intel_plane *intel_plane;
558 int ret = 0; 564 int ret = 0;
559 565
560 if (!dev_priv) 566 if (!drm_core_check_feature(dev, DRIVER_MODESET))
561 return -EINVAL; 567 return -ENODEV;
562 568
563 /* Make sure we don't try to enable both src & dest simultaneously */ 569 /* Make sure we don't try to enable both src & dest simultaneously */
564 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 570 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -585,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
585 struct drm_file *file_priv) 591 struct drm_file *file_priv)
586{ 592{
587 struct drm_intel_sprite_colorkey *get = data; 593 struct drm_intel_sprite_colorkey *get = data;
588 struct drm_i915_private *dev_priv = dev->dev_private;
589 struct drm_mode_object *obj; 594 struct drm_mode_object *obj;
590 struct drm_plane *plane; 595 struct drm_plane *plane;
591 struct intel_plane *intel_plane; 596 struct intel_plane *intel_plane;
592 int ret = 0; 597 int ret = 0;
593 598
594 if (!dev_priv) 599 if (!drm_core_check_feature(dev, DRIVER_MODESET))
595 return -EINVAL; 600 return -ENODEV;
596 601
597 mutex_lock(&dev->mode_config.mutex); 602 mutex_lock(&dev->mode_config.mutex);
598 603
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67f444d632fb..3346612d2953 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1249,11 +1249,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1249 int type; 1249 int type;
1250 1250
1251 mode = reported_modes[0]; 1251 mode = reported_modes[0];
1252 drm_mode_set_crtcinfo(&mode, 0);
1253 1252
1254 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1253 if (force) {
1255 type = intel_tv_detect_type(intel_tv, connector);
1256 } else if (force) {
1257 struct intel_load_detect_pipe tmp; 1254 struct intel_load_detect_pipe tmp;
1258 1255
1259 if (intel_get_load_detect_pipe(&intel_tv->base, connector, 1256 if (intel_get_load_detect_pipe(&intel_tv->base, connector,