Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig                      |  11
-rw-r--r--  drivers/gpu/drm/drm_mm.c                     |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |  65
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              |  28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |  19
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  98
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              | 204
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c       |  27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c        |  34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c          |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c       |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c        |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              | 279
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |  29
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c             |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         | 363
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c              |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c            |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c             |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c              |  54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      |  32
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |  11
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c            |   3
25 files changed, 1035 insertions(+), 385 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 39573c5f7518..955555d6ec88 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -168,6 +168,17 @@ config DRM_I915_KMS
 	  the driver to bind to PCI devices, which precludes loading things
 	  like intelfb.
 
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default. You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
+
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM && PCI
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index aded1e11e8ff..af93cc55259f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -254,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
+	if (WARN_ON(!node->allocated))
+		return;
+
 	BUG_ON(node->scanned_block || node->scanned_prev_free
 	       || node->scanned_next_free);
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1a87cc9fd899..55ab9246e1b9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/list_sort.h>
+#include <asm/msr-index.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -99,7 +100,7 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -117,6 +118,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (name: %d)", obj->base.name);
 	if (obj->pin_count)
 		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	if (obj->pin_display)
+		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
@@ -193,9 +196,9 @@ static int obj_rank_by_stolen(void *priv,
 			  struct list_head *A, struct list_head *B)
 {
 	struct drm_i915_gem_object *a =
-		container_of(A, struct drm_i915_gem_object, exec_list);
+		container_of(A, struct drm_i915_gem_object, obj_exec_link);
 	struct drm_i915_gem_object *b =
-		container_of(B, struct drm_i915_gem_object, exec_list);
+		container_of(B, struct drm_i915_gem_object, obj_exec_link);
 
 	return a->stolen->start - b->stolen->start;
 }
@@ -219,7 +222,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		if (obj->stolen == NULL)
 			continue;
 
-		list_add(&obj->exec_list, &stolen);
+		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
 		total_gtt_size += i915_gem_obj_ggtt_size(obj);
@@ -229,7 +232,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		if (obj->stolen == NULL)
 			continue;
 
-		list_add(&obj->exec_list, &stolen);
+		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
 		count++;
@@ -237,11 +240,11 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	list_sort(NULL, &stolen, obj_rank_by_stolen);
 	seq_puts(m, "Stolen:\n");
 	while (!list_empty(&stolen)) {
-		obj = list_first_entry(&stolen, typeof(*obj), exec_list);
+		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
 		seq_puts(m, "   ");
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
-		list_del_init(&obj->exec_list);
+		list_del_init(&obj->obj_exec_link);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1767,6 +1770,52 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 power;
+	u32 units;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return -ENODEV;
+
+	rdmsrl(MSR_RAPL_POWER_UNIT, power);
+	power = (power & 0x1f00) >> 8;
+	units = 1000000 / (1 << power); /* convert to uJ */
+	power = I915_READ(MCH_SECP_NRG_STTS);
+	power *= units;
+
+	seq_printf(m, "%llu", (long long unsigned)power);
+
+	return 0;
+}
+
+static int i915_pc8_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_HASWELL(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	mutex_lock(&dev_priv->pc8.lock);
+	seq_printf(m, "Requirements met: %s\n",
+		   yesno(dev_priv->pc8.requirements_met));
+	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+	seq_printf(m, "IRQs disabled: %s\n",
+		   yesno(dev_priv->pc8.irqs_disabled));
+	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
+	mutex_unlock(&dev_priv->pc8.lock);
+
+	return 0;
+}
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2206,6 +2255,8 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_energy_uJ", i915_energy_uJ, 0},
+	{"i915_pc8_status", i915_pc8_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 54f86242e80e..3e4e6073d171 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_LLC:
 		value = HAS_LLC(dev);
 		break;
+	case I915_PARAM_HAS_WT:
+		value = HAS_WT(dev);
+		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
 		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
 		break;
@@ -1483,8 +1486,24 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+
 	i915_dump_device_info(dev_priv);
 
+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1677,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)
 
 	intel_gpu_ips_teardown();
 
-	if (HAS_POWER_WELL(dev))
+	if (HAS_POWER_WELL(dev)) {
+		/* The i915.ko module is still not prepared to be loaded when
+		 * the power well is not enabled, so just enable it in case
+		 * we're going to unload/reload. */
+		intel_set_power_well(dev, true);
 		i915_remove_power_well(dev);
+	}
 
 	i915_teardown_sysfs(dev);
 
@@ -1724,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 
+	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eec47bd00353..735dd5625e9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -122,10 +122,10 @@ int i915_enable_psr __read_mostly = 0;
 module_param_named(enable_psr, i915_enable_psr, int, 0600);
 MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
-unsigned int i915_preliminary_hw_support __read_mostly = 0;
+unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-	"Enable preliminary hardware support. (default: false)");
+	"Enable preliminary hardware support.");
 
 int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -141,6 +141,14 @@ module_param_named(fastboot, i915_fastboot, bool, 0600);
 MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
 		 "(default: false)");
 
+int i915_enable_pc8 __read_mostly = 1;
+module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
+
+int i915_pc8_timeout __read_mostly = 5000;
+module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
 bool i915_prefault_disable __read_mostly;
 module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
@@ -557,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev)
 	dev_priv->modeset_restore = MODESET_SUSPENDED;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
 
+	/* We do a lot of poking in a lot of registers, make sure they work
+	 * properly. */
+	hsw_disable_package_c8(dev_priv);
 	intel_set_power_well(dev, true);
 
 	drm_kms_helper_poll_disable(dev);
@@ -713,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		schedule_work(&dev_priv->console_resume_work);
 	}
 
+	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
+	 * expected level. */
+	hsw_enable_package_c8(dev_priv);
+
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d54354421538..f22c81d040c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -455,6 +455,7 @@ enum i915_cache_level {
 			      caches, eg sampler/render caches, and the
 			      large Last-Level-Cache. LLC is coherent with
 			      the CPU, but L3 is only visible to the GPU. */
+	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 };
 
 typedef uint32_t gen6_gtt_pte_t;
@@ -563,6 +564,10 @@ struct i915_vma {
 	struct list_head mm_list;
 
 	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -1072,6 +1077,75 @@ struct intel_wm_level {
 	uint32_t fbc_val;
 };
 
+/*
+ * This struct tracks the state needed for the Package C8+ feature.
+ *
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states.
+ *
+ * Our driver only allows PC8+ when all the outputs are disabled, the power well
+ * is disabled and the GPU is idle. When these conditions are met, we manually
+ * do the other conditions: disable the interrupts, clocks and switch LCPLL
+ * refclk to Fclk.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6.
+ *
+ * The interrupt disabling is part of the requirements. We can only leave the
+ * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
+ * can lock the machine.
+ *
+ * Ideally every piece of our code that needs PC8+ disabled would call
+ * hsw_disable_package_c8, which would increment disable_count and prevent the
+ * system from reaching PC8+. But we don't have a symmetric way to do this for
+ * everything, so we have the requirements_met and gpu_idle variables. When we
+ * switch requirements_met or gpu_idle to true we decrease disable_count, and
+ * increase it in the opposite case. The requirements_met variable is true when
+ * all the CRTCs, encoders and the power well are disabled. The gpu_idle
+ * variable is true when the GPU is idle.
+ *
+ * In addition to everything, we only actually enable PC8+ if disable_count
+ * stays at zero for at least some seconds. This is implemented with the
+ * enable_work variable. We do this so we don't enable/disable PC8 dozens of
+ * consecutive times when all screens are disabled and some background app
+ * queries the state of our connectors, or we have some application constantly
+ * waking up to use the GPU. Only after the enable_work function actually
+ * enables PC8+ the "enable" variable will become true, which means that it can
+ * be false even if disable_count is 0.
+ *
+ * The irqs_disabled variable becomes true exactly after we disable the IRQs and
+ * goes back to false exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen and we'll print some error messages in
+ * case it happens, but if it actually happens we'll also update the variables
+ * inside struct regsave so when we restore the IRQs they will contain the
+ * latest expected values.
+ *
+ * For more, read "Display Sequences for Package C8" on our documentation.
+ */
+struct i915_package_c8 {
+	bool requirements_met;
+	bool gpu_idle;
+	bool irqs_disabled;
+	/* Only true after the delayed work task actually enables it. */
+	bool enabled;
+	int disable_count;
+	struct mutex lock;
+	struct delayed_work enable_work;
+
+	struct {
+		uint32_t deimr;
+		uint32_t sdeimr;
+		uint32_t gtimr;
+		uint32_t gtier;
+		uint32_t gen6_pmimr;
+	} regsave;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1119,6 +1193,7 @@ typedef struct drm_i915_private {
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask;
 	u32 gt_irq_mask;
+	u32 pm_irq_mask;
 
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
@@ -1255,6 +1330,8 @@ typedef struct drm_i915_private {
 		uint16_t cur_latency[5];
 	} wm;
 
+	struct i915_package_c8 pc8;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -1312,6 +1389,8 @@ struct drm_i915_gem_object {
 	struct list_head global_list;
 
 	struct list_head ring_list;
+	/** Used in execbuf to temporarily hold a ref */
+	struct list_head obj_exec_link;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
@@ -1378,6 +1457,7 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fault_mappable:1;
 	unsigned int pin_mappable:1;
+	unsigned int pin_display:1;
 
 	/*
 	 * Is the GPU currently using a fence to access this buffer,
@@ -1385,7 +1465,7 @@ struct drm_i915_gem_object {
 	unsigned int pending_fenced_gpu_access:1;
 	unsigned int fenced_gpu_access:1;
 
-	unsigned int cache_level:2;
+	unsigned int cache_level:3;
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
@@ -1498,7 +1578,6 @@ struct drm_i915_file_private {
 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
 #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
@@ -1510,6 +1589,8 @@ struct drm_i915_file_private {
 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
+			       ((dev)->pci_device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev) (IS_HASWELL(dev) && \
 		     ((dev)->pci_device & 0xFF00) == 0x0A00)
 
@@ -1530,6 +1611,7 @@ struct drm_i915_file_private {
 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -1552,8 +1634,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1561,8 +1641,6 @@ struct drm_i915_file_private {
 
 #define HAS_IPS(dev) (IS_ULT(dev))
 
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
@@ -1629,6 +1707,8 @@ extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
 extern bool i915_fastboot __read_mostly;
+extern int i915_enable_pc8 __read_mostly;
+extern int i915_pc8_timeout __read_mostly;
 extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -1839,7 +1919,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 }
 
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1866,6 +1946,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				int id,
@@ -1901,6 +1982,9 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 			  struct i915_address_space *vm);
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm);
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 498ef8a7bbc7..2d1cb10d846f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,7 +38,8 @@
 #include <linux/dma-buf.h>
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+						   bool force);
 static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
@@ -62,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+				  enum i915_cache_level level)
+{
+	return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+		return true;
+
+	return obj->pin_display;
+}
+
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 {
 	if (obj->tiling_mode)
@@ -414,8 +429,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		 * read domain and manually flush cachelines (if required). This
 		 * optimizes for the case when the gpu will dirty the data
 		 * anyway again before the next pread happens. */
-		if (obj->cache_level == I915_CACHE_NONE)
-			needs_clflush = 1;
+		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
 		if (i915_gem_obj_bound_any(obj)) {
 			ret = i915_gem_object_set_to_gtt_domain(obj, false);
 			if (ret)
@@ -731,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * write domain and manually flush cachelines (if required). This
 		 * optimizes for the case when the gpu will use the data
 		 * right away and we therefore have to clflush anyway. */
-		if (obj->cache_level == I915_CACHE_NONE)
-			needs_clflush_after = 1;
+		needs_clflush_after = cpu_write_needs_clflush(obj);
 		if (i915_gem_obj_bound_any(obj)) {
 			ret = i915_gem_object_set_to_gtt_domain(obj, true);
 			if (ret)
 				return ret;
 		}
 	}
-	/* Same trick applies for invalidate partially written cachelines before
-	 * writing. */
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
-	    && obj->cache_level == I915_CACHE_NONE)
-		needs_clflush_before = 1;
+	/* Same trick applies to invalidate partially written cachelines read
+	 * before writing. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		needs_clflush_before =
+			!cpu_cache_is_coherent(dev, obj->cache_level);
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
@@ -822,8 +835,8 @@ out:
 		 */
 		if (!needs_clflush_after &&
 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			i915_gem_chipset_flush(dev);
+			if (i915_gem_clflush_object(obj, obj->pin_display))
+				i915_gem_chipset_flush(dev);
 		}
 	}
 
@@ -900,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (obj->cache_level == I915_CACHE_NONE &&
-	    obj->tiling_mode == I915_TILING_NONE &&
-	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+	if (obj->tiling_mode == I915_TILING_NONE &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+	    cpu_write_needs_clflush(obj)) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
@@ -986,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	bool wait_forever = true;
 	int ret;
 
+	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
+
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
@@ -1251,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	}
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (obj->pin_count)
-		i915_gem_object_flush_cpu_write_domain(obj);
+	if (obj->pin_display)
+		i915_gem_object_flush_cpu_write_domain(obj, true);
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -1622,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		 * hope for the best.
 		 */
 		WARN_ON(ret != -EIO);
-		i915_gem_clflush_object(obj);
+		i915_gem_clflush_object(obj, true);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
@@ -2188,7 +2203,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 		offset = i915_gem_obj_offset(request->batch_obj,
 					     request_to_vm(request));
 
-	if (ring->hangcheck.action != wait &&
+	if (ring->hangcheck.action != HANGCHECK_WAIT &&
 	    i915_request_guilty(request, acthd, &inside)) {
 		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
 			  ring->name,
@@ -2593,6 +2608,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (list_empty(&vma->vma_link))
 		return 0;
 
+	if (!drm_mm_node_allocated(&vma->node))
+		goto destroy;
+
 	if (obj->pin_count)
 		return -EBUSY;
 
@@ -2630,6 +2648,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	obj->map_and_fenceable = true;
 
 	drm_mm_remove_node(&vma->node);
+
+destroy:
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
@@ -3088,15 +3108,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	bool mappable, fenceable;
 	size_t gtt_max =
 		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
-	if (WARN_ON(!list_empty(&obj->vma_list)))
-		return -EBUSY;
-
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
@@ -3135,16 +3151,17 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	/* FIXME: For now we only ever use 1 VMA per object */
 	BUG_ON(!i915_is_ggtt(vm));
-	WARN_ON(!list_empty(&obj->vma_list));
 
-	vma = i915_gem_vma_create(obj, vm);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_unpin;
 	}
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
@@ -3173,18 +3190,19 @@ search_free:
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-	fenceable =
-		i915_is_ggtt(vm) &&
-		i915_gem_obj_ggtt_size(obj) == fence_size &&
-		(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
+	if (i915_is_ggtt(vm)) {
+		bool mappable, fenceable;
 
-	mappable =
-		i915_is_ggtt(vm) &&
-		vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
+		fenceable = (vma->node.size == fence_size &&
+			     (vma->node.start & (fence_alignment - 1)) == 0);
+
+		mappable = (vma->node.start + obj->base.size <=
+			    dev_priv->gtt.mappable_end);
 
-	/* Map and fenceable only changes if the VM is the global GGTT */
-	if (i915_is_ggtt(vm))
 		obj->map_and_fenceable = mappable && fenceable;
+	}
+
+	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
 
 	trace_i915_vma_bind(vma, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
@@ -3199,22 +3217,23 @@ err_unpin:
 	return ret;
 }
 
-void
-i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+			bool force)
 {
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
 	if (obj->pages == NULL)
-		return;
+		return false;
 
 	/*
 	 * Stolen memory is always coherent with the GPU as it is explicitly
 	 * marked as wc by the system, or the system is cache-coherent.
 	 */
 	if (obj->stolen)
-		return;
+		return false;
 
 	/* If the GPU is snooping the contents of the CPU cache,
 	 * we do not need to manually clear the CPU cache lines. However,
@@ -3224,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 	 * snooping behaviour occurs naturally as the result of our domain
 	 * tracking.
 	 */
-	if (obj->cache_level != I915_CACHE_NONE)
-		return;
+	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+		return false;
 
 	trace_i915_gem_object_clflush(obj);
-
 	drm_clflush_sg(obj->pages);
+
+	return true;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3261,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+				       bool force)
 {
 	uint32_t old_write_domain;
 
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
-	i915_gem_clflush_object(obj);
-	i915_gem_chipset_flush(obj->base.dev);
+	if (i915_gem_clflush_object(obj, force))
+		i915_gem_chipset_flush(obj->base.dev);
+
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
@@ -3302,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	i915_gem_object_flush_cpu_write_domain(obj);
+	i915_gem_object_flush_cpu_write_domain(obj, false);
 
 	/* Serialise direct access to this object with the barriers for
 	 * coherent writes from the GPU, by effectively invalidating the
@@ -3392,7 +3414,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 					       obj, cache_level);
 	}
 
-	if (cache_level == I915_CACHE_NONE) {
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		vma->node.color = cache_level;
+	obj->cache_level = cache_level;
+
+	if (cpu_write_needs_clflush(obj)) {
 		u32 old_read_domains, old_write_domain;
 
 		/* If we're coming from LLC cached, then we haven't
@@ -3402,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		 * Just set it to the CPU cache for now.
 		 */
 		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
-		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
 
 		old_read_domains = obj->base.read_domains;
 		old_write_domain = obj->base.write_domain;
@@ -3415,9 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 					    old_write_domain);
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		vma->node.color = cache_level;
-	obj->cache_level = cache_level;
 	i915_gem_verify_gtt(dev);
 	return 0;
 }
@@ -3439,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
-	args->caching = obj->cache_level != I915_CACHE_NONE;
+	switch (obj->cache_level) {
+	case I915_CACHE_LLC:
+	case I915_CACHE_L3_LLC:
+		args->caching = I915_CACHING_CACHED;
+		break;
+
+	case I915_CACHE_WT:
+		args->caching = I915_CACHING_DISPLAY;
+		break;
+
+	default:
+		args->caching = I915_CACHING_NONE;
+		break;
+	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3462,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	case I915_CACHING_CACHED:
 		level = I915_CACHE_LLC;
 		break;
+	case I915_CACHING_DISPLAY:
+		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3484,6 +3522,22 @@ unlock:
 	return ret;
 }
 
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+	/* There are 3 sources that pin objects:
+	 * 1. The display engine (scanouts, sprites, cursors);
+	 * 2. Reservations for execbuffer;
+	 * 3. The user.
+	 *
+	 * We can ignore reservations as we hold the struct_mutex and
+	 * are only called outside of the reservation path. The user
+	 * can only increment pin_count once, and so if after
+	 * subtracting the potential reference by the user, any pin_count
+	 * remains, it must be due to another use by the display engine.
+	 */
+	return obj->pin_count - !!obj->user_pin_count;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3503,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 		return ret;
 	}
 
+	/* Mark the pin_display early so that we account for the
+	 * display coherency whilst setting up the cache domains.
+	 */
+	obj->pin_display = true;
+
 	/* The display engine is not coherent with the LLC cache on gen6. As
 	 * a result, we make sure that the pinning that is about to occur is
 	 * done with uncached PTEs. This is lowest common denominator for all
@@ -3512,9 +3571,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * of uncaching, which would allow us to flush all the LLC-cached data
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
-	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	ret = i915_gem_object_set_cache_level(obj,
+					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
-		return ret;
+		goto err_unpin_display;
 
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
@@ -3522,9 +3582,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 */
 	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 	if (ret)
-		return ret;
+		goto err_unpin_display;
 
-	i915_gem_object_flush_cpu_write_domain(obj);
+	i915_gem_object_flush_cpu_write_domain(obj, true);
 
 	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
@@ -3540,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					    old_write_domain);
 
 	return 0;
+
+err_unpin_display:
+	obj->pin_display = is_pin_display(obj);
+	return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin(obj);
+	obj->pin_display = is_pin_display(obj);
 }
 
 int
@@ -3585,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* Flush the CPU cache if it's still invalid. */
 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
-		i915_gem_clflush_object(obj);
+		i915_gem_clflush_object(obj, false);
 
 		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 	}
@@ -3767,10 +3838,6 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	obj->user_pin_count++;
 	obj->pin_filp = file;
 
-	/* XXX - flush the CPU caches for pinned objects
-	 * as the X server doesn't manage domains yet
-	 */
-	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -3913,6 +3980,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
+	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 
 	obj->ops = ops;
@@ -4052,6 +4120,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&vma->vma_link);
 	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
 
@@ -4801,3 +4870,16 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 	return NULL;
 }
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 63ee1a9f7978..e918b05fcbdd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
 #include "i915_drv.h"
 #include <linux/dma-buf.h>
 
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+	return to_intel_bo(buf->priv);
+}
+
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
 					     enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 	struct sg_table *st;
 	struct scatterlist *src, *dst;
 	int ret, i;
@@ -85,14 +90,22 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   struct sg_table *sg,
 				   enum dma_data_direction dir)
 {
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+	mutex_lock(&obj->base.dev->struct_mutex);
+
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
 	kfree(sg);
+
+	i915_gem_object_unpin_pages(obj);
+
+	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	struct sg_page_iter sg_iter;
 	struct page **pages;
@@ -140,7 +153,7 @@ error:
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	int ret;
 
@@ -183,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	int ret;
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -214,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
-	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -253,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
253 264
254 /* is this one of own objects? */ 265 /* is this one of own objects? */
255 if (dma_buf->ops == &i915_dmabuf_ops) { 266 if (dma_buf->ops == &i915_dmabuf_ops) {
256 obj = dma_buf->priv; 267 obj = dma_buf_to_obj(dma_buf);
257 /* is it from our device? */ 268 /* is it from our device? */
258 if (obj->base.dev == dev) { 269 if (obj->base.dev == dev) {
259 /* 270 /*
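
The dmabuf hunk above replaces the scattered `dma_buf->priv` casts with a single `dma_buf_to_obj()` helper, exports the GEM base object as the dma-buf private data, and makes unmap take struct_mutex so it can unpin the pages it mapped. A minimal user-space sketch of the "centralize the cast" part of that pattern; the struct names and fields below are invented for illustration, not the driver's types:

#include <stddef.h>
#include <stdio.h>

struct gem_object { size_t size; };                       /* stand-in base object */
struct driver_object { struct gem_object base; int cache_level; };

struct exported_buf { void *priv; };                      /* plays the role of dma_buf->priv */

/* container_of-style recovery of the driver object from its embedded base;
 * one helper owns the cast, so every caller agrees on what priv points to. */
static struct driver_object *buf_to_obj(struct exported_buf *buf)
{
    struct gem_object *gem = buf->priv;
    return (struct driver_object *)((char *)gem - offsetof(struct driver_object, base));
}

int main(void)
{
    struct driver_object obj = { .base = { .size = 4096 }, .cache_level = 1 };
    struct exported_buf buf = { .priv = &obj.base };      /* export the base object, as the hunk does */

    printf("size=%zu cache=%d\n",
           buf_to_obj(&buf)->base.size, buf_to_obj(&buf)->cache_level);
    return 0;
}
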
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 425939b7d343..91b700155850 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,7 +37,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
37 if (vma->obj->pin_count) 37 if (vma->obj->pin_count)
38 return false; 38 return false;
39 39
40 list_add(&vma->obj->exec_list, unwind); 40 list_add(&vma->exec_list, unwind);
41 return drm_mm_scan_add_block(&vma->node); 41 return drm_mm_scan_add_block(&vma->node);
42} 42}
43 43
@@ -49,7 +49,6 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
49 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
50 struct list_head eviction_list, unwind_list; 50 struct list_head eviction_list, unwind_list;
51 struct i915_vma *vma; 51 struct i915_vma *vma;
52 struct drm_i915_gem_object *obj;
53 int ret = 0; 52 int ret = 0;
54 53
55 trace_i915_gem_evict(dev, min_size, alignment, mappable); 54 trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -104,14 +103,13 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
104none: 103none:
105 /* Nothing found, clean up and bail out! */ 104 /* Nothing found, clean up and bail out! */
106 while (!list_empty(&unwind_list)) { 105 while (!list_empty(&unwind_list)) {
107 obj = list_first_entry(&unwind_list, 106 vma = list_first_entry(&unwind_list,
108 struct drm_i915_gem_object, 107 struct i915_vma,
109 exec_list); 108 exec_list);
110 vma = i915_gem_obj_to_vma(obj, vm);
111 ret = drm_mm_scan_remove_block(&vma->node); 109 ret = drm_mm_scan_remove_block(&vma->node);
112 BUG_ON(ret); 110 BUG_ON(ret);
113 111
114 list_del_init(&obj->exec_list); 112 list_del_init(&vma->exec_list);
115 } 113 }
116 114
117 /* We expect the caller to unpin, evict all and try again, or give up. 115 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -125,28 +123,30 @@ found:
125 * temporary list. */ 123 * temporary list. */
126 INIT_LIST_HEAD(&eviction_list); 124 INIT_LIST_HEAD(&eviction_list);
127 while (!list_empty(&unwind_list)) { 125 while (!list_empty(&unwind_list)) {
128 obj = list_first_entry(&unwind_list, 126 vma = list_first_entry(&unwind_list,
129 struct drm_i915_gem_object, 127 struct i915_vma,
130 exec_list); 128 exec_list);
131 vma = i915_gem_obj_to_vma(obj, vm);
132 if (drm_mm_scan_remove_block(&vma->node)) { 129 if (drm_mm_scan_remove_block(&vma->node)) {
133 list_move(&obj->exec_list, &eviction_list); 130 list_move(&vma->exec_list, &eviction_list);
134 drm_gem_object_reference(&obj->base); 131 drm_gem_object_reference(&vma->obj->base);
135 continue; 132 continue;
136 } 133 }
137 list_del_init(&obj->exec_list); 134 list_del_init(&vma->exec_list);
138 } 135 }
139 136
140 /* Unbinding will emit any required flushes */ 137 /* Unbinding will emit any required flushes */
141 while (!list_empty(&eviction_list)) { 138 while (!list_empty(&eviction_list)) {
142 obj = list_first_entry(&eviction_list, 139 struct drm_gem_object *obj;
143 struct drm_i915_gem_object, 140 vma = list_first_entry(&eviction_list,
141 struct i915_vma,
144 exec_list); 142 exec_list);
143
144 obj = &vma->obj->base;
145 list_del_init(&vma->exec_list);
145 if (ret == 0) 146 if (ret == 0)
146 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); 147 ret = i915_vma_unbind(vma);
147 148
148 list_del_init(&obj->exec_list); 149 drm_gem_object_unreference(obj);
149 drm_gem_object_unreference(&obj->base);
150 } 150 }
151 151
152 return ret; 152 return ret;
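
The eviction hunk switches the scan bookkeeping from objects to VMAs linked through `vma->exec_list`, so the unwind and eviction phases never need to look a VMA up again. A rough standalone sketch of the two-phase scan (collect candidates, then roll back or evict); the list and node types are simplified stand-ins, not the drm_mm API:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a VMA that the scan may pick as an eviction candidate. */
struct vma {
    int id;
    int picked;            /* would drm_mm_scan_remove_block() keep it? */
    struct vma *next;      /* plays the role of vma->exec_list linkage */
};

static struct vma *push(struct vma *head, int id, int picked)
{
    struct vma *v = malloc(sizeof(*v));
    v->id = id;
    v->picked = picked;
    v->next = head;
    return v;
}

int main(void)
{
    struct vma *unwind = NULL, *evict = NULL;

    /* Phase 1: every candidate goes on the unwind list, VMAs only. */
    unwind = push(unwind, 1, 0);
    unwind = push(unwind, 2, 1);
    unwind = push(unwind, 3, 1);

    /* Phase 2: walk the unwind list once; keep the hits, roll back the rest. */
    while (unwind) {
        struct vma *v = unwind;
        unwind = v->next;
        if (v->picked) {             /* move to the eviction list */
            v->next = evict;
            evict = v;
        } else {                     /* scan rolled back; forget this candidate */
            free(v);
        }
    }

    /* Phase 3: unbind and release everything left on the eviction list. */
    while (evict) {
        struct vma *v = evict;
        evict = v->next;
        printf("evicting vma %d\n", v->id);
        free(v);
    }
    return 0;
}
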
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8ccc29ac9629..792c52a235ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -172,6 +172,56 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
172} 172}
173 173
174static int 174static int
175relocate_entry_cpu(struct drm_i915_gem_object *obj,
176 struct drm_i915_gem_relocation_entry *reloc)
177{
178 uint32_t page_offset = offset_in_page(reloc->offset);
179 char *vaddr;
180 int ret = -EINVAL;
181
182 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
183 if (ret)
184 return ret;
185
186 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
187 reloc->offset >> PAGE_SHIFT));
188 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
189 kunmap_atomic(vaddr);
190
191 return 0;
192}
193
194static int
195relocate_entry_gtt(struct drm_i915_gem_object *obj,
196 struct drm_i915_gem_relocation_entry *reloc)
197{
198 struct drm_device *dev = obj->base.dev;
199 struct drm_i915_private *dev_priv = dev->dev_private;
200 uint32_t __iomem *reloc_entry;
201 void __iomem *reloc_page;
202 int ret = -EINVAL;
203
204 ret = i915_gem_object_set_to_gtt_domain(obj, true);
205 if (ret)
206 return ret;
207
208 ret = i915_gem_object_put_fence(obj);
209 if (ret)
210 return ret;
211
212 /* Map the page containing the relocation we're going to perform. */
213 reloc->offset += i915_gem_obj_ggtt_offset(obj);
214 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
215 reloc->offset & PAGE_MASK);
216 reloc_entry = (uint32_t __iomem *)
217 (reloc_page + offset_in_page(reloc->offset));
218 iowrite32(reloc->delta, reloc_entry);
219 io_mapping_unmap_atomic(reloc_page);
220
221 return 0;
222}
223
224static int
175i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 225i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
176 struct eb_objects *eb, 226 struct eb_objects *eb,
177 struct drm_i915_gem_relocation_entry *reloc, 227 struct drm_i915_gem_relocation_entry *reloc,
@@ -255,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
255 return -EFAULT; 305 return -EFAULT;
256 306
257 reloc->delta += target_offset; 307 reloc->delta += target_offset;
258 if (use_cpu_reloc(obj)) { 308 if (use_cpu_reloc(obj))
259 uint32_t page_offset = offset_in_page(reloc->offset); 309 ret = relocate_entry_cpu(obj, reloc);
260 char *vaddr; 310 else
261 311 ret = relocate_entry_gtt(obj, reloc);
262 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
263 if (ret)
264 return ret;
265
266 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
267 reloc->offset >> PAGE_SHIFT));
268 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
269 kunmap_atomic(vaddr);
270 } else {
271 struct drm_i915_private *dev_priv = dev->dev_private;
272 uint32_t __iomem *reloc_entry;
273 void __iomem *reloc_page;
274
275 ret = i915_gem_object_set_to_gtt_domain(obj, true);
276 if (ret)
277 return ret;
278
279 ret = i915_gem_object_put_fence(obj);
280 if (ret)
281 return ret;
282
283 /* Map the page containing the relocation we're going to perform. */
284 reloc->offset += i915_gem_obj_ggtt_offset(obj);
285 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
286 reloc->offset & PAGE_MASK);
287 reloc_entry = (uint32_t __iomem *)
288 (reloc_page + offset_in_page(reloc->offset));
289 iowrite32(reloc->delta, reloc_entry);
290 io_mapping_unmap_atomic(reloc_page);
291 }
292 312
293 /* and update the user's relocation entry */ 313 /* and update the user's relocation entry */
294 reloc->presumed_offset = target_offset; 314 reloc->presumed_offset = target_offset;
@@ -708,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
708{ 728{
709 struct drm_i915_gem_object *obj; 729 struct drm_i915_gem_object *obj;
710 uint32_t flush_domains = 0; 730 uint32_t flush_domains = 0;
731 bool flush_chipset = false;
711 int ret; 732 int ret;
712 733
713 list_for_each_entry(obj, objects, exec_list) { 734 list_for_each_entry(obj, objects, exec_list) {
@@ -716,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
716 return ret; 737 return ret;
717 738
718 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 739 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
719 i915_gem_clflush_object(obj); 740 flush_chipset |= i915_gem_clflush_object(obj, false);
720 741
721 flush_domains |= obj->base.write_domain; 742 flush_domains |= obj->base.write_domain;
722 } 743 }
723 744
724 if (flush_domains & I915_GEM_DOMAIN_CPU) 745 if (flush_chipset)
725 i915_gem_chipset_flush(ring->dev); 746 i915_gem_chipset_flush(ring->dev);
726 747
727 if (flush_domains & I915_GEM_DOMAIN_GTT) 748 if (flush_domains & I915_GEM_DOMAIN_GTT)
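
The execbuffer hunk above splits the relocation write into `relocate_entry_cpu()` and `relocate_entry_gtt()`; the CPU path maps the page containing `reloc->offset` and pokes the delta at the offset within that page. A standalone sketch of the same page/offset arithmetic over a fake paged buffer (PAGE_SIZE and the backing-store layout are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

/* An "object" backed by individually addressed pages, like obj->pages. */
static unsigned char pages[4][PAGE_SIZE];

/* Poke a 32-bit relocation value at a byte offset into the paged backing store. */
static void relocate_entry_cpu(uint64_t offset, uint32_t delta)
{
    unsigned int page = offset >> PAGE_SHIFT;              /* which page */
    unsigned int page_offset = offset & (PAGE_SIZE - 1);   /* offset_in_page() */

    memcpy(&pages[page][page_offset], &delta, sizeof(delta));
}

int main(void)
{
    uint32_t readback;

    relocate_entry_cpu(PAGE_SIZE + 8, 0xdeadbeef);          /* lands in page 1, byte 8 */
    memcpy(&readback, &pages[1][8], sizeof(readback));
    printf("0x%08x\n", readback);
    return 0;
}
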
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 24fb989593f0..212f6d8c35ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,6 +55,7 @@
55#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) 55#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
56#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) 56#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 57#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
58 59
59static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
60 enum i915_cache_level level) 61 enum i915_cache_level level)
@@ -138,8 +139,16 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
138 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 139 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
139 pte |= HSW_PTE_ADDR_ENCODE(addr); 140 pte |= HSW_PTE_ADDR_ENCODE(addr);
140 141
141 if (level != I915_CACHE_NONE) 142 switch (level) {
143 case I915_CACHE_NONE:
144 break;
145 case I915_CACHE_WT:
146 pte |= HSW_WT_ELLC_LLC_AGE0;
147 break;
148 default:
142 pte |= HSW_WB_ELLC_LLC_AGE0; 149 pte |= HSW_WB_ELLC_LLC_AGE0;
150 break;
151 }
143 152
144 return pte; 153 return pte;
145} 154}
@@ -487,7 +496,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
487 dev_priv->gtt.base.total / PAGE_SIZE); 496 dev_priv->gtt.base.total / PAGE_SIZE);
488 497
489 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
490 i915_gem_clflush_object(obj); 499 i915_gem_clflush_object(obj, obj->pin_display);
491 i915_gem_gtt_bind_object(obj, obj->cache_level); 500 i915_gem_gtt_bind_object(obj, obj->cache_level);
492 } 501 }
493 502
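
The `iris_pte_encode()` change above turns the cache-level test into a switch so the new write-through (WT) level can select its own cacheability-control bits instead of falling through to write-back. A sketch of the shape of that encoding, with made-up bit positions standing in for the real HSW control values:

#include <stdint.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_WT, CACHE_WB };

#define PTE_VALID (1u << 0)
/* Placeholder control-field encodings; the real HSW values differ. */
#define CTRL_WT   (0x6u << 1)
#define CTRL_WB   (0xbu << 1)

static uint32_t pte_encode(uint32_t addr_bits, enum cache_level level)
{
    uint32_t pte = PTE_VALID | addr_bits;

    switch (level) {
    case CACHE_NONE:            /* uncached: no cacheability bits at all */
        break;
    case CACHE_WT:              /* write-through gets its own control value */
        pte |= CTRL_WT;
        break;
    default:                    /* everything else falls back to write-back */
        pte |= CTRL_WB;
        break;
    }
    return pte;
}

int main(void)
{
    printf("none=0x%x wt=0x%x wb=0x%x\n",
           pte_encode(0x1000, CACHE_NONE),
           pte_encode(0x1000, CACHE_WT),
           pte_encode(0x1000, CACHE_WB));
    return 0;
}
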
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8912f489f53a..9969d10b80f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -296,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
296 i915_gem_object_pin_pages(obj); 296 i915_gem_object_pin_pages(obj);
297 obj->stolen = stolen; 297 obj->stolen = stolen;
298 298
299 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 299 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
300 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 300 obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
301 obj->cache_level = I915_CACHE_NONE;
302 301
303 return obj; 302 return obj;
304 303
@@ -410,8 +409,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
410 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); 409 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
411 if (ret) { 410 if (ret) {
412 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 411 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
413 i915_gem_vma_destroy(vma); 412 goto err_vma;
414 goto err_out;
415 } 413 }
416 } 414 }
417 415
@@ -422,6 +420,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
422 420
423 return obj; 421 return obj;
424 422
423err_vma:
424 i915_gem_vma_destroy(vma);
425err_out: 425err_out:
426 drm_mm_remove_node(stolen); 426 drm_mm_remove_node(stolen);
427 kfree(stolen); 427 kfree(stolen);
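
The stolen-memory hunk replaces an inline `i915_gem_vma_destroy(); goto err_out;` pair with a dedicated `err_vma:` label, so failures unwind resources in the reverse order they were set up. A generic sketch of that error-label ladder (the resources and failure here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Allocate two dependent resources; on failure, unwind only what exists. */
static int setup(void)
{
    char *stolen, *vma;
    int ret = -1;

    stolen = malloc(64);               /* first resource */
    if (!stolen)
        goto err_out;

    vma = malloc(64);                  /* second resource, depends on the first */
    if (!vma)
        goto err_stolen;

    if (1 /* pretend reserving GTT space failed */)
        goto err_vma;

    return 0;

err_vma:
    free(vma);                         /* undo step 2 */
err_stolen:
    free(stolen);                      /* undo step 1 */
err_out:
    return ret;
}

int main(void)
{
    printf("setup() = %d\n", setup());
    return 0;
}
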
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 60393cb9a7c7..558e568d5b45 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -243,6 +243,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
243 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 243 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
244 error->semaphore_mboxes[ring][1], 244 error->semaphore_mboxes[ring][1],
245 error->semaphore_seqno[ring][1]); 245 error->semaphore_seqno[ring][1]);
246 if (HAS_VEBOX(dev)) {
247 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
248 error->semaphore_mboxes[ring][2],
249 error->semaphore_seqno[ring][2]);
250 }
246 } 251 }
247 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 252 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
248 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 253 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -682,6 +687,12 @@ static void i915_record_ring_state(struct drm_device *dev,
682 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 687 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
683 } 688 }
684 689
690 if (HAS_VEBOX(dev)) {
691 error->semaphore_mboxes[ring->id][2] =
692 I915_READ(RING_SYNC_2(ring->mmio_base));
693 error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
694 }
695
685 if (INTEL_INFO(dev)->gen >= 4) { 696 if (INTEL_INFO(dev)->gen >= 4) {
686 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 697 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
687 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 698 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
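
Both error-state hunks only touch the third semaphore slot when the device actually has a VEBOX ring, so the capture never reads registers that do not exist on older parts. A trivial sketch of gating optional state capture on a feature flag (the field names are invented):

#include <stdio.h>
#include <string.h>

struct error_state {
    unsigned int sync_mbox[3];    /* slot 2 is only meaningful with a VEBOX ring */
};

static void record_ring_state(struct error_state *e, int has_vebox)
{
    memset(e, 0, sizeof(*e));
    e->sync_mbox[0] = 0x11;
    e->sync_mbox[1] = 0x22;
    if (has_vebox)                /* only capture what the hardware provides */
        e->sync_mbox[2] = 0x33;
}

int main(void)
{
    struct error_state e;

    record_ring_state(&e, 1);
    printf("SYNC_2: 0x%02x\n", e.sync_mbox[2]);
    return 0;
}
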
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a77faf4927d..a03b445ceb5f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
85{ 85{
86 assert_spin_locked(&dev_priv->irq_lock); 86 assert_spin_locked(&dev_priv->irq_lock);
87 87
88 if (dev_priv->pc8.irqs_disabled) {
89 WARN(1, "IRQs disabled\n");
90 dev_priv->pc8.regsave.deimr &= ~mask;
91 return;
92 }
93
88 if ((dev_priv->irq_mask & mask) != 0) { 94 if ((dev_priv->irq_mask & mask) != 0) {
89 dev_priv->irq_mask &= ~mask; 95 dev_priv->irq_mask &= ~mask;
90 I915_WRITE(DEIMR, dev_priv->irq_mask); 96 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97{ 103{
98 assert_spin_locked(&dev_priv->irq_lock); 104 assert_spin_locked(&dev_priv->irq_lock);
99 105
106 if (dev_priv->pc8.irqs_disabled) {
107 WARN(1, "IRQs disabled\n");
108 dev_priv->pc8.regsave.deimr |= mask;
109 return;
110 }
111
100 if ((dev_priv->irq_mask & mask) != mask) { 112 if ((dev_priv->irq_mask & mask) != mask) {
101 dev_priv->irq_mask |= mask; 113 dev_priv->irq_mask |= mask;
102 I915_WRITE(DEIMR, dev_priv->irq_mask); 114 I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
104 } 116 }
105} 117}
106 118
119/**
120 * ilk_update_gt_irq - update GTIMR
121 * @dev_priv: driver private
122 * @interrupt_mask: mask of interrupt bits to update
123 * @enabled_irq_mask: mask of interrupt bits to enable
124 */
125static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
126 uint32_t interrupt_mask,
127 uint32_t enabled_irq_mask)
128{
129 assert_spin_locked(&dev_priv->irq_lock);
130
131 if (dev_priv->pc8.irqs_disabled) {
132 WARN(1, "IRQs disabled\n");
133 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
134 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
135 interrupt_mask);
136 return;
137 }
138
139 dev_priv->gt_irq_mask &= ~interrupt_mask;
140 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
141 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
142 POSTING_READ(GTIMR);
143}
144
145void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
146{
147 ilk_update_gt_irq(dev_priv, mask, mask);
148}
149
150void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
151{
152 ilk_update_gt_irq(dev_priv, mask, 0);
153}
154
155/**
156 * snb_update_pm_irq - update GEN6_PMIMR
157 * @dev_priv: driver private
158 * @interrupt_mask: mask of interrupt bits to update
159 * @enabled_irq_mask: mask of interrupt bits to enable
160 */
161static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
162 uint32_t interrupt_mask,
163 uint32_t enabled_irq_mask)
164{
165 uint32_t new_val;
166
167 assert_spin_locked(&dev_priv->irq_lock);
168
169 if (dev_priv->pc8.irqs_disabled) {
170 WARN(1, "IRQs disabled\n");
171 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
172 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
173 interrupt_mask);
174 return;
175 }
176
177 new_val = dev_priv->pm_irq_mask;
178 new_val &= ~interrupt_mask;
179 new_val |= (~enabled_irq_mask & interrupt_mask);
180
181 if (new_val != dev_priv->pm_irq_mask) {
182 dev_priv->pm_irq_mask = new_val;
183 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
184 POSTING_READ(GEN6_PMIMR);
185 }
186}
187
188void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
189{
190 snb_update_pm_irq(dev_priv, mask, mask);
191}
192
193void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194{
195 snb_update_pm_irq(dev_priv, mask, 0);
196}
197
107static bool ivb_can_enable_err_int(struct drm_device *dev) 198static bool ivb_can_enable_err_int(struct drm_device *dev)
108{ 199{
109 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,6 +285,15 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
194 285
195 assert_spin_locked(&dev_priv->irq_lock); 286 assert_spin_locked(&dev_priv->irq_lock);
196 287
288 if (dev_priv->pc8.irqs_disabled &&
289 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
290 WARN(1, "IRQs disabled\n");
291 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
292 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
293 interrupt_mask);
294 return;
295 }
296
197 I915_WRITE(SDEIMR, sdeimr); 297 I915_WRITE(SDEIMR, sdeimr);
198 POSTING_READ(SDEIMR); 298 POSTING_READ(SDEIMR);
199} 299}
@@ -711,17 +811,19 @@ static void gen6_pm_rps_work(struct work_struct *work)
711{ 811{
712 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 812 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
713 rps.work); 813 rps.work);
714 u32 pm_iir, pm_imr; 814 u32 pm_iir;
715 u8 new_delay; 815 u8 new_delay;
716 816
717 spin_lock_irq(&dev_priv->irq_lock); 817 spin_lock_irq(&dev_priv->irq_lock);
718 pm_iir = dev_priv->rps.pm_iir; 818 pm_iir = dev_priv->rps.pm_iir;
719 dev_priv->rps.pm_iir = 0; 819 dev_priv->rps.pm_iir = 0;
720 pm_imr = I915_READ(GEN6_PMIMR);
721 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 820 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
722 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); 821 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
723 spin_unlock_irq(&dev_priv->irq_lock); 822 spin_unlock_irq(&dev_priv->irq_lock);
724 823
824 /* Make sure we didn't queue anything we're not going to process. */
825 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
826
725 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 827 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
726 return; 828 return;
727 829
@@ -806,8 +908,7 @@ static void ivybridge_parity_work(struct work_struct *work)
806 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 908 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
807 909
808 spin_lock_irqsave(&dev_priv->irq_lock, flags); 910 spin_lock_irqsave(&dev_priv->irq_lock, flags);
809 dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
810 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
811 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
812 913
813 mutex_unlock(&dev_priv->dev->struct_mutex); 914 mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -837,8 +938,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
837 return; 938 return;
838 939
839 spin_lock(&dev_priv->irq_lock); 940 spin_lock(&dev_priv->irq_lock);
840 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
841 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
842 spin_unlock(&dev_priv->irq_lock); 942 spin_unlock(&dev_priv->irq_lock);
843 943
844 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
@@ -879,29 +979,6 @@ static void snb_gt_irq_handler(struct drm_device *dev,
879 ivybridge_parity_error_irq_handler(dev); 979 ivybridge_parity_error_irq_handler(dev);
880} 980}
881 981
882/* Legacy way of handling PM interrupts */
883static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
884 u32 pm_iir)
885{
886 /*
887 * IIR bits should never already be set because IMR should
888 * prevent an interrupt from being shown in IIR. The warning
889 * displays a case where we've unsafely cleared
890 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
891 * type is not a problem, it displays a problem in the logic.
892 *
893 * The mask bit in IMR is cleared by dev_priv->rps.work.
894 */
895
896 spin_lock(&dev_priv->irq_lock);
897 dev_priv->rps.pm_iir |= pm_iir;
898 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
899 POSTING_READ(GEN6_PMIMR);
900 spin_unlock(&dev_priv->irq_lock);
901
902 queue_work(dev_priv->wq, &dev_priv->rps.work);
903}
904
905#define HPD_STORM_DETECT_PERIOD 1000 982#define HPD_STORM_DETECT_PERIOD 1000
906#define HPD_STORM_THRESHOLD 5 983#define HPD_STORM_THRESHOLD 5
907 984
@@ -968,31 +1045,28 @@ static void dp_aux_irq_handler(struct drm_device *dev)
968 wake_up_all(&dev_priv->gmbus_wait_queue); 1045 wake_up_all(&dev_priv->gmbus_wait_queue);
969} 1046}
970 1047
971/* Unlike gen6_rps_irq_handler() from which this function is originally derived, 1048/* The RPS events need forcewake, so we add them to a work queue and mask their
972 * we must be able to deal with other PM interrupts. This is complicated because 1049 * IMR bits until the work is done. Other interrupts can be processed without
973 * of the way in which we use the masks to defer the RPS work (which for 1050 * the work queue. */
974 * posterity is necessary because of forcewake). 1051static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
975 */
976static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
977 u32 pm_iir)
978{ 1052{
979 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1053 if (pm_iir & GEN6_PM_RPS_EVENTS) {
980 spin_lock(&dev_priv->irq_lock); 1054 spin_lock(&dev_priv->irq_lock);
981 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1055 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
982 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 1056 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
983 /* never want to mask useful interrupts. (also posting read) */
984 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
985 spin_unlock(&dev_priv->irq_lock); 1057 spin_unlock(&dev_priv->irq_lock);
986 1058
987 queue_work(dev_priv->wq, &dev_priv->rps.work); 1059 queue_work(dev_priv->wq, &dev_priv->rps.work);
988 } 1060 }
989 1061
990 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1062 if (HAS_VEBOX(dev_priv->dev)) {
991 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1063 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1064 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
992 1065
993 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1066 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
994 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1067 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
995 i915_handle_error(dev_priv->dev, false); 1068 i915_handle_error(dev_priv->dev, false);
1069 }
996 } 1070 }
997} 1071}
998 1072
@@ -1064,7 +1138,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1064 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1138 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1065 gmbus_irq_handler(dev); 1139 gmbus_irq_handler(dev);
1066 1140
1067 if (pm_iir & GEN6_PM_RPS_EVENTS) 1141 if (pm_iir)
1068 gen6_rps_irq_handler(dev_priv, pm_iir); 1142 gen6_rps_irq_handler(dev_priv, pm_iir);
1069 1143
1070 I915_WRITE(GTIIR, gt_iir); 1144 I915_WRITE(GTIIR, gt_iir);
@@ -1309,6 +1383,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1383 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1310 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1384 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1311 irqreturn_t ret = IRQ_NONE; 1385 irqreturn_t ret = IRQ_NONE;
1386 bool err_int_reenable = false;
1312 1387
1313 atomic_inc(&dev_priv->irq_received); 1388 atomic_inc(&dev_priv->irq_received);
1314 1389
@@ -1337,7 +1412,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1337 * handler. */ 1412 * handler. */
1338 if (IS_HASWELL(dev)) { 1413 if (IS_HASWELL(dev)) {
1339 spin_lock(&dev_priv->irq_lock); 1414 spin_lock(&dev_priv->irq_lock);
1340 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1415 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
1416 if (err_int_reenable)
1417 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1341 spin_unlock(&dev_priv->irq_lock); 1418 spin_unlock(&dev_priv->irq_lock);
1342 } 1419 }
1343 1420
@@ -1364,16 +1441,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1364 if (INTEL_INFO(dev)->gen >= 6) { 1441 if (INTEL_INFO(dev)->gen >= 6) {
1365 u32 pm_iir = I915_READ(GEN6_PMIIR); 1442 u32 pm_iir = I915_READ(GEN6_PMIIR);
1366 if (pm_iir) { 1443 if (pm_iir) {
1367 if (IS_HASWELL(dev)) 1444 gen6_rps_irq_handler(dev_priv, pm_iir);
1368 hsw_pm_irq_handler(dev_priv, pm_iir);
1369 else if (pm_iir & GEN6_PM_RPS_EVENTS)
1370 gen6_rps_irq_handler(dev_priv, pm_iir);
1371 I915_WRITE(GEN6_PMIIR, pm_iir); 1445 I915_WRITE(GEN6_PMIIR, pm_iir);
1372 ret = IRQ_HANDLED; 1446 ret = IRQ_HANDLED;
1373 } 1447 }
1374 } 1448 }
1375 1449
1376 if (IS_HASWELL(dev)) { 1450 if (err_int_reenable) {
1377 spin_lock(&dev_priv->irq_lock); 1451 spin_lock(&dev_priv->irq_lock);
1378 if (ivb_can_enable_err_int(dev)) 1452 if (ivb_can_enable_err_int(dev))
1379 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1453 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1826,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1826 u32 tmp; 1900 u32 tmp;
1827 1901
1828 if (ring->hangcheck.acthd != acthd) 1902 if (ring->hangcheck.acthd != acthd)
1829 return active; 1903 return HANGCHECK_ACTIVE;
1830 1904
1831 if (IS_GEN2(dev)) 1905 if (IS_GEN2(dev))
1832 return hung; 1906 return HANGCHECK_HUNG;
1833 1907
1834 /* Is the chip hanging on a WAIT_FOR_EVENT? 1908 /* Is the chip hanging on a WAIT_FOR_EVENT?
1835 * If so we can simply poke the RB_WAIT bit 1909 * If so we can simply poke the RB_WAIT bit
@@ -1841,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1841 DRM_ERROR("Kicking stuck wait on %s\n", 1915 DRM_ERROR("Kicking stuck wait on %s\n",
1842 ring->name); 1916 ring->name);
1843 I915_WRITE_CTL(ring, tmp); 1917 I915_WRITE_CTL(ring, tmp);
1844 return kick; 1918 return HANGCHECK_KICK;
1845 } 1919 }
1846 1920
1847 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 1921 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
1848 switch (semaphore_passed(ring)) { 1922 switch (semaphore_passed(ring)) {
1849 default: 1923 default:
1850 return hung; 1924 return HANGCHECK_HUNG;
1851 case 1: 1925 case 1:
1852 DRM_ERROR("Kicking stuck semaphore on %s\n", 1926 DRM_ERROR("Kicking stuck semaphore on %s\n",
1853 ring->name); 1927 ring->name);
1854 I915_WRITE_CTL(ring, tmp); 1928 I915_WRITE_CTL(ring, tmp);
1855 return kick; 1929 return HANGCHECK_KICK;
1856 case 0: 1930 case 0:
1857 return wait; 1931 return HANGCHECK_WAIT;
1858 } 1932 }
1859 } 1933 }
1860 1934
1861 return hung; 1935 return HANGCHECK_HUNG;
1862} 1936}
1863 1937
1864/** 1938/**
@@ -1905,8 +1979,6 @@ static void i915_hangcheck_elapsed(unsigned long data)
1905 } else 1979 } else
1906 busy = false; 1980 busy = false;
1907 } else { 1981 } else {
1908 int score;
1909
1910 /* We always increment the hangcheck score 1982 /* We always increment the hangcheck score
1911 * if the ring is busy and still processing 1983 * if the ring is busy and still processing
1912 * the same request, so that no single request 1984 * the same request, so that no single request
@@ -1926,21 +1998,19 @@ static void i915_hangcheck_elapsed(unsigned long data)
1926 acthd); 1998 acthd);
1927 1999
1928 switch (ring->hangcheck.action) { 2000 switch (ring->hangcheck.action) {
1929 case wait: 2001 case HANGCHECK_WAIT:
1930 score = 0;
1931 break; 2002 break;
1932 case active: 2003 case HANGCHECK_ACTIVE:
1933 score = BUSY; 2004 ring->hangcheck.score += BUSY;
1934 break; 2005 break;
1935 case kick: 2006 case HANGCHECK_KICK:
1936 score = KICK; 2007 ring->hangcheck.score += KICK;
1937 break; 2008 break;
1938 case hung: 2009 case HANGCHECK_HUNG:
1939 score = HUNG; 2010 ring->hangcheck.score += HUNG;
1940 stuck[i] = true; 2011 stuck[i] = true;
1941 break; 2012 break;
1942 } 2013 }
1943 ring->hangcheck.score += score;
1944 } 2014 }
1945 } else { 2015 } else {
1946 /* Gradually reduce the count so that we catch DoS 2016 /* Gradually reduce the count so that we catch DoS
@@ -2158,8 +2228,9 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2158 if (HAS_VEBOX(dev)) 2228 if (HAS_VEBOX(dev))
2159 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2229 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2160 2230
2231 dev_priv->pm_irq_mask = 0xffffffff;
2161 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2232 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2162 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2233 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2163 I915_WRITE(GEN6_PMIER, pm_irqs); 2234 I915_WRITE(GEN6_PMIER, pm_irqs);
2164 POSTING_READ(GEN6_PMIER); 2235 POSTING_READ(GEN6_PMIER);
2165 } 2236 }
@@ -2403,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2403 u16 iir, new_iir; 2474 u16 iir, new_iir;
2404 u32 pipe_stats[2]; 2475 u32 pipe_stats[2];
2405 unsigned long irqflags; 2476 unsigned long irqflags;
2406 int irq_received;
2407 int pipe; 2477 int pipe;
2408 u16 flip_mask = 2478 u16 flip_mask =
2409 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2479 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2437,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2437 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2507 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2438 pipe_name(pipe)); 2508 pipe_name(pipe));
2439 I915_WRITE(reg, pipe_stats[pipe]); 2509 I915_WRITE(reg, pipe_stats[pipe]);
2440 irq_received = 1;
2441 } 2510 }
2442 } 2511 }
2443 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3081,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
3081 dev_priv->display.hpd_irq_setup(dev); 3150 dev_priv->display.hpd_irq_setup(dev);
3082 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3151 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3083} 3152}
3153
3154/* Disable interrupts so we can allow Package C8+. */
3155void hsw_pc8_disable_interrupts(struct drm_device *dev)
3156{
3157 struct drm_i915_private *dev_priv = dev->dev_private;
3158 unsigned long irqflags;
3159
3160 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3161
3162 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3163 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3164 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3165 dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3166 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3167
3168 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3169 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3170 ilk_disable_gt_irq(dev_priv, 0xffffffff);
3171 snb_disable_pm_irq(dev_priv, 0xffffffff);
3172
3173 dev_priv->pc8.irqs_disabled = true;
3174
3175 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3176}
3177
3178/* Restore interrupts so we can recover from Package C8+. */
3179void hsw_pc8_restore_interrupts(struct drm_device *dev)
3180{
3181 struct drm_i915_private *dev_priv = dev->dev_private;
3182 unsigned long irqflags;
3183 uint32_t val, expected;
3184
3185 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3186
3187 val = I915_READ(DEIMR);
3188 expected = ~DE_PCH_EVENT_IVB;
3189 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3190
3191 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3192 expected = ~SDE_HOTPLUG_MASK_CPT;
3193 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3194 val, expected);
3195
3196 val = I915_READ(GTIMR);
3197 expected = 0xffffffff;
3198 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3199
3200 val = I915_READ(GEN6_PMIMR);
3201 expected = 0xffffffff;
3202 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3203 expected);
3204
3205 dev_priv->pc8.irqs_disabled = false;
3206
3207 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3208 ibx_enable_display_interrupt(dev_priv,
3209 ~dev_priv->pc8.regsave.sdeimr &
3210 ~SDE_HOTPLUG_MASK_CPT);
3211 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3212 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3213 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3214
3215 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3216}
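
`ilk_update_gt_irq()` and `snb_update_pm_irq()` above fold enable and disable into one helper: within `interrupt_mask`, clear every bit, then set back whichever of those bits are not in `enabled_irq_mask` (a set IMR bit masks the interrupt, so "enabled" means cleared). A standalone sketch of that mask arithmetic:

#include <stdint.h>
#include <stdio.h>

/* IMR-style update: within interrupt_mask, a bit ends up set (masked off)
 * unless it is also present in enabled_irq_mask; bits outside interrupt_mask
 * are left untouched. */
static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
    imr &= ~interrupt_mask;
    imr |= (~enabled_irq_mask & interrupt_mask);
    return imr;
}

int main(void)
{
    uint32_t imr = 0xffffffff;                 /* everything masked at init */

    imr = update_imr(imr, 0x3, 0x3);           /* enable bits 0-1 -> cleared in IMR */
    printf("after enable:  0x%08x\n", imr);    /* 0xfffffffc */

    imr = update_imr(imr, 0x1, 0x0);           /* disable bit 0 -> set again */
    printf("after disable: 0x%08x\n", imr);    /* 0xfffffffd */
    return 0;
}
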
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 88a2c0792f26..56708c64e68f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1447,6 +1447,8 @@
1447#define MCH_SSKPD_WM0_MASK 0x3f 1447#define MCH_SSKPD_WM0_MASK 0x3f
1448#define MCH_SSKPD_WM0_VAL 0xc 1448#define MCH_SSKPD_WM0_VAL 0xc
1449 1449
1450#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
1451
1450/* Clocking configuration register */ 1452/* Clocking configuration register */
1451#define CLKCFG 0x10c00 1453#define CLKCFG 0x10c00
1452#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ 1454#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1703,15 +1705,26 @@
1703 */ 1705 */
1704#define CCID 0x2180 1706#define CCID 0x2180
1705#define CCID_EN (1<<0) 1707#define CCID_EN (1<<0)
1708/*
1709 * Notes on SNB/IVB/VLV context size:
1710 * - Power context is saved elsewhere (LLC or stolen)
1711 * - Ring/execlist context is saved on SNB, not on IVB
1712 * - Extended context size already includes render context size
1713 * - We always need to follow the extended context size.
1714 * SNB BSpec has comments indicating that we should use the
1715 * render context size instead if execlists are disabled, but
1716 * based on empirical testing that's just nonsense.
1717 * - Pipelined/VF state is saved on SNB/IVB respectively
1718 * - GT1 size just indicates how much of render context
1719 * doesn't need saving on GT1
1720 */
1706#define CXT_SIZE 0x21a0 1721#define CXT_SIZE 0x21a0
1707#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) 1722#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
1708#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) 1723#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
1709#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) 1724#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
1710#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) 1725#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
1711#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) 1726#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
1712#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ 1727#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
1713 GEN6_CXT_RING_SIZE(cxt_reg) + \
1714 GEN6_CXT_RENDER_SIZE(cxt_reg) + \
1715 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ 1728 GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
1716 GEN6_CXT_PIPELINE_SIZE(cxt_reg)) 1729 GEN6_CXT_PIPELINE_SIZE(cxt_reg))
1717#define GEN7_CXT_SIZE 0x21a8 1730#define GEN7_CXT_SIZE 0x21a8
@@ -1721,11 +1734,7 @@
1721#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) 1734#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
1722#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) 1735#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
1723#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) 1736#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
1724#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ 1737#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1725 GEN7_CXT_RING_SIZE(ctx_reg) + \
1726 GEN7_CXT_RENDER_SIZE(ctx_reg) + \
1727 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1728 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1729 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1738 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1730/* Haswell does have the CXT_SIZE register however it does not appear to be 1739/* Haswell does have the CXT_SIZE register however it does not appear to be
1731 * valid. Now, docs explain in dwords what is in the context object. The full 1740 * valid. Now, docs explain in dwords what is in the context object. The full
@@ -4827,8 +4836,8 @@
4827#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ 4836#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
4828#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ 4837#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
4829#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ 4838#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
4830#define HSW_PWR_WELL_ENABLE (1<<31) 4839#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
4831#define HSW_PWR_WELL_STATE (1<<30) 4840#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
4832#define HSW_PWR_WELL_CTL5 0x45410 4841#define HSW_PWR_WELL_CTL5 0x45410
4833#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) 4842#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4834#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) 4843#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
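
The CXT_SIZE changes above drop the power and render fields from the totals because, per the new comment, the extended size already includes the render context. All of those macros are simple shift-and-mask field extractions of one register; a quick sketch of summing only the fields the new total keeps, using the shift/width values the GEN6 macros in this hunk show (the register value itself is made up):

#include <stdint.h>
#include <stdio.h>

/* Generic "take WIDTH bits starting at SHIFT" helper, in the style of the
 * GEN6_CXT_*_SIZE(reg) macros. */
#define FIELD(reg, shift, width) (((reg) >> (shift)) & ((1u << (width)) - 1u))

int main(void)
{
    uint32_t cxt_size = 0x12345678;            /* pretend CXT_SIZE register value */

    uint32_t ring     = FIELD(cxt_size, 18, 6);
    uint32_t extended = FIELD(cxt_size,  6, 6);
    uint32_t pipeline = FIELD(cxt_size,  0, 6);

    /* The revised total only sums the fields that aren't already folded
     * into the extended size. */
    printf("total = %u\n", ring + extended + pipeline);
    return 0;
}
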
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b8c096b4a1de..63aca49d11a8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1139,10 +1139,13 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1139 1139
1140int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1140int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1141{ 1141{
1142 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) 1142 uint32_t lcpll = I915_READ(LCPLL_CTL);
1143
1144 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1145 return 800000;
1146 else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1143 return 450000; 1147 return 450000;
1144 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == 1148 else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
1145 LCPLL_CLK_FREQ_450)
1146 return 450000; 1149 return 450000;
1147 else if (IS_ULT(dev_priv->dev)) 1150 else if (IS_ULT(dev_priv->dev))
1148 return 337500; 1151 return 337500;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b52f374d0f00..10c1db596387 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -690,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
690{ 690{
691 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 691 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
692 u32 m, n, fastclk; 692 u32 m, n, fastclk;
693 u32 updrate, minupdate, fracbits, p; 693 u32 updrate, minupdate, p;
694 unsigned long bestppm, ppm, absppm; 694 unsigned long bestppm, ppm, absppm;
695 int dotclk, flag; 695 int dotclk, flag;
696 696
@@ -701,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
701 fastclk = dotclk / (2*100); 701 fastclk = dotclk / (2*100);
702 updrate = 0; 702 updrate = 0;
703 minupdate = 19200; 703 minupdate = 19200;
704 fracbits = 1;
705 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; 704 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
706 bestm1 = bestm2 = bestp1 = bestp2 = 0; 705 bestm1 = bestm2 = bestp1 = bestp2 = 0;
707 706
@@ -1877,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1877 return 0; 1876 return 0;
1878 1877
1879err_unpin: 1878err_unpin:
1880 i915_gem_object_unpin(obj); 1879 i915_gem_object_unpin_from_display_plane(obj);
1881err_interruptible: 1880err_interruptible:
1882 dev_priv->mm.interruptible = true; 1881 dev_priv->mm.interruptible = true;
1883 return ret; 1882 return ret;
@@ -1886,7 +1885,7 @@ err_interruptible:
1886void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 1885void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1887{ 1886{
1888 i915_gem_object_unpin_fence(obj); 1887 i915_gem_object_unpin_fence(obj);
1889 i915_gem_object_unpin(obj); 1888 i915_gem_object_unpin_from_display_plane(obj);
1890} 1889}
1891 1890
1892/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 1891/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2598,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2598 struct drm_i915_private *dev_priv = dev->dev_private; 2597 struct drm_i915_private *dev_priv = dev->dev_private;
2599 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2600 int pipe = intel_crtc->pipe; 2599 int pipe = intel_crtc->pipe;
2601 u32 reg, temp, i; 2600 u32 reg, temp, i, j;
2602 2601
2603 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2602 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2604 for train result */ 2603 for train result */
@@ -2614,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2614 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 2613 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2615 I915_READ(FDI_RX_IIR(pipe))); 2614 I915_READ(FDI_RX_IIR(pipe)));
2616 2615
2617 /* enable CPU FDI TX and PCH FDI RX */ 2616 /* Try each vswing and preemphasis setting twice before moving on */
2618 reg = FDI_TX_CTL(pipe); 2617 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2619 temp = I915_READ(reg); 2618 /* disable first in case we need to retry */
2620 temp &= ~FDI_DP_PORT_WIDTH_MASK; 2619 reg = FDI_TX_CTL(pipe);
2621 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); 2620 temp = I915_READ(reg);
2622 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2621 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2623 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2622 temp &= ~FDI_TX_ENABLE;
2624 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2623 I915_WRITE(reg, temp);
2625 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2626 temp |= FDI_COMPOSITE_SYNC;
2627 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2628
2629 I915_WRITE(FDI_RX_MISC(pipe),
2630 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2631
2632 reg = FDI_RX_CTL(pipe);
2633 temp = I915_READ(reg);
2634 temp &= ~FDI_LINK_TRAIN_AUTO;
2635 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2636 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2637 temp |= FDI_COMPOSITE_SYNC;
2638 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2639 2624
2640 POSTING_READ(reg); 2625 reg = FDI_RX_CTL(pipe);
2641 udelay(150); 2626 temp = I915_READ(reg);
2627 temp &= ~FDI_LINK_TRAIN_AUTO;
2628 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2629 temp &= ~FDI_RX_ENABLE;
2630 I915_WRITE(reg, temp);
2642 2631
2643 for (i = 0; i < 4; i++) { 2632 /* enable CPU FDI TX and PCH FDI RX */
2644 reg = FDI_TX_CTL(pipe); 2633 reg = FDI_TX_CTL(pipe);
2645 temp = I915_READ(reg); 2634 temp = I915_READ(reg);
2635 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2636 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2637 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2646 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2638 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2647 temp |= snb_b_fdi_train_param[i]; 2639 temp |= snb_b_fdi_train_param[j/2];
2648 I915_WRITE(reg, temp); 2640 temp |= FDI_COMPOSITE_SYNC;
2641 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2649 2642
2650 POSTING_READ(reg); 2643 I915_WRITE(FDI_RX_MISC(pipe),
2651 udelay(500); 2644 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2652 2645
2653 reg = FDI_RX_IIR(pipe); 2646 reg = FDI_RX_CTL(pipe);
2654 temp = I915_READ(reg); 2647 temp = I915_READ(reg);
2655 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2648 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2656 2649 temp |= FDI_COMPOSITE_SYNC;
2657 if (temp & FDI_RX_BIT_LOCK || 2650 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2658 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2659 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2660 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2661 break;
2662 }
2663 }
2664 if (i == 4)
2665 DRM_ERROR("FDI train 1 fail!\n");
2666 2651
2667 /* Train 2 */ 2652 POSTING_READ(reg);
2668 reg = FDI_TX_CTL(pipe); 2653 udelay(1); /* should be 0.5us */
2669 temp = I915_READ(reg);
2670 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2671 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2672 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2673 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2674 I915_WRITE(reg, temp);
2675 2654
2676 reg = FDI_RX_CTL(pipe); 2655 for (i = 0; i < 4; i++) {
2677 temp = I915_READ(reg); 2656 reg = FDI_RX_IIR(pipe);
2678 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2657 temp = I915_READ(reg);
2679 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2658 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2680 I915_WRITE(reg, temp);
2681 2659
2682 POSTING_READ(reg); 2660 if (temp & FDI_RX_BIT_LOCK ||
2683 udelay(150); 2661 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2662 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2663 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2664 i);
2665 break;
2666 }
2667 udelay(1); /* should be 0.5us */
2668 }
2669 if (i == 4) {
2670 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2671 continue;
2672 }
2684 2673
2685 for (i = 0; i < 4; i++) { 2674 /* Train 2 */
2686 reg = FDI_TX_CTL(pipe); 2675 reg = FDI_TX_CTL(pipe);
2687 temp = I915_READ(reg); 2676 temp = I915_READ(reg);
2688 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2677 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2689 temp |= snb_b_fdi_train_param[i]; 2678 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2679 I915_WRITE(reg, temp);
2680
2681 reg = FDI_RX_CTL(pipe);
2682 temp = I915_READ(reg);
2683 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2684 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2690 I915_WRITE(reg, temp); 2685 I915_WRITE(reg, temp);
2691 2686
2692 POSTING_READ(reg); 2687 POSTING_READ(reg);
2693 udelay(500); 2688 udelay(2); /* should be 1.5us */
2694 2689
2695 reg = FDI_RX_IIR(pipe); 2690 for (i = 0; i < 4; i++) {
2696 temp = I915_READ(reg); 2691 reg = FDI_RX_IIR(pipe);
2697 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2692 temp = I915_READ(reg);
2693 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2698 2694
2699 if (temp & FDI_RX_SYMBOL_LOCK) { 2695 if (temp & FDI_RX_SYMBOL_LOCK ||
2700 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2696 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2701 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); 2697 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2702 break; 2698 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2699 i);
2700 goto train_done;
2701 }
2702 udelay(2); /* should be 1.5us */
2703 } 2703 }
2704 if (i == 4)
2705 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2704 } 2706 }
2705 if (i == 4)
2706 DRM_ERROR("FDI train 2 fail!\n");
2707 2707
2708train_done:
2708 DRM_DEBUG_KMS("FDI train done.\n"); 2709 DRM_DEBUG_KMS("FDI train done.\n");
2709} 2710}
2710 2711
@@ -4423,13 +4424,10 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4423 int pipe = crtc->pipe; 4424 int pipe = crtc->pipe;
4424 u32 dpll, mdiv; 4425 u32 dpll, mdiv;
4425 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4426 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4426 bool is_hdmi;
4427 u32 coreclk, reg_val, dpll_md; 4427 u32 coreclk, reg_val, dpll_md;
4428 4428
4429 mutex_lock(&dev_priv->dpio_lock); 4429 mutex_lock(&dev_priv->dpio_lock);
4430 4430
4431 is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4432
4433 bestn = crtc->config.dpll.n; 4431 bestn = crtc->config.dpll.n;
4434 bestm1 = crtc->config.dpll.m1; 4432 bestm1 = crtc->config.dpll.m1;
4435 bestm2 = crtc->config.dpll.m2; 4433 bestm2 = crtc->config.dpll.m2;
@@ -5934,11 +5932,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5934 struct intel_ddi_plls *plls = &dev_priv->ddi_plls; 5932 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
5935 struct intel_crtc *crtc; 5933 struct intel_crtc *crtc;
5936 unsigned long irqflags; 5934 unsigned long irqflags;
5937 uint32_t val, pch_hpd_mask; 5935 uint32_t val;
5938
5939 pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT;
5940 if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE))
5941 pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT;
5942 5936
5943 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) 5937 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
5944 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", 5938 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
@@ -5964,7 +5958,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5964 WARN((val & ~DE_PCH_EVENT_IVB) != val, 5958 WARN((val & ~DE_PCH_EVENT_IVB) != val,
5965 "Unexpected DEIMR bits enabled: 0x%x\n", val); 5959 "Unexpected DEIMR bits enabled: 0x%x\n", val);
5966 val = I915_READ(SDEIMR); 5960 val = I915_READ(SDEIMR);
5967 WARN((val & ~pch_hpd_mask) != val, 5961 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
5968 "Unexpected SDEIMR bits enabled: 0x%x\n", val); 5962 "Unexpected SDEIMR bits enabled: 0x%x\n", val);
5969 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 5963 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5970} 5964}
@@ -6035,16 +6029,21 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6035 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 6029 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6036 return; 6030 return;
6037 6031
 6032 /* Make sure we're not in a PC8 state before disabling PC8, otherwise
 6033 * we'll hang the machine! */
6034 dev_priv->uncore.funcs.force_wake_get(dev_priv);
6035
6038 if (val & LCPLL_POWER_DOWN_ALLOW) { 6036 if (val & LCPLL_POWER_DOWN_ALLOW) {
6039 val &= ~LCPLL_POWER_DOWN_ALLOW; 6037 val &= ~LCPLL_POWER_DOWN_ALLOW;
6040 I915_WRITE(LCPLL_CTL, val); 6038 I915_WRITE(LCPLL_CTL, val);
6039 POSTING_READ(LCPLL_CTL);
6041 } 6040 }
6042 6041
6043 val = I915_READ(D_COMP); 6042 val = I915_READ(D_COMP);
6044 val |= D_COMP_COMP_FORCE; 6043 val |= D_COMP_COMP_FORCE;
6045 val &= ~D_COMP_COMP_DISABLE; 6044 val &= ~D_COMP_COMP_DISABLE;
6046 I915_WRITE(D_COMP, val); 6045 I915_WRITE(D_COMP, val);
6047 I915_READ(D_COMP); 6046 POSTING_READ(D_COMP);
6048 6047
6049 val = I915_READ(LCPLL_CTL); 6048 val = I915_READ(LCPLL_CTL);
6050 val &= ~LCPLL_PLL_DISABLE; 6049 val &= ~LCPLL_PLL_DISABLE;
@@ -6062,6 +6061,168 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6062 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 6061 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6063 DRM_ERROR("Switching back to LCPLL failed\n"); 6062 DRM_ERROR("Switching back to LCPLL failed\n");
6064 } 6063 }
6064
6065 dev_priv->uncore.funcs.force_wake_put(dev_priv);
6066}
6067
6068void hsw_enable_pc8_work(struct work_struct *__work)
6069{
6070 struct drm_i915_private *dev_priv =
6071 container_of(to_delayed_work(__work), struct drm_i915_private,
6072 pc8.enable_work);
6073 struct drm_device *dev = dev_priv->dev;
6074 uint32_t val;
6075
6076 if (dev_priv->pc8.enabled)
6077 return;
6078
6079 DRM_DEBUG_KMS("Enabling package C8+\n");
6080
6081 dev_priv->pc8.enabled = true;
6082
6083 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6084 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6085 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6086 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6087 }
6088
6089 lpt_disable_clkout_dp(dev);
6090 hsw_pc8_disable_interrupts(dev);
6091 hsw_disable_lcpll(dev_priv, true, true);
6092}
6093
6094static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6095{
6096 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6097 WARN(dev_priv->pc8.disable_count < 1,
6098 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6099
6100 dev_priv->pc8.disable_count--;
6101 if (dev_priv->pc8.disable_count != 0)
6102 return;
6103
6104 schedule_delayed_work(&dev_priv->pc8.enable_work,
6105 msecs_to_jiffies(i915_pc8_timeout));
6106}
6107
6108static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6109{
6110 struct drm_device *dev = dev_priv->dev;
6111 uint32_t val;
6112
6113 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6114 WARN(dev_priv->pc8.disable_count < 0,
6115 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6116
6117 dev_priv->pc8.disable_count++;
6118 if (dev_priv->pc8.disable_count != 1)
6119 return;
6120
6121 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6122 if (!dev_priv->pc8.enabled)
6123 return;
6124
6125 DRM_DEBUG_KMS("Disabling package C8+\n");
6126
6127 hsw_restore_lcpll(dev_priv);
6128 hsw_pc8_restore_interrupts(dev);
6129 lpt_init_pch_refclk(dev);
6130
6131 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6132 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6133 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6134 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6135 }
6136
6137 intel_prepare_ddi(dev);
6138 i915_gem_init_swizzling(dev);
6139 mutex_lock(&dev_priv->rps.hw_lock);
6140 gen6_update_ring_freq(dev);
6141 mutex_unlock(&dev_priv->rps.hw_lock);
6142 dev_priv->pc8.enabled = false;
6143}
6144
6145void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6146{
6147 mutex_lock(&dev_priv->pc8.lock);
6148 __hsw_enable_package_c8(dev_priv);
6149 mutex_unlock(&dev_priv->pc8.lock);
6150}
6151
6152void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6153{
6154 mutex_lock(&dev_priv->pc8.lock);
6155 __hsw_disable_package_c8(dev_priv);
6156 mutex_unlock(&dev_priv->pc8.lock);
6157}
6158
6159static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6160{
6161 struct drm_device *dev = dev_priv->dev;
6162 struct intel_crtc *crtc;
6163 uint32_t val;
6164
6165 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6166 if (crtc->base.enabled)
6167 return false;
6168
6169 /* This case is still possible since we have the i915.disable_power_well
6170 * parameter and also the KVMr or something else might be requesting the
6171 * power well. */
6172 val = I915_READ(HSW_PWR_WELL_DRIVER);
6173 if (val != 0) {
6174 DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6175 return false;
6176 }
6177
6178 return true;
6179}
6180
6181/* Since we're called from modeset_global_resources there's no way to
6182 * symmetrically increase and decrease the refcount, so we use
6183 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6184 * or not.
6185 */
6186static void hsw_update_package_c8(struct drm_device *dev)
6187{
6188 struct drm_i915_private *dev_priv = dev->dev_private;
6189 bool allow;
6190
6191 if (!i915_enable_pc8)
6192 return;
6193
6194 mutex_lock(&dev_priv->pc8.lock);
6195
6196 allow = hsw_can_enable_package_c8(dev_priv);
6197
6198 if (allow == dev_priv->pc8.requirements_met)
6199 goto done;
6200
6201 dev_priv->pc8.requirements_met = allow;
6202
6203 if (allow)
6204 __hsw_enable_package_c8(dev_priv);
6205 else
6206 __hsw_disable_package_c8(dev_priv);
6207
6208done:
6209 mutex_unlock(&dev_priv->pc8.lock);
6210}
6211
6212static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6213{
6214 if (!dev_priv->pc8.gpu_idle) {
6215 dev_priv->pc8.gpu_idle = true;
6216 hsw_enable_package_c8(dev_priv);
6217 }
6218}
6219
6220static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6221{
6222 if (dev_priv->pc8.gpu_idle) {
6223 dev_priv->pc8.gpu_idle = false;
6224 hsw_disable_package_c8(dev_priv);
6225 }
6065} 6226}
6066 6227
6067static void haswell_modeset_global_resources(struct drm_device *dev) 6228static void haswell_modeset_global_resources(struct drm_device *dev)
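
`__hsw_enable_package_c8()` / `__hsw_disable_package_c8()` above keep a disable-count: only the 1→0 transition schedules the (delayed) entry into PC8, and only the 0→1 transition cancels that work and restores the hardware. A simplified, single-threaded sketch of that refcount discipline, with no delayed work, locking, or hardware:

#include <stdio.h>

static int disable_count;    /* how many users currently forbid PC8 */
static int pc8_enabled;

static void allow_pc8(void)                  /* like __hsw_enable_package_c8() */
{
    if (--disable_count != 0)
        return;                              /* someone else still objects */
    pc8_enabled = 1;                         /* real code schedules delayed work here */
    printf("entering PC8\n");
}

static void forbid_pc8(void)                 /* like __hsw_disable_package_c8() */
{
    if (++disable_count != 1)
        return;                              /* was already forbidden */
    if (!pc8_enabled)
        return;
    pc8_enabled = 0;
    printf("leaving PC8\n");
}

int main(void)
{
    disable_count = 1;                       /* start forbidden, as the driver does */

    allow_pc8();                             /* 1 -> 0: enter PC8 */
    forbid_pc8();                            /* 0 -> 1: leave PC8 */
    forbid_pc8();                            /* 1 -> 2: no effect */
    allow_pc8();                             /* 2 -> 1: still forbidden */
    allow_pc8();                             /* 1 -> 0: enter PC8 again */
    return 0;
}
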
@@ -6079,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
6079 } 6240 }
6080 6241
6081 intel_set_power_well(dev, enable); 6242 intel_set_power_well(dev, enable);
6243
6244 hsw_update_package_c8(dev);
6082} 6245}
6083 6246
6084static int haswell_crtc_mode_set(struct drm_crtc *crtc, 6247static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -6759,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6759 if (intel_crtc->cursor_bo != obj) 6922 if (intel_crtc->cursor_bo != obj)
6760 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 6923 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6761 } else 6924 } else
6762 i915_gem_object_unpin(intel_crtc->cursor_bo); 6925 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
6763 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 6926 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6764 } 6927 }
6765 6928
@@ -6774,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6774 6937
6775 return 0; 6938 return 0;
6776fail_unpin: 6939fail_unpin:
6777 i915_gem_object_unpin(obj); 6940 i915_gem_object_unpin_from_display_plane(obj);
6778fail_locked: 6941fail_locked:
6779 mutex_unlock(&dev->struct_mutex); 6942 mutex_unlock(&dev->struct_mutex);
6780fail: 6943fail:
@@ -7310,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
7310 7473
7311void intel_mark_busy(struct drm_device *dev) 7474void intel_mark_busy(struct drm_device *dev)
7312{ 7475{
7313 i915_update_gfx_val(dev->dev_private); 7476 struct drm_i915_private *dev_priv = dev->dev_private;
7477
7478 hsw_package_c8_gpu_busy(dev_priv);
7479 i915_update_gfx_val(dev_priv);
7314} 7480}
7315 7481
7316void intel_mark_idle(struct drm_device *dev) 7482void intel_mark_idle(struct drm_device *dev)
7317{ 7483{
7484 struct drm_i915_private *dev_priv = dev->dev_private;
7318 struct drm_crtc *crtc; 7485 struct drm_crtc *crtc;
7319 7486
7487 hsw_package_c8_gpu_idle(dev_priv);
7488
7320 if (!i915_powersave) 7489 if (!i915_powersave)
7321 return; 7490 return;
7322 7491
@@ -8891,6 +9060,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8891 drm_mode_debug_printmodeline(set->mode); 9060 drm_mode_debug_printmodeline(set->mode);
8892 config->mode_changed = true; 9061 config->mode_changed = true;
8893 } 9062 }
9063
9064 DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9065 set->crtc->base.id, config->mode_changed, config->fb_changed);
8894} 9066}
8895 9067
8896static int 9068static int
@@ -8901,14 +9073,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8901 struct drm_crtc *new_crtc; 9073 struct drm_crtc *new_crtc;
8902 struct intel_connector *connector; 9074 struct intel_connector *connector;
8903 struct intel_encoder *encoder; 9075 struct intel_encoder *encoder;
8904 int count, ro; 9076 int ro;
8905 9077
8906 /* The upper layers ensure that we either disable a crtc or have a list 9078 /* The upper layers ensure that we either disable a crtc or have a list
8907 * of connectors. For paranoia, double-check this. */ 9079 * of connectors. For paranoia, double-check this. */
8908 WARN_ON(!set->fb && (set->num_connectors != 0)); 9080 WARN_ON(!set->fb && (set->num_connectors != 0));
8909 WARN_ON(set->fb && (set->num_connectors == 0)); 9081 WARN_ON(set->fb && (set->num_connectors == 0));
8910 9082
8911 count = 0;
8912 list_for_each_entry(connector, &dev->mode_config.connector_list, 9083 list_for_each_entry(connector, &dev->mode_config.connector_list,
8913 base.head) { 9084 base.head) {
8914 /* Otherwise traverse passed in connector list and get encoders 9085 /* Otherwise traverse passed in connector list and get encoders
@@ -8942,7 +9113,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8942 /* connector->new_encoder is now updated for all connectors. */ 9113 /* connector->new_encoder is now updated for all connectors. */
8943 9114
8944 /* Update crtc of enabled connectors. */ 9115 /* Update crtc of enabled connectors. */
8945 count = 0;
8946 list_for_each_entry(connector, &dev->mode_config.connector_list, 9116 list_for_each_entry(connector, &dev->mode_config.connector_list,
8947 base.head) { 9117 base.head) {
8948 if (!connector->new_encoder) 9118 if (!connector->new_encoder)
@@ -10114,6 +10284,17 @@ void i915_redisable_vga(struct drm_device *dev)
10114 struct drm_i915_private *dev_priv = dev->dev_private; 10284 struct drm_i915_private *dev_priv = dev->dev_private;
10115 u32 vga_reg = i915_vgacntrl_reg(dev); 10285 u32 vga_reg = i915_vgacntrl_reg(dev);
10116 10286
10287	/* This function can be called either from intel_modeset_setup_hw_state or
10288 * at a very early point in our resume sequence, where the power well
10289 * structures are not yet restored. Since this function is at a very
10290 * paranoid "someone might have enabled VGA while we were not looking"
10291 * level, just check if the power well is enabled instead of trying to
10292 * follow the "don't touch the power well if we don't need it" policy
10293 * the rest of the driver uses. */
10294 if (HAS_POWER_WELL(dev) &&
10295 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10296 return;
10297
10117 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10298 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10118 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10299 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10119 i915_disable_vga(dev); 10300 i915_disable_vga(dev);
@@ -10302,7 +10483,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
10302{ 10483{
10303 struct drm_i915_private *dev_priv = dev->dev_private; 10484 struct drm_i915_private *dev_priv = dev->dev_private;
10304 struct drm_crtc *crtc; 10485 struct drm_crtc *crtc;
10305 struct intel_crtc *intel_crtc;
10306 10486
10307 /* 10487 /*
10308 * Interrupts and polling as the first thing to avoid creating havoc. 10488 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10326,7 +10506,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
10326 if (!crtc->fb) 10506 if (!crtc->fb)
10327 continue; 10507 continue;
10328 10508
10329 intel_crtc = to_intel_crtc(crtc);
10330 intel_increase_pllclock(crtc); 10509 intel_increase_pllclock(crtc);
10331 } 10510 }
10332 10511
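The package C8 plumbing added above boils down to a mutex-protected disable count: only the 0 -> 1 and 1 -> 0 transitions actually touch the hardware, and intel_mark_busy()/intel_mark_idle() take and drop one such reference on behalf of the GPU. The standalone sketch below illustrates just that counting pattern; the names (pc8_get, pc8_put, hw_enter_c8, hw_leave_c8) are invented for the example, and it performs the re-enable synchronously where the driver defers it through pc8.enable_work.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the expensive hardware transitions. */
static void hw_enter_c8(void) { printf("enter package C8\n"); }
static void hw_leave_c8(void) { printf("leave package C8\n"); }

static pthread_mutex_t pc8_lock = PTHREAD_MUTEX_INITIALIZER;
static int pc8_disable_count;   /* number of reasons C8 must stay off */
static bool pc8_enabled = true; /* mirrors dev_priv->pc8.enabled */

/* Like hsw_disable_package_c8(): only the 0 -> 1 transition does work. */
static void pc8_get(void)
{
	pthread_mutex_lock(&pc8_lock);
	if (++pc8_disable_count == 1 && pc8_enabled) {
		hw_leave_c8();
		pc8_enabled = false;
	}
	pthread_mutex_unlock(&pc8_lock);
}

/* Like hsw_enable_package_c8(): only the 1 -> 0 transition does work
 * (the driver defers this step through a delayed work item). */
static void pc8_put(void)
{
	pthread_mutex_lock(&pc8_lock);
	if (--pc8_disable_count == 0) {
		hw_enter_c8();
		pc8_enabled = true;
	}
	pthread_mutex_unlock(&pc8_lock);
}

int main(void)
{
	pc8_get();  /* GPU becomes busy: C8 must be left */
	pc8_get();  /* a second requirement piles on; no hardware access */
	pc8_put();
	pc8_put();  /* last reference dropped: C8 may be entered again */
	return 0;
}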
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 63b6722d4285..2151d13772b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -344,6 +344,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
344 else 344 else
345 precharge = 5; 345 precharge = 5;
346 346
347 intel_aux_display_runtime_get(dev_priv);
348
347 /* Try to wait for any previous AUX channel activity */ 349 /* Try to wait for any previous AUX channel activity */
348 for (try = 0; try < 3; try++) { 350 for (try = 0; try < 3; try++) {
349 status = I915_READ_NOTRACE(ch_ctl); 351 status = I915_READ_NOTRACE(ch_ctl);
@@ -434,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
434 ret = recv_bytes; 436 ret = recv_bytes;
435out: 437out:
436 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 438 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
439 intel_aux_display_runtime_put(dev_priv);
437 440
438 return ret; 441 return ret;
439} 442}
@@ -2326,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2326 struct drm_device *dev = encoder->dev; 2329 struct drm_device *dev = encoder->dev;
2327 int i; 2330 int i;
2328 uint8_t voltage; 2331 uint8_t voltage;
2329 bool clock_recovery = false;
2330 int voltage_tries, loop_tries; 2332 int voltage_tries, loop_tries;
2331 uint32_t DP = intel_dp->DP; 2333 uint32_t DP = intel_dp->DP;
2332 2334
@@ -2344,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2344 voltage = 0xff; 2346 voltage = 0xff;
2345 voltage_tries = 0; 2347 voltage_tries = 0;
2346 loop_tries = 0; 2348 loop_tries = 0;
2347 clock_recovery = false;
2348 for (;;) { 2349 for (;;) {
2349 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2350 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
2350 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2351 uint8_t link_status[DP_LINK_STATUS_SIZE];
@@ -2365,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2365 2366
2366 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2367 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2367 DRM_DEBUG_KMS("clock recovery OK\n"); 2368 DRM_DEBUG_KMS("clock recovery OK\n");
2368 clock_recovery = true;
2369 break; 2369 break;
2370 } 2370 }
2371 2371
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 01455aa8b8bb..176080822a74 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -745,6 +745,7 @@ extern void intel_set_power_well(struct drm_device *dev, bool enable);
745extern void intel_enable_gt_powersave(struct drm_device *dev); 745extern void intel_enable_gt_powersave(struct drm_device *dev);
746extern void intel_disable_gt_powersave(struct drm_device *dev); 746extern void intel_disable_gt_powersave(struct drm_device *dev);
747extern void ironlake_teardown_rc6(struct drm_device *dev); 747extern void ironlake_teardown_rc6(struct drm_device *dev);
748void gen6_update_ring_freq(struct drm_device *dev);
748 749
749extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 750extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
750 enum pipe *pipe); 751 enum pipe *pipe);
@@ -778,5 +779,18 @@ extern void intel_edp_psr_update(struct drm_device *dev);
778extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 779extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
779 bool switch_to_fclk, bool allow_power_down); 780 bool switch_to_fclk, bool allow_power_down);
780extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); 781extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
782extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
783extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
784 uint32_t mask);
785extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
786extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
787 uint32_t mask);
788extern void hsw_enable_pc8_work(struct work_struct *__work);
789extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
790extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
791extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
792extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
793extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
794extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
781 795
782#endif /* __INTEL_DRV_H__ */ 796#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f27b91eeeb64..4148cc85bf7f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1273,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1273{ 1273{
1274 struct intel_digital_port *intel_dig_port; 1274 struct intel_digital_port *intel_dig_port;
1275 struct intel_encoder *intel_encoder; 1275 struct intel_encoder *intel_encoder;
1276 struct drm_encoder *encoder;
1277 struct intel_connector *intel_connector; 1276 struct intel_connector *intel_connector;
1278 1277
1279 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1278 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1287,7 +1286,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1287 } 1286 }
1288 1287
1289 intel_encoder = &intel_dig_port->base; 1288 intel_encoder = &intel_dig_port->base;
1290 encoder = &intel_encoder->base;
1291 1289
1292 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1290 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1293 DRM_MODE_ENCODER_TMDS); 1291 DRM_MODE_ENCODER_TMDS);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997c..d1c1e0f7f262 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
398 int i, reg_offset; 398 int i, reg_offset;
399 int ret = 0; 399 int ret = 0;
400 400
401 intel_aux_display_runtime_get(dev_priv);
401 mutex_lock(&dev_priv->gmbus_mutex); 402 mutex_lock(&dev_priv->gmbus_mutex);
402 403
403 if (bus->force_bit) { 404 if (bus->force_bit) {
@@ -497,6 +498,7 @@ timeout:
497 498
498out: 499out:
499 mutex_unlock(&dev_priv->gmbus_mutex); 500 mutex_unlock(&dev_priv->gmbus_mutex);
501 intel_aux_display_runtime_put(dev_priv);
500 return ret; 502 return ret;
501} 503}
502 504
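Both the GMBUS path above and the DP AUX path earlier in this patch now wrap the actual transfer in intel_aux_display_runtime_get()/intel_aux_display_runtime_put(), which on Haswell simply disable and re-enable package C8 around the hardware access. A minimal sketch of that bracketing, with hypothetical names (aux_runtime_get, aux_runtime_put, do_transfer) standing in for the driver's functions:

#include <stdio.h>

/* Hypothetical stand-ins: in i915 these are intel_aux_display_runtime_get()
 * and ..._put(), which wrap hsw_disable_package_c8()/hsw_enable_package_c8(). */
static void aux_runtime_get(void) { printf("hold display runtime (C8 off)\n"); }
static void aux_runtime_put(void) { printf("release display runtime\n"); }

static int do_transfer(const char *what)
{
	printf("transfer: %s\n", what);
	return 0;
}

/* Every hardware access is bracketed, including the early-return and error
 * paths in the real code, so the get/put calls always balance. */
static int aux_xfer(const char *what)
{
	int ret;

	aux_runtime_get();
	ret = do_transfer(what);
	aux_runtime_put();
	return ret;
}

int main(void)
{
	return aux_xfer("EDID read over GMBUS");
}

Routing this through a dedicated get/put pair, rather than calling the PC8 functions directly from the transfer code, also leaves room for other platforms to hang different runtime-PM behaviour off the same two entry points.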
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3ac5fe9d428a..0150ba598bf0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3450,11 +3450,11 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3450 3450
3451 spin_lock_irq(&dev_priv->irq_lock); 3451 spin_lock_irq(&dev_priv->irq_lock);
3452 WARN_ON(dev_priv->rps.pm_iir); 3452 WARN_ON(dev_priv->rps.pm_iir);
3453 I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); 3453 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
3454 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3454 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3455 spin_unlock_irq(&dev_priv->irq_lock); 3455 spin_unlock_irq(&dev_priv->irq_lock);
3456 /* unmask all PM interrupts */ 3456 /* only unmask PM interrupts we need. Mask all others. */
3457 I915_WRITE(GEN6_PMINTRMSK, 0); 3457 I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
3458} 3458}
3459 3459
3460static void gen6_enable_rps(struct drm_device *dev) 3460static void gen6_enable_rps(struct drm_device *dev)
@@ -3508,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3508 3508
3509 I915_WRITE(GEN6_RC_SLEEP, 0); 3509 I915_WRITE(GEN6_RC_SLEEP, 0);
3510 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 3510 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3511 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 3511 if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
3512 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3513 else
3514 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3512 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 3515 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3513 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 3516 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3514 3517
@@ -3604,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3604 gen6_gt_force_wake_put(dev_priv); 3607 gen6_gt_force_wake_put(dev_priv);
3605} 3608}
3606 3609
3607static void gen6_update_ring_freq(struct drm_device *dev) 3610void gen6_update_ring_freq(struct drm_device *dev)
3608{ 3611{
3609 struct drm_i915_private *dev_priv = dev->dev_private; 3612 struct drm_i915_private *dev_priv = dev->dev_private;
3610 int min_freq = 15; 3613 int min_freq = 15;
@@ -4861,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4861 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 4864 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4862 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 4865 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4863 4866
4864 /* WaMbcDriverBootEnable:snb */
4865 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4866 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4867
4868 g4x_disable_trickle_feed(dev); 4867 g4x_disable_trickle_feed(dev);
4869 4868
4870 /* The default value should be 0x200 according to docs, but the two 4869 /* The default value should be 0x200 according to docs, but the two
@@ -4960,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4960 I915_WRITE(CACHE_MODE_1, 4959 I915_WRITE(CACHE_MODE_1,
4961 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4960 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4962 4961
4963 /* WaMbcDriverBootEnable:hsw */
4964 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4965 GEN6_MBCTL_ENABLE_BOOT_FETCH);
4966
4967 /* WaSwitchSolVfFArbitrationPriority:hsw */ 4962 /* WaSwitchSolVfFArbitrationPriority:hsw */
4968 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 4963 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4969 4964
@@ -5047,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
5047 5042
5048 g4x_disable_trickle_feed(dev); 5043 g4x_disable_trickle_feed(dev);
5049 5044
5050 /* WaMbcDriverBootEnable:ivb */
5051 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
5052 GEN6_MBCTL_ENABLE_BOOT_FETCH);
5053
5054 /* WaVSRefCountFullforceMissDisable:ivb */ 5045 /* WaVSRefCountFullforceMissDisable:ivb */
5055 gen7_setup_fixed_func_scheduler(dev_priv); 5046 gen7_setup_fixed_func_scheduler(dev_priv);
5056 5047
@@ -5110,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5110 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5101 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5111 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5102 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5112 5103
5113 /* WaMbcDriverBootEnable:vlv */
5114 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
5115 GEN6_MBCTL_ENABLE_BOOT_FETCH);
5116
5117
5118 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5104 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5119 * gating disable must be set. Failure to set it results in 5105 * gating disable must be set. Failure to set it results in
5120 * flickering pixels due to Z write ordering failures after 5106 * flickering pixels due to Z write ordering failures after
@@ -5282,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
5282 case POWER_DOMAIN_TRANSCODER_B: 5268 case POWER_DOMAIN_TRANSCODER_B:
5283 case POWER_DOMAIN_TRANSCODER_C: 5269 case POWER_DOMAIN_TRANSCODER_C:
5284 return I915_READ(HSW_PWR_WELL_DRIVER) == 5270 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5285 (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); 5271 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5286 default: 5272 default:
5287 BUG(); 5273 BUG();
5288 } 5274 }
@@ -5295,17 +5281,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5295 uint32_t tmp; 5281 uint32_t tmp;
5296 5282
5297 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5283 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5298 is_enabled = tmp & HSW_PWR_WELL_STATE; 5284 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5299 enable_requested = tmp & HSW_PWR_WELL_ENABLE; 5285 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5300 5286
5301 if (enable) { 5287 if (enable) {
5302 if (!enable_requested) 5288 if (!enable_requested)
5303 I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); 5289 I915_WRITE(HSW_PWR_WELL_DRIVER,
5290 HSW_PWR_WELL_ENABLE_REQUEST);
5304 5291
5305 if (!is_enabled) { 5292 if (!is_enabled) {
5306 DRM_DEBUG_KMS("Enabling power well\n"); 5293 DRM_DEBUG_KMS("Enabling power well\n");
5307 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 5294 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5308 HSW_PWR_WELL_STATE), 20)) 5295 HSW_PWR_WELL_STATE_ENABLED), 20))
5309 DRM_ERROR("Timeout enabling power well\n"); 5296 DRM_ERROR("Timeout enabling power well\n");
5310 } 5297 }
5311 } else { 5298 } else {
@@ -5407,10 +5394,21 @@ void intel_init_power_well(struct drm_device *dev)
5407 5394
5408 /* We're taking over the BIOS, so clear any requests made by it since 5395 /* We're taking over the BIOS, so clear any requests made by it since
5409 * the driver is in charge now. */ 5396 * the driver is in charge now. */
5410 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) 5397 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5411 I915_WRITE(HSW_PWR_WELL_BIOS, 0); 5398 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5412} 5399}
5413 5400
5401/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5402void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5403{
5404 hsw_disable_package_c8(dev_priv);
5405}
5406
5407void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5408{
5409 hsw_enable_package_c8(dev_priv);
5410}
5411
5414/* Set up chip specific power management-related functions */ 5412/* Set up chip specific power management-related functions */
5415void intel_init_pm(struct drm_device *dev) 5413void intel_init_pm(struct drm_device *dev)
5416{ 5414{
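The HSW_PWR_WELL_ENABLE -> HSW_PWR_WELL_ENABLE_REQUEST and HSW_PWR_WELL_STATE -> HSW_PWR_WELL_STATE_ENABLED renames make the register's split explicit: the driver owns a request bit, the hardware reports back through a separate state bit, and __intel_set_power_well() polls the latter with a 20 ms timeout. Below is a rough sketch of that request-then-poll idiom against a fake register; the bit positions and the simulate_hw() helper are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_WELL_ENABLE_REQUEST (1u << 31)  /* driver-owned request bit */
#define PWR_WELL_STATE_ENABLED  (1u << 30)  /* hardware-owned status bit */

static uint32_t pwr_well_reg;               /* stands in for HSW_PWR_WELL_DRIVER */

/* Pretend hardware: latch the state bit once the request has been seen. */
static void simulate_hw(void)
{
	if (pwr_well_reg & PWR_WELL_ENABLE_REQUEST)
		pwr_well_reg |= PWR_WELL_STATE_ENABLED;
}

/* Write the request bit, then poll the separate state bit with a bounded
 * number of retries (the driver uses wait_for(..., 20) for a 20 ms timeout). */
static bool enable_power_well(void)
{
	int retries = 20;

	pwr_well_reg |= PWR_WELL_ENABLE_REQUEST;
	while (retries--) {
		simulate_hw();
		if (pwr_well_reg & PWR_WELL_STATE_ENABLED)
			return true;
	}
	return false;
}

int main(void)
{
	if (!enable_power_well())
		fprintf(stderr, "Timeout enabling power well\n");
	return 0;
}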
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 74d02a704515..7de29d40d1ad 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
836 return false; 836 return false;
837 837
838 spin_lock_irqsave(&dev_priv->irq_lock, flags); 838 spin_lock_irqsave(&dev_priv->irq_lock, flags);
839 if (ring->irq_refcount++ == 0) { 839 if (ring->irq_refcount++ == 0)
840 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 840 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
841 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
842 POSTING_READ(GTIMR);
843 }
844 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 841 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
845 842
846 return true; 843 return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
854 unsigned long flags; 851 unsigned long flags;
855 852
856 spin_lock_irqsave(&dev_priv->irq_lock, flags); 853 spin_lock_irqsave(&dev_priv->irq_lock, flags);
857 if (--ring->irq_refcount == 0) { 854 if (--ring->irq_refcount == 0)
858 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 855 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
859 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
860 POSTING_READ(GTIMR);
861 }
862 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 856 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
863} 857}
864 858
@@ -1028,9 +1022,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
1028 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1022 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1029 else 1023 else
1030 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1024 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1031 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 1025 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1032 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1033 POSTING_READ(GTIMR);
1034 } 1026 }
1035 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1027 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1036 1028
@@ -1051,9 +1043,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
1051 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1043 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1052 else 1044 else
1053 I915_WRITE_IMR(ring, ~0); 1045 I915_WRITE_IMR(ring, ~0);
1054 dev_priv->gt_irq_mask |= ring->irq_enable_mask; 1046 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1055 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1056 POSTING_READ(GTIMR);
1057 } 1047 }
1058 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1048 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1059 1049
@@ -1072,10 +1062,8 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1072 1062
1073 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1063 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1074 if (ring->irq_refcount++ == 0) { 1064 if (ring->irq_refcount++ == 0) {
1075 u32 pm_imr = I915_READ(GEN6_PMIMR);
1076 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1065 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1077 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); 1066 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1078 POSTING_READ(GEN6_PMIMR);
1079 } 1067 }
1080 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1068 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1081 1069
@@ -1094,10 +1082,8 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1094 1082
1095 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1083 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1096 if (--ring->irq_refcount == 0) { 1084 if (--ring->irq_refcount == 0) {
1097 u32 pm_imr = I915_READ(GEN6_PMIMR);
1098 I915_WRITE_IMR(ring, ~0); 1085 I915_WRITE_IMR(ring, ~0);
1099 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); 1086 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1100 POSTING_READ(GEN6_PMIMR);
1101 } 1087 }
1102 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1088 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1103} 1089}
@@ -1594,6 +1580,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1594 if (INTEL_INFO(ring->dev)->gen >= 6) { 1580 if (INTEL_INFO(ring->dev)->gen >= 6) {
1595 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1581 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1596 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1582 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1583 if (HAS_VEBOX(ring->dev))
1584 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1597 } 1585 }
1598 1586
1599 ring->set_seqno(ring, seqno); 1587 ring->set_seqno(ring, seqno);
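The ring interrupt paths above stop read-modify-writing GTIMR and GEN6_PMIMR inline and instead go through the new ilk_enable_gt_irq()/ilk_disable_gt_irq() and snb_enable_pm_irq()/snb_disable_pm_irq() helpers, so the cached mask and the register write live in one place under irq_lock. A userspace sketch of that centralization combined with the existing irq_refcount pattern; the names and the plain global mask are invented, and the spinlock the driver holds around these calls is omitted.

#include <stdint.h>
#include <stdio.h>

static uint32_t gt_imr = ~0u;   /* cached interrupt mask; all sources masked */

/* Single choke point for register writes, like I915_WRITE(GTIMR, ...). */
static void reg_write_gtimr(uint32_t val)
{
	printf("GTIMR <- 0x%08x\n", val);
}

/* Counterparts of ilk_enable_gt_irq()/ilk_disable_gt_irq(): callers pass a
 * bit mask and never touch the register or the cached copy themselves. */
static void gt_irq_enable(uint32_t mask)
{
	gt_imr &= ~mask;
	reg_write_gtimr(gt_imr);
}

static void gt_irq_disable(uint32_t mask)
{
	gt_imr |= mask;
	reg_write_gtimr(gt_imr);
}

/* Refcounted get/put as in gen5_ring_get_irq()/gen5_ring_put_irq(). */
static int irq_refcount;

static void ring_get_irq(uint32_t enable_mask)
{
	if (irq_refcount++ == 0)
		gt_irq_enable(enable_mask);
}

static void ring_put_irq(uint32_t enable_mask)
{
	if (--irq_refcount == 0)
		gt_irq_disable(enable_mask);
}

int main(void)
{
	ring_get_irq(1u << 0);  /* first user unmasks the interrupt */
	ring_get_irq(1u << 0);  /* second user: no register access */
	ring_put_irq(1u << 0);
	ring_put_irq(1u << 0);  /* last user masks it again */
	return 0;
}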
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6e38256d41e1..432ad5311ba6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,11 +33,12 @@ struct intel_hw_status_page {
33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) 33#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) 36enum intel_ring_hangcheck_action {
37#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 37 HANGCHECK_WAIT,
38#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 38 HANGCHECK_ACTIVE,
39 39 HANGCHECK_KICK,
40enum intel_ring_hangcheck_action { wait, active, kick, hung }; 40 HANGCHECK_HUNG,
41};
41 42
42struct intel_ring_hangcheck { 43struct intel_ring_hangcheck {
43 bool deadlock; 44 bool deadlock;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 02f220b4e4a1..317e058fb3cf 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -538,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
538 &status)) 538 &status))
539 goto log_fail; 539 goto log_fail;
540 540
541 while (status == SDVO_CMD_STATUS_PENDING && --retry) { 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
542 if (retry < 10) 543 if (retry < 10)
543 msleep(15); 544 msleep(15);
544 else 545 else