Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c   |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  | 409
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c        |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c         |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  73
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          |  92
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c        |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c        |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c       |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  66
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c          |  21
18 files changed, 508 insertions, 315 deletions
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index dea99d92fb4a..4b7ed5289217 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
 	BUG_ON(!validate_regs_sorted(ring));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-	if (ret) {
-		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
-		return ret;
+	if (hash_empty(ring->cmd_hash)) {
+		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+		if (ret) {
+			DRM_ERROR("CMD: cmd_parser_init failed!\n");
+			fini_hash_table(ring);
+			return ret;
+		}
 	}
 
 	ring->needs_cmd_parser = true;
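The hunk above makes command-parser initialisation safe to call more than once (for example around GPU reset) by only rebuilding the hash table when it is still empty. A minimal, hypothetical sketch of the same guard, using the kernel hashtable API (the struct and helper names here are invented for illustration):

    #include <linux/hashtable.h>
    #include <linux/errno.h>

    struct example_parser {
            DECLARE_HASHTABLE(cmd_hash, 7);   /* hash_init() is assumed to have run */
    };

    /* Assumed helper that populates the table; not part of the i915 code. */
    int example_fill_hash(struct example_parser *p);

    int example_parser_init(struct example_parser *p)
    {
            int ret;

            /* Re-initialisation must not add duplicate entries: fill only once. */
            if (hash_empty(p->cmd_hash)) {
                    ret = example_fill_hash(p);
                    if (ret)
                            return ret;
            }

            return 0;
    }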
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec96f9a9724c..e27cdbe9d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return true;
 }
 
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->long_hpd_port_mask = 0;
+	dev_priv->short_hpd_port_mask = 0;
+	dev_priv->hpd_event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+		if (intel_encoder->suspend)
+			intel_encoder->suspend(intel_encoder);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
 	intel_runtime_pm_disable_interrupts(dev);
+	intel_hpd_cancel_work(dev_priv);
+
+	intel_suspend_encoders(dev_priv);
 
 	intel_suspend_gt_powersave(dev);
 
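intel_suspend_encoders() above introduces an optional per-encoder ->suspend() hook that is invoked under the modeset locks; the intel_drv.h and intel_dp.c hunks further down declare the hook and wire it up for eDP. A rough sketch of the optional-callback pattern, with illustrative names only:

    struct example_encoder {
            /* Optional: flush encoder state before interrupts are disabled. */
            void (*suspend)(struct example_encoder *encoder);
    };

    static void example_suspend_all(struct example_encoder **encoders, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (encoders[i]->suspend)
                            encoders[i]->suspend(encoders[i]);
    }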
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4412f6a4383b..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
 	if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1458,7 +1459,7 @@ struct drm_i915_private {
 		} hpd_mark;
 	} hpd_stats[HPD_NUM_PINS];
 	u32 hpd_event_bits;
-	struct timer_list hotplug_reenable_timer;
+	struct delayed_work hotplug_reenable_work;
 
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
 
 	/* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-		struct mm_struct *mm;
-		struct i915_mmu_object *mn;
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
 		struct work_struct *work;
 	} userptr;
 };
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
 extern void intel_console_resume(struct work_struct *work);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
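The rewritten -EIO branch inverts the old test: only a non-wedged GPU turns the error into SIGBUS, while a terminally wedged GPU swallows it so that userspace mmaps keep working. A hedged sketch of that decision in isolation (VM_FAULT_NOPAGE stands in for the fall-through "pretend it succeeded" path; this is not the driver's actual code):

    #include <linux/mm.h>

    static int example_handle_eio(bool terminally_wedged)
    {
            /*
             * Any -EIO that is not caused by a wedged GPU (e.g. a swap-in
             * failure) must be reported to userspace as SIGBUS.
             */
            if (!terminally_wedged)
                    return VM_FAULT_SIGBUS;

            /* Wedged GPU: eat the error so the mmap access appears to work. */
            return VM_FAULT_NOPAGE;
    }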
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
 	struct mmu_notifier mn;
 	struct rb_root objects;
 	struct list_head linear;
-	struct drm_device *dev;
-	struct mm_struct *mm;
-	struct work_struct work;
-	unsigned long count;
 	unsigned long serial;
 	bool has_linear;
 };
 
 struct i915_mmu_object {
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 			      unsigned long start,
 			      unsigned long end)
 {
-	struct i915_mmu_object *mmu;
+	struct i915_mmu_object *mo;
 	unsigned long serial;
 
 restart:
 	serial = mn->serial;
-	list_for_each_entry(mmu, &mn->linear, link) {
+	list_for_each_entry(mo, &mn->linear, link) {
 		struct drm_i915_gem_object *obj;
 
-		if (mmu->it.last < start || mmu->it.start > end)
+		if (mo->it.last < start || mo->it.start > end)
 			continue;
 
-		obj = mmu->obj;
+		obj = mo->obj;
 		drm_gem_object_reference(&obj->base);
 		spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
-
-	/* Protected by dev->struct_mutex */
-	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-		if (mmu->mm == mm)
-			return mmu;
-
-	return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	int ret;
 
-	lockdep_assert_held(&dev->struct_mutex);
-
-	mmu = __i915_mmu_notifier_lookup(dev, mm);
-	if (mmu)
-		return mmu;
-
-	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-	if (mmu == NULL)
+	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+	if (mn == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&mmu->lock);
-	mmu->dev = dev;
-	mmu->mn.ops = &i915_gem_userptr_notifier;
-	mmu->mm = mm;
-	mmu->objects = RB_ROOT;
-	mmu->count = 0;
-	mmu->serial = 1;
-	INIT_LIST_HEAD(&mmu->linear);
-	mmu->has_linear = false;
-
-	/* Protected by mmap_sem (write-lock) */
-	ret = __mmu_notifier_register(&mmu->mn, mm);
+	spin_lock_init(&mn->lock);
+	mn->mn.ops = &i915_gem_userptr_notifier;
+	mn->objects = RB_ROOT;
+	mn->serial = 1;
+	INIT_LIST_HEAD(&mn->linear);
+	mn->has_linear = false;
+
+	/* Protected by mmap_sem (write-lock) */
+	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
-		kfree(mmu);
+		kfree(mn);
 		return ERR_PTR(ret);
 	}
 
-	/* Protected by dev->struct_mutex */
-	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-	return mmu;
+	return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-	mmu_notifier_unregister(&mmu->mn, mmu->mm);
-	kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	/* Protected by dev->struct_mutex */
-	hash_del(&mmu->node);
-
-	/* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-	 * We enter the function holding struct_mutex, therefore we need
-	 * to drop our mutex prior to calling mmu_notifier_unregister in
-	 * order to prevent lock inversion (and system-wide deadlock)
-	 * between the mmap_sem and struct-mutex. Hence we defer the
-	 * unregistration to a workqueue where we hold no locks.
-	 */
-	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-	schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-	if (++mmu->serial == 0)
-		mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-	struct i915_mmu_object *mn;
-
-	list_for_each_entry(mn, &mmu->linear, link)
-		if (mn->is_linear)
-			return true;
-
-	return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	spin_lock(&mmu->lock);
-	list_del(&mn->link);
-	if (mn->is_linear)
-		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-	else
-		interval_tree_remove(&mn->it, &mmu->objects);
-	__i915_mmu_notifier_update_serial(mmu);
-	spin_unlock(&mmu->lock);
-
-	/* Protected against _add() by dev->struct_mutex */
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
+	if (++mn->serial == 0)
+		mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+		      struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
 {
 	struct interval_tree_node *it;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(mmu->dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 	 * remove the objects from the interval tree) before we do
 	 * the check for overlapping objects.
 	 */
-	i915_gem_retire_requests(mmu->dev);
+	i915_gem_retire_requests(dev);
 
-	spin_lock(&mmu->lock);
-	it = interval_tree_iter_first(&mmu->objects,
-				      mn->it.start, mn->it.last);
+	spin_lock(&mn->lock);
+	it = interval_tree_iter_first(&mn->objects,
+				      mo->it.start, mo->it.last);
 	if (it) {
 		struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
 		if (!obj->userptr.workers)
-			mmu->has_linear = mn->is_linear = true;
+			mn->has_linear = mo->is_linear = true;
 		else
 			ret = -EAGAIN;
 	} else
-		interval_tree_insert(&mn->it, &mmu->objects);
+		interval_tree_insert(&mo->it, &mn->objects);
 
 	if (ret == 0) {
-		list_add(&mn->link, &mmu->linear);
-		__i915_mmu_notifier_update_serial(mmu);
+		list_add(&mo->link, &mn->linear);
+		__i915_mmu_notifier_update_serial(mn);
 	}
-	spin_unlock(&mmu->lock);
-	mutex_unlock(&mmu->dev->struct_mutex);
+	spin_unlock(&mn->lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+	struct i915_mmu_object *mo;
+
+	list_for_each_entry(mo, &mn->linear, link)
+		if (mo->is_linear)
+			return true;
+
+	return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	spin_lock(&mn->lock);
+	list_del(&mo->link);
+	if (mo->is_linear)
+		mn->has_linear = i915_mmu_notifier_has_linear(mn);
+	else
+		interval_tree_remove(&mo->it, &mn->objects);
+	__i915_mmu_notifier_update_serial(mn);
+	spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmu_object *mn;
+	struct i915_mmu_object *mo;
 
-	mn = obj->userptr.mn;
-	if (mn == NULL)
+	mo = obj->userptr.mmu_object;
+	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mn->mmu, mn);
-	obj->userptr.mn = NULL;
+	i915_mmu_notifier_del(mo->mn, mo);
+	kfree(mo);
+
+	obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+	if (mm->mn == NULL) {
+		down_write(&mm->mm->mmap_sem);
+		mutex_lock(&to_i915(mm->dev)->mm_lock);
+		if (mm->mn == NULL)
+			mm->mn = i915_mmu_notifier_create(mm->mm);
+		mutex_unlock(&to_i915(mm->dev)->mm_lock);
+		up_write(&mm->mm->mmap_sem);
+	}
+	return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 				    unsigned flags)
 {
-	struct i915_mmu_notifier *mmu;
-	struct i915_mmu_object *mn;
+	struct i915_mmu_notifier *mn;
+	struct i915_mmu_object *mo;
 	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-	down_write(&obj->userptr.mm->mmap_sem);
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
-	if (ret == 0) {
-		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-		if (!IS_ERR(mmu))
-			mmu->count++; /* preemptive add to act as a refcount */
-		else
-			ret = PTR_ERR(mmu);
-		mutex_unlock(&obj->base.dev->struct_mutex);
-	}
-	up_write(&obj->userptr.mm->mmap_sem);
-	if (ret)
-		return ret;
+	if (WARN_ON(obj->userptr.mm == NULL))
+		return -EINVAL;
 
-	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-	if (mn == NULL) {
-		ret = -ENOMEM;
-		goto destroy_mmu;
-	}
+	mn = i915_mmu_notifier_find(obj->userptr.mm);
+	if (IS_ERR(mn))
+		return PTR_ERR(mn);
 
-	mn->mmu = mmu;
-	mn->it.start = obj->userptr.ptr;
-	mn->it.last = mn->it.start + obj->base.size - 1;
-	mn->obj = obj;
+	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+	if (mo == NULL)
+		return -ENOMEM;
 
-	ret = i915_mmu_notifier_add(mmu, mn);
-	if (ret)
-		goto free_mn;
+	mo->mn = mn;
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = mo->it.start + obj->base.size - 1;
+	mo->obj = obj;
 
-	obj->userptr.mn = mn;
+	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+	if (ret) {
+		kfree(mo);
+		return ret;
+	}
+
+	obj->userptr.mmu_object = mo;
 	return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	if (mn == NULL)
+		return;
 
-free_mn:
+	mmu_notifier_unregister(&mn->mn, mm);
 	kfree(mn);
-destroy_mmu:
-	mutex_lock(&obj->base.dev->struct_mutex);
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
-	mutex_unlock(&obj->base.dev->struct_mutex);
-	return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
 	return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+	struct i915_mm_struct *mm;
+
+	/* Protected by dev_priv->mm_lock */
+	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+		if (mm->mm == real)
+			return mm;
+
+	return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm;
+	int ret = 0;
+
+	/* During release of the GEM object we hold the struct_mutex. This
+	 * precludes us from calling mmput() at that time as that may be
+	 * the last reference and so call exit_mmap(). exit_mmap() will
+	 * attempt to reap the vma, and if we were holding a GTT mmap
+	 * would then call drm_gem_vm_close() and attempt to reacquire
+	 * the struct mutex. So in order to avoid that recursion, we have
+	 * to defer releasing the mm reference until after we drop the
+	 * struct_mutex, i.e. we need to schedule a worker to do the clean
+	 * up.
+	 */
+	mutex_lock(&dev_priv->mm_lock);
+	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	if (mm == NULL) {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (mm == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		kref_init(&mm->kref);
+		mm->dev = obj->base.dev;
+
+		mm->mm = current->mm;
+		atomic_inc(&current->mm->mm_count);
+
+		mm->mn = NULL;
+
+		/* Protected by dev_priv->mm_lock */
+		hash_add(dev_priv->mm_structs,
+			 &mm->node, (unsigned long)mm->mm);
+	} else
+		kref_get(&mm->kref);
+
+	obj->userptr.mm = mm;
+out:
+	mutex_unlock(&dev_priv->mm_lock);
+	return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	i915_mmu_notifier_free(mm->mn, mm->mm);
+	mmdrop(mm->mm);
+	kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+	/* Protected by dev_priv->mm_lock */
+	hash_del(&mm->node);
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+	schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mm == NULL)
+		return;
+
+	kref_put_mutex(&obj->userptr.mm->kref,
+		       __i915_mm_struct_free,
+		       &to_i915(obj->base.dev)->mm_lock);
+	obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
 	struct work_struct work;
 	struct drm_i915_gem_object *obj;
 	struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec == NULL)
 		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (pvec != NULL) {
-		struct mm_struct *mm = obj->userptr.mm;
+		struct mm_struct *mm = obj->userptr.mm->mm;
 
 		down_read(&mm->mmap_sem);
 		while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm == current->mm) {
+	if (obj->userptr.mm->mm == current->mm) {
 		pvec = kmalloc(num_pages*sizeof(struct page *),
 			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 		if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	i915_gem_userptr_release__mmu_notifier(obj);
-
-	if (obj->userptr.mm) {
-		mmput(obj->userptr.mm);
-		obj->userptr.mm = NULL;
-	}
+	i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-	if (obj->userptr.mn)
+	if (obj->userptr.mmu_object)
 		return 0;
 
 	return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		return -ENODEV;
 	}
 
-	/* Allocate the new object */
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	 * at binding. This means that we need to hook into the mmu_notifier
 	 * in order to detect if the mmu is destroyed.
 	 */
-	ret = -ENOMEM;
-	if ((obj->userptr.mm = get_task_mm(current)))
+	ret = i915_gem_userptr_init__mm_struct(obj);
+	if (ret == 0)
 		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
 	if (ret == 0)
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	hash_init(dev_priv->mmu_notifiers);
-#endif
+	mutex_init(&dev_priv->mm_lock);
+	hash_init(dev_priv->mm_structs);
 	return 0;
 }
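Two lifetime patterns carry this rework: kref_put_mutex() takes dev_priv->mm_lock only when the final reference is dropped, so __i915_mm_struct_free() runs with the lock held and can unhash the entry before unlocking, and the actual mmdrop()/kfree() is pushed to a worker so no struct_mutex is held when exit_mmap() could recurse back into the driver. A stripped-down, hypothetical sketch of that shape (not the i915 structures themselves):

    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct example_entry {
            struct kref kref;
            struct work_struct work;
            struct mutex *table_lock;       /* protects the lookup table */
    };

    static void example_free_worker(struct work_struct *work)
    {
            struct example_entry *e = container_of(work, typeof(*e), work);

            /* Heavy teardown runs here with no locks held. */
            kfree(e);
    }

    static void example_release(struct kref *kref)
    {
            struct example_entry *e = container_of(kref, typeof(*e), kref);

            /* kref_put_mutex() calls this with *e->table_lock already held. */
            /* ... unhash e from the lookup table here ... */
            mutex_unlock(e->table_lock);

            /* Defer the free to avoid recursing on the caller's locks. */
            INIT_WORK(&e->work, example_free_worker);
            schedule_work(&e->work);
    }

    static void example_put(struct example_entry *e)
    {
            kref_put_mutex(&e->kref, example_release, e->table_lock);
    }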
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 390ccc2a3096..0050ee9470f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	 * some connectors */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_enable(dev);
-		mod_timer(&dev_priv->hotplug_reenable_timer,
-			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
 	}
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-	del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	gen8_irq_reset(dev);
 }
 
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
 	I915_WRITE(VLV_MASTER_IER, 0);
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0xffff);
 
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	ironlake_irq_reset(dev);
 }
 
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug_reenable_work.work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	unsigned long irqflags;
 	int i;
 
+	intel_runtime_pm_get(dev_priv);
+
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
 		struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
 		    i915_hangcheck_elapsed,
 		    (unsigned long) dev);
-	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-		    (unsigned long) dev_priv);
+	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+			  intel_hpd_irq_reenable);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
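The i915_irq.c changes replace the hotplug re-enable timer with delayed work so the handler may sleep (it now takes a runtime PM reference). The conversion follows the usual timer-to-delayed_work mapping, sketched here against a hypothetical device structure:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct example_dev {
            struct delayed_work reenable_work;      /* was: struct timer_list */
    };

    static void example_reenable(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, typeof(*dev), reenable_work.work);

            /* Unlike a timer callback, this may sleep. */
            (void)dev;
    }

    static void example_init(struct example_dev *dev)
    {
            /* was: setup_timer(&dev->timer, fn, (unsigned long)dev) */
            INIT_DELAYED_WORK(&dev->reenable_work, example_reenable);
    }

    static void example_arm(struct example_dev *dev, unsigned int delay_ms)
    {
            /* was: mod_timer(&dev->timer, jiffies + msecs_to_jiffies(delay_ms)) */
            mod_delayed_work(system_wq, &dev->reenable_work,
                             msecs_to_jiffies(delay_ms));
    }

    static void example_teardown(struct example_dev *dev)
    {
            /* was: del_timer_sync(&dev->timer) */
            cancel_delayed_work_sync(&dev->reenable_work);
    }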
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
 #define GFX_OP_DESTBUFFER_INFO		((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO		((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965	((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_WRITE_A			(2<<20)
+#define   BLT_WRITE_RGB			(1<<20)
+#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
 #define BLT_DEPTH_8			(0<<24)
 #define BLT_DEPTH_16_565		(1<<24)
 #define BLT_DEPTH_16_1555		(2<<24)
 #define BLT_DEPTH_32			(3<<24)
-#define BLT_ROP_GXCOPY			(0xcc<<16)
+#define BLT_ROP_SRC_COPY		(0xcc<<16)
+#define BLT_ROP_COLOR_COPY		(0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO	((0x0<<29)|(0x14<<23)|2)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a66955037e4e..eee79e1c3222 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1123,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	}
 }
 
-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
 	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
 		      "VBIOS ROM for %s\n",
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2efaf8e8d9c4..9212e6504e0f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		goto out;
 	}
 
+	drm_modeset_acquire_init(&ctx, 0);
+
 	/* for pre-945g platforms use load detect */
 	if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else
 			status = intel_crt_load_detect(crt);
-		intel_release_load_detect_pipe(connector, &tmp, &ctx);
+		intel_release_load_detect_pipe(connector, &tmp);
 	} else
 		status = connector_status_unknown;
 
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
 out:
 	intel_display_power_put(dev_priv, power_domain);
 	return status;
@@ -799,7 +804,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 	.destroy = intel_encoder_destroy,
 };
 
-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
 {
 	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
 	return 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 018fb7222f60..d8324c69fa86 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2233,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
 		alignment = 256 * 1024;
 
+	/*
+	 * Global gtt pte registers are special registers which actually forward
+	 * writes to a chunk of system memory. Which means that there is no risk
+	 * that the register values disappear as soon as we call
+	 * intel_runtime_pm_put(), so it is correct to wrap only the
+	 * pin/unpin/fence and not more.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 	if (ret)
@@ -2250,12 +2259,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	i915_gem_object_pin_fence(obj);
 
 	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
+	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
 
@@ -4188,10 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
 	intel_disable_pipe(dev_priv, pipe);
-
-	if (intel_crtc->config.dp_encoder_is_mst)
-		intel_ddi_set_vc_payload_alloc(crtc, false);
-
 	ironlake_pfit_disable(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4256,6 +4263,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
 	intel_disable_pipe(dev_priv, pipe);
 
+	if (intel_crtc->config.dp_encoder_is_mst)
+		intel_ddi_set_vc_payload_alloc(crtc, false);
+
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	ironlake_pfit_disable(intel_crtc);
@@ -8240,6 +8250,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 			goto fail_locked;
 		}
 
+		/*
+		 * Global gtt pte registers are special registers which actually
+		 * forward writes to a chunk of system memory. Which means that
+		 * there is no risk that the register values disappear as soon
+		 * as we call intel_runtime_pm_put(), so it is correct to wrap
+		 * only the pin/unpin/fence and not more.
+		 */
+		intel_runtime_pm_get(dev_priv);
+
 		/* Note that the w/a also requires 2 PTE of padding following
 		 * the bo. We currently fill all unused PTE with the shadow
 		 * page and so we should always have valid PTE following the
@@ -8252,16 +8271,20 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
+			intel_runtime_pm_put(dev_priv);
 			goto fail_locked;
 		}
 
 		ret = i915_gem_object_put_fence(obj);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to release fence for cursor");
+			intel_runtime_pm_put(dev_priv);
 			goto fail_unpin;
 		}
 
 		addr = i915_gem_obj_ggtt_offset(obj);
+
+		intel_runtime_pm_put(dev_priv);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_object_attach_phys(obj, align);
@@ -8462,8 +8485,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 		      connector->base.id, connector->name,
 		      encoder->base.id, encoder->name);
 
-	drm_modeset_acquire_init(ctx, 0);
-
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
@@ -8502,10 +8523,14 @@ retry:
 		i++;
 		if (!(encoder->possible_crtcs & (1 << i)))
 			continue;
-		if (!possible_crtc->enabled) {
-			crtc = possible_crtc;
-			break;
-		}
+		if (possible_crtc->enabled)
+			continue;
+		/* This can occur when applying the pipe A quirk on resume. */
+		if (to_intel_crtc(possible_crtc)->new_enabled)
+			continue;
+
+		crtc = possible_crtc;
+		break;
 	}
 
 	/*
@@ -8574,15 +8599,11 @@ fail_unlock:
 		goto retry;
 	}
 
-	drm_modeset_drop_locks(ctx);
-	drm_modeset_acquire_fini(ctx);
-
 	return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-				    struct intel_load_detect_pipe *old,
-				    struct drm_modeset_acquire_ctx *ctx)
+				    struct intel_load_detect_pipe *old)
 {
 	struct intel_encoder *intel_encoder =
 		intel_attached_encoder(connector);
@@ -8606,17 +8627,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 			drm_framebuffer_unreference(old->release_fb);
 		}
 
-		goto unlock;
 		return;
 	}
 
 	/* Switch crtc and encoder back off if necessary */
 	if (old->dpms_mode != DRM_MODE_DPMS_ON)
 		connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
-	drm_modeset_drop_locks(ctx);
-	drm_modeset_acquire_fini(ctx);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11716,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	};
 	const struct drm_rect clip = {
 		/* integer pixels */
-		.x2 = intel_crtc->config.pipe_src_w,
-		.y2 = intel_crtc->config.pipe_src_h,
+		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 	};
 	bool visible;
 	int ret;
@@ -12488,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
 
+	/* Acer C720 Chromebook (Core i3 4005U) */
+	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
@@ -12659,7 +12678,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 	struct intel_connector *connector;
 	struct drm_connector *crt = NULL;
 	struct intel_load_detect_pipe load_detect_temp;
-	struct drm_modeset_acquire_ctx ctx;
+	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
 	/* We can't just switch on the pipe A, we need to set things up with a
 	 * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12695,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 	if (!crt)
 		return;
 
-	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
-		intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+		intel_release_load_detect_pipe(crt, &load_detect_temp);
 }
 
 static bool
@@ -13112,7 +13129,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * experience fancy races otherwise.
 	 */
 	drm_irq_uninstall(dev);
-	cancel_work_sync(&dev_priv->hotplug_work);
+	intel_hpd_cancel_work(dev_priv);
 	dev_priv->pm._irqs_disabled = true;
 
 	/*
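Both intel_display.c hunks that touch pinning bracket only the pin/fence sequence with intel_runtime_pm_get()/intel_runtime_pm_put(); the in-diff comment explains why the bracket can be that narrow (global GTT PTE writes land in system memory and survive the put). The shape of the pattern, as a hedged sketch with invented helper names:

    static int example_pin_with_rpm(struct example_dev *dev)
    {
            int ret;

            example_runtime_pm_get(dev);            /* hardware awake for the PTE writes */

            ret = example_pin_and_fence(dev);       /* the only step that needs it */

            example_runtime_pm_put(dev);            /* safe: the written PTEs persist */
            return ret;
    }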
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee3942f0b068..fdff1d420c14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
+	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+	    tmp & DP_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
 	pipe_config->has_dp_encoder = true;
 
 	intel_dp_get_m_n(crtc, pipe_config);
@@ -3553,6 +3557,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 	if (WARN_ON(!intel_encoder->base.crtc))
 		return;
 
+	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+		return;
+
 	/* Try to read receiver status if the link appears to be up */
 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
 		return;
@@ -3658,24 +3665,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 	return intel_dp_detect_dpcd(intel_dp);
 }
 
-static enum drm_connector_status
-g4x_dp_detect(struct intel_dp *intel_dp)
+static int g4x_digital_port_connected(struct drm_device *dev,
+				       struct intel_digital_port *intel_dig_port)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	uint32_t bit;
 
-	/* Can't disconnect eDP, but you can close the lid... */
-	if (is_edp(intel_dp)) {
-		enum drm_connector_status status;
-
-		status = intel_panel_detect(dev);
-		if (status == connector_status_unknown)
-			status = connector_status_connected;
-		return status;
-	}
-
 	if (IS_VALLEYVIEW(dev)) {
 		switch (intel_dig_port->port) {
 		case PORT_B:
@@ -3688,7 +3683,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
 			break;
 		default:
-			return connector_status_unknown;
+			return -EINVAL;
 		}
 	} else {
 		switch (intel_dig_port->port) {
@@ -3702,11 +3697,36 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
 			break;
 		default:
-			return connector_status_unknown;
+			return -EINVAL;
 		}
 	}
 
 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+		return 0;
+	return 1;
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	int ret;
+
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp)) {
+		enum drm_connector_status status;
+
+		status = intel_panel_detect(dev);
+		if (status == connector_status_unknown)
+			status = connector_status_connected;
+		return status;
+	}
+
+	ret = g4x_digital_port_connected(dev, intel_dig_port);
+	if (ret == -EINVAL)
+		return connector_status_unknown;
+	else if (ret == 0)
 		return connector_status_disconnected;
 
 	return intel_dp_detect_dpcd(intel_dp);
@@ -4003,6 +4023,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+	if (!is_edp(intel_dp))
+		return;
+
+	edp_panel_vdd_off_sync(intel_dp);
+}
+
 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,18 +4067,30 @@ bool
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	enum intel_display_power_domain power_domain;
+	bool ret = true;
 
 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
 	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
 		      long_hpd ? "long" : "short");
 
+	power_domain = intel_display_port_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
+
 	if (long_hpd) {
-		if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
-			goto mst_fail;
+
+		if (HAS_PCH_SPLIT(dev)) {
+			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+				goto mst_fail;
+		} else {
+			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
+				goto mst_fail;
+		}
 
 		if (!intel_dp_get_dpcd(intel_dp)) {
 			goto mst_fail;
@@ -4061,8 +4103,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 	} else {
 		if (intel_dp->is_mst) {
-			ret = intel_dp_check_mst_status(intel_dp);
-			if (ret == -EINVAL)
+			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
 				goto mst_fail;
 		}
 
@@ -4076,7 +4117,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
 		}
 	}
-	return false;
+	ret = false;
+	goto put_power;
 mst_fail:
 	/* if we were in MST mode, and device is not there get out of MST mode */
 	if (intel_dp->is_mst) {
@@ -4084,7 +4126,10 @@ mst_fail:
 		intel_dp->is_mst = false;
 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
 	}
-	return true;
+put_power:
+	intel_display_power_put(dev_priv, power_domain);
+
+	return ret;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4767,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->disable = intel_disable_dp;
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
 	if (IS_CHERRYVIEW(dev)) {
 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = chv_pre_enable_dp;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4b2664bd5b81..b8c8bbd8e5f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,12 @@ struct intel_encoder {
153 * be set correctly before calling this function. */ 153 * be set correctly before calling this function. */
154 void (*get_config)(struct intel_encoder *, 154 void (*get_config)(struct intel_encoder *,
155 struct intel_crtc_config *pipe_config); 155 struct intel_crtc_config *pipe_config);
156 /*
157 * Called during system suspend after all pending requests for the
158 * encoder are flushed (for example for DP AUX transactions) and
159 * device interrupts are disabled.
160 */
161 void (*suspend)(struct intel_encoder *);
156 int crtc_mask; 162 int crtc_mask;
157 enum hpd_pin hpd_pin; 163 enum hpd_pin hpd_pin;
158}; 164};
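The new suspend hook only matters if something walks the encoders once pending work is flushed and interrupts are off; that caller is not part of this hunk, so the following is only a minimal sketch of such a loop, with the helper name and locking assumed rather than taken from the diff:

/* Hypothetical suspend-time caller for the new hook; only the ->suspend()
 * callback itself is from this patch. */
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}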
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
830 struct intel_load_detect_pipe *old, 836 struct intel_load_detect_pipe *old,
831 struct drm_modeset_acquire_ctx *ctx); 837 struct drm_modeset_acquire_ctx *ctx);
832void intel_release_load_detect_pipe(struct drm_connector *connector, 838void intel_release_load_detect_pipe(struct drm_connector *connector,
833 struct intel_load_detect_pipe *old, 839 struct intel_load_detect_pipe *old);
834 struct drm_modeset_acquire_ctx *ctx);
835int intel_pin_and_fence_fb_obj(struct drm_device *dev, 840int intel_pin_and_fence_fb_obj(struct drm_device *dev,
836 struct drm_i915_gem_object *obj, 841 struct drm_i915_gem_object *obj,
837 struct intel_engine_cs *pipelined); 842 struct intel_engine_cs *pipelined);
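Dropping the acquire-ctx argument from intel_release_load_detect_pipe() means load-detect callers now own the context themselves; the intel_tv.c hunk at the end of this diff is the caller that changes accordingly. Condensed from that hunk, the calling pattern becomes:

struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
enum drm_connector_status status;

drm_modeset_acquire_init(&ctx, 0);

if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
	type = intel_tv_detect_type(intel_tv, connector);
	intel_release_load_detect_pipe(connector, &tmp);	/* no ctx */
	status = type < 0 ? connector_status_disconnected :
			    connector_status_connected;
} else
	status = connector_status_unknown;

/* the context is torn down whether detection succeeded or not */
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);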
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f9151f6641d9..5a9de21637b7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
712 struct intel_crtc_config *pipe_config) 712 struct intel_crtc_config *pipe_config)
713{ 713{
714 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 714 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
715 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 715 struct drm_device *dev = encoder->base.dev;
716 struct drm_i915_private *dev_priv = dev->dev_private;
716 u32 tmp, flags = 0; 717 u32 tmp, flags = 0;
717 int dotclock; 718 int dotclock;
718 719
@@ -731,9 +732,13 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
731 if (tmp & HDMI_MODE_SELECT_HDMI) 732 if (tmp & HDMI_MODE_SELECT_HDMI)
732 pipe_config->has_hdmi_sink = true; 733 pipe_config->has_hdmi_sink = true;
733 734
734 if (tmp & HDMI_MODE_SELECT_HDMI) 735 if (tmp & SDVO_AUDIO_ENABLE)
735 pipe_config->has_audio = true; 736 pipe_config->has_audio = true;
736 737
738 if (!HAS_PCH_SPLIT(dev) &&
739 tmp & HDMI_COLOR_RANGE_16_235)
740 pipe_config->limited_color_range = true;
741
737 pipe_config->adjusted_mode.flags |= flags; 742 pipe_config->adjusted_mode.flags |= flags;
738 743
739 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) 744 if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
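The functional fix in the hunk above: the old readout tested HDMI_MODE_SELECT_HDMI a second time where it meant to test the audio enable bit, so has_audio simply mirrored has_hdmi_sink. The corrected decode, including the new limited-range readout for non-PCH platforms, looks like this once flattened (tmp holds the port register value read earlier in the function):

if (tmp & HDMI_MODE_SELECT_HDMI)
	pipe_config->has_hdmi_sink = true;

if (tmp & SDVO_AUDIO_ENABLE)		/* audio has its own enable bit */
	pipe_config->has_audio = true;

if (!HAS_PCH_SPLIT(dev) &&		/* only non-PCH ports keep the   */
    tmp & HDMI_COLOR_RANGE_16_235)	/* range selection in this reg   */
	pipe_config->limited_color_range = true;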
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 881361c0f27e..fdf40267249c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -538,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
538 .destroy = intel_encoder_destroy, 538 .destroy = intel_encoder_destroy,
539}; 539};
540 540
541static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 541static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
542{ 542{
543 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); 543 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
544 return 1; 544 return 1;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 59b028f0b1e8..8e374449c6b5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -801,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
801 801
802 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); 802 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
803 if (cpu_ctl2 & BLM_PWM_ENABLE) { 803 if (cpu_ctl2 & BLM_PWM_ENABLE) {
804 WARN(1, "cpu backlight already enabled\n"); 804 DRM_DEBUG_KMS("cpu backlight already enabled\n");
805 cpu_ctl2 &= ~BLM_PWM_ENABLE; 805 cpu_ctl2 &= ~BLM_PWM_ENABLE;
806 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); 806 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
807 } 807 }
@@ -845,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
845 845
846 ctl = I915_READ(BLC_PWM_CTL); 846 ctl = I915_READ(BLC_PWM_CTL);
847 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { 847 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
848 WARN(1, "backlight already enabled\n"); 848 DRM_DEBUG_KMS("backlight already enabled\n");
849 I915_WRITE(BLC_PWM_CTL, 0); 849 I915_WRITE(BLC_PWM_CTL, 0);
850 } 850 }
851 851
@@ -876,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
876 876
877 ctl2 = I915_READ(BLC_PWM_CTL2); 877 ctl2 = I915_READ(BLC_PWM_CTL2);
878 if (ctl2 & BLM_PWM_ENABLE) { 878 if (ctl2 & BLM_PWM_ENABLE) {
879 WARN(1, "backlight already enabled\n"); 879 DRM_DEBUG_KMS("backlight already enabled\n");
880 ctl2 &= ~BLM_PWM_ENABLE; 880 ctl2 &= ~BLM_PWM_ENABLE;
881 I915_WRITE(BLC_PWM_CTL2, ctl2); 881 I915_WRITE(BLC_PWM_CTL2, ctl2);
882 } 882 }
@@ -910,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
910 910
911 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 911 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
912 if (ctl2 & BLM_PWM_ENABLE) { 912 if (ctl2 & BLM_PWM_ENABLE) {
913 WARN(1, "backlight already enabled\n"); 913 DRM_DEBUG_KMS("backlight already enabled\n");
914 ctl2 &= ~BLM_PWM_ENABLE; 914 ctl2 &= ~BLM_PWM_ENABLE;
915 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); 915 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
916 } 916 }
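All four backlight-enable paths above change the same way: finding the PWM already enabled is no longer treated as a driver bug worth a WARN and is only logged at debug level, while the handling itself is unchanged, i.e. the stale enable bit is still cleared before the backlight is reprogrammed. The shared pattern, using the i965 variant as the example:

ctl2 = I915_READ(BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
	/* e.g. left running by firmware: note it, then disable before
	 * reprogramming the PWM from scratch */
	DRM_DEBUG_KMS("backlight already enabled\n");
	ctl2 &= ~BLM_PWM_ENABLE;
	I915_WRITE(BLC_PWM_CTL2, ctl2);
}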
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..47a126a0493f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1363 1363
1364/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 1364/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1365#define I830_BATCH_LIMIT (256*1024) 1365#define I830_BATCH_LIMIT (256*1024)
1366#define I830_TLB_ENTRIES (2)
1367#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1366static int 1368static int
1367i830_dispatch_execbuffer(struct intel_engine_cs *ring, 1369i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1368 u64 offset, u32 len, 1370 u64 offset, u32 len,
1369 unsigned flags) 1371 unsigned flags)
1370{ 1372{
1373 u32 cs_offset = ring->scratch.gtt_offset;
1371 int ret; 1374 int ret;
1372 1375
1373 if (flags & I915_DISPATCH_PINNED) { 1376 ret = intel_ring_begin(ring, 6);
1374 ret = intel_ring_begin(ring, 4); 1377 if (ret)
1375 if (ret) 1378 return ret;
1376 return ret;
1377 1379
1378 intel_ring_emit(ring, MI_BATCH_BUFFER); 1380 /* Evict the invalid PTE TLBs */
1379 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1381 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1380 intel_ring_emit(ring, offset + len - 8); 1382 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1381 intel_ring_emit(ring, MI_NOOP); 1383 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1382 intel_ring_advance(ring); 1384 intel_ring_emit(ring, cs_offset);
1383 } else { 1385 intel_ring_emit(ring, 0xdeadbeef);
1384 u32 cs_offset = ring->scratch.gtt_offset; 1386 intel_ring_emit(ring, MI_NOOP);
1387 intel_ring_advance(ring);
1385 1388
1389 if ((flags & I915_DISPATCH_PINNED) == 0) {
1386 if (len > I830_BATCH_LIMIT) 1390 if (len > I830_BATCH_LIMIT)
1387 return -ENOSPC; 1391 return -ENOSPC;
1388 1392
1389 ret = intel_ring_begin(ring, 9+3); 1393 ret = intel_ring_begin(ring, 6 + 2);
1390 if (ret) 1394 if (ret)
1391 return ret; 1395 return ret;
1392 /* Blit the batch (which has now all relocs applied) to the stable batch 1396
1393 * scratch bo area (so that the CS never stumbles over its tlb 1397 /* Blit the batch (which has now all relocs applied) to the
1394 * invalidation bug) ... */ 1398 * stable batch scratch bo area (so that the CS never
1395 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | 1399 * stumbles over its tlb invalidation bug) ...
1396 XY_SRC_COPY_BLT_WRITE_ALPHA | 1400 */
1397 XY_SRC_COPY_BLT_WRITE_RGB); 1401 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1398 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); 1402 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1399 intel_ring_emit(ring, 0); 1403 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1400 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1401 intel_ring_emit(ring, cs_offset); 1404 intel_ring_emit(ring, cs_offset);
1402 intel_ring_emit(ring, 0);
1403 intel_ring_emit(ring, 4096); 1405 intel_ring_emit(ring, 4096);
1404 intel_ring_emit(ring, offset); 1406 intel_ring_emit(ring, offset);
1407
1405 intel_ring_emit(ring, MI_FLUSH); 1408 intel_ring_emit(ring, MI_FLUSH);
1409 intel_ring_emit(ring, MI_NOOP);
1410 intel_ring_advance(ring);
1406 1411
1407 /* ... and execute it. */ 1412 /* ... and execute it. */
1408 intel_ring_emit(ring, MI_BATCH_BUFFER); 1413 offset = cs_offset;
1409 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1410 intel_ring_emit(ring, cs_offset + len - 8);
1411 intel_ring_advance(ring);
1412 } 1414 }
1413 1415
1416 ret = intel_ring_begin(ring, 4);
1417 if (ret)
1418 return ret;
1419
1420 intel_ring_emit(ring, MI_BATCH_BUFFER);
1421 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1422 intel_ring_emit(ring, offset + len - 8);
1423 intel_ring_emit(ring, MI_NOOP);
1424 intel_ring_advance(ring);
1425
1414 return 0; 1426 return 0;
1415} 1427}
1416 1428
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2200 2212
2201 /* Workaround batchbuffer to combat CS tlb bug. */ 2213 /* Workaround batchbuffer to combat CS tlb bug. */
2202 if (HAS_BROKEN_CS_TLB(dev)) { 2214 if (HAS_BROKEN_CS_TLB(dev)) {
2203 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); 2215 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
2204 if (obj == NULL) { 2216 if (obj == NULL) {
2205 DRM_ERROR("Failed to allocate batch bo\n"); 2217 DRM_ERROR("Failed to allocate batch bo\n");
2206 return -ENOMEM; 2218 return -ENOMEM;
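The interleaved hunk above is dense, so here is the shape of the new i830_dispatch_execbuffer() pulled out of it: a dummy colour blit into the scratch bo always runs first to evict the stale PTE TLB entries, and a non-pinned batch is then copied into that scratch area (now sized I830_WA_SIZE = max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) by the second hunk) and executed from there. Sketch only; the intel_ring_begin()/intel_ring_advance() bookkeeping and error returns are elided:

u32 cs_offset = ring->scratch.gtt_offset;

/* 1) Dummy blit into the scratch bo: touches I830_TLB_ENTRIES pages so the
 *    invalid PTE TLB entries are evicted before anything else runs. */
intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4);	/* load each page */
intel_ring_emit(ring, cs_offset);
intel_ring_emit(ring, 0xdeadbeef);
intel_ring_emit(ring, MI_NOOP);

if ((flags & I915_DISPATCH_PINNED) == 0) {
	/* 2) Unpinned batch (must fit in I830_BATCH_LIMIT): blit it into the
	 *    stable scratch area and run the copy, so the CS never stumbles
	 *    over its TLB invalidation bug on the original pages. */
	intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
	intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 4096);
	intel_ring_emit(ring, offset);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);

	offset = cs_offset;	/* execute the relocated copy instead */
}

/* 3) Finally dispatch the batch, pinned or relocated. */
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);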
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e211eef4b7e4..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
854 struct drm_device *dev = encoder->base.dev; 854 struct drm_device *dev = encoder->base.dev;
855 struct drm_i915_private *dev_priv = dev->dev_private; 855 struct drm_i915_private *dev_priv = dev->dev_private;
856 856
857 /* Prevents vblank waits from timing out in intel_tv_detect_type() */
858 intel_wait_for_vblank(encoder->base.dev,
859 to_intel_crtc(encoder->base.crtc)->pipe);
860
857 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); 861 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
858} 862}
859 863
@@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1311{ 1315{
1312 struct drm_display_mode mode; 1316 struct drm_display_mode mode;
1313 struct intel_tv *intel_tv = intel_attached_tv(connector); 1317 struct intel_tv *intel_tv = intel_attached_tv(connector);
1318 enum drm_connector_status status;
1314 int type; 1319 int type;
1315 1320
1316 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 1321 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
@@ -1323,16 +1328,24 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1323 struct intel_load_detect_pipe tmp; 1328 struct intel_load_detect_pipe tmp;
1324 struct drm_modeset_acquire_ctx ctx; 1329 struct drm_modeset_acquire_ctx ctx;
1325 1330
1331 drm_modeset_acquire_init(&ctx, 0);
1332
1326 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { 1333 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
1327 type = intel_tv_detect_type(intel_tv, connector); 1334 type = intel_tv_detect_type(intel_tv, connector);
1328 intel_release_load_detect_pipe(connector, &tmp, &ctx); 1335 intel_release_load_detect_pipe(connector, &tmp);
1336 status = type < 0 ?
1337 connector_status_disconnected :
1338 connector_status_connected;
1329 } else 1339 } else
1330 return connector_status_unknown; 1340 status = connector_status_unknown;
1341
1342 drm_modeset_drop_locks(&ctx);
1343 drm_modeset_acquire_fini(&ctx);
1331 } else 1344 } else
1332 return connector->status; 1345 return connector->status;
1333 1346
1334 if (type < 0) 1347 if (status != connector_status_connected)
1335 return connector_status_disconnected; 1348 return status;
1336 1349
1337 intel_tv->type = type; 1350 intel_tv->type = type;
1338 intel_tv_find_better_format(connector); 1351 intel_tv_find_better_format(connector);