about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/intel_breadcrumbs.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-11-10 18:25:32 -0500
committerDave Airlie <airlied@redhat.com>2016-11-10 18:25:32 -0500
commitdb8feb6979e91c2e916631a75dbfe9f10f6b05e5 (patch)
treeb4aa5965f207c18d908a794e5f4e647604d77553 /drivers/gpu/drm/i915/intel_breadcrumbs.c
parentafdd548f742ca454fc343696de472f3aaa5dc488 (diff)
parent58e197d631d44f9f4817b8198b43132a40de1164 (diff)
Merge tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel into drm-next
- gpu idling rework for s/r (Imre) - vlv mappable scanout fix - speed up probing in resume (Lyude) - dp audio workarounds for gen9 (Dhinakaran) - more conversion to using dev_priv internally (Ville) - more gen9+ wm fixes and cleanups (Maarten) - shrinker cleanup&fixes (Chris) - reorg plane init code (Ville) - implement support for multiple timelines (prep work for scheduler) from Chris and all - untangle dev->struct_mutex locking as prep for multiple timelines (Chris) - refactor bxt phy code and collect it all in intel_dpio_phy.c (Ander) - another gvt with bugfixes all over from Zhenyu - piles of lspcon fixes from Imre - 90/270 rotation fixes (Ville) - guc log buffer support (Akash+Sagar) - fbc fixes from Paulo - untangle rpm vs. tiling-fences/mmaps (Chris) - fix atomic commit to wait on the right fences (Daniel Stone) * tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel: (181 commits) drm/i915: Update DRIVER_DATE to 20161108 drm/i915: Mark CPU cache as dirty when used for rendering drm/i915: Add assert for no pending GPU requests during suspend/resume in LR mode drm/i915: Make sure engines are idle during GPU idling in LR mode drm/i915: Avoid early GPU idling due to race with new request drm/i915: Avoid early GPU idling due to already pending idle work drm/i915: Limit Valleyview and earlier to only using mappable scanout drm/i915: Round tile chunks up for constructing partial VMAs drm/i915: Remove the vma from the object list upon close drm/i915: Reinit polling before hpd when resuming drm/i915: Remove redundant reprobe in i915_drm_resume drm/i915/dp: Extend BDW DP audio workaround to GEN9 platforms drm/i915/dp: BDW cdclk fix for DP audio drm/i915: Fix pages pin counting around swizzle quirk drm/i915: Fix test on inputs for vma_compare() drm/i915/guc: Cache the client mapping drm/i915: Tidy slab cache allocations drm/i915: Introduce HAS_64BIT_RELOC drm/i915: Show the execlist queue in debugfs/i915_engine_info drm/i915: Unify global_list 
into global_link ...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_breadcrumbs.c')
-rw-r--r-- drivers/gpu/drm/i915/intel_breadcrumbs.c 43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 56efcc507ea2..c410d3d6465f 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
83 */ 83 */
84 engine->breadcrumbs.irq_posted = true; 84 engine->breadcrumbs.irq_posted = true;
85 85
86 spin_lock_irq(&engine->i915->irq_lock); 86 /* Caller disables interrupts */
87 spin_lock(&engine->i915->irq_lock);
87 engine->irq_enable(engine); 88 engine->irq_enable(engine);
88 spin_unlock_irq(&engine->i915->irq_lock); 89 spin_unlock(&engine->i915->irq_lock);
89} 90}
90 91
91static void irq_disable(struct intel_engine_cs *engine) 92static void irq_disable(struct intel_engine_cs *engine)
92{ 93{
93 spin_lock_irq(&engine->i915->irq_lock); 94 /* Caller disables interrupts */
95 spin_lock(&engine->i915->irq_lock);
94 engine->irq_disable(engine); 96 engine->irq_disable(engine);
95 spin_unlock_irq(&engine->i915->irq_lock); 97 spin_unlock(&engine->i915->irq_lock);
96 98
97 engine->breadcrumbs.irq_posted = false; 99 engine->breadcrumbs.irq_posted = false;
98} 100}
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
293 struct intel_breadcrumbs *b = &engine->breadcrumbs; 295 struct intel_breadcrumbs *b = &engine->breadcrumbs;
294 bool first; 296 bool first;
295 297
296 spin_lock(&b->lock); 298 spin_lock_irq(&b->lock);
297 first = __intel_engine_add_wait(engine, wait); 299 first = __intel_engine_add_wait(engine, wait);
298 spin_unlock(&b->lock); 300 spin_unlock_irq(&b->lock);
299 301
300 return first; 302 return first;
301} 303}
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
326 if (RB_EMPTY_NODE(&wait->node)) 328 if (RB_EMPTY_NODE(&wait->node))
327 return; 329 return;
328 330
329 spin_lock(&b->lock); 331 spin_lock_irq(&b->lock);
330 332
331 if (RB_EMPTY_NODE(&wait->node)) 333 if (RB_EMPTY_NODE(&wait->node))
332 goto out_unlock; 334 goto out_unlock;
@@ -400,7 +402,7 @@ out_unlock:
400 GEM_BUG_ON(rb_first(&b->waiters) != 402 GEM_BUG_ON(rb_first(&b->waiters) !=
401 (b->first_wait ? &b->first_wait->node : NULL)); 403 (b->first_wait ? &b->first_wait->node : NULL));
402 GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters)); 404 GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
403 spin_unlock(&b->lock); 405 spin_unlock_irq(&b->lock);
404} 406}
405 407
406static bool signal_complete(struct drm_i915_gem_request *request) 408static bool signal_complete(struct drm_i915_gem_request *request)
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
473 * we just completed - so double check we are still 475 * we just completed - so double check we are still
474 * the oldest before picking the next one. 476 * the oldest before picking the next one.
475 */ 477 */
476 spin_lock(&b->lock); 478 spin_lock_irq(&b->lock);
477 if (request == b->first_signal) { 479 if (request == b->first_signal) {
478 struct rb_node *rb = 480 struct rb_node *rb =
479 rb_next(&request->signaling.node); 481 rb_next(&request->signaling.node);
480 b->first_signal = rb ? to_signaler(rb) : NULL; 482 b->first_signal = rb ? to_signaler(rb) : NULL;
481 } 483 }
482 rb_erase(&request->signaling.node, &b->signals); 484 rb_erase(&request->signaling.node, &b->signals);
483 spin_unlock(&b->lock); 485 spin_unlock_irq(&b->lock);
484 486
485 i915_gem_request_put(request); 487 i915_gem_request_put(request);
486 } else { 488 } else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
502 struct rb_node *parent, **p; 504 struct rb_node *parent, **p;
503 bool first, wakeup; 505 bool first, wakeup;
504 506
505 /* locked by dma_fence_enable_sw_signaling() */ 507 /* Note that we may be called from an interrupt handler on another
508 * device (e.g. nouveau signaling a fence completion causing us
509 * to submit a request, and so enable signaling). As such,
510 * we need to make sure that all other users of b->lock protect
511 * against interrupts, i.e. use spin_lock_irqsave.
512 */
513
514 /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
506 assert_spin_locked(&request->lock); 515 assert_spin_locked(&request->lock);
516 if (!request->global_seqno)
517 return;
507 518
508 request->signaling.wait.tsk = b->signaler; 519 request->signaling.wait.tsk = b->signaler;
509 request->signaling.wait.seqno = request->fence.seqno; 520 request->signaling.wait.seqno = request->global_seqno;
510 i915_gem_request_get(request); 521 i915_gem_request_get(request);
511 522
512 spin_lock(&b->lock); 523 spin_lock(&b->lock);
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
530 p = &b->signals.rb_node; 541 p = &b->signals.rb_node;
531 while (*p) { 542 while (*p) {
532 parent = *p; 543 parent = *p;
533 if (i915_seqno_passed(request->fence.seqno, 544 if (i915_seqno_passed(request->global_seqno,
534 to_signaler(parent)->fence.seqno)) { 545 to_signaler(parent)->global_seqno)) {
535 p = &parent->rb_right; 546 p = &parent->rb_right;
536 first = false; 547 first = false;
537 } else { 548 } else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
592 struct intel_breadcrumbs *b = &engine->breadcrumbs; 603 struct intel_breadcrumbs *b = &engine->breadcrumbs;
593 604
594 cancel_fake_irq(engine); 605 cancel_fake_irq(engine);
595 spin_lock(&b->lock); 606 spin_lock_irq(&b->lock);
596 607
597 __intel_breadcrumbs_disable_irq(b); 608 __intel_breadcrumbs_disable_irq(b);
598 if (intel_engine_has_waiter(engine)) { 609 if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
605 irq_disable(engine); 616 irq_disable(engine);
606 } 617 }
607 618
608 spin_unlock(&b->lock); 619 spin_unlock_irq(&b->lock);
609} 620}
610 621
611void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) 622void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)