author     Chris Wilson <chris@chris-wilson.co.uk>   2017-03-03 12:14:22 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>   2017-03-03 13:31:37 -0500
commit     b66255f0f77902ef41b09163a6a092d2d905e151 (patch)
tree       87ac509fa2c22294a77045850cf9d4ceb47cded9
parent     24754d751cb86f6069315d8d613e23afcab06c91 (diff)
drm/i915: Refactor wakeup of the next breadcrumb waiter
Refactor the common task of updating the first_waiter, serialised with
the interrupt handler. When we update the first_waiter, we also need to
wake up the new bottom-half in order to complete the actions that we may
have delegated to it (such as checking irq-seqno coherency or waking up
other, lower-priority concurrent waiters).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170303171422.4735-1-chris@chris-wilson.co.uk
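For context, the handover that the new helper centralises follows a common pattern: whichever thread installs a new bottom-half waiter must also wake it once, so that the new waiter performs its own coherent re-check of the completion condition it may otherwise have missed. Below is a rough user-space sketch of that pattern only; it is not the i915 code, and the pthread-based types and names (breadcrumbs, waiter, breadcrumbs_next) are illustrative assumptions.

/* Minimal sketch of the bottom-half handover, assuming a pthread-style
 * waiter; all names here are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

struct waiter {
	pthread_cond_t cond;		/* the waiter sleeps on this */
	unsigned int seqno;		/* completion it is waiting for */
};

struct breadcrumbs {
	pthread_mutex_t lock;		/* serialises handover with the signalling side */
	struct waiter *first_wait;	/* current bottom-half, may be NULL */
};

/* Install @next as the bottom-half; caller holds b->lock.  The new waiter
 * is always signalled so it re-checks the seqno itself, covering any
 * completion that raced with the handover.
 */
static void breadcrumbs_next(struct breadcrumbs *b, struct waiter *next)
{
	b->first_wait = next;
	if (next)
		pthread_cond_signal(&next->cond);
}

The kernel version below uses wake_up_process() on the waiter's task and asserts GEM_BUG_ON(!b->irq_armed) rather than using a condition variable, but the invariant is the same: whoever updates first_wait also wakes the new owner.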
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 48
 1 file changed, 18 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 235d4645a5cf..2b26f84480cc 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -287,6 +287,22 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
 	wake_up_process(wait->tsk); /* implicit smp_wmb() */
 }
 
+static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
+					    struct rb_node *next)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	GEM_BUG_ON(!b->irq_armed);
+	b->first_wait = to_wait(next);
+
+	/* We always wake up the next waiter that takes over as the bottom-half
+	 * as we may delegate not only the irq-seqno barrier to the next waiter
+	 * but also the task of waking up concurrent waiters.
+	 */
+	if (next)
+		wake_up_process(to_wait(next)->tsk);
+}
+
 static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 				    struct intel_wait *wait)
 {
@@ -357,21 +373,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 	GEM_BUG_ON(!next && !first);
 	if (next && next != &wait->node) {
 		GEM_BUG_ON(first);
-		b->first_wait = to_wait(next);
-		/* As there is a delay between reading the current
-		 * seqno, processing the completed tasks and selecting
-		 * the next waiter, we may have missed the interrupt
-		 * and so need for the next bottom-half to wakeup.
-		 *
-		 * Also as we enable the IRQ, we may miss the
-		 * interrupt for that seqno, so we have to wake up
-		 * the next bottom-half in order to do a coherent check
-		 * in case the seqno passed.
-		 */
-		__intel_breadcrumbs_enable_irq(b);
-		if (test_bit(ENGINE_IRQ_BREADCRUMB,
-			     &engine->irq_posted))
-			wake_up_process(to_wait(next)->tsk);
+		__intel_breadcrumbs_next(engine, next);
 	}
 
 	do {
@@ -473,21 +475,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
 			}
 		}
 
-		if (next) {
-			/* In our haste, we may have completed the first waiter
-			 * before we enabled the interrupt. Do so now as we
-			 * have a second waiter for a future seqno. Afterwards,
-			 * we have to wake up that waiter in case we missed
-			 * the interrupt, or if we have to handle an
-			 * exception rather than a seqno completion.
-			 */
-			b->first_wait = to_wait(next);
-			if (b->first_wait->seqno != wait->seqno)
-				__intel_breadcrumbs_enable_irq(b);
-			wake_up_process(b->first_wait->tsk);
-		} else {
-			b->first_wait = NULL;
-		}
+		__intel_breadcrumbs_next(engine, next);
 	} else {
 		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
 	}