author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-03-09 19:16:42 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-18 14:38:19 -0400
commit		c6e56f593ac2df436700527c3488d4ed224c3acf
tree		0d323b1c103b240ab4258e6f0a3f4cdbee18b2fb
parent		f010ed82c7ba8b30f30872800100ad6b6efe2a6a
srcu: Push srcu_advance_batches() fastpath into common case
This commit simplifies the SRCU state machine by pushing the
srcu_advance_batches() idle-SRCU fastpath into the common case. This is
done by giving srcu_reschedule() a delay parameter, which is zero in
the call from __synchronize_srcu().
This commit is a step towards numbering callbacks in order to
efficiently handle per-CPU callback lists.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
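
After this change there is a single reschedule path whose delay is chosen by the
caller: __synchronize_srcu() passes 0 to hand processing to the workqueue
immediately, while process_srcu() passes SRCU_INTERVAL to re-arm the state
machine as before. A condensed sketch of the post-patch shape, taken from the
diff below (the pending-work check in srcu_reschedule() is elided):

	static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
	{
		bool pending = true;

		/* ... clear "pending" if no SRCU callbacks remain queued ... */
		if (pending)
			queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
	}

	/* __synchronize_srcu(): give processing ownership to the work_struct at once. */
	srcu_reschedule(sp, 0);

	/* process_srcu(): start another round after the usual interval. */
	srcu_reschedule(sp, SRCU_INTERVAL);

With the fastpath gone, synchronous grace-period waiters always block in
wait_for_completion(); queueing the work with a zero delay keeps that wait
short, since the workqueue handler runs promptly to advance the batches and
complete the waiters.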
 kernel/rcu/srcu.c | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 7e7ecaa50dc5..821ecda873f2 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -399,8 +399,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 }
 EXPORT_SYMBOL_GPL(call_srcu);
 
-static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
-static void srcu_reschedule(struct srcu_struct *sp);
+static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
 
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
@@ -409,7 +408,6 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 {
 	struct rcu_synchronize rcu;
 	struct rcu_head *head = &rcu.head;
-	bool done = false;
 
 	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
 			 lock_is_held(&rcu_bh_lock_map) ||
@@ -431,25 +429,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 		sp->running = true;
 		rcu_batch_queue(&sp->batch_check0, head);
 		spin_unlock_irq(&sp->queue_lock);
-
-		srcu_advance_batches(sp, trycount);
-		if (!rcu_batch_empty(&sp->batch_done)) {
-			BUG_ON(sp->batch_done.head != head);
-			rcu_batch_dequeue(&sp->batch_done);
-			done = true;
-		}
 		/* give the processing owner to work_struct */
-		srcu_reschedule(sp);
+		srcu_reschedule(sp, 0);
 	} else {
 		rcu_batch_queue(&sp->batch_queue, head);
 		spin_unlock_irq(&sp->queue_lock);
 	}
 
-	if (!done) {
-		wait_for_completion(&rcu.completion);
-		smp_mb(); /* Caller's later accesses after GP. */
-	}
-
+	wait_for_completion(&rcu.completion);
+	smp_mb(); /* Caller's later accesses after GP. */
 }
 
 /**
@@ -639,7 +627,7 @@ static void srcu_invoke_callbacks(struct srcu_struct *sp)
  * Finished one round of SRCU grace period. Start another if there are
  * more SRCU callbacks queued, otherwise put SRCU into not-running state.
  */
-static void srcu_reschedule(struct srcu_struct *sp)
+static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 {
 	bool pending = true;
 
@@ -653,8 +641,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
 	}
 
 	if (pending)
-		queue_delayed_work(system_power_efficient_wq,
-				   &sp->work, SRCU_INTERVAL);
+		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
 }
 
 /*
@@ -669,6 +656,6 @@ void process_srcu(struct work_struct *work)
 	srcu_collect_new(sp);
 	srcu_advance_batches(sp, 1);
 	srcu_invoke_callbacks(sp);
-	srcu_reschedule(sp);
+	srcu_reschedule(sp, SRCU_INTERVAL);
 }
 EXPORT_SYMBOL_GPL(process_srcu);