author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2016-06-24 14:30:32 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2016-08-22 12:30:11 -0400
commit     f7b8eb847e35b18d3ec333774691a905bf16017f (patch)
tree       78df7f7491cf7ab1bef598fc3fa8279274f42f27
parent     29b4817d4018df78086157ea3a55c1d9424a7cfc (diff)
rcu: Consolidate expedited grace period machinery
The functions synchronize_rcu_expedited() and synchronize_sched_expedited()
have nearly identical code. This commit therefore consolidates this code
into a new _synchronize_rcu_expedited() function.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
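
[Editor's note: the following is a minimal, self-contained C sketch of the consolidation pattern this commit applies, for illustration outside the kernel tree. The "fake_" types and handler names are stand-ins, not the kernel's actual struct rcu_state or smp_call_func_t machinery; the real code appears in the diff below.]

/*
 * Illustrative sketch only: the formerly duplicated steps live once in
 * a shared helper, and the single flavor-specific step is passed in as
 * a function pointer.
 */
#include <stdio.h>

struct fake_state {
	const char *name;			/* stand-in for struct rcu_state */
};

typedef void (*fake_handler_t)(void *info);	/* stand-in for smp_call_func_t */

static void fake_sched_handler(void *info)	/* stand-in for sync_sched_exp_handler */
{
	printf("sched-flavor handler ran for %s\n",
	       ((struct fake_state *)info)->name);
}

static void fake_rcu_handler(void *info)	/* stand-in for sync_rcu_exp_handler */
{
	printf("rcu-flavor handler ran for %s\n",
	       ((struct fake_state *)info)->name);
}

/* All common steps appear exactly once, here. */
static void _fake_synchronize_expedited(struct fake_state *sp,
					fake_handler_t func)
{
	/* ...snapshot the sequence number, take the funnel lock... */
	func(sp);	/* the one flavor-specific step */
	/* ...wait for the grace period, then wake waiters... */
}

/* The two public entry points shrink to one-line wrappers. */
static void fake_synchronize_sched_expedited(struct fake_state *sp)
{
	_fake_synchronize_expedited(sp, fake_sched_handler);
}

static void fake_synchronize_rcu_expedited(struct fake_state *sp)
{
	_fake_synchronize_expedited(sp, fake_rcu_handler);
}

int main(void)
{
	struct fake_state s = { .name = "demo" };

	fake_synchronize_sched_expedited(&s);
	fake_synchronize_rcu_expedited(&s);
	return 0;
}

This mirrors how synchronize_sched_expedited() and synchronize_rcu_expedited() become thin wrappers around _synchronize_rcu_expedited() in the diff that follows.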
-rw-r--r--   kernel/rcu/tree_exp.h   62
1 file changed, 29 insertions(+), 33 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d86ab6ec2c9..1549f456fb7b 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -516,6 +516,33 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	mutex_unlock(&rsp->exp_wake_mutex);
 }
 
+/*
+ * Given an rcu_state pointer and a smp_call_function() handler, kick
+ * off the specified flavor of expedited grace period.
+ */
+static void _synchronize_rcu_expedited(struct rcu_state *rsp,
+				       smp_call_func_t func)
+{
+	unsigned long s;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(rsp->call);
+		return;
+	}
+
+	/* Take a snapshot of the sequence number. */
+	s = rcu_exp_gp_seq_snap(rsp);
+	if (exp_funnel_lock(rsp, s))
+		return; /* Someone else did our work for us. */
+
+	/* Initialize the rcu_node tree in preparation for the wait. */
+	sync_rcu_exp_select_cpus(rsp, func);
+
+	/* Wait and clean up, including waking everyone. */
+	rcu_exp_wait_wake(rsp, s);
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -534,29 +561,13 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
  */
 void synchronize_sched_expedited(void)
 {
-	unsigned long s;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* If only one CPU, this is automatically a grace period. */
 	if (rcu_blocking_is_gp())
 		return;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu_sched);
-		return;
-	}
-
-	/* Take a snapshot of the sequence number. */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return; /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-
-	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -620,23 +631,8 @@ static void sync_rcu_exp_handler(void *info)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long s;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return; /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
-
-	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 