author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-07-29 20:28:11 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-09-21 00:16:17 -0400
commit		f4ecea309d3e17ba5e90082308125ad23bd5701b
tree		bf7623bc786bc0c9709a17aac6b459e7b1d21656
parent		19a5ecde086a6a5287978b12ae948fa691b197b7
rcu: Use rsp->expedited_wq instead of sync_rcu_preempt_exp_wq
Now that there is an ->expedited_wq waitqueue in each rcu_state structure, there is no need for the sync_rcu_preempt_exp_wq global variable. This commit therefore substitutes ->expedited_wq for sync_rcu_preempt_exp_wq. It also initializes ->expedited_wq only once at boot instead of at the start of each expedited grace period.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--	kernel/rcu/tree.c	2
-rw-r--r--	kernel/rcu/tree_plugin.h	6
2 files changed, 3 insertions(+), 5 deletions(-)
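As an aside before the diff itself: the shape of the change is the standard kernel pattern of a wait-queue head embedded in a long-lived state structure and initialized exactly once at init time, rather than re-initialized at the start of every expedited grace period. Below is a condensed, kernel-style sketch of that pattern; struct my_state and its fields are hypothetical stand-ins for struct rcu_state, not the actual tree.c definitions.

#include <linux/wait.h>
#include <linux/atomic.h>

/* Hypothetical stand-in for struct rcu_state; illustrative only. */
struct my_state {
	wait_queue_head_t expedited_wq;	/* waiters for expedited-GP completion */
	atomic_t expedited_done;	/* set nonzero once the expedited GP ends */
};

/*
 * One-time setup, analogous to the init_waitqueue_head() call this
 * commit moves into rcu_init_one(): the queue is usable before the
 * first expedited grace period can start, so no per-GP
 * re-initialization is needed.
 */
static void my_state_init(struct my_state *sp)
{
	init_waitqueue_head(&sp->expedited_wq);
	atomic_set(&sp->expedited_done, 0);
}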
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 775d36cc0050..53d66ebb4811 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3556,7 +3556,6 @@ void synchronize_sched_expedited(void)
 	rcu_exp_gp_seq_start(rsp);
 
 	/* Stop each CPU that is online, non-idle, and not us. */
-	init_waitqueue_head(&rsp->expedited_wq);
 	atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
 	for_each_online_cpu(cpu) {
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
@@ -4179,6 +4178,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	}
 
 	init_waitqueue_head(&rsp->gp_wq);
+	init_waitqueue_head(&rsp->expedited_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b2bf3963a0ae..72df006de798 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -535,8 +535,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
  * sections blocking the current preemptible-RCU expedited grace period.
@@ -590,7 +588,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				wake_up(&sync_rcu_preempt_exp_wq);
+				wake_up(&rsp->expedited_wq);
 			}
 			break;
 		}
@@ -729,7 +727,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
-	wait_event(sync_rcu_preempt_exp_wq,
+	wait_event(rsp->expedited_wq,
 		   sync_rcu_preempt_exp_done(rnp));
 
 	/* Clean up and exit. */
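Taken together, the tree_plugin.h hunks leave a waiter/waker pair on the per-rsp queue in place of the old global. A hedged sketch of that pairing, reusing the hypothetical struct my_state from the sketch above (again, not the actual kernel code):

/*
 * Waker side, analogous to rcu_report_exp_rnp(): record completion,
 * order it before the wakeup, then wake sleepers on the per-state
 * queue that replaced the global sync_rcu_preempt_exp_wq.
 */
static void my_report_exp_done(struct my_state *sp)
{
	atomic_set(&sp->expedited_done, 1);
	smp_mb(); /* Completion visible before wake_up(). */
	wake_up(&sp->expedited_wq);
}

/*
 * Waiter side, analogous to synchronize_rcu_expedited(): sleep until
 * the condition holds; wait_event() re-evaluates the condition after
 * each wakeup, so spurious wakeups are harmless.
 */
static void my_wait_for_exp_done(struct my_state *sp)
{
	wait_event(sp->expedited_wq,
		   atomic_read(&sp->expedited_done));
}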