author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2018-07-03 20:22:34 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2018-08-30 19:03:00 -0400
commit     02f501423d0dde7a4b0dd138e0de6175bcf1926c (patch)
tree       8f257ee626e3b6f39d5c93da1449f97080ef5014 /kernel
parent     532c00c97f16a2a8576d453ae13ddc38162faed4 (diff)
rcu: Remove rsp parameter from rcu_accelerate_cbs()
There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions. This commit therefore removes the rsp parameter from
rcu_accelerate_cbs().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
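
The hunks below all follow one pattern: once a structure has exactly one instance per build, callers stop passing a pointer to it and the function instead takes a local pointer to the global. Here is a minimal standalone sketch of that pattern in plain C, using hypothetical names (foo_state, foo_accel); it is not the kernel's RCU code, which operates on rcu_state as shown in the diff.

/*
 * Minimal standalone sketch of the refactoring pattern, with
 * hypothetical names (foo_state, foo_accel); not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct foo_state {
	const char *name;
	unsigned long gp_seq;
};

/* The single remaining instance, analogous to the one rcu_state. */
static struct foo_state foo_state = { .name = "foo" };

/*
 * Before the change the signature would have been:
 *
 *	static bool foo_accel(struct foo_state *sp, unsigned long seq);
 *
 * With only one instance, the parameter goes away and a local
 * pointer to the global takes its place.
 */
static bool foo_accel(unsigned long seq)
{
	struct foo_state *sp = &foo_state;	/* mirrors rsp = &rcu_state */

	if (seq <= sp->gp_seq)
		return false;		/* nothing newer to record */
	sp->gp_seq = seq;
	return true;			/* caller may need to act on this */
}

int main(void)
{
	/* Call sites shrink from foo_accel(&foo_state, 1) to foo_accel(1). */
	printf("%s: updated = %d\n", foo_state.name, foo_accel(1));
	return 0;
}

The diff applies the same shrinkage at each call site: rcu_accelerate_cbs(rsp, rnp, rdp) becomes rcu_accelerate_cbs(rnp, rdp), and rcu_report_qs_rdp(), whose local rsp existed only for this call, drops that local entirely.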
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcu/tree.c        | 15
-rw-r--r--  kernel/rcu/tree_plugin.h |  2
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 06f83fce416b..984dbbf47265 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1663,11 +1663,11 @@ static void rcu_gp_kthread_wake(void)
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
-static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
-			       struct rcu_data *rdp)
+static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	unsigned long gp_seq_req;
 	bool ret = false;
+	struct rcu_state *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1719,7 +1719,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
 		return;
 	}
 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	needwake = rcu_accelerate_cbs(rnp, rdp);
 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	if (needwake)
 		rcu_gp_kthread_wake();
@@ -1751,7 +1751,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
 	/* Classify any remaining callbacks. */
-	return rcu_accelerate_cbs(rsp, rnp, rdp);
+	return rcu_accelerate_cbs(rnp, rdp);
 }
 
 /*
@@ -1777,7 +1777,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
-		ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
+		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
 	}
 
 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
@@ -2078,7 +2078,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		needgp = true;
 	}
 	/* Advance CBs to reduce false positives below. */
-	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
+	if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
 		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
 		rsp->gp_req_activity = jiffies;
 		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
@@ -2331,7 +2331,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 	unsigned long mask;
 	bool needwake;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2359,7 +2358,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 	 * This GP can't end until cpu checks in, so all of our
 	 * callbacks can be processed during the next GP.
 	 */
-	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	needwake = rcu_accelerate_cbs(rnp, rdp);
 
 	rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 	/* ^^^ Released rnp->lock */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 50ca000ad9f2..0c59c3987c60 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1697,7 +1697,7 @@ static void rcu_prepare_for_idle(void)
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rnp, rdp);
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake();