diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-07-03 20:22:34 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-08-30 19:02:52 -0400 |
commit | aff4e9ede52badf550745c3d30ed5fcf86ed4351 (patch) | |
tree | b84c7d8b9c4c251477da155b7d019258f9805e1f /kernel/rcu/tree.c | |
parent | b50912d0b5e03f11004fec1e2b50244de9e2fa41 (diff) |
rcu: Remove rsp parameter from rcu_report_qs_rsp()
There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions. This commit therefore removes the rsp parameter from
rcu_report_qs_rsp().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- | kernel/rcu/tree.c | 16 |
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 919033d2c083..2665a45ccb43 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -138,7 +138,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); | |||
138 | static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); | 138 | static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); |
139 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); | 139 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); |
140 | static void invoke_rcu_core(void); | 140 | static void invoke_rcu_core(void); |
141 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | 141 | static void invoke_rcu_callbacks(struct rcu_data *rdp); |
142 | static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp); | 142 | static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp); |
143 | static void sync_sched_exp_online_cleanup(int cpu); | 143 | static void sync_sched_exp_online_cleanup(int cpu); |
144 | 144 | ||
@@ -2189,9 +2189,11 @@ static int __noreturn rcu_gp_kthread(void *arg) | |||
2189 | * just-completed grace period. Note that the caller must hold rnp->lock, | 2189 | * just-completed grace period. Note that the caller must hold rnp->lock, |
2190 | * which is released before return. | 2190 | * which is released before return. |
2191 | */ | 2191 | */ |
2192 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) | 2192 | static void rcu_report_qs_rsp(unsigned long flags) |
2193 | __releases(rcu_get_root(rsp)->lock) | 2193 | __releases(rcu_get_root(rsp)->lock) |
2194 | { | 2194 | { |
2195 | struct rcu_state *rsp = &rcu_state; | ||
2196 | |||
2195 | raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp)); | 2197 | raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp)); |
2196 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 2198 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
2197 | WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); | 2199 | WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); |
@@ -2268,7 +2270,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, | |||
2268 | * state for this grace period. Invoke rcu_report_qs_rsp() | 2270 | * state for this grace period. Invoke rcu_report_qs_rsp() |
2269 | * to clean up and start the next grace period if one is needed. | 2271 | * to clean up and start the next grace period if one is needed. |
2270 | */ | 2272 | */ |
2271 | rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ | 2273 | rcu_report_qs_rsp(flags); /* releases rnp->lock. */ |
2272 | } | 2274 | } |
2273 | 2275 | ||
2274 | /* | 2276 | /* |
@@ -2302,7 +2304,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp, | |||
2302 | * Only one rcu_node structure in the tree, so don't | 2304 | * Only one rcu_node structure in the tree, so don't |
2303 | * try to report up to its nonexistent parent! | 2305 | * try to report up to its nonexistent parent! |
2304 | */ | 2306 | */ |
2305 | rcu_report_qs_rsp(rsp, flags); | 2307 | rcu_report_qs_rsp(flags); |
2306 | return; | 2308 | return; |
2307 | } | 2309 | } |
2308 | 2310 | ||
@@ -2761,7 +2763,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) | |||
2761 | 2763 | ||
2762 | /* If there are callbacks ready, invoke them. */ | 2764 | /* If there are callbacks ready, invoke them. */ |
2763 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) | 2765 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) |
2764 | invoke_rcu_callbacks(rsp, rdp); | 2766 | invoke_rcu_callbacks(rdp); |
2765 | 2767 | ||
2766 | /* Do any needed deferred wakeups of rcuo kthreads. */ | 2768 | /* Do any needed deferred wakeups of rcuo kthreads. */ |
2767 | do_nocb_deferred_wakeup(rdp); | 2769 | do_nocb_deferred_wakeup(rdp); |
@@ -2789,8 +2791,10 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused | |||
2789 | * are running on the current CPU with softirqs disabled, the | 2791 | * are running on the current CPU with softirqs disabled, the |
2790 | * rcu_cpu_kthread_task cannot disappear out from under us. | 2792 | * rcu_cpu_kthread_task cannot disappear out from under us. |
2791 | */ | 2793 | */ |
2792 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | 2794 | static void invoke_rcu_callbacks(struct rcu_data *rdp) |
2793 | { | 2795 | { |
2796 | struct rcu_state *rsp = &rcu_state; | ||
2797 | |||
2794 | if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) | 2798 | if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) |
2795 | return; | 2799 | return; |
2796 | if (likely(!rsp->boost)) { | 2800 | if (likely(!rsp->boost)) { |