author     Paul E. McKenney <paulmck@linux.ibm.com>  2019-03-25 11:36:03 -0400
committer  Paul E. McKenney <paulmck@linux.ibm.com>  2019-05-25 17:50:49 -0400
commit     43e903ad3e0843d03da15d8eaffb5ada22966c76
tree       b4145d17c55323defaab03f8f202394e34b045cc
parent     0864f057b050bc6dd68106b3185e02db5140012d
rcu: Inline invoke_rcu_callbacks() into its sole remaining caller
This commit saves a few lines of code by inlining invoke_rcu_callbacks()
into its sole remaining caller.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
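The net effect, shown in full in the diff below, is that the scheduler-readiness check formerly buried in invoke_rcu_callbacks() now sits directly in rcu_core()'s condition. A minimal sketch of the resulting logic, with the surrounding rcu_core() body omitted:

    /*
     * Invoke ready callbacks, but only once the scheduler is fully
     * active: some callbacks do wakeups, which need a working scheduler.
     */
    if (rcu_segcblist_ready_cbs(&rdp->cblist) &&
        likely(READ_ONCE(rcu_scheduler_fully_active)))
        rcu_do_batch(rdp);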
 kernel/rcu/tree.c | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e290163505a..7822a2e1370d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -147,7 +147,6 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_data *rdp);
 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
@@ -2296,8 +2295,9 @@ static __latent_entropy void rcu_core(void)
 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
 	/* If there are callbacks ready, invoke them. */
-	if (rcu_segcblist_ready_cbs(&rdp->cblist))
-		invoke_rcu_callbacks(rdp);
+	if (rcu_segcblist_ready_cbs(&rdp->cblist) &&
+	    likely(READ_ONCE(rcu_scheduler_fully_active)))
+		rcu_do_batch(rdp);
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
@@ -2333,20 +2333,6 @@ static void invoke_rcu_core_kthread(void)
 }
 
 /*
- * Do RCU callback invocation.  Not that if we are running !use_softirq,
- * we are already in the rcuc kthread.  If callbacks are offloaded, then
- * ->cblist is always empty, so we don't get here.  Therefore, we only
- * ever need to check for the scheduler being operational (some callbacks
- * do wakeups, so we do need the scheduler).
- */
-static void invoke_rcu_callbacks(struct rcu_data *rdp)
-{
-	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
-		return;
-	rcu_do_batch(rdp);
-}
-
-/*
  * Wake up this CPU's rcuc kthread to do RCU core processing.
  */
 static void invoke_rcu_core(void)