about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-04-11 12:51:20 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-05-15 13:30:03 -0400
commitbd7af8463b9fae02b4c7d7248a088ca685ef184c (patch)
tree586e0fdfa853165a282a37d485bb702c1a6fc0c1 /kernel
parenta6058d85a2b24fa40ce7f0d7683989ec47b603b9 (diff)
rcu: Switch __rcu_process_callbacks() to rcu_accelerate_cbs()
The __rcu_process_callbacks() function currently checks to see if the current CPU needs a grace period and also if there is any other reason to kick off a new grace period. This is one of the fail-safe checks that has been rendered unnecessary by the changes that increase the accuracy of rcu_gp_cleanup()'s estimate as to whether another grace period is required. Because this particular fail-safe involved acquiring the root rcu_node structure's ->lock, which has seen excessive contention in real life, this fail-safe needs to go.

However, one check must remain, namely the check for newly arrived RCU callbacks that have not yet been associated with a grace period. One might hope that the checks in __note_gp_changes(), which is invoked indirectly from rcu_check_quiescent_state(), would suffice, but this function won't be invoked at all if RCU is idle. It is therefore necessary to replace the fail-safe checks with a simpler check for newly arrived callbacks during an RCU idle period, which is exactly what this commit does. This change removes the final call to rcu_start_gp(), so this function is removed as well.

Note that lockless use of cpu_needs_another_gp() is racy, but that these races are harmless in this case. If RCU really is idle, the values will not change, so the return value from cpu_needs_another_gp() will be correct. If RCU is not idle, the resulting redundant call to rcu_accelerate_cbs() will be harmless, and might even have the benefit of reducing grace-period latency a bit.

This commit also moves interrupt disabling into the "if" statement to improve real-time response a bit.

Reported-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/rcu/tree.c | 53
1 file changed, 15 insertions(+), 38 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6396a3d10be9..fbacc486ed4c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2335,34 +2335,6 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2335} 2335}
2336 2336
2337/* 2337/*
2338 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
2339 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it
2340 * is invoked indirectly from rcu_advance_cbs(), which would result in
2341 * endless recursion -- or would do so if it wasn't for the self-deadlock
2342 * that is encountered beforehand.
2343 *
2344 * Returns true if the grace-period kthread needs to be awakened.
2345 */
2346static bool rcu_start_gp(struct rcu_state *rsp)
2347{
2348 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2349 struct rcu_node *rnp = rcu_get_root(rsp);
2350 bool ret = false;
2351
2352 /*
2353 * If there is no grace period in progress right now, any
2354 * callbacks we have up to this point will be satisfied by the
2355 * next grace period. Also, advancing the callbacks reduces the
2356 * probability of false positives from cpu_needs_another_gp()
2357 * resulting in pointless grace periods. So, advance callbacks
2358 * then start the grace period!
2359 */
2360 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2361 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2362 return ret;
2363}
2364
2365/*
2366 * Report a full set of quiescent states to the specified rcu_state data 2338 * Report a full set of quiescent states to the specified rcu_state data
2367 * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period 2339 * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
2368 * kthread if another grace period is required. Whether we wake 2340 * kthread if another grace period is required. Whether we wake
@@ -2889,22 +2861,27 @@ __rcu_process_callbacks(struct rcu_state *rsp)
2889 unsigned long flags; 2861 unsigned long flags;
2890 bool needwake; 2862 bool needwake;
2891 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2863 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2864 struct rcu_node *rnp;
2892 2865
2893 WARN_ON_ONCE(!rdp->beenonline); 2866 WARN_ON_ONCE(!rdp->beenonline);
2894 2867
2895 /* Update RCU state based on any recent quiescent states. */ 2868 /* Update RCU state based on any recent quiescent states. */
2896 rcu_check_quiescent_state(rsp, rdp); 2869 rcu_check_quiescent_state(rsp, rdp);
2897 2870
2898 /* Does this CPU require a not-yet-started grace period? */ 2871 /* No grace period and unregistered callbacks? */
2899 local_irq_save(flags); 2872 if (!rcu_gp_in_progress(rsp) &&
2900 if (cpu_needs_another_gp(rsp, rdp)) { 2873 rcu_segcblist_is_enabled(&rdp->cblist)) {
2901 raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */ 2874 local_irq_save(flags);
2902 needwake = rcu_start_gp(rsp); 2875 if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
2903 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2876 local_irq_restore(flags);
2904 if (needwake) 2877 } else {
2905 rcu_gp_kthread_wake(rsp); 2878 rnp = rdp->mynode;
2906 } else { 2879 raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
2907 local_irq_restore(flags); 2880 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2881 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2882 if (needwake)
2883 rcu_gp_kthread_wake(rsp);
2884 }
2908 } 2885 }
2909 2886
2910 /* If there are callbacks ready, invoke them. */ 2887 /* If there are callbacks ready, invoke them. */