diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-10 18:40:35 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-06-08 11:25:22 -0400 |
| commit | f92c734f02cbf10e40569facff82059ae9b61920 (patch) | |
| tree | 95d64d71c63d840d31bef62793ce8262305a6f1e /kernel/rcu/tree.c | |
| parent | c0ee4500ff67e455dcbc74ff3e9e9faa3bc93be1 (diff) | |
rcu: Prevent rcu_barrier() from starting needless grace periods
Currently rcu_barrier() uses call_rcu() to enqueue new callbacks
on each CPU with a non-empty callback list. This works, but means
that rcu_barrier() forces grace periods that are not otherwise needed.
The key point is that rcu_barrier() never needs to wait for a grace
period, but instead only for all pre-existing callbacks to be invoked.
This means that rcu_barrier()'s new callbacks should be placed in
the callback-list segment containing the last pre-existing callback.
This commit makes this change using the new rcu_segcblist_entrain()
function.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
| -rw-r--r-- | kernel/rcu/tree.c | 10 |
1 file changed, 8 insertions, 2 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e354e475e645..657056c3e0cd 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
| @@ -3578,8 +3578,14 @@ static void rcu_barrier_func(void *type) | |||
| 3578 | struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); | 3578 | struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); |
| 3579 | 3579 | ||
| 3580 | _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence); | 3580 | _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence); |
| 3581 | atomic_inc(&rsp->barrier_cpu_count); | 3581 | rdp->barrier_head.func = rcu_barrier_callback; |
| 3582 | rsp->call(&rdp->barrier_head, rcu_barrier_callback); | 3582 | debug_rcu_head_queue(&rdp->barrier_head); |
| 3583 | if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { | ||
| 3584 | atomic_inc(&rsp->barrier_cpu_count); | ||
| 3585 | } else { | ||
| 3586 | debug_rcu_head_unqueue(&rdp->barrier_head); | ||
| 3587 | _rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence); | ||
| 3588 | } | ||
| 3583 | } | 3589 | } |
| 3584 | 3590 | ||
| 3585 | /* | 3591 | /* |
