author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2008-01-25 15:08:24 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:24 -0500
commit		e0ecfa7917cafe72f4a75f87e8bb5d8d51dc534f
tree		92106dbd88530d3884bc6c4d1c088cba1ad8630e
parent		01c1c660f4b8086cad7a62345fd04290f3d82c8f
Preempt-RCU: fix rcu_barrier for preemptive environment.
Fix rcu_barrier() to work properly in a preemptive kernel environment.
Also, the ordering of callbacks must be preserved while moving
callbacks to another CPU during CPU hotplug.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
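
For context, rcu_barrier() posts one callback on every CPU and waits until
all of them have been invoked. The helpers the diff below relies on,
rcu_barrier_func() and its callback, are not shown in the patch; the
following is a sketch of how the kernel/rcupdate.c of this era implements
them, paraphrased rather than quoted verbatim from the tree:

/*
 * Sketch of the rcu_barrier() support code in kernel/rcupdate.c
 * (circa 2.6.24).  Paraphrased; treat bodies as approximate.
 */
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head);

/* Runs after a grace period; the last callback completes the barrier. */
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/* Run on every CPU by on_each_cpu(); queues one barrier callback. */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

The bug: with preemptible RCU, nothing stops a grace period from completing
between two CPUs running rcu_barrier_func(). A callback queued early can
run, drop rcu_barrier_cpu_count back to zero, and call complete() before
the remaining CPUs have queued anything, so rcu_barrier() returns too soon.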
 kernel/rcuclassic.c |  2 +-
 kernel/rcupdate.c   | 10 ++++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 18369e3386e2..ce0cf16cab67 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -371,9 +371,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	spin_unlock_bh(&rcp->lock);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 }
 
 static void rcu_offline_cpu(int cpu)
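
The one-line move above matters because of how rcu_move_batch() splices
batches onto the surviving CPU. A sketch of that helper as it appears in
the kernel/rcuclassic.c of this period (paraphrased; details approximate):

/*
 * Sketch of rcu_move_batch() (kernel/rcuclassic.c, circa 2.6.24).
 * Splices @list onto the tail of the surviving CPU's nxtlist.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
			   struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;	/* append whole batch at the tail */
	if (list)
		this_rdp->nxttail = tail;	/* advance tail past the batch */
	local_irq_enable();
}

Each call appends at the tail, so batches must be spliced oldest-first.
Callbacks age from nxtlist through curlist to donelist, meaning donelist
holds the oldest callbacks; moving it first, as the hunk above now does,
keeps the dead CPU's callbacks in their original order.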
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 0ccd0095ebdc..760dfc233a00 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -115,7 +115,17 @@ void rcu_barrier(void)
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
+	/*
+	 * The queueing of callbacks in all CPUs must be atomic with
+	 * respect to RCU, otherwise one CPU may queue a callback,
+	 * wait for a grace period, decrement barrier count and call
+	 * complete(), while other CPUs have not yet queued anything.
+	 * So, we need to make sure that grace periods cannot complete
+	 * until all the callbacks are queued.
+	 */
+	rcu_read_lock();
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
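
Holding rcu_read_lock() across on_each_cpu() keeps a read-side critical
section open on the initiating CPU until every CPU has queued its barrier
callback, so no grace period, and hence no barrier callback, can complete
early. Why callers depend on that: rcu_barrier() is typically used to flush
all pending call_rcu() callbacks before the memory or code they touch is
torn down. A hypothetical example (the foo_* names are invented for
illustration, not part of this patch):

/* Hypothetical module teardown relying on rcu_barrier(). */
struct foo {
	struct rcu_head rcu;
	/* ... payload ... */
};

static struct kmem_cache *foo_cache;

static void foo_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(foo_cache, container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *p)
{
	call_rcu(&p->rcu, foo_free_rcu);	/* deferred free */
}

static void foo_exit(void)
{
	/*
	 * Wait for every queued foo_free_rcu() before destroying the
	 * cache they free into.  If rcu_barrier() returned early, as
	 * it could before this fix, a callback could run against a
	 * destroyed cache or unmapped module text.
	 */
	rcu_barrier();
	kmem_cache_destroy(foo_cache);
}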
