diff options
author | Paul E. McKenney <paul.mckenney@linaro.org> | 2012-05-29 06:03:37 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-07-02 15:33:22 -0400 |
commit | 7db74df88b52844f4e966901e2972bba725e6766 (patch) | |
tree | ccb2139d2c4628591287bf456ede1164cef43d0f /kernel/rcutree.c | |
parent | 24ebbca8ecdd5129d7f829a7cb5146aaeb531f77 (diff) |
rcu: Move rcu_barrier_completion to rcu_state structure
In order to allow each RCU flavor to concurrently execute its
rcu_barrier() function, it is necessary to move the relevant
state to the rcu_state structure. This commit therefore moves the
rcu_barrier_completion global variable to a new ->barrier_completion
field in the rcu_state structure.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 9 |
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 5929b021666d..ca7d1678ac79 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -158,7 +158,6 @@ unsigned long rcutorture_vernum; | |||
158 | /* State information for rcu_barrier() and friends. */ | 158 | /* State information for rcu_barrier() and friends. */ |
159 | 159 | ||
160 | static DEFINE_MUTEX(rcu_barrier_mutex); | 160 | static DEFINE_MUTEX(rcu_barrier_mutex); |
161 | static struct completion rcu_barrier_completion; | ||
162 | 161 | ||
163 | /* | 162 | /* |
164 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 163 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
@@ -2275,7 +2274,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp) | |||
2275 | struct rcu_state *rsp = rdp->rsp; | 2274 | struct rcu_state *rsp = rdp->rsp; |
2276 | 2275 | ||
2277 | if (atomic_dec_and_test(&rsp->barrier_cpu_count)) | 2276 | if (atomic_dec_and_test(&rsp->barrier_cpu_count)) |
2278 | complete(&rcu_barrier_completion); | 2277 | complete(&rsp->barrier_completion); |
2279 | } | 2278 | } |
2280 | 2279 | ||
2281 | /* | 2280 | /* |
@@ -2325,7 +2324,7 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
2325 | * 6. Both rcu_barrier_callback() callbacks are invoked, awakening | 2324 | * 6. Both rcu_barrier_callback() callbacks are invoked, awakening |
2326 | * us -- but before CPU 1's orphaned callbacks are invoked!!! | 2325 | * us -- but before CPU 1's orphaned callbacks are invoked!!! |
2327 | */ | 2326 | */ |
2328 | init_completion(&rcu_barrier_completion); | 2327 | init_completion(&rsp->barrier_completion); |
2329 | atomic_set(&rsp->barrier_cpu_count, 1); | 2328 | atomic_set(&rsp->barrier_cpu_count, 1); |
2330 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 2329 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
2331 | rsp->rcu_barrier_in_progress = current; | 2330 | rsp->rcu_barrier_in_progress = current; |
@@ -2375,10 +2374,10 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
2375 | * CPU, and thus each counted, remove the initial count. | 2374 | * CPU, and thus each counted, remove the initial count. |
2376 | */ | 2375 | */ |
2377 | if (atomic_dec_and_test(&rsp->barrier_cpu_count)) | 2376 | if (atomic_dec_and_test(&rsp->barrier_cpu_count)) |
2378 | complete(&rcu_barrier_completion); | 2377 | complete(&rsp->barrier_completion); |
2379 | 2378 | ||
2380 | /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ | 2379 | /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ |
2381 | wait_for_completion(&rcu_barrier_completion); | 2380 | wait_for_completion(&rsp->barrier_completion); |
2382 | 2381 | ||
2383 | /* Other rcu_barrier() invocations can now safely proceed. */ | 2382 | /* Other rcu_barrier() invocations can now safely proceed. */ |
2384 | mutex_unlock(&rcu_barrier_mutex); | 2383 | mutex_unlock(&rcu_barrier_mutex); |