author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-05-29 03:34:56 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 15:33:22 -0400
commit     24ebbca8ecdd5129d7f829a7cb5146aaeb531f77 (patch)
tree       b7a4bb8044dcd1e13119113c77d2a4b96ba25a43 /kernel/rcutree.c
parent     06668efa9180f4824fe846a8ff96338c18646bc7 (diff)
rcu: Move rcu_barrier_cpu_count to rcu_state structure
In order to allow each RCU flavor to concurrently execute its rcu_barrier()
function, it is necessary to move the relevant state to the rcu_state
structure. This commit therefore moves the rcu_barrier_cpu_count global
variable to a new ->barrier_cpu_count field in the rcu_state structure.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
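The heart of the change is a common refactoring pattern: hoist a file-scope counter into the per-instance structure, so that each instance (here, each RCU flavor, such as rcu_sched or rcu_bh) owns its own count and no longer serializes against the others. A minimal user-space sketch of that pattern follows; the names (flavor_state, and a C11 atomic_int in place of the kernel's atomic_t) are invented for illustration, not taken from the kernel:

#include <stdatomic.h>
#include <stdio.h>

/* Before: one file-scope counter shared by every flavor, so two
 * flavors' barriers contend on the same state. */
static atomic_int rcu_barrier_cpu_count_global;

/* After: the counter lives in the per-flavor structure, so each
 * flavor counts independently. */
struct flavor_state {			/* stands in for struct rcu_state */
	const char *name;
	atomic_int barrier_cpu_count;	/* the field this commit adds */
};

static struct flavor_state sched_flavor = { .name = "rcu_sched" };
static struct flavor_state bh_flavor    = { .name = "rcu_bh" };

int main(void)
{
	(void)rcu_barrier_cpu_count_global;	/* the old global, now unused */

	/* Two concurrent barriers no longer interfere: each flavor
	 * initializes and decrements only its own counter. */
	atomic_store(&sched_flavor.barrier_cpu_count, 1);
	atomic_store(&bh_flavor.barrier_cpu_count, 1);
	printf("%s=%d %s=%d\n",
	       sched_flavor.name, atomic_load(&sched_flavor.barrier_cpu_count),
	       bh_flavor.name, atomic_load(&bh_flavor.barrier_cpu_count));
	return 0;
}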
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1e552598b55d..5929b021666d 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -157,7 +157,6 @@ unsigned long rcutorture_vernum;
 
 /* State information for rcu_barrier() and friends. */
 
-static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 
@@ -2270,9 +2269,12 @@ static int rcu_cpu_has_callbacks(int cpu)
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+	struct rcu_state *rsp = rdp->rsp;
+
+	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rcu_barrier_completion);
 }
 
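An RCU callback receives only its struct rcu_head *, so the rewritten rcu_barrier_callback() uses container_of() to recover the enclosing rcu_data, and through its ->rsp pointer the right flavor's counter. Below is a freestanding sketch of the container_of() idiom, using a simplified offsetof-based form of the kernel macro and hypothetical holder/head types in place of rcu_data/rcu_head:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): map a pointer to an embedded member
 * back to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct head {				/* stands in for struct rcu_head */
	void *next;
};

struct holder {				/* stands in for struct rcu_data */
	int flavor_id;			/* stands in for the ->rsp back-pointer */
	struct head barrier_head;	/* the embedded member */
};

static void callback(struct head *hp)
{
	/* Recover the enclosing holder from the embedded head, just as
	 * rcu_barrier_callback() recovers its rcu_data from rhp. */
	struct holder *h = container_of(hp, struct holder, barrier_head);

	printf("callback ran for flavor %d\n", h->flavor_id);
}

int main(void)
{
	struct holder h = { .flavor_id = 42 };

	callback(&h.barrier_head);	/* only the head pointer is passed */
	return 0;
}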
@@ -2284,7 +2286,7 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-	atomic_inc(&rcu_barrier_cpu_count);
+	atomic_inc(&rsp->barrier_cpu_count);
 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
 
@@ -2297,9 +2299,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	int cpu;
 	unsigned long flags;
 	struct rcu_data *rdp;
-	struct rcu_head rh;
+	struct rcu_data rd;
 
-	init_rcu_head_on_stack(&rh);
+	init_rcu_head_on_stack(&rd.barrier_head);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rcu_barrier_mutex);
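The hunk above is the other half of the container_of() change sketched earlier: since rcu_barrier_callback() now assumes every rcu_head it receives is embedded in a struct rcu_data, the initiating task can no longer post a bare on-stack rcu_head. It instead places a whole rcu_data (rd) on the stack and initializes rd.barrier_head, so that container_of() in the callback lands on valid memory; a later hunk fills in rd.rsp before the head is posted.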
@@ -2324,7 +2326,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * us -- but before CPU 1's orphaned callbacks are invoked!!!
 	 */
 	init_completion(&rcu_barrier_completion);
-	atomic_set(&rcu_barrier_cpu_count, 1);
+	atomic_set(&rsp->barrier_cpu_count, 1);
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
 	rsp->rcu_barrier_in_progress = current;
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2363,15 +2365,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	rcu_adopt_orphan_cbs(rsp);
 	rsp->rcu_barrier_in_progress = NULL;
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-	atomic_inc(&rcu_barrier_cpu_count);
+	atomic_inc(&rsp->barrier_cpu_count);
 	smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-	rsp->call(&rh, rcu_barrier_callback);
+	rd.rsp = rsp;
+	rsp->call(&rd.barrier_head, rcu_barrier_callback);
 
 	/*
 	 * Now that we have an rcu_barrier_callback() callback on each
 	 * CPU, and thus each counted, remove the initial count.
 	 */
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rcu_barrier_completion);
 
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
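The last two hunks implement a classic start-at-one completion protocol: the count is initialized to 1 rather than 0 so it cannot reach zero while callbacks are still being posted; each CPU's rcu_barrier_func() adds one and posts a callback, each callback subtracts one, and only after every CPU (plus the extra callback covering orphaned callbacks) has been counted does the initiator drop the initial count. A compact user-space sketch of the same protocol, with invented names and a plain flag standing in for the completion:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static int barrier_done;		/* stands in for the completion */

static void barrier_callback(void)	/* one invocation per posted callback */
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		barrier_done = 1;	/* last decrement: complete() */
}

int main(void)
{
	int ncpus = 4;

	/* Start at 1, not 0, so the count cannot hit zero while
	 * callbacks are still being posted on the CPUs. */
	atomic_store(&barrier_cpu_count, 1);

	for (int cpu = 0; cpu < ncpus; cpu++)
		atomic_fetch_add(&barrier_cpu_count, 1); /* rcu_barrier_func() */

	for (int cpu = 0; cpu < ncpus; cpu++)
		barrier_callback();	/* callbacks run and drop the count */

	/* All CPUs posted and counted: remove the initial count. If the
	 * callbacks have already run, this is what fires the completion. */
	barrier_callback();

	printf("barrier_done = %d\n", barrier_done);
	return 0;
}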
@@ -2380,7 +2383,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rcu_barrier_mutex);
 
-	destroy_rcu_head_on_stack(&rh);
+	destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**