 kernel/rcutree.c | 25 ++++++++++++++-----------
 kernel/rcutree.h |  1 +
 2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1e552598b55d..5929b021666d 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -157,7 +157,6 @@ unsigned long rcutorture_vernum;
 
 /* State information for rcu_barrier() and friends. */
 
-static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 
@@ -2270,9 +2269,12 @@ static int rcu_cpu_has_callbacks(int cpu)
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+	struct rcu_state *rsp = rdp->rsp;
+
+	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rcu_barrier_completion);
 }
 
@@ -2284,7 +2286,7 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-	atomic_inc(&rcu_barrier_cpu_count);
+	atomic_inc(&rsp->barrier_cpu_count);
 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
 
@@ -2297,9 +2299,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	int cpu;
 	unsigned long flags;
 	struct rcu_data *rdp;
-	struct rcu_head rh;
+	struct rcu_data rd;
 
-	init_rcu_head_on_stack(&rh);
+	init_rcu_head_on_stack(&rd.barrier_head);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rcu_barrier_mutex);
@@ -2324,7 +2326,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * us -- but before CPU 1's orphaned callbacks are invoked!!!
 	 */
 	init_completion(&rcu_barrier_completion);
-	atomic_set(&rcu_barrier_cpu_count, 1);
+	atomic_set(&rsp->barrier_cpu_count, 1);
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
 	rsp->rcu_barrier_in_progress = current;
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2363,15 +2365,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	rcu_adopt_orphan_cbs(rsp);
 	rsp->rcu_barrier_in_progress = NULL;
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-	atomic_inc(&rcu_barrier_cpu_count);
+	atomic_inc(&rsp->barrier_cpu_count);
 	smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-	rsp->call(&rh, rcu_barrier_callback);
+	rd.rsp = rsp;
+	rsp->call(&rd.barrier_head, rcu_barrier_callback);
 
 	/*
 	 * Now that we have an rcu_barrier_callback() callback on each
 	 * CPU, and thus each counted, remove the initial count.
 	 */
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rcu_barrier_completion);
 
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
@@ -2380,7 +2383,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rcu_barrier_mutex);
 
-	destroy_rcu_head_on_stack(&rh);
+	destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 586d93c978f2..c57ef0b7f097 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -400,6 +400,7 @@ struct rcu_state {
 	struct task_struct *rcu_barrier_in_progress;
 						/* Task doing rcu_barrier(), */
 						/*  or NULL if no barrier. */
+	atomic_t barrier_cpu_count;	/* # CPUs waiting on. */
 	raw_spinlock_t fqslock;		/* Only one task forcing */
 						/*  quiescent states. */
 	unsigned long jiffies_force_qs;	/* Time at which to invoke */