author     Paul E. McKenney <paul.mckenney@linaro.org>	2012-08-02 20:43:50 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-23 10:43:55 -0400
commit     1331e7a1bbe1f11b19c4327ba0853bee2a606543
tree       596d9ed379ef7e2c5d8cf1cc0ba56ade084548ec	/kernel/rcutree.c
parent     a10d206ef1a83121ab7430cb196e0376a7145b22
rcu: Remove _rcu_barrier() dependency on __stop_machine()
Currently, _rcu_barrier() relies on preempt_disable() to prevent any CPU
from going offline, which in turn depends on CPU hotplug's use of
__stop_machine().

This patch therefore makes _rcu_barrier() use get_online_cpus() to block
CPU-hotplug operations.  This has the added benefit of removing the need
for _rcu_barrier() to adopt callbacks: because CPU-hotplug operations are
excluded, there can be no callbacks to adopt.  This commit simplifies the
code accordingly.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
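For readers unfamiliar with the CPU-hotplug exclusion API the patch switches
to, the shape of the new code path is the usual get_online_cpus() /
put_online_cpus() critical section.  The sketch below is illustrative only
and is not part of the patch: the helper name visit_online_cpus() is
invented, while get_online_cpus(), put_online_cpus(), for_each_online_cpu(),
and smp_call_function_single() are the kernel primitives that appear in the
hunks that follow.

	#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
	#include <linux/cpumask.h>	/* for_each_online_cpu() */
	#include <linux/smp.h>		/* smp_call_function_single() */

	/*
	 * Hypothetical helper: invoke func(info) on every currently online
	 * CPU, with CPU hotplug excluded so that no CPU can go away (or
	 * appear) while the online mask is being walked.
	 */
	static void visit_online_cpus(void (*func)(void *info), void *info)
	{
		int cpu;

		get_online_cpus();		/* block CPU-hotplug operations */
		for_each_online_cpu(cpu)
			smp_call_function_single(cpu, func, info, 1);
		put_online_cpus();		/* allow CPU hotplug again */
	}

Because get_online_cpus() can sleep, this pattern is only usable from
process context, which _rcu_barrier() already is (it takes
rsp->barrier_mutex), so substituting it for preempt_disable() is safe here.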
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	83
1 file changed, 11 insertions(+), 72 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f7bcd9e6c054..c45d3f745302 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1392,17 +1392,6 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 	int i;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-	/*
-	 * If there is an rcu_barrier() operation in progress, then
-	 * only the task doing that operation is permitted to adopt
-	 * callbacks.  To do otherwise breaks rcu_barrier() and friends
-	 * by causing them to fail to wait for the callbacks in the
-	 * orphanage.
-	 */
-	if (rsp->rcu_barrier_in_progress &&
-	    rsp->rcu_barrier_in_progress != current)
-		return;
-
 	/* Do the accounting first. */
 	rdp->qlen_lazy += rsp->qlen_lazy;
 	rdp->qlen += rsp->qlen;
@@ -1457,9 +1446,8 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
- * adopting them, if there is no _rcu_barrier() instance running.
- * There can only be one CPU hotplug operation at a time, so no other
- * CPU can be attempting to update rcu_cpu_kthread_task.
+ * adopting them.  There can only be one CPU hotplug operation at a time,
+ * so no other CPU can be attempting to update rcu_cpu_kthread_task.
  */
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
@@ -1521,10 +1509,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
-{
-}
-
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
@@ -2328,13 +2312,10 @@ static void rcu_barrier_func(void *type)
 static void _rcu_barrier(struct rcu_state *rsp)
 {
 	int cpu;
-	unsigned long flags;
 	struct rcu_data *rdp;
-	struct rcu_data rd;
 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
 	unsigned long snap_done;
 
-	init_rcu_head_on_stack(&rd.barrier_head);
 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
@@ -2374,70 +2355,30 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	/*
 	 * Initialize the count to one rather than to zero in order to
 	 * avoid a too-soon return to zero in case of a short grace period
-	 * (or preemption of this task).  Also flag this task as doing
-	 * an rcu_barrier().  This will prevent anyone else from adopting
-	 * orphaned callbacks, which could cause otherwise failure if a
-	 * CPU went offline and quickly came back online.  To see this,
-	 * consider the following sequence of events:
-	 *
-	 * 1.	We cause CPU 0 to post an rcu_barrier_callback() callback.
-	 * 2.	CPU 1 goes offline, orphaning its callbacks.
-	 * 3.	CPU 0 adopts CPU 1's orphaned callbacks.
-	 * 4.	CPU 1 comes back online.
-	 * 5.	We cause CPU 1 to post an rcu_barrier_callback() callback.
-	 * 6.	Both rcu_barrier_callback() callbacks are invoked, awakening
-	 *	us -- but before CPU 1's orphaned callbacks are invoked!!!
+	 * (or preemption of this task).  Exclude CPU-hotplug operations
+	 * to ensure that no offline CPU has callbacks queued.
 	 */
 	init_completion(&rsp->barrier_completion);
 	atomic_set(&rsp->barrier_cpu_count, 1);
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-	rsp->rcu_barrier_in_progress = current;
-	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+	get_online_cpus();
 
 	/*
-	 * Force every CPU with callbacks to register a new callback
-	 * that will tell us when all the preceding callbacks have
-	 * been invoked.  If an offline CPU has callbacks, wait for
-	 * it to either come back online or to finish orphaning those
-	 * callbacks.
+	 * Force each CPU with callbacks to register a new callback.
+	 * When that callback is invoked, we will know that all of the
+	 * corresponding CPU's preceding callbacks have been invoked.
 	 */
-	for_each_possible_cpu(cpu) {
-		preempt_disable();
+	for_each_online_cpu(cpu) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (cpu_is_offline(cpu)) {
-			_rcu_barrier_trace(rsp, "Offline", cpu,
-					   rsp->n_barrier_done);
-			preempt_enable();
-			while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
-				schedule_timeout_interruptible(1);
-		} else if (ACCESS_ONCE(rdp->qlen)) {
+		if (ACCESS_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
 					   rsp->n_barrier_done);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
-			preempt_enable();
 		} else {
 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
 					   rsp->n_barrier_done);
-			preempt_enable();
 		}
 	}
+	put_online_cpus();
 
-	/*
-	 * Now that all online CPUs have rcu_barrier_callback() callbacks
-	 * posted, we can adopt all of the orphaned callbacks and place
-	 * an rcu_barrier_callback() callback after them.  When that is done,
-	 * we are guaranteed to have an rcu_barrier_callback() callback
-	 * following every callback that could possibly have been
-	 * registered before _rcu_barrier() was called.
-	 */
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-	rcu_adopt_orphan_cbs(rsp);
-	rsp->rcu_barrier_in_progress = NULL;
-	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-	atomic_inc(&rsp->barrier_cpu_count);
-	smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-	rd.rsp = rsp;
-	rsp->call(&rd.barrier_head, rcu_barrier_callback);
-
 	/*
 	 * Now that we have an rcu_barrier_callback() callback on each
@@ -2458,8 +2399,6 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rsp->barrier_mutex);
-
-	destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**
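As a closing aside, the "initialize the count to one" comment retained in
the _rcu_barrier() hunk above describes a reference-counting idiom that is
easier to see in isolation.  The userspace sketch below is illustrative
only; it is not kernel code, and names such as barrier_count,
barrier_callback(), and worker() are invented.  It reproduces the same
pattern with C11 atomics and pthreads: the initiator holds one reference of
its own, so the count cannot reach zero until every posted callback has run
and the initiator has dropped that reference.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_FAKE_CPUS 4

	static atomic_int barrier_count;	/* analogue of rsp->barrier_cpu_count */
	static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
	static int done;			/* analogue of rsp->barrier_completion */

	/* Analogue of rcu_barrier_callback(): drop one reference, signal on zero. */
	static void barrier_callback(void)
	{
		if (atomic_fetch_sub(&barrier_count, 1) == 1) {
			pthread_mutex_lock(&done_lock);
			done = 1;
			pthread_cond_signal(&done_cond);
			pthread_mutex_unlock(&done_lock);
		}
	}

	/* Worker thread standing in for a CPU invoking its queued callback. */
	static void *worker(void *arg)
	{
		(void)arg;
		barrier_callback();
		return NULL;
	}

	int main(void)
	{
		pthread_t tids[NR_FAKE_CPUS];

		/* Start at 1, not 0: an early callback must not signal completion. */
		atomic_store(&barrier_count, 1);

		/* "Post" one callback per simulated CPU, taking a reference each. */
		for (int i = 0; i < NR_FAKE_CPUS; i++) {
			atomic_fetch_add(&barrier_count, 1);
			pthread_create(&tids[i], NULL, worker, NULL);
		}

		/* Drop the initiator's reference; only now can the count hit zero. */
		barrier_callback();

		pthread_mutex_lock(&done_lock);
		while (!done)
			pthread_cond_wait(&done_cond, &done_lock);
		pthread_mutex_unlock(&done_lock);

		for (int i = 0; i < NR_FAKE_CPUS; i++)
			pthread_join(tids[i], NULL);
		printf("all callbacks completed\n");
		return 0;
	}

Build with something like "cc -std=c11 -pthread barrier_sketch.c" (file name
is arbitrary); the program prints its message only after all simulated
callbacks have run, mirroring the guarantee _rcu_barrier() provides.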