path: root/kernel/rcu/tree_plugin.h
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-07-25 14:21:47 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-16 13:07:59 -0400
commit		f4579fc57cf4244057b713b1f73f4dc9f0b11e97 (patch)
tree		7c682f8da323a07e5a6d63ad4ff87cc1baf34e73 /kernel/rcu/tree_plugin.h
parent		11ed7f934cb807f26da09547b5946c2e534d1dac (diff)
rcu: Fix attempt to avoid unsolicited offloading of callbacks
Commit b58cc46c5f6b (rcu: Don't offload callbacks unless specifically
requested) failed to adjust the callback lists of the CPUs that are known
to be no-CBs CPUs only because they are also nohz_full= CPUs.  This failure
can result in callbacks that are posted during early boot getting stranded
on nxtlist for CPUs whose no-CBs property becomes apparent late, and there
can also be spurious warnings about offline CPUs posting callbacks.

This commit fixes these problems by adding an early-boot rcu_init_nohz()
that properly initializes the no-CBs CPUs.

Note that kernels built with CONFIG_RCU_NOCB_CPU_ALL=y or with
CONFIG_RCU_NOCB_CPU=n do not exhibit this bug.  Neither do kernels booted
without the nohz_full= boot parameter.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
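[Editor's note] The heart of the change is that a CPU can become a no-CBs CPU for two
independent reasons, an explicit rcu_nocbs= mask or membership in the nohz_full= set,
and the new rcu_init_nohz() must take the union of the two (trimmed to the CPUs that
actually exist) early enough that no callbacks get stranded.  Below is a minimal
standalone sketch of that mask arithmetic, not kernel code: the cpumasks are modeled
as 64-bit words and every value is made up for illustration; the same steps appear in
the rcu_init_nohz() hunk further down.

/* Standalone model of the no-CBs mask setup performed by rcu_init_nohz(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_possible_mask = 0xf;	/* CPUs 0-3 exist */
	uint64_t tick_nohz_full_mask = 0xc;	/* booted with nohz_full=2-3 */
	uint64_t rcu_nocb_mask = 0x2;		/* booted with rcu_nocbs=1 */
	bool tick_nohz_full_running = true;

	/* nohz_full= CPUs must also be no-CBs CPUs, so fold them in. */
	if (tick_nohz_full_running)
		rcu_nocb_mask |= tick_nohz_full_mask;

	/* Ignore CPUs named on the command line that do not exist. */
	if (rcu_nocb_mask & ~cpu_possible_mask) {
		printf("Note: 'rcu_nocbs=' contains nonexistent CPUs.\n");
		rcu_nocb_mask &= cpu_possible_mask;
	}

	/* Prints 0xe: CPUs 1, 2, and 3 end up offloaded. */
	printf("Offload RCU callbacks from CPUs: 0x%llx\n",
	       (unsigned long long)rcu_nocb_mask);
	return 0;
}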
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	92
1 file changed, 61 insertions(+), 31 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a7997e272564..06d077ccf8d5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -85,33 +85,6 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_NOCB_CPU
-#ifndef CONFIG_RCU_NOCB_CPU_NONE
-	if (!have_rcu_nocb_mask) {
-		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
-		have_rcu_nocb_mask = true;
-	}
-#ifdef CONFIG_RCU_NOCB_CPU_ZERO
-	pr_info("\tOffload RCU callbacks from CPU 0\n");
-	cpumask_set_cpu(0, rcu_nocb_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
-#ifdef CONFIG_RCU_NOCB_CPU_ALL
-	pr_info("\tOffload RCU callbacks from all CPUs\n");
-	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
-	if (have_rcu_nocb_mask) {
-		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
-			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-				    rcu_nocb_mask);
-		}
-		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
-		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
-		if (rcu_nocb_poll)
-			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-	}
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -2451,6 +2424,67 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
 }
 
+void __init rcu_init_nohz(void)
+{
+	int cpu;
+	bool need_rcu_nocb_mask = true;
+	struct rcu_state *rsp;
+
+#ifdef CONFIG_RCU_NOCB_CPU_NONE
+	need_rcu_nocb_mask = false;
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
+
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+		need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
+		have_rcu_nocb_mask = true;
+	}
+	if (!have_rcu_nocb_mask)
+		return;
+
+#ifdef CONFIG_RCU_NOCB_CPU_ZERO
+	pr_info("\tOffload RCU callbacks from CPU 0\n");
+	cpumask_set_cpu(0, rcu_nocb_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
+#ifdef CONFIG_RCU_NOCB_CPU_ALL
+	pr_info("\tOffload RCU callbacks from all CPUs\n");
+	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running)
+		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+			    rcu_nocb_mask);
+	}
+	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+	if (rcu_nocb_poll)
+		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+	for_each_rcu_flavor(rsp) {
+		for_each_cpu(cpu, rcu_nocb_mask) {
+			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			/*
+			 * If there are early callbacks, they will need
+			 * to be moved to the nocb lists.
+			 */
+			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
+				     &rdp->nxtlist &&
+				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
+			init_nocb_callback_list(rdp);
+		}
+	}
+}
+
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
@@ -2479,10 +2513,6 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 
 	if (rcu_nocb_mask == NULL)
 		return;
-#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
-	if (tick_nohz_full_running)
-		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
 	if (ls == -1) {
 		ls = int_sqrt(nr_cpu_ids);
 		rcu_nocb_leader_stride = ls;