author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2015-02-24 14:05:36 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2015-02-26 19:04:37 -0500
commit     5871968d531f39c23a8e6c69525bb705bca52e04 (patch)
tree       059ed811b5da15952bcb06844ecfcb437070be6b /kernel/rcu
parent     675da67f24e2d6d8df0cedf12e59085ed8bbf4e7 (diff)
rcu: Tighten up affinity and check for sysidle
If the RCU grace-period kthread invoking rcu_sysidle_check_cpu() happens to be running on the tick_do_timer_cpu initially, then rcu_bind_gp_kthread() won't bind it. This kthread might then migrate before invoking rcu_gp_fqs(), which will trigger the WARN_ON_ONCE() in rcu_sysidle_check_cpu(). This commit therefore makes rcu_bind_gp_kthread() do the binding even if the kthread is currently on the same CPU. Because this incurs added overhead, this commit also causes each RCU grace-period kthread to invoke rcu_bind_gp_kthread() once at boot rather than at the beginning of each grace period. And as long as rcu_bind_gp_kthread() is being modified, this commit eliminates its #ifdef.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
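The core of the fix is a kthread that pins itself to the timekeeping CPU unconditionally, once, before entering its main loop, instead of re-binding at the start of every grace period. The following is only a minimal sketch of that pattern under assumed names (my_designated_cpu and my_bound_kthread are hypothetical stand-ins for tick_do_timer_cpu and the RCU grace-period kthread), not the RCU code itself:

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/delay.h>

/* Hypothetical stand-in for tick_do_timer_cpu. */
static int my_designated_cpu;

static int my_bound_kthread(void *arg)
{
        /*
         * Bind unconditionally, even if this kthread already happens to
         * be running on the designated CPU; skipping the call in that
         * case would leave the kthread free to migrate later, which is
         * the race the patch closes.
         */
        if (my_designated_cpu >= 0 && my_designated_cpu < nr_cpu_ids)
                set_cpus_allowed_ptr(current, cpumask_of(my_designated_cpu));

        while (!kthread_should_stop()) {
                /* ... periodic work, analogous to the grace-period loop ... */
                msleep(1000);
        }
        return 0;
}

Such a kthread would be started with something like kthread_run(my_bound_kthread, NULL, "my_kthread"), mirroring how rcu_gp_kthread() now calls rcu_bind_gp_kthread() once before its for (;;) loop rather than letting rcu_gp_init() do it each grace period.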
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c         |  2 +-
-rw-r--r--  kernel/rcu/tree_plugin.h  | 12 ++++++------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 735bd7ee749a..a6972c20eaa5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1707,7 +1707,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	ACCESS_ONCE(rsp->gp_activity) = jiffies;
-	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
 	if (!ACCESS_ONCE(rsp->gp_flags)) {
@@ -1895,6 +1894,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	struct rcu_state *rsp = arg;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	rcu_bind_gp_kthread();
 	for (;;) {
 
 		/* Handle grace-period start. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a571e9a0f1d..b46c92824db1 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2763,7 +2763,8 @@ static void rcu_sysidle_exit(int irq)
 
 /*
  * Check to see if the current CPU is idle. Note that usermode execution
- * does not count as idle. The caller must have disabled interrupts.
+ * does not count as idle. The caller must have disabled interrupts,
+ * and must be running on tick_do_timer_cpu.
  */
 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
                                   unsigned long *maxj)
@@ -2784,8 +2785,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 	if (!*isidle || rdp->rsp != rcu_state_p ||
 	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
 		return;
-	if (rcu_gp_in_progress(rdp->rsp))
-		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+	/* Verify affinity of current kthread. */
+	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
 	/* Pick up current idle and NMI-nesting counter and check. */
 	cur = atomic_read(&rdtp->dynticks_idle);
@@ -3068,11 +3069,10 @@ static void rcu_bind_gp_kthread(void)
 		return;
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	cpu = tick_do_timer_cpu;
-	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
+	if (cpu >= 0 && cpu < nr_cpu_ids)
 		set_cpus_allowed_ptr(current, cpumask_of(cpu));
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-	if (!is_housekeeping_cpu(raw_smp_processor_id()))
-		housekeeping_affine(current);
+	housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
 
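The WARN_ON_ONCE() that the tightened binding protects is simply an affinity sanity check: rcu_sysidle_check_cpu() samples another CPU's idle/NMI-nesting counters and must itself be running on tick_do_timer_cpu with interrupts disabled. A stripped-down sketch of that check, with a hypothetical helper name in place of the RCU code:

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/irqflags.h>

/*
 * The caller is expected to run on the designated timekeeping CPU with
 * interrupts disabled, so smp_processor_id() is stable here and the
 * comparison below cannot race with migration.
 */
static void check_on_designated_cpu(int designated_cpu)
{
        WARN_ON_ONCE(!irqs_disabled());
        WARN_ON_ONCE(smp_processor_id() != designated_cpu);
}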