aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2018-05-22 12:50:53 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-05-22 19:12:26 -0400
commitf64c6013a2029316ea552f99823d116ee5f5f955 (patch)
tree07426bae4e4500acc8bc64a0edd0c6fe6b298e6e /kernel/rcu/tree.c
parent22df7316ac71dc1ac57415349938737d2a229c59 (diff)
rcu/x86: Provide early rcu_cpu_starting() callback
The x86/mtrr code does horrific things because hardware. It uses stop_machine_from_inactive_cpu(), which does a wakeup (of the stopper thread on another CPU), which uses RCU, all before the CPU is onlined.

RCU complains about this, because wakeups use RCU and RCU does (rightfully) not consider offline CPUs for grace-periods.

Fix this by initializing RCU way early in the MTRR case.

Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Add !SMP support, per 0day Test Robot report. ]
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c9
1 file changed, 9 insertions, 0 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4fccdfa25716..aa7cade1b9f3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3665,6 +3665,8 @@ int rcutree_dead_cpu(unsigned int cpu)
 	return 0;
 }
 
+static DEFINE_PER_CPU(int, rcu_cpu_started);
+
 /*
  * Mark the specified CPU as being online so that subsequent grace periods
  * (both expedited and normal) will wait on it. Note that this means that
@@ -3686,6 +3688,11 @@ void rcu_cpu_starting(unsigned int cpu)
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	if (per_cpu(rcu_cpu_started, cpu))
+		return;
+
+	per_cpu(rcu_cpu_started, cpu) = 1;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		rnp = rdp->mynode;
@@ -3742,6 +3749,8 @@ void rcu_report_dead(unsigned int cpu)
 	preempt_enable();
 	for_each_rcu_flavor(rsp)
 		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+
+	per_cpu(rcu_cpu_started, cpu) = 0;
 }
 
 /* Migrate the dead CPU's callbacks to the current CPU. */