about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2016-02-26 13:43:44 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-03-01 14:36:58 -0500
commit27d50c7eeb0f03c3d3ca72aac4d2dd487ca1f3f0 (patch)
tree6cb9af43f8a7c74829e9874575d7aafd6516be31
parente69aab13117efc1987620090e539b4ebeb33a04c (diff)
rcu: Make CPU_DYING_IDLE an explicit call
Make the RCU CPU_DYING_IDLE callback an explicit function call, so it gets invoked at the proper place. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: linux-arch@vger.kernel.org Cc: Rik van Riel <riel@redhat.com> Cc: Rafael Wysocki <rafael.j.wysocki@intel.com> Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: Sebastian Siewior <bigeasy@linutronix.de> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Tejun Heo <tj@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Paul McKenney <paulmck@linux.vnet.ibm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul Turner <pjt@google.com> Link: http://lkml.kernel.org/r/20160226182341.870167933@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/notifier.h2
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--kernel/cpu.c1
-rw-r--r--kernel/rcu/tree.c70
-rw-r--r--kernel/sched/idle.c2
6 files changed, 42 insertions, 41 deletions
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 91a48d1b4ca0..f9b1fab4388a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -101,9 +101,7 @@ enum {
 				* Called on the new cpu, just before
 				* enabling interrupts. Must not sleep,
 				* must not fail */
-#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
-				       * idle loop. */
-#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
+#define CPU_BROKEN		0x000B /* CPU (unsigned)v did not die properly,
 				       * perhaps due to preemption. */
 
 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d14a4c362465..4149868de4e6 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,6 +47,8 @@
  * runtime initialization.
  */
 
+struct notifier_block;
+
 typedef int (*notifier_fn_t)(struct notifier_block *nb,
 			unsigned long action, void *data);
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47ee16f..fc46fe3ea259 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -332,9 +332,7 @@ void rcu_init(void);
 void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
-struct notifier_block;
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu);
+void rcu_report_dead(unsigned int cpu);
 
 #ifndef CONFIG_TINY_RCU
 void rcu_end_inkernel_boot(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0e8c07f2566e..ff8059b76a85 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -762,6 +762,7 @@ void cpuhp_report_idle_dead(void)
 	BUG_ON(st->state != CPUHP_AP_OFFLINE);
 	st->state = CPUHP_AP_IDLE_DEAD;
 	complete(&st->done);
+	rcu_report_dead(smp_processor_id());
 }
 
 #else
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e41dd4131f7a..85b41341272e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2607,28 +2607,6 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 }
 
 /*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinit
- * bit masks.
- */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
-{
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
-
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
-
-	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-	mask = rdp->grpmask;
-	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-	rnp->qsmaskinitnext &= ~mask;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context. Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -4247,6 +4225,43 @@ static void rcu_prepare_cpu(int cpu)
 	rcu_init_percpu_data(cpu, rsp);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function. We now remove it from the rcu_node tree's ->qsmaskinit
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return;
+
+	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+	mask = rdp->grpmask;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rnp->qsmaskinitnext &= ~mask;
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+void rcu_report_dead(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	/* QS for any half-done expedited RCU-sched GP. */
+	preempt_disable();
+	rcu_report_exp_rdp(&rcu_sched_state,
+			   this_cpu_ptr(rcu_sched_state.rda), true);
+	preempt_enable();
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+}
+#endif
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -4278,17 +4293,6 @@ int rcu_cpu_notify(struct notifier_block *self,
 		for_each_rcu_flavor(rsp)
 			rcu_cleanup_dying_cpu(rsp);
 		break;
-	case CPU_DYING_IDLE:
-		/* QS for any half-done expedited RCU-sched GP. */
-		preempt_disable();
-		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(rcu_sched_state.rda), true);
-		preempt_enable();
-
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dying_idle_cpu(cpu, rsp);
-		}
-		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8abbe89e9114..bd12c6c714ec 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -220,8 +220,6 @@ static void cpu_idle_loop(void)
 		rmb();
 
 		if (cpu_is_offline(smp_processor_id())) {
-			rcu_cpu_notify(NULL, CPU_DYING_IDLE,
-				       (void *)(long)smp_processor_id());
 			cpuhp_report_idle_dead();
 			arch_cpu_idle_dead();
 		}