about summary refs log tree commit diff stats
path: root/kernel/rcutree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-09-25 13:01:45 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-09-25 13:01:45 -0400
commit5217192b85480353aeeb395574e60d0db04f3676 (patch)
treea45b440623a8fc55ece18b72e40829b2d9815d74 /kernel/rcutree.c
parentbda4ec9f6a7d7b249c7b14baa553731efedce300 (diff)
parentbff4a394795add6b919debc009f72b7607f5d4bf (diff)
Merge remote-tracking branch 'tip/smp/hotplug' into next.2012.09.25b
The conflicts between kernel/rcutree.h and kernel/rcutree_plugin.h were due to adjacent insertions and deletions, which were resolved by simply accepting the changes on both branches.
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--kernel/rcutree.c12
1 file changed, 4 insertions, 8 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 80fd02e5d115..7387e46009d9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -134,13 +134,12 @@ static int rcu_scheduler_fully_active __read_mostly;
134 */ 134 */
135static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); 135static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
136DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 136DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
137DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
138DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 137DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
139DEFINE_PER_CPU(char, rcu_cpu_has_work); 138DEFINE_PER_CPU(char, rcu_cpu_has_work);
140 139
141#endif /* #ifdef CONFIG_RCU_BOOST */ 140#endif /* #ifdef CONFIG_RCU_BOOST */
142 141
143static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 142static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
144static void invoke_rcu_core(void); 143static void invoke_rcu_core(void);
145static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 144static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
146 145
@@ -1543,8 +1542,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1543 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 1542 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
1544 1543
1545 /* Adjust any no-longer-needed kthreads. */ 1544 /* Adjust any no-longer-needed kthreads. */
1546 rcu_stop_cpu_kthread(cpu); 1545 rcu_boost_kthread_setaffinity(rnp, -1);
1547 rcu_node_kthread_setaffinity(rnp, -1);
1548 1546
1549 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ 1547 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
1550 1548
@@ -2572,12 +2570,10 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2572 break; 2570 break;
2573 case CPU_ONLINE: 2571 case CPU_ONLINE:
2574 case CPU_DOWN_FAILED: 2572 case CPU_DOWN_FAILED:
2575 rcu_node_kthread_setaffinity(rnp, -1); 2573 rcu_boost_kthread_setaffinity(rnp, -1);
2576 rcu_cpu_kthread_setrt(cpu, 1);
2577 break; 2574 break;
2578 case CPU_DOWN_PREPARE: 2575 case CPU_DOWN_PREPARE:
2579 rcu_node_kthread_setaffinity(rnp, cpu); 2576 rcu_boost_kthread_setaffinity(rnp, cpu);
2580 rcu_cpu_kthread_setrt(cpu, 0);
2581 break; 2577 break;
2582 case CPU_DYING: 2578 case CPU_DYING:
2583 case CPU_DYING_FROZEN: 2579 case CPU_DYING_FROZEN: