author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-08-27 17:58:16 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-08-29 09:34:39 -0400
commit		dd5d19bafd90d33043a4a14b2e2d98612caa293c
tree		c21d547124d277f00332bdb01c75f2f4f321de8c /kernel/rcutree.c
parent		06e799764eb7c2e4640888d438c3524d756613e1
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
When offlining CPUs from a multi-level tree, there is the
possibility of offlining the last CPU from a given node when
there are preempted RCU read-side critical sections that
started life on one of the CPUs on that node.
In this case, the corresponding tasks will be enqueued via the
task_struct's rcu_node_entry list_head onto one of the
rcu_node's blocked_tasks[] lists. These tasks need to be moved
somewhere else so that they will continue to prevent the current grace
period from ending. That somewhere is the root rcu_node.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <20090827215816.GA30472@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	2
1 file changed, 2 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index fee6316a8673..d903e2f2b840 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -81,6 +81,7 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 extern long rcu_batches_completed_sched(void);
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
 static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
			   struct rcu_node *rnp, unsigned long flags);
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
@@ -876,6 +877,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
+		rcu_preempt_offline_tasks(rsp, rnp);
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		rnp = rnp->parent;
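
The rcu_preempt_offline_tasks() helper itself lives in the rcutree plugins and is not part of this diff; only its call site and the forward declaration of rcu_get_root() appear above. As a rough, non-authoritative sketch of the approach the commit message describes (moving the blocked tasks from the outgoing node's blocked_tasks[] lists onto the root rcu_node's lists), something along these lines would do the job; the field names, list helpers, and locking shown here are assumptions for illustration, not the code added by the commit:

	/*
	 * Illustrative sketch only: move all tasks queued on the outgoing
	 * rcu_node's blocked_tasks[] lists to the root rcu_node, so they
	 * continue to hold up the current grace period.  Assumes the caller
	 * (__rcu_offline_cpu()) holds rnp->lock with irqs disabled.
	 */
	static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
					      struct rcu_node *rnp)
	{
		struct rcu_node *rnp_root = rcu_get_root(rsp);
		struct task_struct *t;
		int i;

		if (rnp == rnp_root)
			return;	/* Tasks are already on the root's lists. */

		for (i = 0; i < ARRAY_SIZE(rnp->blocked_tasks); i++) {
			while (!list_empty(&rnp->blocked_tasks[i])) {
				t = list_first_entry(&rnp->blocked_tasks[i],
						     struct task_struct,
						     rcu_node_entry);
				spin_lock(&rnp_root->lock); /* irqs already disabled. */
				list_move(&t->rcu_node_entry,
					  &rnp_root->blocked_tasks[i]);
				spin_unlock(&rnp_root->lock); /* irqs remain disabled. */
			}
		}
	}

In the diff above, the call sits right after the point where the last CPU of an rcu_node has been cleared from qsmaskinit, and the forward declaration of rcu_get_root() presumably lets the plugin code included into rcutree.c reach the root node before rcu_get_root()'s definition appears.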