about summary refs log tree commit diff stats
path: root/kernel/rcutree.h
diff options
context:
space:
mode:
authorLai Jiangshan <laijs@cn.fujitsu.com>2010-10-20 02:13:06 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2010-11-30 01:01:58 -0500
commit29494be71afe2a16ad04e344306a620d7cc22d06 (patch)
treed16a2acd1566be326483d5217d39b78d2ef798c5 /kernel/rcutree.h
parent7b27d5475f86186914e54e4a6bb994e9a985337b (diff)
rcu,cleanup: simplify the code when cpu is dying
When we handle the CPU_DYING notifier, the whole system is stopped except for the current CPU. We therefore need no synchronization with the other CPUs. This allows us to move any orphaned RCU callbacks directly to the list of any online CPU without needing to run them through the global orphan lists. These global orphan lists can therefore be dispensed with. This commit makes these changes, though currently victimizes CPU 0 @@@. Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.h')
-rw-r--r--kernel/rcutree.h16
1 files changed, 4 insertions, 12 deletions
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 91d4170c5c13..1a54be2a902f 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -203,8 +203,8 @@ struct rcu_data {
203 long qlen_last_fqs_check; 203 long qlen_last_fqs_check;
204 /* qlen at last check for QS forcing */ 204 /* qlen at last check for QS forcing */
205 unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ 205 unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
206 unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */ 206 unsigned long n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
207 unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */ 207 unsigned long n_cbs_adopted; /* RCU cbs adopted from dying CPU */
208 unsigned long n_force_qs_snap; 208 unsigned long n_force_qs_snap;
209 /* did other CPU force QS recently? */ 209 /* did other CPU force QS recently? */
210 long blimit; /* Upper limit on a processed batch */ 210 long blimit; /* Upper limit on a processed batch */
@@ -309,15 +309,7 @@ struct rcu_state {
309 /* End of fields guarded by root rcu_node's lock. */ 309 /* End of fields guarded by root rcu_node's lock. */
310 310
311 raw_spinlock_t onofflock; /* exclude on/offline and */ 311 raw_spinlock_t onofflock; /* exclude on/offline and */
312 /* starting new GP. Also */ 312 /* starting new GP. */
313 /* protects the following */
314 /* orphan_cbs fields. */
315 struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */
316 /* orphaned by all CPUs in */
317 /* a given leaf rcu_node */
318 /* going offline. */
319 struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
320 long orphan_qlen; /* Number of orphaned cbs. */
321 raw_spinlock_t fqslock; /* Only one task forcing */ 313 raw_spinlock_t fqslock; /* Only one task forcing */
322 /* quiescent states. */ 314 /* quiescent states. */
323 unsigned long jiffies_force_qs; /* Time at which to invoke */ 315 unsigned long jiffies_force_qs; /* Time at which to invoke */
@@ -390,7 +382,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
390static int rcu_preempt_pending(int cpu); 382static int rcu_preempt_pending(int cpu);
391static int rcu_preempt_needs_cpu(int cpu); 383static int rcu_preempt_needs_cpu(int cpu);
392static void __cpuinit rcu_preempt_init_percpu_data(int cpu); 384static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
393static void rcu_preempt_send_cbs_to_orphanage(void); 385static void rcu_preempt_send_cbs_to_online(void);
394static void __init __rcu_init_preempt(void); 386static void __init __rcu_init_preempt(void);
395static void rcu_needs_cpu_flush(void); 387static void rcu_needs_cpu_flush(void);
396 388