path: root/kernel
author	Dipankar Sarma <dipankar@in.ibm.com>	2008-01-25 15:08:23 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:23 -0500
commit	c2d727aa2ff17a1c8e5ed1e5e231bb8579b27e82 (patch)
tree	76b570288a72c1337920c5e60682316c554f00ee /kernel
parent	c49443c538c1bbf50eda27e4a3711e9fc15176b0 (diff)
Preempt-RCU: Use softirq instead of tasklets for RCU
This patch makes RCU use softirq instead of tasklets. It also adds a memory barrier after raising the softirq in order to ensure that the CPU sees the most recently updated value of rcu->cur while processing callbacks.

The discussion of the related theoretical race pointed out by James Huang can be found here: http://lkml.org/lkml/2007/11/20/603

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
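For readers unfamiliar with the softirq interface, the pattern the patch switches to looks roughly like the sketch below. This is a minimal illustrative fragment against a 2.6.24-era kernel, not part of the patch: the example_* names are hypothetical, while open_softirq(), raise_softirq(), smp_mb(), struct softirq_action and RCU_SOFTIRQ are the interfaces actually used in the diff.

#include <linux/interrupt.h>	/* open_softirq(), raise_softirq(), struct softirq_action */

/* Runs in softirq context once RCU_SOFTIRQ has been raised on this CPU. */
static void example_softirq_handler(struct softirq_action *unused)
{
	/* walk the per-CPU callback lists here */
}

/* One-time registration of the handler; the third argument is a
 * per-handler data pointer, passed as NULL here as in the patch. */
static void example_register_softirq(void)
{
	open_softirq(RCU_SOFTIRQ, example_softirq_handler, NULL);
}

/* Mark the softirq pending on this CPU and order subsequent reads,
 * mirroring raise_rcu_softirq() in the patch: the smp_mb() ensures the
 * handler running on this CPU sees the latest value of rcu->cur. */
static inline void example_raise(void)
{
	raise_softirq(RCU_SOFTIRQ);
	smp_mb();
}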
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcupdate.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f2c1a04e9b18..4dfa0b792efa 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -73,8 +73,6 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
-/* Fake initialization required by compiler */
-static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
 static int blimit = 10;
 static int qhimark = 10000;
 static int qlowmark = 100;
@@ -231,6 +229,18 @@ void rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
+/* Raises the softirq for processing rcu_callbacks. */
+static inline void raise_rcu_softirq(void)
+{
+	raise_softirq(RCU_SOFTIRQ);
+	/*
+	 * The smp_mb() here is required to ensure that this cpu's
+	 * __rcu_process_callbacks() reads the most recently updated
+	 * value of rcu->cur.
+	 */
+	smp_mb();
+}
+
 /*
  * Invoke the completed RCU callbacks. They are expected to be in
  * a per-cpu list.
@@ -260,7 +270,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (!rdp->donelist)
 		rdp->donetail = &rdp->donelist;
 	else
-		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
+		raise_rcu_softirq();
 }
 
 /*
@@ -412,7 +422,6 @@ static void rcu_offline_cpu(int cpu)
 					&per_cpu(rcu_bh_data, cpu));
 	put_cpu_var(rcu_data);
 	put_cpu_var(rcu_bh_data);
-	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
 }
 
 #else
@@ -424,7 +433,7 @@ static void rcu_offline_cpu(int cpu)
 #endif
 
 /*
- * This does the RCU processing work from tasklet context.
+ * This does the RCU processing work from softirq context.
  */
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 					struct rcu_data *rdp)
@@ -469,7 +478,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 		rcu_do_batch(rdp);
 }
 
-static void rcu_process_callbacks(unsigned long unused)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
 	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
@@ -533,7 +542,7 @@ void rcu_check_callbacks(int cpu, int user)
 		rcu_bh_qsctr_inc(cpu);
 	} else if (!in_softirq())
 		rcu_bh_qsctr_inc(cpu);
-	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
+	raise_rcu_softirq();
 }
 
 static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
@@ -556,7 +565,7 @@ static void __cpuinit rcu_online_cpu(int cpu)
 
 	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
 	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
-	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
 }
 
 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,