path: root/kernel/rcupdate.c
author		Oleg Nesterov <oleg@tv-sign.ru>	2006-10-04 05:17:17 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 10:55:31 -0400
commit		20e9751bd9dd6b832fd84ada27840360f7e877f1 (patch)
tree		c598ff9cb9e540da7f8595f2c130f3674065e018 /kernel/rcupdate.c
parent		4b6c2cca6eef9cc4a15350bf1c61839e12e08b84 (diff)
[PATCH] rcu: simplify/improve batch tuning
Kill the hard-to-calculate 'rsinterval' boot parameter and the per-cpu rcu_data.last_rs_qlen. Instead, add a flag rcu_ctrlblk.signaled, which records the fact that one of the CPUs has sent a resched IPI since the last rcu_start_batch().

Roughly speaking, we need two rcu_start_batch()s in order to move callbacks from ->nxtlist to ->donelist. This means that when ->qlen exceeds qhimark and continues to grow, we should send a resched IPI, and then do it again after we have gone through a quiescent state. On the other hand, if one has already been sent, we don't need to send it again when another CPU detects overflow of the queue.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
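As a rough illustration of the new scheme, here is a minimal, self-contained userspace model (hypothetical demo code, not part of the patch): several CPUs may notice that ->qlen has overflowed, but only the first one past the 'signaled' flag sends the resched IPIs, and rcu_start_batch() re-arms the flag for the next batch.

	/* Hypothetical userspace model of the 'signaled' gating; names
	 * mirror the patch, but this is a sketch, not kernel code. */
	#include <stdio.h>

	struct rcu_ctrlblk_model { int signaled; };

	static struct rcu_ctrlblk_model rcp;

	static void force_quiescent_state_model(int cpu)
	{
		if (!rcp.signaled) {
			/* first CPU to notice overflow since the last batch */
			rcp.signaled = 1;
			printf("cpu %d: sending resched IPIs\n", cpu);
		} else {
			printf("cpu %d: IPI already sent, skipping\n", cpu);
		}
	}

	static void rcu_start_batch_model(void)
	{
		/* a new batch may need a fresh round of IPIs */
		rcp.signaled = 0;
	}

	int main(void)
	{
		force_quiescent_state_model(0);	/* sends */
		force_quiescent_state_model(1);	/* skips: already signaled */
		rcu_start_batch_model();
		force_quiescent_state_model(2);	/* sends again, new batch */
		return 0;
	}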
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--	kernel/rcupdate.c	11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 523e46483b99..26bb5ffe1ef1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -71,9 +71,6 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
 static int blimit = 10;
 static int qhimark = 10000;
 static int qlowmark = 100;
-#ifdef CONFIG_SMP
-static int rsinterval = 1000;
-#endif
 
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -86,8 +83,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
 	int cpu;
 	cpumask_t cpumask;
 	set_need_resched();
-	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
-		rdp->last_rs_qlen = rdp->qlen;
+	if (unlikely(!rcp->signaled)) {
+		rcp->signaled = 1;
 		/*
 		 * Don't send IPI to itself. With irqs disabled,
 		 * rdp->cpu is the current cpu.
@@ -301,6 +298,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 		smp_mb();
 		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
 
+		rcp->signaled = 0;
 	}
 }
 
@@ -628,9 +626,6 @@ void synchronize_rcu(void)
 module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);
-#ifdef CONFIG_SMP
-module_param(rsinterval, int, 0);
-#endif
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 EXPORT_SYMBOL_GPL(call_rcu);
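Note that the diffstat above is limited to kernel/rcupdate.c; the new flag itself has to live in struct rcu_ctrlblk, declared in include/linux/rcupdate.h, presumably along these lines (field list abbreviated and placement assumed):

	struct rcu_ctrlblk {
		long	cur;		/* current batch number */
		long	completed;	/* number of the last completed batch */
		int	next_pending;	/* is the next batch already waiting? */
		int	signaled;	/* resched IPI sent since last rcu_start_batch()? */
		/* ... lock, cpumask, etc. ... */
	};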