diff options
| author | Oleg Nesterov <oleg@tv-sign.ru> | 2006-10-04 05:17:17 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-04 10:55:31 -0400 |
| commit | 20e9751bd9dd6b832fd84ada27840360f7e877f1 (patch) | |
| tree | c598ff9cb9e540da7f8595f2c130f3674065e018 /kernel | |
| parent | 4b6c2cca6eef9cc4a15350bf1c61839e12e08b84 (diff) | |
[PATCH] rcu: simplify/improve batch tuning
Kill a hard-to-calculate 'rsinterval' boot parameter and per-cpu
rcu_data.last_rs_qlen. Instead, it adds a flag rcu_ctrlblk.signaled,
which records the fact that one of CPUs has sent a resched IPI since the
last rcu_start_batch().
Roughly speaking, we need two rcu_start_batch()s in order to move callbacks
from ->nxtlist to ->donelist. This means that when ->qlen exceeds qhimark
and continues to grow, we should send a resched IPI, and then do it again
after we have gone through a quiescent state.
On the other hand, if it was already sent, we don't need to do it again
when another CPU detects overflow of the queue.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/rcupdate.c | 11 |
1 files changed, 3 insertions, 8 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 523e46483b99..26bb5ffe1ef1 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -71,9 +71,6 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; | |||
| 71 | static int blimit = 10; | 71 | static int blimit = 10; |
| 72 | static int qhimark = 10000; | 72 | static int qhimark = 10000; |
| 73 | static int qlowmark = 100; | 73 | static int qlowmark = 100; |
| 74 | #ifdef CONFIG_SMP | ||
| 75 | static int rsinterval = 1000; | ||
| 76 | #endif | ||
| 77 | 74 | ||
| 78 | static atomic_t rcu_barrier_cpu_count; | 75 | static atomic_t rcu_barrier_cpu_count; |
| 79 | static DEFINE_MUTEX(rcu_barrier_mutex); | 76 | static DEFINE_MUTEX(rcu_barrier_mutex); |
| @@ -86,8 +83,8 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 86 | int cpu; | 83 | int cpu; |
| 87 | cpumask_t cpumask; | 84 | cpumask_t cpumask; |
| 88 | set_need_resched(); | 85 | set_need_resched(); |
| 89 | if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) { | 86 | if (unlikely(!rcp->signaled)) { |
| 90 | rdp->last_rs_qlen = rdp->qlen; | 87 | rcp->signaled = 1; |
| 91 | /* | 88 | /* |
| 92 | * Don't send IPI to itself. With irqs disabled, | 89 | * Don't send IPI to itself. With irqs disabled, |
| 93 | * rdp->cpu is the current cpu. | 90 | * rdp->cpu is the current cpu. |
| @@ -301,6 +298,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) | |||
| 301 | smp_mb(); | 298 | smp_mb(); |
| 302 | cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); | 299 | cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); |
| 303 | 300 | ||
| 301 | rcp->signaled = 0; | ||
| 304 | } | 302 | } |
| 305 | } | 303 | } |
| 306 | 304 | ||
| @@ -628,9 +626,6 @@ void synchronize_rcu(void) | |||
| 628 | module_param(blimit, int, 0); | 626 | module_param(blimit, int, 0); |
| 629 | module_param(qhimark, int, 0); | 627 | module_param(qhimark, int, 0); |
| 630 | module_param(qlowmark, int, 0); | 628 | module_param(qlowmark, int, 0); |
| 631 | #ifdef CONFIG_SMP | ||
| 632 | module_param(rsinterval, int, 0); | ||
| 633 | #endif | ||
| 634 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | 629 | EXPORT_SYMBOL_GPL(rcu_batches_completed); |
| 635 | EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | 630 | EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); |
| 636 | EXPORT_SYMBOL_GPL(call_rcu); | 631 | EXPORT_SYMBOL_GPL(call_rcu); |
