author		Mike Galbraith <efault@gmx.de>		2010-03-11 11:17:13 -0500
committer	Ingo Molnar <mingo@elte.hu>		2010-03-11 12:32:49 -0500
commit		39c0cbe2150cbd848a25ba6cdb271d1ad46818ad
tree		7b9c356b39a2b50219398ce534d7d64e7ab4bf06 /kernel
parent		41acab8851a0408c1d5ad6c21a07456f88b54d40
sched: Rate-limit nohz
Entering nohz code on every micro-idle is costing ~10% throughput for netperf
TCP_RR when scheduling cross-cpu.  Rate limiting entry fixes this, but raises
ticks a bit.  On my Q6600, an idle box goes from ~85 interrupts/sec to 128.

The higher the context switch rate, the more nohz entry costs.  With this patch
and some cycle recovery patches in my tree, max cross cpu context switch rate is
improved by ~16%, a large portion of which is this ratelimiting.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301003.6785.28.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
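The check the patch adds can be read in isolation: skip nohz entry if this CPU
last attempted it less than half a tick period ago.  A minimal standalone sketch
of the same idea (not the kernel code itself): HZ=1000 and the caller-supplied
nanosecond timestamps are assumptions for illustration; the real patch uses the
per-runqueue rq->clock and rq->nohz_stamp instead of a global.

/*
 * Sketch only: single global stamp instead of per-runqueue state,
 * and the clock is passed in by the caller.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ           1000		/* assumed tick rate */
#define NSEC_PER_SEC 1000000000ULL

static uint64_t nohz_stamp;

static int nohz_ratelimit(uint64_t now)
{
	uint64_t diff = now - nohz_stamp;

	nohz_stamp = now;

	/*
	 * Throttle if we got here again within half a tick:
	 * (NSEC_PER_SEC / HZ) >> 1 = 500,000 ns at HZ=1000.
	 */
	return diff < (NSEC_PER_SEC / HZ) >> 1;
}

int main(void)
{
	/* Two attempts 100 us apart: the second one is throttled. */
	printf("%d\n", nohz_ratelimit(1000000));	/* 0: allowed */
	printf("%d\n", nohz_ratelimit(1100000));	/* 1: throttled */
	return 0;
}

Because the stamp is refreshed on every call, a CPU bouncing in and out of idle
faster than half a tick keeps being throttled until the churn subsides.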
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c			12
-rw-r--r--	kernel/time/tick-sched.c	3
2 files changed, 15 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a4aa071f08f3..60b1bbe2ad1b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -492,6 +492,7 @@ struct rq {
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
+	u64 nohz_stamp;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
@@ -1228,6 +1229,17 @@ void wake_up_idle_cpu(int cpu)
 	if (!tsk_is_polling(rq->idle))
 		smp_send_reschedule(cpu);
 }
+
+int nohz_ratelimit(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 diff = rq->clock - rq->nohz_stamp;
+
+	rq->nohz_stamp = rq->clock;
+
+	return diff < (NSEC_PER_SEC / HZ) >> 1;
+}
+
 #endif /* CONFIG_NO_HZ */
 
 static u64 sched_avg_period(void)
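Two things worth noting about nohz_ratelimit() above.  The threshold
(NSEC_PER_SEC / HZ) >> 1 is half a tick period in nanoseconds: 500,000 ns at
HZ=1000, 2,000,000 ns at HZ=250.  Since C's shift operator binds tighter than
'<', the return expression parses as diff < ((NSEC_PER_SEC / HZ) >> 1), which is
the intended comparison.  The stamp is also updated unconditionally, so each
rapid re-entry pushes the window forward and stays throttled.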
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f992762d7f51..f25735a767af 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -262,6 +262,9 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 	}
 
+	if (nohz_ratelimit(cpu))
+		goto end;
+
 	ts->idle_calls++;
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
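Placement of the new check matters: it runs before the ts->idle_calls++
accounting and before the jiffies sampling loop that follows, so a throttled
attempt jumps straight to the function's end label, skipping the tick-stop and
clockevent reprogramming work entirely and never being counted as an idle call.
That skipped work is where the per-micro-idle cost cited in the changelog is
spent.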