path: root/include
author    Mike Galbraith <efault@gmx.de>    2010-03-11 11:17:13 -0500
committer Ingo Molnar <mingo@elte.hu>       2010-03-11 12:32:49 -0500
commit    39c0cbe2150cbd848a25ba6cdb271d1ad46818ad (patch)
tree      7b9c356b39a2b50219398ce534d7d64e7ab4bf06 /include
parent    41acab8851a0408c1d5ad6c21a07456f88b54d40 (diff)
sched: Rate-limit nohz
Entering nohz code on every micro-idle costs ~10% throughput for netperf TCP_RR when scheduling cross-cpu. Rate-limiting entry fixes this, but raises ticks a bit. On my Q6600, an idle box goes from ~85 interrupts/sec to 128. The higher the context switch rate, the more nohz entry costs. With this patch and some cycle recovery patches in my tree, max cross-cpu context switch rate is improved by ~16%, a large portion of which is this ratelimiting.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301003.6785.28.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
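The idea is simple enough to sketch in a few lines: remember when each CPU last attempted nohz entry, and skip entry again if less than half a tick period has elapsed. The following is a minimal standalone illustration of that idea, not the kernel implementation (which is outside the include/ diff shown on this page); all names here (NOHZ_MIN_INTERVAL_NS, NR_CPUS_SKETCH, last_nohz_entry_ns, nohz_ratelimited) are illustrative.

/* Standalone sketch of the rate-limiting idea; compiles as plain C. */
#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC          1000000000ULL
#define HZ                    1000ULL                    /* illustrative; HZ is a kernel config option */
#define NOHZ_MIN_INTERVAL_NS  ((NSEC_PER_SEC / HZ) / 2)  /* half a tick period */
#define NR_CPUS_SKETCH        64                         /* illustrative CPU count */

static uint64_t last_nohz_entry_ns[NR_CPUS_SKETCH];

/* Return true if this CPU attempted nohz entry too recently and should skip it. */
static bool nohz_ratelimited(int cpu, uint64_t now_ns)
{
	uint64_t delta = now_ns - last_nohz_entry_ns[cpu];

	last_nohz_entry_ns[cpu] = now_ns;
	return delta < NOHZ_MIN_INTERVAL_NS;
}

A caller in the idle path would keep the periodic tick whenever this returns true, trading a few extra interrupts per second for skipping the nohz enter/exit work on every micro-idle.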
Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h | 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cc863d66477..13efe7dac5fa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -271,11 +271,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
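The hunk above only adds the declaration and a stub that always returns 0 when SMP or NO_HZ is disabled; the real implementation and its caller live outside include/ and are not shown in this view. As a hedged sketch of how the declared nohz_ratelimit() might be consulted in the tick-stop path (the surrounding function name here is illustrative, not the actual tick code):

/* Illustrative caller, assuming only the declaration added above. */
static void sketch_stop_tick(int cpu)
{
	if (nohz_ratelimit(cpu))
		return;	/* entered nohz too recently: keep the periodic tick */

	/* ...otherwise continue with the usual nohz tick-stop path... */
}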