path: root/include/linux/sched.h
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-10-17 13:27:02 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-10-20 08:05:02 -0400
commit    ffda12a17a324103e9900fa1035309811eecbfe5 (patch)
tree      79fe8aae79a41b467f2cdd055036b3017642a9f6 /include/linux/sched.h
parent    b0aa51b999c449e5e3f9faa1ee406e052d407fe7 (diff)
sched: optimize group load balancer
I noticed that tg_shares_up() unconditionally takes rq-locks for all cpus in the sched_domain. This hurts.

We need the rq-locks whenever we change the weight of the per-cpu group sched entities. To alleviate this a little, only change the weight when the new weight is at least shares_thresh away from the old value.

This avoids the rq-lock for the top level entries, since those will never be re-weighted, and fuzzes the lower level entries a little to gain performance in semi-stable situations.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
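For context, a minimal sketch of the check described above, as it could look at the point where a per-cpu group sched entity is re-weighted. The hunk in this page only covers the sched.h declaration, so the function name, arguments and field accesses here are illustrative of the described approach, not the exact patch body:

/*
 * Only take the rq-lock and re-weight the per-cpu group sched entity
 * when the newly computed shares differ from the current weight by
 * more than sysctl_sched_shares_thresh.
 */
static void update_group_shares_cpu(struct task_group *tg, int cpu,
				    unsigned long shares)
{
	if (abs(shares - tg->se[cpu]->load.weight) >
			sysctl_sched_shares_thresh) {
		struct rq *rq = cpu_rq(cpu);
		unsigned long flags;

		spin_lock_irqsave(&rq->lock, flags);
		tg->cfs_rq[cpu]->shares = shares;
		__set_se_shares(tg->se[cpu], shares);
		spin_unlock_irqrestore(&rq->lock, flags);
	}
}

Top-level entries keep a fixed weight, so they never pass the threshold and never pay for the lock; lower-level entries are only re-weighted when the change is large enough to matter.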
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6eda6ad735dc..4f59c8e8597d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1621,6 +1621,7 @@ extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
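The declaration added above pairs with a runtime tunable. As a sketch of how such a knob is typically wired up in kernel/sysctl.c (assumed for illustration, not part of this hunk), exposing it as /proc/sys/kernel/sched_shares_thresh:

/* Illustrative ctl_table entry; the exact entry is not shown in this diff. */
static struct ctl_table sched_shares_thresh_entry[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sched_shares_thresh",
		.data		= &sysctl_sched_shares_thresh,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ }
};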