about summary refs log tree commit diff stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorPaul Turner <pjt@google.com>2010-11-15 18:47:06 -0500
committerIngo Molnar <mingo@elte.hu>2010-11-18 07:27:49 -0500
commita7a4f8a752ec734b2eab904fc863d5dc873de338 (patch)
tree18b69c4cc0fc10cf6f0fe429308b25086942d921 /kernel/sched_fair.c
parent67e86250f8ea7b8f7da53ac25ea73c6bd71f5cd9 (diff)
sched: Add sysctl_sched_shares_window
Introduce a new sysctl for the shares window and disambiguate it from
sched_time_avg. A 10ms window appears to be a good compromise between
accuracy and performance.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.112173964@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b320753aa6c9..6c84439ce987 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -89,6 +89,13 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+/*
+ * The exponential sliding window over which load is averaged for shares
+ * distribution.
+ * (default: 10msec)
+ */
+unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -688,7 +695,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
-	u64 period = sched_avg_period();
+	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 