diff options
author | Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 2009-11-30 06:16:46 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-12-09 04:03:58 -0500 |
commit | 0bcdcf28c979869f44e05121b96ff2cfb05bd8e6 (patch) | |
tree | c70b8fb37fec9badf95ac2ea679407334f580ae5 /kernel/sched.c | |
parent | 57785df5ac53c70da9fb53696130f3c551bfe1f9 (diff) |
sched: Fix missing sched tunable recalculation on cpu add/remove
Based on Peter Zijlstra's patch suggestion, this enables recalculation of
the scheduler tunables in response to a change in the number of cpus. It
also caps at eight the number of cpus that are considered in that scaling.
Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1259579808-11357-2-git-send-email-ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 29 |
1 files changed, 16 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 3878f5018007..b54ecf84b6be 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
814 | * default: 0.25ms | 814 | * default: 0.25ms |
815 | */ | 815 | */ |
816 | unsigned int sysctl_sched_shares_ratelimit = 250000; | 816 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
817 | unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; | ||
817 | 818 | ||
818 | /* | 819 | /* |
819 | * Inject some fuzzyness into changing the per-cpu group shares | 820 | * Inject some fuzzyness into changing the per-cpu group shares |
@@ -1814,6 +1815,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
1814 | #endif | 1815 | #endif |
1815 | 1816 | ||
1816 | static void calc_load_account_active(struct rq *this_rq); | 1817 | static void calc_load_account_active(struct rq *this_rq); |
1818 | static void update_sysctl(void); | ||
1817 | 1819 | ||
1818 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | 1820 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
1819 | { | 1821 | { |
@@ -7028,22 +7030,23 @@ cpumask_var_t nohz_cpu_mask; | |||
7028 | * | 7030 | * |
7029 | * This idea comes from the SD scheduler of Con Kolivas: | 7031 | * This idea comes from the SD scheduler of Con Kolivas: |
7030 | */ | 7032 | */ |
7031 | static inline void sched_init_granularity(void) | 7033 | static void update_sysctl(void) |
7032 | { | 7034 | { |
7033 | unsigned int factor = 1 + ilog2(num_online_cpus()); | 7035 | unsigned int cpus = min(num_online_cpus(), 8U); |
7034 | const unsigned long limit = 200000000; | 7036 | unsigned int factor = 1 + ilog2(cpus); |
7035 | |||
7036 | sysctl_sched_min_granularity *= factor; | ||
7037 | if (sysctl_sched_min_granularity > limit) | ||
7038 | sysctl_sched_min_granularity = limit; | ||
7039 | |||
7040 | sysctl_sched_latency *= factor; | ||
7041 | if (sysctl_sched_latency > limit) | ||
7042 | sysctl_sched_latency = limit; | ||
7043 | 7037 | ||
7044 | sysctl_sched_wakeup_granularity *= factor; | 7038 | #define SET_SYSCTL(name) \ |
7039 | (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
7040 | SET_SYSCTL(sched_min_granularity); | ||
7041 | SET_SYSCTL(sched_latency); | ||
7042 | SET_SYSCTL(sched_wakeup_granularity); | ||
7043 | SET_SYSCTL(sched_shares_ratelimit); | ||
7044 | #undef SET_SYSCTL | ||
7045 | } | ||
7045 | 7046 | ||
7046 | sysctl_sched_shares_ratelimit *= factor; | 7047 | static inline void sched_init_granularity(void) |
7048 | { | ||
7049 | update_sysctl(); | ||
7047 | } | 7050 | } |
7048 | 7051 | ||
7049 | #ifdef CONFIG_SMP | 7052 | #ifdef CONFIG_SMP |