author	Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>	2009-11-30 06:16:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-09 04:03:58 -0500
commit	0bcdcf28c979869f44e05121b96ff2cfb05bd8e6 (patch)
tree	c70b8fb37fec9badf95ac2ea679407334f580ae5 /kernel
parent	57785df5ac53c70da9fb53696130f3c551bfe1f9 (diff)
sched: Fix missing sched tunable recalculation on cpu add/remove
Based on Peter Zijlstra's patch suggestion, this enables recalculation
of the scheduler tunables in response to a change in the number of cpus.
It also caps the number of cpus considered in that scaling at eight.

Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1259579808-11357-2-git-send-email-ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
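For a feel of the numbers: the multiplier applied to each tunable's normalized default is 1 + ilog2(min(online_cpus, 8)), so scaling stops growing past eight cpus. A minimal userspace sketch of that rule (the ilog2() helper below stands in for the kernel's; only sched_latency is shown):

	#include <stdio.h>

	/* floor(log2(x)) for x >= 1; stand-in for the kernel's ilog2() */
	static unsigned int ilog2(unsigned int x)
	{
		unsigned int r = 0;
		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const unsigned int normalized_latency = 5000000;	/* 5 ms default */

		for (unsigned int online = 1; online <= 64; online *= 2) {
			unsigned int cpus = online < 8 ? online : 8;	/* min(num_online_cpus(), 8U) */
			unsigned int factor = 1 + ilog2(cpus);

			printf("%2u online cpus -> factor %u -> sched_latency %u ns\n",
			       online, factor, factor * normalized_latency);
		}
		return 0;
	}

With the 5 ms normalized latency this prints 5 ms for one cpu, 20 ms at eight cpus, and stays at 20 ms for any larger machine.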
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	29
-rw-r--r--	kernel/sched_fair.c	16
2 files changed, 32 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3878f5018007..b54ecf84b6be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -1814,6 +1815,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
@@ -7028,22 +7030,23 @@ cpumask_var_t nohz_cpu_mask;
  *
  * This idea comes from the SD scheduler of Con Kolivas:
  */
-static inline void sched_init_granularity(void)
+static void update_sysctl(void)
 {
-	unsigned int factor = 1 + ilog2(num_online_cpus());
-	const unsigned long limit = 200000000;
-
-	sysctl_sched_min_granularity *= factor;
-	if (sysctl_sched_min_granularity > limit)
-		sysctl_sched_min_granularity = limit;
-
-	sysctl_sched_latency *= factor;
-	if (sysctl_sched_latency > limit)
-		sysctl_sched_latency = limit;
+	unsigned int cpus = min(num_online_cpus(), 8U);
+	unsigned int factor = 1 + ilog2(cpus);
 
-	sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+	(sysctl_##name = (factor) * normalized_sysctl_##name)
+	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_latency);
+	SET_SYSCTL(sched_wakeup_granularity);
+	SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
 
-	sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+	update_sysctl();
 }
 
 #ifdef CONFIG_SMP
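The SET_SYSCTL() macro above works by token pasting: ##name splices the tunable's name into both the sysctl_ variable being assigned and the normalized_sysctl_ baseline being read, so one macro covers all four tunables. A standalone sketch of the expansion, assuming a factor of 3 (four to seven online cpus); names mirror the kernel's but this is userspace illustration, not kernel code:

	#include <stdio.h>

	unsigned int sysctl_sched_latency;
	unsigned int normalized_sysctl_sched_latency = 5000000;

	int main(void)
	{
		unsigned int factor = 3;	/* e.g. four online cpus: 1 + ilog2(4) */

	/* SET_SYSCTL(sched_latency) expands to the plain assignment
	 * (sysctl_sched_latency = (factor) * normalized_sysctl_sched_latency) */
	#define SET_SYSCTL(name) \
		(sysctl_##name = (factor) * normalized_sysctl_##name)
		SET_SYSCTL(sched_latency);
	#undef SET_SYSCTL

		printf("sysctl_sched_latency = %u ns\n", sysctl_sched_latency);
		return 0;
	}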
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c163a285bf05..71b3458245e5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,12 +35,14 @@
  * run vmstat and monitor the context-switches (cs) field)
  */
 unsigned int sysctl_sched_latency = 5000000ULL;
+unsigned int normalized_sysctl_sched_latency = 5000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
 unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
@@ -70,6 +72,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
  * have immediate wakeup/sleep latencies.
  */
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
@@ -1890,6 +1893,17 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 	return 0;
 }
+
+static void rq_online_fair(struct rq *rq)
+{
+	update_sysctl();
+}
+
+static void rq_offline_fair(struct rq *rq)
+{
+	update_sysctl();
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -2035,6 +2049,8 @@ static const struct sched_class fair_sched_class = {
 
 	.load_balance		= load_balance_fair,
 	.move_one_task		= move_one_task_fair,
+	.rq_online		= rq_online_fair,
+	.rq_offline		= rq_offline_fair,
 #endif
 
 	.set_curr_task		= set_curr_task_fair,
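A note on the design: the old sched_init_granularity() scaled the live sysctl values in place, which is only safe to run once at boot; re-running it on every hotplug event would compound the factor. Keeping normalized_sysctl_* baselines makes update_sysctl() idempotent, so rq_online_fair()/rq_offline_fair() can invoke it on every cpu add/remove. A userspace sketch of the difference (the hotplug sequence here is invented for illustration):

	#include <stdio.h>

	static unsigned int ilog2(unsigned int x)	/* floor(log2(x)), x >= 1 */
	{
		unsigned int r = 0;
		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const unsigned int normalized = 5000000;	/* fixed baseline, 5 ms */
		unsigned int in_place = 5000000;		/* old scheme: scaled in place */
		unsigned int online_seq[] = { 4, 8, 2 };	/* hypothetical hotplug events */

		for (int i = 0; i < 3; i++) {
			unsigned int cpus = online_seq[i] < 8 ? online_seq[i] : 8;
			unsigned int factor = 1 + ilog2(cpus);
			unsigned int recomputed = factor * normalized;	/* update_sysctl() style */

			in_place *= factor;	/* compounds across events */
			printf("%u cpus: in-place %u ns vs recomputed %u ns\n",
			       online_seq[i], in_place, recomputed);
		}
		return 0;
	}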