diff options
author | Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 2009-11-30 06:16:46 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-12-09 04:03:58 -0500 |
commit | 0bcdcf28c979869f44e05121b96ff2cfb05bd8e6 (patch) | |
tree | c70b8fb37fec9badf95ac2ea679407334f580ae5 /kernel/sched_fair.c | |
parent | 57785df5ac53c70da9fb53696130f3c551bfe1f9 (diff) |
sched: Fix missing sched tunable recalculation on cpu add/remove
Based on Peter Zijlstra's patch suggestion, this enables recalculation of
the scheduler tunables in response to a change in the number of cpus. It
also adds a max of eight cpus that are considered in that scaling.
Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1259579808-11357-2-git-send-email-ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index c163a285bf05..71b3458245e5 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -35,12 +35,14 @@ | |||
35 | * run vmstat and monitor the context-switches (cs) field) | 35 | * run vmstat and monitor the context-switches (cs) field) |
36 | */ | 36 | */ |
37 | unsigned int sysctl_sched_latency = 5000000ULL; | 37 | unsigned int sysctl_sched_latency = 5000000ULL; |
38 | unsigned int normalized_sysctl_sched_latency = 5000000ULL; | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Minimal preemption granularity for CPU-bound tasks: | 41 | * Minimal preemption granularity for CPU-bound tasks: |
41 | * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) | 42 | * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) |
42 | */ | 43 | */ |
43 | unsigned int sysctl_sched_min_granularity = 1000000ULL; | 44 | unsigned int sysctl_sched_min_granularity = 1000000ULL; |
45 | unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL; | ||
44 | 46 | ||
45 | /* | 47 | /* |
46 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 48 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity |
@@ -70,6 +72,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield; | |||
70 | * have immediate wakeup/sleep latencies. | 72 | * have immediate wakeup/sleep latencies. |
71 | */ | 73 | */ |
72 | unsigned int sysctl_sched_wakeup_granularity = 1000000UL; | 74 | unsigned int sysctl_sched_wakeup_granularity = 1000000UL; |
75 | unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; | ||
73 | 76 | ||
74 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | 77 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; |
75 | 78 | ||
@@ -1890,6 +1893,17 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1890 | 1893 | ||
1891 | return 0; | 1894 | return 0; |
1892 | } | 1895 | } |
1896 | |||
1897 | static void rq_online_fair(struct rq *rq) | ||
1898 | { | ||
1899 | update_sysctl(); | ||
1900 | } | ||
1901 | |||
1902 | static void rq_offline_fair(struct rq *rq) | ||
1903 | { | ||
1904 | update_sysctl(); | ||
1905 | } | ||
1906 | |||
1893 | #endif /* CONFIG_SMP */ | 1907 | #endif /* CONFIG_SMP */ |
1894 | 1908 | ||
1895 | /* | 1909 | /* |
@@ -2035,6 +2049,8 @@ static const struct sched_class fair_sched_class = { | |||
2035 | 2049 | ||
2036 | .load_balance = load_balance_fair, | 2050 | .load_balance = load_balance_fair, |
2037 | .move_one_task = move_one_task_fair, | 2051 | .move_one_task = move_one_task_fair, |
2052 | .rq_online = rq_online_fair, | ||
2053 | .rq_offline = rq_offline_fair, | ||
2038 | #endif | 2054 | #endif |
2039 | 2055 | ||
2040 | .set_curr_task = set_curr_task_fair, | 2056 | .set_curr_task = set_curr_task_fair, |