diff options
author | Ingo Molnar <mingo@elte.hu> | 2007-11-09 16:39:38 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-11-09 16:39:38 -0500 |
commit | 19978ca610946ed57c071bad63f8f6642ca1298b (patch) | |
tree | 3bdbe89a249b6091fbd71653f106f486337ca3d6 /kernel/sched.c | |
parent | fa13a5a1f25f671d084d8884be96fc48d9b68275 (diff) |
sched: reintroduce SMP tunings again
Yanmin Zhang reported an aim7 regression and bisected it down to:
| commit 38ad464d410dadceda1563f36bdb0be7fe4c8938
| Author: Ingo Molnar <mingo@elte.hu>
| Date: Mon Oct 15 17:00:02 2007 +0200
|
| sched: uniform tunings
|
| use the same defaults on both UP and SMP.
Fix this by reintroducing similar SMP tunings again. This resolves
the regression.
(Also update the comments to match the ilog2(nr_cpus) tuning effect.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 28 |
1 files changed, 28 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 3f6bd1112900..69cae271c63b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4992,6 +4992,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
4992 | */ | 4992 | */ |
4993 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 4993 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; |
4994 | 4994 | ||
4995 | /* | ||
4996 | * Increase the granularity value when there are more CPUs, | ||
4997 | * because with more CPUs the 'effective latency' as visible | ||
4998 | * to users decreases. But the relationship is not linear, | ||
4999 | * so pick a second-best guess by going with the log2 of the | ||
5000 | * number of CPUs. | ||
5001 | * | ||
5002 | * This idea comes from the SD scheduler of Con Kolivas: | ||
5003 | */ | ||
5004 | static inline void sched_init_granularity(void) | ||
5005 | { | ||
5006 | unsigned int factor = 1 + ilog2(num_online_cpus()); | ||
5007 | const unsigned long limit = 200000000; | ||
5008 | |||
5009 | sysctl_sched_min_granularity *= factor; | ||
5010 | if (sysctl_sched_min_granularity > limit) | ||
5011 | sysctl_sched_min_granularity = limit; | ||
5012 | |||
5013 | sysctl_sched_latency *= factor; | ||
5014 | if (sysctl_sched_latency > limit) | ||
5015 | sysctl_sched_latency = limit; | ||
5016 | |||
5017 | sysctl_sched_wakeup_granularity *= factor; | ||
5018 | sysctl_sched_batch_wakeup_granularity *= factor; | ||
5019 | } | ||
5020 | |||
4995 | #ifdef CONFIG_SMP | 5021 | #ifdef CONFIG_SMP |
4996 | /* | 5022 | /* |
4997 | * This is how migration works: | 5023 | * This is how migration works: |
@@ -6688,10 +6714,12 @@ void __init sched_init_smp(void) | |||
6688 | /* Move init over to a non-isolated CPU */ | 6714 | /* Move init over to a non-isolated CPU */ |
6689 | if (set_cpus_allowed(current, non_isolated_cpus) < 0) | 6715 | if (set_cpus_allowed(current, non_isolated_cpus) < 0) |
6690 | BUG(); | 6716 | BUG(); |
6717 | sched_init_granularity(); | ||
6691 | } | 6718 | } |
6692 | #else | 6719 | #else |
6693 | void __init sched_init_smp(void) | 6720 | void __init sched_init_smp(void) |
6694 | { | 6721 | { |
6722 | sched_init_granularity(); | ||
6695 | } | 6723 | } |
6696 | #endif /* CONFIG_SMP */ | 6724 | #endif /* CONFIG_SMP */ |
6697 | 6725 | ||