 kernel/sched.c      | 28 ++++++++++++++++++++++++++++
 kernel/sched_fair.c | 18 +++++++++---------
 2 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f6bd1112900..69cae271c63b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4992,6 +4992,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  */
 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+	unsigned int factor = 1 + ilog2(num_online_cpus());
+	const unsigned long limit = 200000000;
+
+	sysctl_sched_min_granularity *= factor;
+	if (sysctl_sched_min_granularity > limit)
+		sysctl_sched_min_granularity = limit;
+
+	sysctl_sched_latency *= factor;
+	if (sysctl_sched_latency > limit)
+		sysctl_sched_latency = limit;
+
+	sysctl_sched_wakeup_granularity *= factor;
+	sysctl_sched_batch_wakeup_granularity *= factor;
+}
+
 #ifdef CONFIG_SMP
 /*
  * This is how migration works:
@@ -6688,10 +6714,12 @@ void __init sched_init_smp(void)
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
 		BUG();
+	sched_init_granularity();
 }
 #else
 void __init sched_init_smp(void)
 {
+	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
 
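To make the effect of the sched_init_granularity() hunk above concrete: the scaling factor is 1 + floor(log2(online CPUs)), each tunable is multiplied by it, and the latency and minimum-granularity values are clamped at 200 ms. The following user-space sketch reproduces that arithmetic so the resulting defaults can be inspected; the program itself, the ilog2_u() helper and the tabulated CPU counts are illustrative assumptions, not part of the patch.

/* granularity_scale.c - hypothetical user-space illustration, not kernel code */
#include <stdio.h>

/* floor(log2(n)) for n >= 1; stands in for the kernel's ilog2() */
static unsigned int ilog2_u(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	const unsigned long limit = 200000000UL;        /* 200 ms cap, in ns */
	const unsigned int cpus[] = { 1, 2, 4, 16, 64, 1024 };

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		unsigned int factor = 1 + ilog2_u(cpus[i]);
		unsigned long latency = 20000000UL * factor;    /* 20 ms base */
		unsigned long min_gran = 1000000UL * factor;    /*  1 ms base */

		if (latency > limit)
			latency = limit;
		if (min_gran > limit)
			min_gran = limit;

		printf("%4u CPUs: factor %2u -> latency %3lu ms, min granularity %2lu ms\n",
		       cpus[i], factor, latency / 1000000UL, min_gran / 1000000UL);
	}
	return 0;
}

With the 20 ms base, the scaled latency only reaches the 200 ms cap at a factor of 10, i.e. around 512 online CPUs; on smaller machines the defaults simply grow with the log2 of the CPU count.
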
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c495dcf7031b..7264814ba62a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms, units: nanoseconds)
+ * (default: 20ms * ilog(ncpus), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -32,18 +32,18 @@
  * (to see the precise effective timeslice length of your workload,
  * run vmstat and monitor the context-switches (cs) field)
  */
-const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec, units: nanoseconds)
+ * (default: 1 msec * ilog(ncpus), units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-const_debug unsigned int sched_nr_latency = 20;
+unsigned int sched_nr_latency = 20;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
@@ -61,23 +61,23 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * ilog(ncpus), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * ilog(ncpus), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
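The kernel/sched_fair.c half of the patch drops the const_debug qualifier from these tunables, presumably so that sched_init_granularity() can write to them at boot (const_debug marks a variable read-only unless scheduler debugging is enabled). Note that sched_nr_latency itself is not rescaled: multiplying the latency and the minimum granularity by the same factor leaves their ratio unchanged, so it stays at 20 as long as neither value hits the cap. A tiny stand-alone check of that invariant (hypothetical user-space code, not part of the patch):

/* ratio_check.c - hypothetical user-space check, not part of the patch */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long latency = 20000000UL;   /* 20 ms default, in ns */
	const unsigned long min_gran = 1000000UL;   /*  1 ms default, in ns */
	const unsigned long nr_latency = latency / min_gran;   /* 20 */

	/*
	 * Below the 200 ms cap, both values are multiplied by the same
	 * factor, so the ratio that sched_nr_latency mirrors is unchanged.
	 */
	for (unsigned long factor = 1; factor <= 10; factor++)
		assert((latency * factor) / (min_gran * factor) == nr_latency);

	printf("ratio stays at %lu for factors 1..10\n", nr_latency);
	return 0;
}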
