| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2007-10-15 11:00:12 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 11:00:12 -0400 |
| commit | 5f6d858ecca78f71755859a346d845e302973cd1 (patch) | |
| tree | 5422f6ff9aa7ffab4c708432de84a79e52e503d9 /kernel/sched_fair.c | |
| parent | b0ffd246ea947a037746e725bd461bb7e809a4b3 (diff) | |
sched: speed up and simplify vslice calculations
speed up and simplify vslice calculations.
[ From: Mike Galbraith <efault@gmx.de>: build fix ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 15 |
1 files changed, 9 insertions, 6 deletions
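As context for the hunks below: before this change, `__sched_period()` recomputed the latency/granularity ratio with a division on every invocation; afterwards the ratio is itself the tunable, `sysctl_sched_nr_latency`, read directly. A minimal userspace sketch of just that substitution (the sysctl values are placeholders chosen for illustration, not asserted to be the tree's defaults):

```c
#include <stdio.h>

/* Placeholder values for illustration only; not the tree's actual defaults. */
static unsigned int sysctl_sched_latency         = 20000000; /* ns */
static unsigned int sysctl_sched_min_granularity =  1000000; /* ns */
static unsigned int sysctl_sched_nr_latency      = 20;

int main(void)
{
        /* old: one extra division on every __sched_period() call */
        unsigned long nr_latency_old =
                sysctl_sched_latency / sysctl_sched_min_granularity;

        /* new: the ratio is the tunable itself, a plain load */
        unsigned long nr_latency_new = sysctl_sched_nr_latency;

        printf("old ratio: %lu, new tunable: %lu\n", nr_latency_old, nr_latency_new);
        return 0;
}
```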
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ec0569e59e24..ae2d4b08e782 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -46,7 +46,7 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec, units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+const_debug unsigned int sysctl_sched_nr_latency = 20;
 
 /*
  * sys_sched_yield() compat mode
@@ -222,8 +222,7 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 static u64 __sched_period(unsigned long nr_running)
 {
         u64 period = sysctl_sched_latency;
-        unsigned long nr_latency =
-                sysctl_sched_latency / sysctl_sched_min_granularity;
+        unsigned long nr_latency = sysctl_sched_nr_latency;
 
         if (unlikely(nr_running > nr_latency)) {
                 period *= nr_running;
@@ -245,11 +244,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static u64 __sched_vslice(unsigned long nr_running)
 {
-        u64 period = __sched_period(nr_running);
+        unsigned long period = sysctl_sched_latency;
+        unsigned long nr_latency = sysctl_sched_nr_latency;
 
-        do_div(period, nr_running);
+        if (unlikely(nr_running > nr_latency))
+                nr_running = nr_latency;
 
-        return period;
+        period /= nr_running;
+
+        return (u64)period;
 }
 
 /*
```
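To check that the new `__sched_vslice()` computes the same slice as before with less work, here is a standalone C sketch (not kernel code). The old `__sched_period()` body beyond the hunk above, and the numeric values used, are assumptions for illustration; only the lines visible in the diff are taken from the patch.

```c
#include <stdio.h>
#include <stdint.h>

/* Assumed-for-illustration tunables; the old latency/granularity ratio and
 * the new sysctl_sched_nr_latency are taken to have the same value here. */
static const uint64_t      sched_latency = 20000000ULL; /* 20 ms in ns */
static const unsigned long nr_latency    = 20;

/* Old scheme: stretch the period, then divide it back down in 64 bits.
 * The body of the old __sched_period() past "period *= nr_running;" is
 * not shown in the hunk and is assumed here. */
static uint64_t vslice_old(unsigned long nr_running)
{
        uint64_t period = sched_latency;

        if (nr_running > nr_latency) {
                period *= nr_running;
                period /= nr_latency;   /* do_div() in the kernel */
        }
        return period / nr_running;     /* second 64-bit division */
}

/* New scheme from the patch: clamp nr_running, one machine-word division. */
static uint64_t vslice_new(unsigned long nr_running)
{
        unsigned long period = sched_latency;

        if (nr_running > nr_latency)
                nr_running = nr_latency;

        period /= nr_running;
        return (uint64_t)period;
}

int main(void)
{
        for (unsigned long n = 1; n <= 64; n++) {
                if (vslice_old(n) != vslice_new(n)) {
                        printf("mismatch at nr_running=%lu\n", n);
                        return 1;
                }
        }
        printf("old and new agree for 1..64 tasks, e.g. %llu ns at 40 tasks\n",
               (unsigned long long)vslice_new(40));
        return 0;
}
```

The other part of the speedup, visible in the diff itself, is that the new `__sched_vslice()` keeps its arithmetic in `unsigned long`, so the `do_div()` 64-bit division helper is no longer needed on that path.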