author     Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:04 -0400
committer  Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:04 -0400
commit     08e2388aa1e40cb06f7d04ac621e2ae94e1d8fdc (patch)
tree       07ef79ec5f60879471bfcdef1da7e1d37cbddb4e /kernel/sched_fair.c
parent     1091985b482fdd577a5c511059b9d7b4467bd15d (diff)
sched: clean up calc_weighted()
clean up calc_weighted(): we always use the normalized shift, so there
is no need to pass it in. Also, push the non-nice-0 branch into the
function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   31
1 file changed, 8 insertions(+), 23 deletions(-)
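Before the diff, a minimal standalone sketch of the arithmetic this helper performs, assuming the 2.6.23-era constants (NICE_0_SHIFT == 10, so NICE_0_LOAD == 1024); the stub struct and the sample weights below are illustrative assumptions, not the kernel's own definitions. It shows why the nice-0 fast path can return delta unchanged: for a nice-0 task the multiply-and-shift is an exact identity, since delta * 1024 >> 10 == delta.

#include <stdio.h>

/* Assumed 2.6.23-era values: NICE_0_LOAD == 1 << NICE_0_SHIFT == 1024. */
#define NICE_0_SHIFT 10
#define NICE_0_LOAD  (1UL << NICE_0_SHIFT)

/* Hypothetical stand-in for the load.weight field of struct sched_entity. */
struct se_stub { unsigned long weight; };

static unsigned long calc_weighted(unsigned long delta, const struct se_stub *se)
{
        /* Non-nice-0: scale delta by weight/NICE_0_LOAD via multiply+shift,
         * widening to 64 bits so the multiply cannot overflow. */
        if (se->weight != NICE_0_LOAD)
                return (unsigned long)(((unsigned long long)delta * se->weight)
                                        >> NICE_0_SHIFT);
        /* Nice-0: delta * 1024 >> 10 == delta, so skip the 64-bit multiply. */
        return delta;
}

int main(void)
{
        struct se_stub nice0 = { NICE_0_LOAD }; /* weight 1024, nice 0 */
        struct se_stub nice5 = { 335 };         /* nice 5 weight in the era's
                                                 * prio_to_weight table
                                                 * (assumption) */

        printf("%lu\n", calc_weighted(4096, &nice0)); /* 4096 (identity) */
        printf("%lu\n", calc_weighted(4096, &nice5)); /* 1340 == 4096*335/1024 */
        return 0;
}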
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 91a227b436ee..b46f8078e78f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -397,27 +397,16 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
 static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
 {
-	u64 tmp = (u64)delta * weight >> shift;
+	unsigned long weight = se->load.weight;
 
-	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
-		return sysctl_sched_runtime_limit*2;
-	return tmp;
+	if (unlikely(weight != NICE_0_LOAD))
+		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+	else
+		return delta;
 }
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
-	return delta * weight >> shift;
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
@@ -469,9 +458,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	add_wait_runtime(cfs_rq, se, delta_fair);
 }
@@ -554,9 +541,7 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	delta_fair = div64_likely32((u64)delta_fair * load,
 					load + se->load.weight);
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
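As a sanity check on the refactor, here is a hedged, self-contained comparison of the old three-argument form against the new folded form: since every caller passed NICE_0_SHIFT and the nice-0 multiply-and-shift is an identity, the two agree for all weights (setting aside the removed 32-bit overflow cap, which only mattered for very large products). The constants and table weights are era-appropriate assumptions, not taken from this patch.

#include <assert.h>
#include <stdio.h>

#define NICE_0_SHIFT 10                    /* assumption: 2.6.23-era value */
#define NICE_0_LOAD  (1UL << NICE_0_SHIFT)

/* Old shape: every call site passed weight and shift explicitly. */
static unsigned long calc_weighted_old(unsigned long delta,
                                       unsigned long weight, int shift)
{
        return (unsigned long)(((unsigned long long)delta * weight) >> shift);
}

/* New shape: shift fixed to NICE_0_SHIFT, nice-0 branch folded inside. */
static unsigned long calc_weighted_new(unsigned long delta, unsigned long weight)
{
        if (weight != NICE_0_LOAD)
                return (unsigned long)(((unsigned long long)delta * weight)
                                        >> NICE_0_SHIFT);
        return delta;
}

int main(void)
{
        /* A few weights spanning nice -5, 0, +5 (assumed table values). */
        const unsigned long weights[] = { 3121, 1024, 335 };

        for (unsigned int i = 0; i < sizeof(weights) / sizeof(weights[0]); i++)
                for (unsigned long d = 0; d < 1000000; d += 97)
                        assert(calc_weighted_old(d, weights[i], NICE_0_SHIFT)
                               == calc_weighted_new(d, weights[i]));

        puts("old and new calc_weighted() agree on all sampled inputs");
        return 0;
}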