about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-08-09 05:16:51 -0400
committerIngo Molnar <mingo@elte.hu>2007-08-09 05:16:51 -0400
commit194081ebfaa8c7d16133e08dd79254910c20c6ff (patch)
treeba4d26c12614c93694fa67dd4003e243fe546e6c /kernel
parent254753dc321ea2b753ca9bc58ac329557a20efac (diff)
sched: round a bit better
round a tiny bit better in high-frequency rescheduling scenarios, by rounding around zero instead of rounding down. (this is pretty theoretical though) Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 18 ++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5470ab0258a8..b0afd8db1396 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -638,6 +638,11 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 
 #define WMULT_SHIFT	32
 
+/*
+ * Shift right and round:
+ */
+#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 		struct load_weight *lw)
@@ -645,18 +650,17 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	u64 tmp;
 
 	if (unlikely(!lw->inv_weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
+		lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
 
 	tmp = (u64)delta_exec * weight;
 	/*
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
-	if (unlikely(tmp > WMULT_CONST)) {
-		tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
-			>> (WMULT_SHIFT/2);
-	} else {
-		tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
-	}
+	if (unlikely(tmp > WMULT_CONST))
+		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+			WMULT_SHIFT/2);
+	else
+		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }