author		Paul Mackerras <paulus@samba.org>	2007-09-13 11:24:25 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-09-13 11:24:25 -0400
commit		b2315372eac9cd9f622c32a93e323cf6f0f03462 (patch)
tree		9e1faa7cdcddf5d90bec4fb9523742d4cce699a1 /kernel/sched.c
parent		5326152fa182b0a16e4abf913ce403e3c7ab53b7 (diff)
parent		c87ce65868bbf9bbea9c3f112ff8315302daf8f2 (diff)

Merge branch 'linux-2.6' into for-2.6.24
Diffstat (limited to 'kernel/sched.c')

-rw-r--r--	kernel/sched.c	16
1 file changed, 7 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 9fe473a190de..deeb1f8e0c30 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 /*
  * Shift right and round:
  */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
 	if (unlikely(tmp > WMULT_CONST))
-		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
 			WMULT_SHIFT/2);
 	else
-		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
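The rename from RSR to SRR ("shift right and round") leaves the semantics intact: adding 1 << (y - 1), half of the final divisor, before shifting right by y rounds the quotient to nearest instead of truncating it. calc_delta_mine() uses it to scale a runtime delta by a fixed-point inverse weight, splitting the shift into two halves when the full 64-bit product would overflow. A minimal standalone sketch of the technique follows; scale_delta() and the test harness are illustrative, and the WMULT_* values only approximate the kernel's constants of this era.

#include <stdio.h>
#include <stdint.h>

/* Shift right and round: adding half of the divisor before the shift
 * rounds the result to nearest rather than truncating it. */
#define SRR(x, y) (((x) + (1ULL << ((y) - 1))) >> (y))

#define WMULT_SHIFT	32
#define WMULT_CONST	(~0ULL >> 1)	/* illustrative overflow threshold */

/* Scale 'delta' by the fixed-point ratio inv_weight / 2^WMULT_SHIFT.
 * When the 64-bit product could overflow, shift in two halves and
 * round at each step, the same trick calc_delta_mine() uses. */
static uint64_t scale_delta(uint64_t delta, uint64_t inv_weight)
{
	if (delta > WMULT_CONST)
		return SRR(SRR(delta, WMULT_SHIFT/2) * inv_weight,
			   WMULT_SHIFT/2);
	return SRR(delta * inv_weight, WMULT_SHIFT);
}

int main(void)
{
	/* 7 >> 1 truncates to 3; SRR(7, 1) rounds to 4. */
	printf("7 >> 1 = %llu, SRR(7, 1) = %llu\n",
	       7ULL >> 1, (unsigned long long)SRR(7ULL, 1));
	/* 1000000 * (2^31 / 2^32) = 500000, rounded to nearest. */
	printf("scale_delta(1000000, 1ULL << 31) = %llu\n",
	       (unsigned long long)scale_delta(1000000ULL, 1ULL << 31));
	return 0;
}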
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
@@ -1587,6 +1586,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.wait_start_fair = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
+	p->se.prev_sum_exec_runtime = 0;
 	p->se.delta_exec = 0;
 	p->se.delta_fair_run = 0;
 	p->se.delta_fair_sleep = 0;
@@ -2511,7 +2511,7 @@ group_next:
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2563,10 +2563,8 @@ small_imbalance:
 		pwr_move /= SCHED_LOAD_SCALE;
 
 		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
 	}
 
 	return busiest;
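The last two hunks belong together: dropping the SCHED_LOAD_SCALE_FUZZ slack makes the small-imbalance path trigger whenever the computed imbalance falls below one task's load, and the inverted throughput test replaces the early goto out_balanced with a fall-through, so *imbalance is bumped only when moving a task is estimated to gain throughput, and otherwise the already-computed imbalance is left alone. A toy illustration of that decision follows; the function and the values fed to it are hypothetical stand-ins for the kernel's computed group capacities.

#include <stdio.h>

/* Sketch of the simplified small-imbalance decision: request a task
 * move (by setting *imbalance) only when estimated throughput after
 * the move exceeds throughput now. All values are hypothetical. */
static void small_imbalance(unsigned long pwr_now, unsigned long pwr_move,
			    unsigned long busiest_load_per_task,
			    unsigned long *imbalance)
{
	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
		*imbalance = busiest_load_per_task;
	/* Otherwise leave *imbalance unchanged; the old code instead
	 * jumped to out_balanced and reported no imbalance at all. */
}

int main(void)
{
	unsigned long imbalance = 0;

	small_imbalance(100, 120, 512, &imbalance);	/* gain: move */
	printf("after gain:    imbalance = %lu\n", imbalance);

	imbalance = 0;
	small_imbalance(120, 100, 512, &imbalance);	/* no gain: skip */
	printf("after no gain: imbalance = %lu\n", imbalance);
	return 0;
}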