Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  15  ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6db78aa..deeb1f8e0c30 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 /*
  * Shift right and round:
  */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
 	if (unlikely(tmp > WMULT_CONST))
-		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
 			WMULT_SHIFT/2);
 	else
-		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
@@ -2512,7 +2511,7 @@ group_next:
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2564,10 +2563,8 @@ small_imbalance:
 		pwr_move /= SCHED_LOAD_SCALE;
 
 		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
 	}
 
 	return busiest;
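
For reference, a minimal standalone sketch of what the renamed SRR() ("shift right and round") macro does. The macro body is copied verbatim from the hunk above; the main() harness and the sample values are illustrative assumptions, not part of kernel/sched.c.

#include <stdio.h>

/*
 * Shift right and round (copied verbatim from the hunk above):
 * adding 1UL << (y - 1), i.e. half of the divisor, before the shift
 * rounds the result to the nearest value instead of truncating it.
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	/* Illustrative values only: 1000/8 = 125.0, 1004/8 = 125.5 */
	printf("1000 >> 3 = %lu, SRR(1000, 3) = %lu\n",
	       1000UL >> 3, SRR(1000UL, 3));	/* 125, 125 */
	printf("1004 >> 3 = %lu, SRR(1004, 3) = %lu\n",
	       1004UL >> 3, SRR(1004UL, 3));	/* 125, 126 (rounded up) */
	return 0;
}

This rounding is what calc_delta_mine() relies on in the second hunk: when tmp * lw->inv_weight would overflow the 64-bit multiplication, the WMULT_SHIFT shift is split into two WMULT_SHIFT/2 halves, one applied before and one after the multiply, keeping the intermediate value in range at the cost of a little precision.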