Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	19
1 files changed, 9 insertions, 10 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e91db32cadfd..fedbb51bba96 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -75,7 +75,7 @@ enum {
 
 unsigned int sysctl_sched_features __read_mostly =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*1 |
+		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
 		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
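
A note on the hunk above: sysctl_sched_features is a bitmask whose default value is assembled by multiplying each feature bit by 1 (on) or 0 (off), so the patch disables SLEEPER_AVG by default simply by flipping *1 to *0. A minimal standalone sketch of the same pattern; the FEAT_* names and values here are illustrative, not the kernel's:

	#include <stdio.h>

	/* Hypothetical feature bits, mirroring the SCHED_FEAT_* style. */
	enum {
		FEAT_FAIR_SLEEPERS	= 1,
		FEAT_SLEEPER_AVG	= 2,
		FEAT_SLEEPER_LOAD_AVG	= 4,
	};

	/* Multiplying a bit by 1 keeps it, by 0 drops it: the same trick
	 * the patch uses to turn SLEEPER_AVG off by default. */
	static unsigned int features =
			FEAT_FAIR_SLEEPERS	*1 |
			FEAT_SLEEPER_AVG	*0 |
			FEAT_SLEEPER_LOAD_AVG	*1;

	int main(void)
	{
		printf("SLEEPER_AVG enabled: %d\n",
		       !!(features & FEAT_SLEEPER_AVG));
		return 0;
	}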
@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
 	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-						curr->load.weight, lw);
-		if (unlikely(delta > cfs_rq->sleeper_bonus))
-			delta = cfs_rq->sleeper_bonus;
-
+		delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+		delta = calc_delta_mine(delta, curr->load.weight, lw);
+		delta = min((u64)delta, cfs_rq->sleeper_bonus);
 		cfs_rq->sleeper_bonus -= delta;
 		delta_mine -= delta;
 	}
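
Why the rewrite above matters: the old code scaled the entire sleeper_bonus by the weight ratio and only afterwards clamped it against the bonus itself, so a single short tick could consume far more bonus than the time the task actually ran. The new code clamps against delta_exec first, rescales, then clamps once more so sleeper_bonus can never underflow. A userspace sketch of the same double-clamp; scale_by_weight() is a made-up stand-in for calc_delta_mine(), which does fixed-point math in the real kernel:

	#include <stdio.h>

	typedef unsigned long long u64;

	#define min(a, b) ((a) < (b) ? (a) : (b))

	/* Stand-in for calc_delta_mine(): scales 'delta' by a weight
	 * ratio. Illustrative only. */
	static u64 scale_by_weight(u64 delta, unsigned long weight,
				   unsigned long total_weight)
	{
		return delta * weight / total_weight;
	}

	/* Mirrors the patched logic: never consume more bonus than the
	 * time actually executed, and never more than what remains. */
	static u64 consume_bonus(u64 *sleeper_bonus, u64 delta_exec,
				 unsigned long weight,
				 unsigned long total_weight)
	{
		u64 delta = min(*sleeper_bonus, delta_exec);

		delta = scale_by_weight(delta, weight, total_weight);
		delta = min(delta, *sleeper_bonus);
		*sleeper_bonus -= delta;
		return delta;
	}

	int main(void)
	{
		u64 bonus = 1000;
		u64 used = consume_bonus(&bonus, 300, 1024, 2048);

		printf("consumed %llu, bonus left %llu\n", used, bonus);
		return 0;
	}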
@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Track the amount of bonus we've given to sleepers:
 	 */
 	cfs_rq->sleeper_bonus += delta_fair;
+	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+		cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
 
 	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
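
The two added lines cap the accumulated sleeper bonus at sysctl_sched_runtime_limit, turning it into a saturating counter so repeated wakeups cannot bank unbounded credit. A sketch of the pattern; the 40 ms limit below is an assumed placeholder, not the kernel's actual default:

	#include <stdio.h>

	typedef unsigned long long u64;

	/* Assumed placeholder for sysctl_sched_runtime_limit
	 * (40 ms in ns); illustrative only. */
	static u64 runtime_limit = 40000000ULL;

	/* Mirrors the added hunk: accumulate, then saturate. */
	static void add_sleeper_bonus(u64 *sleeper_bonus, u64 delta_fair)
	{
		*sleeper_bonus += delta_fair;
		if (*sleeper_bonus > runtime_limit)
			*sleeper_bonus = runtime_limit;
	}

	int main(void)
	{
		u64 bonus = 0;
		int i;

		for (i = 0; i < 10; i++)
			add_sleeper_bonus(&bonus, 10000000ULL);
		printf("bonus saturates at %llu\n", bonus); /* 40000000 */
		return 0;
	}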
@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		struct cfs_rq *this_cfs_rq;
-		long imbalances;
+		long imbalance;
 		unsigned long maxload;
 
 		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-		imbalance = busy_cfs_rq->load.weight -
-						 this_cfs_rq->load.weight;
+		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
 		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
 		if (imbalance <= 0)
 			continue;
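
One subtlety in this hunk, beyond renaming the misspelled variable: the load.weight fields are unsigned long, while imbalance is a signed long, which is what lets the "imbalance <= 0" guard detect that this runqueue is already at least as loaded as the busiest one. An illustrative sketch; the function name is mine, and the negative result relies on the usual two's-complement conversion:

	#include <stdio.h>

	/* Difference of two unsigned weights, stored signed so the
	 * caller can skip pulling when it is already heavier. */
	static long compute_imbalance(unsigned long busiest_weight,
				      unsigned long this_weight)
	{
		return (long)(busiest_weight - this_weight);
	}

	int main(void)
	{
		printf("%ld\n", compute_imbalance(2048, 1024)); /*  1024: pull */
		printf("%ld\n", compute_imbalance(1024, 2048)); /* -1024: skip */
		return 0;
	}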
@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
 		/* pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators