Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..4ee50f0af8d1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1789,10 +1789,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 	 * This is especially important for buddies when the leftmost
 	 * task is higher priority than the buddy.
 	 */
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		gran = calc_delta_fair(gran, se);
-
-	return gran;
+	return calc_delta_fair(gran, se);
 }
 
 /*
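
(Note: the removed NICE_0_LOAD fast path was dead code, since calc_delta_fair() performs the same check internally. A minimal sketch of that helper as it reads in the sched_fair.c of this era (the exact body may differ):

	static inline unsigned long
	calc_delta_fair(unsigned long delta, struct sched_entity *se)
	{
		/* NICE_0_LOAD entities pass through unscaled, same as the removed fast path */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

		return delta;
	}

Calling it unconditionally therefore preserves behaviour while dropping the duplicate branch.)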
@@ -2104,21 +2101,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2149,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
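(Note: balance_tasks() used to start every invocation with pinned = 1 and copy it into *all_pinned on exit, so when load_balance_fair() iterates several task groups the reported value reflected only the last group scanned. Passing all_pinned straight down lets any migratable task in any group clear it. A sketch of the relevant check, assuming the can_migrate_task() of this era:

	/* inside can_migrate_task(): a pinned task leaves *all_pinned untouched */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
		return 0;
	}
	*all_pinned = 0;	/* at least one task could have moved here */

The caller is now responsible for seeding the flag once per balance attempt; see the load_balance() hunk below.)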
@@ -3127,6 +3120,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3146,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
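(Note: sds.avg_load is the domain-wide load normalized by total cpu power, scaled by SCHED_LOAD_SCALE (1024). Hoisting the computation above the group_imb checks that follow the quoted comment matters because the early force_balance path calls calculate_imbalance(), which reads sds->avg_load; before this change that path saw the field still zeroed from the memset of sds. As a worked example, two cpus at default power give total_pwr == 2048, so total_load == 3072 yields avg_load == 1024 * 3072 / 2048 == 1536.)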
@@ -3340,6 +3334,7 @@ redo:
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		all_pinned = 1;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
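
(Note: with balance_tasks() no longer seeding the flag, load_balance() must start each balance attempt assuming everything is pinned; move_tasks() then clears all_pinned as soon as one task proves migratable. A sketch of how the flag is consumed further down in load_balance(), assuming this era's retry logic:

	if (unlikely(all_pinned)) {
		/* exclude the busiest cpu and rescan the remaining ones */
		cpumask_clear_cpu(cpu_of(busiest), cpus);
		if (!cpumask_empty(cpus))
			goto redo;
		goto out_balanced;
	}

Setting the flag inside the retry loop rather than at function entry also means each redo pass re-evaluates pinned-ness from scratch.)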