Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 32 ++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 16511e9e5528..923bed0b0c42 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -929,6 +929,7 @@ static struct task_struct *load_balance_next_fair(void *arg)
 	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr;
@@ -942,12 +943,13 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 
 	return p->prio;
 }
+#endif
 
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_nr_move, unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned)
+		  int *all_pinned, int *this_best_prio)
 {
 	struct cfs_rq *busy_cfs_rq;
 	unsigned long load_moved, total_nr_moved = 0, nr_moved;
@@ -958,10 +960,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	cfs_rq_iterator.next = load_balance_next_fair;
 
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+#ifdef CONFIG_FAIR_GROUP_SCHED
 		struct cfs_rq *this_cfs_rq;
 		long imbalance;
 		unsigned long maxload;
-		int this_best_prio, best_prio, best_prio_seen = 0;
 
 		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
@@ -975,27 +977,17 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		imbalance /= 2;
 		maxload = min(rem_load_move, imbalance);
 
-		this_best_prio = cfs_rq_best_prio(this_cfs_rq);
-		best_prio = cfs_rq_best_prio(busy_cfs_rq);
-
-		/*
-		 * Enable handling of the case where there is more than one task
-		 * with the best priority. If the current running task is one
-		 * of those with prio==best_prio we know it won't be moved
-		 * and therefore it's safe to override the skip (based on load)
-		 * of any task we find with that prio.
-		 */
-		if (cfs_rq_curr(busy_cfs_rq) == &busiest->curr->se)
-			best_prio_seen = 1;
-
+		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+#else
+#define maxload rem_load_move
+#endif
 		/* pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
 		 */
 		cfs_rq_iterator.arg = busy_cfs_rq;
 		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
 				max_nr_move, maxload, sd, idle, all_pinned,
-				&load_moved, this_best_prio, best_prio,
-				best_prio_seen, &cfs_rq_iterator);
+				&load_moved, this_best_prio, &cfs_rq_iterator);
 
 		total_nr_moved += nr_moved;
 		max_nr_move -= nr_moved;
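
Note on the change: load_balance_fair() used to compute this_best_prio, best_prio and best_prio_seen per group and hand all three to balance_tasks(); after this patch the caller owns a single this_best_prio and threads it through by pointer, and when CONFIG_FAIR_GROUP_SCHED is off there is only one cfs_rq per CPU, so maxload simply aliases rem_load_move via the #define. The standalone sketch below (plain C, compiles with gcc) illustrates only that pointer-threading convention; the caller side lives in kernel/sched.c and is not part of this diff, so every name and value here is made up for illustration.

#include <stdio.h>

/* stand-in for a per-class load_balance hook taking the shared
 * this_best_prio by pointer, as in the patched signature above */
typedef unsigned long (*balance_fn)(int *this_best_prio);

static unsigned long balance_rt(int *this_best_prio)
{
	/* pretend an RT task of prio 10 was moved; record it
	 * (lower number = higher priority in the kernel) */
	if (10 < *this_best_prio)
		*this_best_prio = 10;
	return 1;
}

static unsigned long balance_fair(int *this_best_prio)
{
	/* a later class sees what earlier passes already moved */
	printf("fair pass sees this_best_prio = %d\n", *this_best_prio);
	return 1;
}

int main(void)
{
	balance_fn classes[] = { balance_rt, balance_fair };
	unsigned long moved = 0;
	/* caller-owned; seeded with a value worse than any real prio
	 * (illustrative, not the kernel's actual seed) */
	int this_best_prio = 140;
	size_t i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		moved += classes[i](&this_best_prio);

	printf("moved %lu tasks, best prio seen %d\n",
	       moved, this_best_prio);
	return 0;
}

The point of the convention: because the variable outlives any single class's balancing pass, the skip-or-move decision in balance_tasks() can be made against the best priority moved so far across all classes, which is what made the per-group best_prio/best_prio_seen bookkeeping removable.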