author | Peter Williams <pwil3058@bigpond.net.au> | 2007-10-24 12:23:51 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2007-10-24 12:23:51 -0400
commit | e1d1484f72127a5580d37c379f6a5b2c2786434c (patch) |
tree | e3e6529c5b9079f35b2c60bbd346a3c51c2b2c13 /kernel/sched_fair.c |
parent | a0f846aa76c3e03d54c1700a87cab3a46ccd71e2 (diff) |
sched: reduce balance-tasks overhead
At the moment, balance_tasks() provides low-level functionality for both
move_tasks() and move_one_task() (indirectly) via the load_balance()
function (in the sched_class interface), which also provides dual
functionality. This dual functionality complicates the interfaces and
internal mechanisms and adds to the run-time overhead of operations that
are called with two run queue locks held.
This patch addresses this issue and reduces the overhead of these
operations.
Signed-off-by: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
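
To make the split concrete, here is a minimal, illustrative sketch of the two hook shapes this patch separates. The prototypes mirror the signatures visible in the hunks below, but the names `load_balance_combined`, `load_balance_split` and `move_one_task_split` (and the stand-in `cpu_idle_type` values) are invented for this comparison and are not kernel identifiers.

```c
/* Stand-in declarations so the prototypes below are self-contained. */
struct rq;
struct sched_domain;
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE }; /* stand-in values */

/*
 * Before the patch: one hook served both "move up to max_load_move worth
 * of load" and "move exactly one task", so every caller carried the
 * max_nr_move / load_moved bookkeeping while holding two runqueue locks.
 */
unsigned long load_balance_combined(struct rq *this_rq, int this_cpu,
                                    struct rq *busiest,
                                    unsigned long max_nr_move,
                                    unsigned long max_load_move,
                                    struct sched_domain *sd,
                                    enum cpu_idle_type idle,
                                    int *all_pinned, int *this_best_prio);

/*
 * After the patch: the jobs are split. load_balance keeps only the
 * load-based limit; move_one_task just reports whether a task moved.
 */
unsigned long load_balance_split(struct rq *this_rq, int this_cpu,
                                 struct rq *busiest,
                                 unsigned long max_load_move,
                                 struct sched_domain *sd,
                                 enum cpu_idle_type idle,
                                 int *all_pinned, int *this_best_prio);

int move_one_task_split(struct rq *this_rq, int this_cpu, struct rq *busiest,
                        struct sched_domain *sd, enum cpu_idle_type idle);
```

The point of the split is that callers which only ever need a single task moved no longer drag the load-accounting arguments and counters through code that runs with both runqueue locks held.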
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 44 |
1 file changed, 33 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 166ed6db600b..a90d0457d603 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -936,12 +936,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                  unsigned long max_nr_move, unsigned long max_load_move,
+                  unsigned long max_load_move,
                   struct sched_domain *sd, enum cpu_idle_type idle,
                   int *all_pinned, int *this_best_prio)
 {
         struct cfs_rq *busy_cfs_rq;
-        unsigned long load_moved, total_nr_moved = 0, nr_moved;
         long rem_load_move = max_load_move;
         struct rq_iterator cfs_rq_iterator;
 
@@ -969,25 +968,47 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #else
 # define maxload rem_load_move
 #endif
-                /* pass busy_cfs_rq argument into
+                /*
+                 * pass busy_cfs_rq argument into
                  * load_balance_[start|next]_fair iterators
                  */
                 cfs_rq_iterator.arg = busy_cfs_rq;
-                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-                                max_nr_move, maxload, sd, idle, all_pinned,
-                                &load_moved, this_best_prio, &cfs_rq_iterator);
-
-                total_nr_moved += nr_moved;
-                max_nr_move -= nr_moved;
-                rem_load_move -= load_moved;
+                rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+                                               maxload, sd, idle, all_pinned,
+                                               this_best_prio,
+                                               &cfs_rq_iterator);
 
-                if (max_nr_move <= 0 || rem_load_move <= 0)
+                if (rem_load_move <= 0)
                         break;
         }
 
         return max_load_move - rem_load_move;
 }
 
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+        struct cfs_rq *busy_cfs_rq;
+        struct rq_iterator cfs_rq_iterator;
+
+        cfs_rq_iterator.start = load_balance_start_fair;
+        cfs_rq_iterator.next = load_balance_next_fair;
+
+        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+                /*
+                 * pass busy_cfs_rq argument into
+                 * load_balance_[start|next]_fair iterators
+                 */
+                cfs_rq_iterator.arg = busy_cfs_rq;
+                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+                                       &cfs_rq_iterator))
+                        return 1;
+        }
+
+        return 0;
+}
+
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
@@ -1064,6 +1085,7 @@ static const struct sched_class fair_sched_class = {
         .put_prev_task          = put_prev_task_fair,
 
         .load_balance           = load_balance_fair,
+        .move_one_task          = move_one_task_fair,
 
         .set_curr_task          = set_curr_task_fair,
         .task_tick              = task_tick_fair,
```
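
This diff covers only the fair class's side of the change; the hook it registers (.move_one_task) has to be invoked from the core balancer in kernel/sched.c, which is outside this diffstat-limited view. As a rough sketch only, assuming the core dispatches through the usual sched_class list (the dispatcher name and the sched_class_highest/->next walk are assumptions here, not taken from this diff), the call path might look like:

```c
/*
 * Sketch, not the actual kernel/sched.c hunk from this commit: walk the
 * scheduling classes in priority order and stop at the first one that
 * manages to move a single task to this_rq.
 */
static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                         struct sched_domain *sd, enum cpu_idle_type idle)
{
        const struct sched_class *class;

        for (class = sched_class_highest; class; class = class->next)
                if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
                        return 1;

        return 0;
}
```

Under that arrangement, move_one_task_fair() above only has to answer yes or no per leaf cfs_rq via iter_move_one_task(), with none of the load accounting that load_balance_fair() still performs.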