author     Peter Williams <pwil3058@bigpond.net.au>   2007-10-24 12:23:51 -0400
committer  Ingo Molnar <mingo@elte.hu>                2007-10-24 12:23:51 -0400
commit     e1d1484f72127a5580d37c379f6a5b2c2786434c (patch)
tree       e3e6529c5b9079f35b2c60bbd346a3c51c2b2c13 /kernel/sched_rt.c
parent     a0f846aa76c3e03d54c1700a87cab3a46ccd71e2 (diff)
sched: reduce balance-tasks overhead
At the moment, balance_tasks() provides low level functionality for both
move_tasks() and move_one_task() (indirectly) via the load_balance()
function (in the sched_class interface), which also provides dual
functionality. This dual functionality complicates the interfaces and
internal mechanisms and adds to the run time overhead of operations that
are called with two run queue locks held.
This patch addresses this issue and reduces the overhead of these
operations.
Signed-off-by: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--   kernel/sched_rt.c   28
1 file changed, 19 insertions(+), 9 deletions(-)
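
For orientation before the diff, here is a minimal editor's sketch of the two
balancing hooks as they stand after this split, using only the signatures
visible in the diff below. The wrapper struct name, the forward declarations,
and the enum values are placeholders for illustration; the real struct
sched_class has many more members.

/*
 * Editor's sketch, not kernel source.  load_balance now only moves up to
 * max_load_move weighted load; single-task moves go through the new
 * move_one_task hook, which drops the load-accounting arguments entirely.
 */
struct rq;                      /* per-CPU runqueue (kernel type) */
struct sched_domain;            /* scheduling domain (kernel type) */
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE }; /* values assumed */

struct sched_class_balance_sketch {
        unsigned long (*load_balance)(struct rq *this_rq, int this_cpu,
                        struct rq *busiest, unsigned long max_load_move,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *all_pinned, int *this_best_prio);

        int (*move_one_task)(struct rq *this_rq, int this_cpu,
                        struct rq *busiest, struct sched_domain *sd,
                        enum cpu_idle_type idle);
};

The split is visible directly in the diff: load_balance_rt() keeps only the
load-moving path via balance_tasks(), while the new move_one_task_rt() reuses
the same rq_iterator but goes through iter_move_one_task(), so neither path
carries the other's bookkeeping while both run queue locks are held.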
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d0097a0634e5..e9395b7119e6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -172,13 +172,11 @@ static struct task_struct *load_balance_next_rt(void *arg)
 
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                unsigned long max_nr_move, unsigned long max_load_move,
+                unsigned long max_load_move,
                 struct sched_domain *sd, enum cpu_idle_type idle,
                 int *all_pinned, int *this_best_prio)
 {
-        int nr_moved;
         struct rq_iterator rt_rq_iterator;
-        unsigned long load_moved;
 
         rt_rq_iterator.start = load_balance_start_rt;
         rt_rq_iterator.next = load_balance_next_rt;
@@ -187,11 +185,22 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
          */
         rt_rq_iterator.arg = busiest;
 
-        nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-                        max_load_move, sd, idle, all_pinned, &load_moved,
-                        this_best_prio, &rt_rq_iterator);
+        return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
+                        idle, all_pinned, this_best_prio, &rt_rq_iterator);
+}
+
+static int
+move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                 struct sched_domain *sd, enum cpu_idle_type idle)
+{
+        struct rq_iterator rt_rq_iterator;
+
+        rt_rq_iterator.start = load_balance_start_rt;
+        rt_rq_iterator.next = load_balance_next_rt;
+        rt_rq_iterator.arg = busiest;
 
-        return load_moved;
+        return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+                        &rt_rq_iterator);
 }
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
@@ -237,6 +246,7 @@ const struct sched_class rt_sched_class = {
         .put_prev_task = put_prev_task_rt,
 
         .load_balance = load_balance_rt,
+        .move_one_task = move_one_task_rt,
 
         .set_curr_task = set_curr_task_rt,
         .task_tick = task_tick_rt,