author		Ingo Molnar <mingo@elte.hu>	2008-07-15 18:29:07 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 18:29:07 -0400
commit		82638844d9a8581bbf33201cc209a14876eca167 (patch)
tree		961d7f9360194421a71aa644a9d0c176a960ce49 /kernel/sched_rt.c
parent		9982fbface82893e77d211fbabfbd229da6bdde6 (diff)
parent		63cf13b77ab785e87c867defa8545e6d4a989774 (diff)
Merge branch 'linus' into cpus4096
Conflicts:
arch/x86/xen/smp.c
kernel/sched_rt.c
net/iucv/iucv.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	405
1 file changed, 264 insertions, 141 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e757f370eb1b..7c9614728c59 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)
 
 static inline void rt_set_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	cpu_set(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)
 
 static inline void rt_clear_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
 	cpu_clear(rq->cpu, rq->rd->rto_mask);
@@ -155,7 +161,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 	return &rt_rq->tg->rt_bandwidth;
 }
 
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
@@ -220,49 +226,10 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 	return &def_rt_bandwidth;
 }
 
-#endif
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
-{
-	int i, idle = 1;
-	cpumask_t span;
-
-	if (rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
-	span = sched_rt_period_mask();
-	for_each_cpu_mask_nr(i, span) {
-		int enqueue = 0;
-		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		spin_lock(&rq->lock);
-		if (rt_rq->rt_time) {
-			u64 runtime;
-
-			spin_lock(&rt_rq->rt_runtime_lock);
-			runtime = rt_rq->rt_runtime;
-			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
-			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-				rt_rq->rt_throttled = 0;
-				enqueue = 1;
-			}
-			if (rt_rq->rt_time || rt_rq->rt_nr_running)
-				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
-			idle = 0;
-
-		if (enqueue)
-			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
-	}
-
-	return idle;
-}
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
-static int balance_runtime(struct rt_rq *rt_rq)
+static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -281,6 +248,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);
+		if (iter->rt_runtime == RUNTIME_INF)
+			goto next;
+
 		diff = iter->rt_runtime - iter->rt_time;
 		if (diff > 0) {
 			do_div(diff, weight);
@@ -294,13 +264,163 @@ static int balance_runtime(struct rt_rq *rt_rq)
 				break;
 			}
 		}
+next:
 		spin_unlock(&iter->rt_runtime_lock);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 
 	return more;
 }
-#endif
+
+static void __disable_runtime(struct rq *rq)
+{
+	struct root_domain *rd = rq->rd;
+	struct rt_rq *rt_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	for_each_leaf_rt_rq(rt_rq, rq) {
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+		s64 want;
+		int i;
+
+		spin_lock(&rt_b->rt_runtime_lock);
+		spin_lock(&rt_rq->rt_runtime_lock);
+		if (rt_rq->rt_runtime == RUNTIME_INF ||
+				rt_rq->rt_runtime == rt_b->rt_runtime)
+			goto balanced;
+		spin_unlock(&rt_rq->rt_runtime_lock);
+
+		want = rt_b->rt_runtime - rt_rq->rt_runtime;
+
+		for_each_cpu_mask(i, rd->span) {
+			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+			s64 diff;
+
+			if (iter == rt_rq)
+				continue;
+
+			spin_lock(&iter->rt_runtime_lock);
+			if (want > 0) {
+				diff = min_t(s64, iter->rt_runtime, want);
+				iter->rt_runtime -= diff;
+				want -= diff;
+			} else {
+				iter->rt_runtime -= want;
+				want -= want;
+			}
+			spin_unlock(&iter->rt_runtime_lock);
+
+			if (!want)
+				break;
+		}
+
+		spin_lock(&rt_rq->rt_runtime_lock);
+		BUG_ON(want);
+balanced:
+		rt_rq->rt_runtime = RUNTIME_INF;
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		spin_unlock(&rt_b->rt_runtime_lock);
+	}
+}
+
+static void disable_runtime(struct rq *rq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	__disable_runtime(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void __enable_runtime(struct rq *rq)
+{
+	struct rt_rq *rt_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	for_each_leaf_rt_rq(rt_rq, rq) {
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+		spin_lock(&rt_b->rt_runtime_lock);
+		spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_runtime = rt_b->rt_runtime;
+		rt_rq->rt_time = 0;
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		spin_unlock(&rt_b->rt_runtime_lock);
+	}
+}
+
+static void enable_runtime(struct rq *rq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	__enable_runtime(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+	int more = 0;
+
+	if (rt_rq->rt_time > rt_rq->rt_runtime) {
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		more = do_balance_runtime(rt_rq);
+		spin_lock(&rt_rq->rt_runtime_lock);
+	}
+
+	return more;
+}
+#else /* !CONFIG_SMP */
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+	int i, idle = 1;
+	cpumask_t span;
+
+	if (rt_b->rt_runtime == RUNTIME_INF)
+		return 1;
+
+	span = sched_rt_period_mask();
+	for_each_cpu_mask(i, span) {
+		int enqueue = 0;
+		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		spin_lock(&rq->lock);
+		if (rt_rq->rt_time) {
+			u64 runtime;
+
+			spin_lock(&rt_rq->rt_runtime_lock);
+			if (rt_rq->rt_throttled)
+				balance_runtime(rt_rq);
+			runtime = rt_rq->rt_runtime;
+			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+				rt_rq->rt_throttled = 0;
+				enqueue = 1;
+			}
+			if (rt_rq->rt_time || rt_rq->rt_nr_running)
+				idle = 0;
+			spin_unlock(&rt_rq->rt_runtime_lock);
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;
+
+		if (enqueue)
+			sched_rt_rq_enqueue(rt_rq);
+		spin_unlock(&rq->lock);
+	}
+
+	return idle;
+}
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
@@ -327,18 +447,10 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
 		return 0;
 
-#ifdef CONFIG_SMP
-	if (rt_rq->rt_time > runtime) {
-		int more;
-
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		more = balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
-
-		if (more)
-			runtime = sched_rt_runtime(rt_rq);
-	}
-#endif
+	balance_runtime(rt_rq);
+	runtime = sched_rt_runtime(rt_rq);
+	if (runtime == RUNTIME_INF)
+		return 0;
 
 	if (rt_rq->rt_time > runtime) {
 		rt_rq->rt_throttled = 1;
@@ -392,12 +504,21 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
+	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
 		rt_rq->highest_prio = rt_se_prio(rt_se);
+#ifdef CONFIG_SMP
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_se_prio(rt_se));
+#endif
+	}
 #endif
 #ifdef CONFIG_SMP
 	if (rt_se->nr_cpus_allowed > 1) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
+
 		rq->rt.rt_nr_migratory++;
 	}
 
@@ -417,6 +538,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_SMP
+	int highest_prio = rt_rq->highest_prio;
+#endif
+
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
@@ -440,6 +565,14 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 		rq->rt.rt_nr_migratory--;
 	}
 
+	if (rt_rq->highest_prio != highest_prio) {
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_rq->highest_prio);
+	}
+
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -455,6 +588,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
 	/*
 	 * Don't enqueue the group if its throttled, or when empty.
@@ -465,7 +599,11 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (rt_se->nr_cpus_allowed == 1)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
+
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -532,6 +670,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se);
+
+	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -540,6 +680,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);
+
+	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -550,10 +692,12 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
-	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	if (on_rt_rq(rt_se))
-		list_move_tail(&rt_se->run_list, queue);
+	if (on_rt_rq(rt_se)) {
+		list_del_init(&rt_se->run_list);
+		list_add_tail(&rt_se->run_list,
+			      array->queue + rt_se_prio(rt_se));
+	}
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
@@ -616,8 +760,37 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
  */
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 {
-	if (p->prio < rq->curr->prio)
+	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);
+		return;
+	}
+
+#ifdef CONFIG_SMP
+	/*
+	 * If:
+	 *
+	 * - the newly woken task is of equal priority to the current task
+	 * - the newly woken task is non-migratable while current is migratable
+	 * - current will be preempted on the next reschedule
+	 *
+	 * we should check to see if current can readily move to a different
+	 * cpu. If so, we will reschedule to allow the push logic to try
+	 * to move current somewhere else, making room for our non-migratable
+	 * task.
+	 */
+	if((p->prio == rq->curr->prio)
+	   && p->rt.nr_cpus_allowed == 1
+	   && rq->curr->rt.nr_cpus_allowed != 1) {
+		cpumask_t mask;
+
+		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+			/*
+			 * There appears to be other cpus that can accept
+			 * current, so lets reschedule to try and push it away
+			 */
+			resched_task(rq->curr);
+	}
+#endif
 }
 
 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
@@ -720,73 +893,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
-{
-	int lowest_prio = -1;
-	int lowest_cpu = -1;
-	int count = 0;
-	int cpu;
-
-	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
-
-	/*
-	 * Scan each rq for the lowest prio.
-	 */
-	for_each_cpu_mask(cpu, *lowest_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		/* We look for lowest RT prio or non-rt CPU */
-		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-			/*
-			 * if we already found a low RT queue
-			 * and now we found this non-rt queue
-			 * clear the mask and set our bit.
-			 * Otherwise just return the queue as is
-			 * and the count==1 will cause the algorithm
-			 * to use the first bit found.
-			 */
-			if (lowest_cpu != -1) {
-				cpus_clear(*lowest_mask);
-				cpu_set(rq->cpu, *lowest_mask);
-			}
-			return 1;
-		}
-
-		/* no locking for now */
-		if ((rq->rt.highest_prio > task->prio)
-		    && (rq->rt.highest_prio >= lowest_prio)) {
-			if (rq->rt.highest_prio > lowest_prio) {
-				/* new low - clear old data */
-				lowest_prio = rq->rt.highest_prio;
-				lowest_cpu = cpu;
-				count = 0;
-			}
-			count++;
-		} else
-			cpu_clear(cpu, *lowest_mask);
-	}
-
-	/*
-	 * Clear out all the set bits that represent
-	 * runqueues that were of higher prio than
-	 * the lowest_prio.
-	 */
-	if (lowest_cpu > 0) {
-		/*
-		 * Perhaps we could add another cpumask op to
-		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
-		 * Then that could be optimized to use memset and such.
-		 */
-		for_each_cpu_mask(cpu, *lowest_mask) {
-			if (cpu >= lowest_cpu)
-				break;
-			cpu_clear(cpu, *lowest_mask);
-		}
-	}
-
-	return count;
-}
-
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
 	int first;
@@ -808,17 +914,12 @@ static int find_lowest_rq(struct task_struct *task)
 	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
-	int count = find_lowest_cpus(task, lowest_mask);
 
-	if (!count)
-		return -1; /* No targets found */
+	if (task->rt.nr_cpus_allowed == 1)
+		return -1; /* No other targets possible */
 
-	/*
-	 * There is no sense in performing an optimal search if only one
-	 * target is found.
-	 */
-	if (count == 1)
-		return first_cpu(*lowest_mask);
+	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+		return -1; /* No targets found */
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -1163,17 +1264,25 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
+
+	__enable_runtime(rq);
+
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
 }
 
 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
+
+	__disable_runtime(rq);
+
+	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 }
 
 /*
@@ -1336,8 +1445,8 @@ static const struct sched_class rt_sched_class = {
 	.load_balance = load_balance_rt,
 	.move_one_task = move_one_task_rt,
 	.set_cpus_allowed = set_cpus_allowed_rt,
-	.join_domain = join_domain_rt,
-	.leave_domain = leave_domain_rt,
+	.rq_online = rq_online_rt,
+	.rq_offline = rq_offline_rt,
 	.pre_schedule = pre_schedule_rt,
 	.post_schedule = post_schedule_rt,
 	.task_wake_up = task_wake_up_rt,
@@ -1350,3 +1459,17 @@ static const struct sched_class rt_sched_class = {
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
 };
+
+#ifdef CONFIG_SCHED_DEBUG
+extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+
+static void print_rt_stats(struct seq_file *m, int cpu)
+{
+	struct rt_rq *rt_rq;
+
+	rcu_read_lock();
+	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+		print_rt_rq(m, cpu, rt_rq);
+	rcu_read_unlock();
+}
+#endif /* CONFIG_SCHED_DEBUG */