Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	191
1 file changed, 87 insertions(+), 104 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f622880e918f..c2fbb02c1b54 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,17 +194,20 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -212,7 +215,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	int this_cpu = smp_processor_id();
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -327,7 +333,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	weight = cpumask_weight(rd->span);
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
 	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +342,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 		if (iter == rt_rq)
 			continue;
 
-		spin_lock(&iter->rt_runtime_lock);
+		raw_spin_lock(&iter->rt_runtime_lock);
 		/*
 		 * Either all rqs have inf runtime and there's nothing to steal
 		 * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +364,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 			rt_rq->rt_runtime += diff;
 			more = 1;
 			if (rt_rq->rt_runtime == rt_period) {
-				spin_unlock(&iter->rt_runtime_lock);
+				raw_spin_unlock(&iter->rt_runtime_lock);
 				break;
 			}
 		}
 next:
-		spin_unlock(&iter->rt_runtime_lock);
+		raw_spin_unlock(&iter->rt_runtime_lock);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 
 	return more;
 }
@@ -386,8 +392,8 @@ static void __disable_runtime(struct rq *rq)
 		s64 want;
 		int i;
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * Either we're all inf and nobody needs to borrow, or we're
 		 * already disabled and thus have nothing to do, or we have
@@ -396,7 +402,7 @@ static void __disable_runtime(struct rq *rq)
 		if (rt_rq->rt_runtime == RUNTIME_INF ||
 		    rt_rq->rt_runtime == rt_b->rt_runtime)
 			goto balanced;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 
 		/*
 		 * Calculate the difference between what we started out with
@@ -418,7 +424,7 @@ static void __disable_runtime(struct rq *rq)
 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 				continue;
 
-			spin_lock(&iter->rt_runtime_lock);
+			raw_spin_lock(&iter->rt_runtime_lock);
 			if (want > 0) {
 				diff = min_t(s64, iter->rt_runtime, want);
 				iter->rt_runtime -= diff;
@@ -427,13 +433,13 @@ static void __disable_runtime(struct rq *rq)
 				iter->rt_runtime -= want;
 				want -= want;
 			}
-			spin_unlock(&iter->rt_runtime_lock);
+			raw_spin_unlock(&iter->rt_runtime_lock);
 
 			if (!want)
 				break;
 		}
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * We cannot be left wanting - that would mean some runtime
 		 * leaked out of the system.
@@ -445,8 +451,8 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -454,9 +460,9 @@ static void disable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__disable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static void __enable_runtime(struct rq *rq)
@@ -472,13 +478,13 @@ static void __enable_runtime(struct rq *rq)
 	for_each_leaf_rt_rq(rt_rq, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
 		rt_rq->rt_throttled = 0;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -486,9 +492,9 @@ static void enable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__enable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +502,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 	int more = 0;
 
 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 
 	return more;
@@ -524,11 +530,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			if (rt_rq->rt_throttled)
 				balance_runtime(rt_rq);
 			runtime = rt_rq->rt_runtime;
@@ -539,13 +545,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		} else if (rt_rq->rt_nr_running)
 			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 
 	return idle;
@@ -624,11 +630,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		}
 	}
 }
@@ -803,7 +809,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +825,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +865,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +880,22 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }
 
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1136,7 +1146,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 		if (next && next->prio < idx)
 			continue;
 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
-			struct task_struct *p = rt_task_of(rt_se);
+			struct task_struct *p;
+
+			if (!rt_entity_is_task(rt_se))
+				continue;
+
+			p = rt_task_of(rt_se);
 			if (pick_rt_task(rq, p, cpu)) {
 				next = p;
 				break;
@@ -1153,29 +1168,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu,
-				   const struct cpumask *mask)
-{
-	int first;
-
-	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
-		return this_cpu;
-
-	first = cpumask_first(mask);
-	if (first < nr_cpu_ids)
-		return first;
-
-	return -1;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
-	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1198,28 +1196,26 @@ static int find_lowest_rq(struct task_struct *task)
 	 * Otherwise, we consult the sched_domains span maps to figure
 	 * out which cpu is logically closest to our hot cache data.
 	 */
-	if (this_cpu == cpu)
-		this_cpu = -1; /* Skip this_cpu opt if the same */
-
-	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
-		for_each_domain(cpu, sd) {
-			if (sd->flags & SD_WAKE_AFFINE) {
-				int best_cpu;
-
-				cpumask_and(domain_mask,
-					    sched_domain_span(sd),
-					    lowest_mask);
+	if (!cpumask_test_cpu(this_cpu, lowest_mask))
+		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
-				best_cpu = pick_optimal_cpu(this_cpu,
-							    domain_mask);
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
-				if (best_cpu != -1) {
-					free_cpumask_var(domain_mask);
-					return best_cpu;
-				}
-			}
+			/*
+			 * "this_cpu" is cheaper to preempt than a
+			 * remote processor.
+			 */
+			if (this_cpu != -1 &&
+			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+				return this_cpu;
+
+			best_cpu = cpumask_first_and(lowest_mask,
+						     sched_domain_span(sd));
+			if (best_cpu < nr_cpu_ids)
+				return best_cpu;
 		}
-		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1227,7 +1223,13 @@ static int find_lowest_rq(struct task_struct *task)
 	 * just give the caller *something* to work with from the compatible
 	 * locations.
 	 */
-	return pick_optimal_cpu(this_cpu, lowest_mask);
+	if (this_cpu != -1)
+		return this_cpu;
+
+	cpu = cpumask_any(lowest_mask);
+	if (cpu < nr_cpu_ids)
+		return cpu;
+	return -1;
 }
 
 /* Will lock the rq it finds */
@@ -1259,7 +1261,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
-				spin_unlock(&lowest_rq->lock);
+				raw_spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
 				break;
 			}
@@ -1485,7 +1487,7 @@ static void post_schedule_rt(struct rq *rq)
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
  */
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
@@ -1494,24 +1496,6 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1683,8 +1667,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 	if (!p->signal)
 		return;
 
-	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
-	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+	/* max may change after cur was read, this will be fixed next tick */
+	soft = task_rlimit(p, RLIMIT_RTTIME);
+	hard = task_rlimit_max(p, RLIMIT_RTTIME);
 
 	if (soft != RLIM_INFINITY) {
 		unsigned long next;
@@ -1734,7 +1719,7 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct task_struct *task)
+static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
@@ -1759,14 +1744,12 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
-	.task_wake_up		= task_wake_up_rt,
+	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 #endif
 
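
The most visible semantic change in this diff is the bool head flag threaded from enqueue_task_rt() down to __enqueue_rt_entity(): a head enqueue puts the entity at the front of its priority queue (list_add()) rather than at the back (list_add_tail()), so it is picked before other entities of the same priority. A minimal stand-alone sketch of that head/tail distinction, using a hand-rolled circular list and illustrative names rather than the kernel's <linux/list.h> helpers:

#include <stdio.h>

/* Tiny circular doubly linked list mirroring the list_add()/list_add_tail()
 * choice made by __enqueue_rt_entity() above. Names are illustrative only. */
struct node {
	struct node *prev, *next;
	int id;
};

static void queue_init(struct node *head)
{
	head->prev = head->next = head;
}

/* Insert right after the sentinel: picked first (the head == true case). */
static void enqueue_head(struct node *head, struct node *n)
{
	n->prev = head;
	n->next = head->next;
	head->next->prev = n;
	head->next = n;
}

/* Insert right before the sentinel: waits behind same-priority peers. */
static void enqueue_tail(struct node *head, struct node *n)
{
	n->next = head;
	n->prev = head->prev;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node queue, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	queue_init(&queue);
	enqueue_tail(&queue, &a);	/* normal enqueue: head == false */
	enqueue_tail(&queue, &b);
	enqueue_head(&queue, &c);	/* head == true: jumps the same-prio queue */

	for (struct node *p = queue.next; p != &queue; p = p->next)
		printf("%d ", p->id);	/* prints: 3 1 2 */
	printf("\n");
	return 0;
}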