Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--   kernel/sched_rt.c   127
1 file changed, 57 insertions, 70 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790cddb19..f48328ac216f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	weight = cpumask_weight(rd->span);
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
 	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 		if (iter == rt_rq)
 			continue;
 
-		spin_lock(&iter->rt_runtime_lock);
+		raw_spin_lock(&iter->rt_runtime_lock);
 		/*
 		 * Either all rqs have inf runtime and there's nothing to steal
 		 * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 			rt_rq->rt_runtime += diff;
 			more = 1;
 			if (rt_rq->rt_runtime == rt_period) {
-				spin_unlock(&iter->rt_runtime_lock);
+				raw_spin_unlock(&iter->rt_runtime_lock);
 				break;
 			}
 		}
 next:
-		spin_unlock(&iter->rt_runtime_lock);
+		raw_spin_unlock(&iter->rt_runtime_lock);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 
 	return more;
 }
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
 		s64 want;
 		int i;
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * Either we're all inf and nobody needs to borrow, or we're
 		 * already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
 		if (rt_rq->rt_runtime == RUNTIME_INF ||
 		    rt_rq->rt_runtime == rt_b->rt_runtime)
 			goto balanced;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 
 		/*
 		 * Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 				continue;
 
-			spin_lock(&iter->rt_runtime_lock);
+			raw_spin_lock(&iter->rt_runtime_lock);
 			if (want > 0) {
 				diff = min_t(s64, iter->rt_runtime, want);
 				iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
 				iter->rt_runtime -= want;
 				want -= want;
 			}
-			spin_unlock(&iter->rt_runtime_lock);
+			raw_spin_unlock(&iter->rt_runtime_lock);
 
 			if (!want)
 				break;
 		}
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * We cannot be left wanting - that would mean some runtime
 		 * leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__disable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
 	for_each_leaf_rt_rq(rt_rq, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
 		rt_rq->rt_throttled = 0;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__enable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 	int more = 0;
 
 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 
 	return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			if (rt_rq->rt_throttled)
 				balance_runtime(rt_rq);
 			runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		} else if (rt_rq->rt_nr_running)
 			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 
 	return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		}
 	}
 }
@@ -1153,29 +1153,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu,
-				   const struct cpumask *mask)
-{
-	int first;
-
-	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
-		return this_cpu;
-
-	first = cpumask_first(mask);
-	if (first < nr_cpu_ids)
-		return first;
-
-	return -1;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
-	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1198,28 +1181,26 @@ static int find_lowest_rq(struct task_struct *task)
 	 * Otherwise, we consult the sched_domains span maps to figure
 	 * out which cpu is logically closest to our hot cache data.
 	 */
-	if (this_cpu == cpu)
-		this_cpu = -1; /* Skip this_cpu opt if the same */
-
-	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
-		for_each_domain(cpu, sd) {
-			if (sd->flags & SD_WAKE_AFFINE) {
-				int best_cpu;
+	if (!cpumask_test_cpu(this_cpu, lowest_mask))
+		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
-				cpumask_and(domain_mask,
-					    sched_domain_span(sd),
-					    lowest_mask);
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
-				best_cpu = pick_optimal_cpu(this_cpu,
-							    domain_mask);
-
-				if (best_cpu != -1) {
-					free_cpumask_var(domain_mask);
-					return best_cpu;
-				}
-			}
+			/*
+			 * "this_cpu" is cheaper to preempt than a
+			 * remote processor.
+			 */
+			if (this_cpu != -1 &&
+			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+				return this_cpu;
+
+			best_cpu = cpumask_first_and(lowest_mask,
+						     sched_domain_span(sd));
+			if (best_cpu < nr_cpu_ids)
+				return best_cpu;
 		}
-		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1227,7 +1208,13 @@ static int find_lowest_rq(struct task_struct *task)
 	 * just give the caller *something* to work with from the compatible
 	 * locations.
 	 */
-	return pick_optimal_cpu(this_cpu, lowest_mask);
+	if (this_cpu != -1)
+		return this_cpu;
+
+	cpu = cpumask_any(lowest_mask);
+	if (cpu < nr_cpu_ids)
+		return cpu;
+	return -1;
 }
 
 /* Will lock the rq it finds */
@@ -1259,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
-				spin_unlock(&lowest_rq->lock);
+				raw_spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
 				break;
 			}
@@ -1485,7 +1472,7 @@ static void post_schedule_rt(struct rq *rq)
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
  */
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
@@ -1734,7 +1721,7 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
@@ -1766,7 +1753,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
-	.task_wake_up		= task_wake_up_rt,
+	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 #endif
 
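For readers unfamiliar with the cpumask helpers used above, the reworked find_lowest_rq() reduces to a simple preference order: use this_cpu if it is among the lowest-priority CPUs and inside the current domain span, otherwise the first lowest-priority CPU inside that span, otherwise fall back to this_cpu (if it was among the lowest at all) or to any remaining lowest-priority CPU. The following is a minimal userspace C sketch of that selection order only; it models cpumasks as plain 64-bit bitmasks, collapses the per-domain loop to a single span, and uses made-up helper names (mask_test, mask_first, pick_lowest), so it is illustrative and not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 64   /* illustrative limit, plays the role of nr_cpu_ids */

/* rough stand-in for cpumask_test_cpu() */
static int mask_test(uint64_t mask, int cpu)
{
	return cpu >= 0 && cpu < NR_CPUS && ((mask >> cpu) & 1);
}

/* rough stand-in for cpumask_first()/cpumask_any(); NR_CPUS means "none" */
static int mask_first(uint64_t mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((mask >> cpu) & 1)
			return cpu;
	return NR_CPUS;
}

/*
 * Selection order of the reworked find_lowest_rq(), with the sched-domain
 * walk reduced to one "domain_span":
 *   1. this_cpu, if it is among the lowest-priority CPUs and in the span
 *   2. the first lowest-priority CPU inside the span
 *   3. this_cpu, if it was among the lowest-priority CPUs at all
 *   4. any remaining lowest-priority CPU, else -1
 */
static int pick_lowest(uint64_t lowest_mask, uint64_t domain_span, int this_cpu)
{
	if (!mask_test(lowest_mask, this_cpu))
		this_cpu = -1;            /* skip this_cpu opt if not among lowest */

	if (this_cpu != -1 && mask_test(domain_span, this_cpu))
		return this_cpu;          /* cheaper to preempt than a remote CPU */

	int best_cpu = mask_first(lowest_mask & domain_span);
	if (best_cpu < NR_CPUS)
		return best_cpu;

	if (this_cpu != -1)
		return this_cpu;

	int cpu = mask_first(lowest_mask);
	return cpu < NR_CPUS ? cpu : -1;
}

int main(void)
{
	/* CPUs 2 and 5 run the lowest-priority tasks; the span covers CPUs 4-7. */
	printf("%d\n", pick_lowest(0x24, 0xf0, 3)); /* -> 5: first lowest CPU in the span */
	printf("%d\n", pick_lowest(0x24, 0x03, 2)); /* -> 2: fallback, this_cpu is among the lowest */
	return 0;
}

The point of the restructuring visible in the hunks above is that this priority order no longer needs a temporary cpumask: cpumask_first_and() scans the intersection directly, so the GFP_ATOMIC allocation and the pick_optimal_cpu() helper could be dropped.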