Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 64 ++++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index aecbd9c6b20c..f48328ac216f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
         weight = cpumask_weight(rd->span);
 
-        spin_lock(&rt_b->rt_runtime_lock);
+        raw_spin_lock(&rt_b->rt_runtime_lock);
         rt_period = ktime_to_ns(rt_b->rt_period);
         for_each_cpu(i, rd->span) {
                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                 if (iter == rt_rq)
                         continue;
 
-                spin_lock(&iter->rt_runtime_lock);
+                raw_spin_lock(&iter->rt_runtime_lock);
                 /*
                  * Either all rqs have inf runtime and there's nothing to steal
                  * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                                 rt_rq->rt_runtime += diff;
                                 more = 1;
                                 if (rt_rq->rt_runtime == rt_period) {
-                                        spin_unlock(&iter->rt_runtime_lock);
+                                        raw_spin_unlock(&iter->rt_runtime_lock);
                                         break;
                                 }
                         }
 next:
-                spin_unlock(&iter->rt_runtime_lock);
+                raw_spin_unlock(&iter->rt_runtime_lock);
         }
-        spin_unlock(&rt_b->rt_runtime_lock);
+        raw_spin_unlock(&rt_b->rt_runtime_lock);
 
         return more;
 }
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
                 s64 want;
                 int i;
 
-                spin_lock(&rt_b->rt_runtime_lock);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_b->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
                 /*
                  * Either we're all inf and nobody needs to borrow, or we're
                  * already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
                 if (rt_rq->rt_runtime == RUNTIME_INF ||
                     rt_rq->rt_runtime == rt_b->rt_runtime)
                         goto balanced;
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
 
                 /*
                  * Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                 continue;
 
-                        spin_lock(&iter->rt_runtime_lock);
+                        raw_spin_lock(&iter->rt_runtime_lock);
                         if (want > 0) {
                                 diff = min_t(s64, iter->rt_runtime, want);
                                 iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
                                 iter->rt_runtime -= want;
                                 want -= want;
                         }
-                        spin_unlock(&iter->rt_runtime_lock);
+                        raw_spin_unlock(&iter->rt_runtime_lock);
 
                         if (!want)
                                 break;
                 }
 
-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
                 /*
                  * We cannot be left wanting - that would mean some runtime
                  * leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
                  * runtime - in which case borrowing doesn't make sense.
                  */
                 rt_rq->rt_runtime = RUNTIME_INF;
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                spin_unlock(&rt_b->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_b->rt_runtime_lock);
         }
 }
 
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);
         __disable_runtime(rq);
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
         for_each_leaf_rt_rq(rt_rq, rq) {
                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-                spin_lock(&rt_b->rt_runtime_lock);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_b->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_b->rt_runtime;
                 rt_rq->rt_time = 0;
                 rt_rq->rt_throttled = 0;
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                spin_unlock(&rt_b->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_b->rt_runtime_lock);
         }
 }
 
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&rq->lock, flags);
+        raw_spin_lock_irqsave(&rq->lock, flags);
         __enable_runtime(rq);
-        spin_unlock_irqrestore(&rq->lock, flags);
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
         int more = 0;
 
         if (rt_rq->rt_time > rt_rq->rt_runtime) {
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 more = do_balance_runtime(rt_rq);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                raw_spin_lock(&rt_rq->rt_runtime_lock);
         }
 
         return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                 struct rq *rq = rq_of_rt_rq(rt_rq);
 
-                spin_lock(&rq->lock);
+                raw_spin_lock(&rq->lock);
                 if (rt_rq->rt_time) {
                         u64 runtime;
 
-                        spin_lock(&rt_rq->rt_runtime_lock);
+                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                         if (rt_rq->rt_throttled)
                                 balance_runtime(rt_rq);
                         runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                         }
                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                 idle = 0;
-                        spin_unlock(&rt_rq->rt_runtime_lock);
+                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 } else if (rt_rq->rt_nr_running)
                         idle = 0;
 
                 if (enqueue)
                         sched_rt_rq_enqueue(rt_rq);
-                spin_unlock(&rq->lock);
+                raw_spin_unlock(&rq->lock);
         }
 
         return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
                 rt_rq = rt_rq_of_se(rt_se);
 
                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-                        spin_lock(&rt_rq->rt_runtime_lock);
+                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                         rt_rq->rt_time += delta_exec;
                         if (sched_rt_runtime_exceeded(rt_rq))
                                 resched_task(curr);
-                        spin_unlock(&rt_rq->rt_runtime_lock);
+                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 }
         }
 }
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                              task_running(rq, task) ||
                              !task->se.on_rq)) {
 
-                        spin_unlock(&lowest_rq->lock);
+                        raw_spin_unlock(&lowest_rq->lock);
                         lowest_rq = NULL;
                         break;
                 }
@@ -1472,7 +1472,7 @@ static void post_schedule_rt(struct rq *rq)
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
  */
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
         if (!task_running(rq, p) &&
             !test_tsk_need_resched(rq->curr) &&
@@ -1753,7 +1753,7 @@ static const struct sched_class rt_sched_class = {
         .rq_offline             = rq_offline_rt,
         .pre_schedule           = pre_schedule_rt,
         .post_schedule          = post_schedule_rt,
-        .task_wake_up           = task_wake_up_rt,
+        .task_woken             = task_woken_rt,
         .switched_from          = switched_from_rt,
 #endif
 
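Taken together, the hunks above do two mechanical things: every lock/unlock call on rq->lock and on the RT bandwidth locks (rt_runtime_lock) gains the raw_ prefix, because those locks are being retyped from spinlock_t to raw_spinlock_t elsewhere in the series, and the task_wake_up sched_class callback is renamed to task_woken. The lock distinction matters on PREEMPT_RT, where an ordinary spinlock_t can become a sleeping lock, while a raw_spinlock_t always truly spins with preemption disabled; core scheduler locks must keep spinning semantics since they are taken from contexts that cannot sleep. A minimal sketch of the two flavours, using hypothetical lock names in a made-up module context (not code from this patch):

/*
 * Illustration only: demo_lock and demo_raw_lock are hypothetical.
 * On PREEMPT_RT, spinlock_t may sleep under contention, while
 * raw_spinlock_t keeps true spinning semantics on all configs.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);          /* spinlock_t: may sleep on RT  */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);  /* raw_spinlock_t: always spins */

static void demo(void)
{
        unsigned long flags;

        spin_lock(&demo_lock);              /* ordinary API, pre-patch form */
        spin_unlock(&demo_lock);

        raw_spin_lock_irqsave(&demo_raw_lock, flags);   /* raw API, IRQs off */
        raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}

Because only the API prefix changes, the conversion alters no locking order and no critical section; the diff is 32 insertions against 32 deletions, one raw_ call per converted spin_ call.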
