Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 70 ++++++++++++++++++++++++++++++++++++++--------------------------------
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 060e87b0cb1c..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
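
For orientation: this hunk sits in the per-runqueue loop of do_sched_rt_period_timer(), which replenishes rt_time once per period and returns whether the period timer may go idle. The surrounding shape below is reconstructed for orientation; only the lines shown in the hunk above are verbatim:

	spin_lock(&rq->lock);
	if (rt_rq->rt_time) {
		spin_lock(&rt_rq->rt_runtime_lock);
		/* ... replenish rt_time, possibly unthrottle and set 'enqueue' ... */
		if (rt_rq->rt_time || rt_rq->rt_nr_running)
			idle = 0;
		spin_unlock(&rt_rq->rt_runtime_lock);
	} else if (rt_rq->rt_nr_running)
		idle = 0;	/* the new case: runnable tasks with no accrued
				 * rt_time must also keep the period timer alive */

	if (enqueue)
		sched_rt_rq_enqueue(rt_rq);
	spin_unlock(&rq->lock);

Before the change, a runqueue with queued rt tasks but rt_time == 0 let the loop report idle, stopping the replenishment timer while work was still pending.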
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if it's throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * gets throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
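
Read as a predicate, the new guard folds two cases into one condition. A hypothetical helper, not in the patch, just to spell the cases out:

	/* Illustrative restatement of the new guard; not part of the patch. */
	static inline int rt_group_skip_enqueue(struct rt_rq *group_rq)
	{
		if (!group_rq)			/* plain task, no group rq: nothing to skip */
			return 0;
		if (rt_rq_throttled(group_rq))	/* throttled groups stay off the queue */
			return 1;
		return !group_rq->rt_nr_running;	/* so do groups emptied by a throttled child */
	}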
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +498,26 @@
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
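
Taken together, the last two hunks give the full picture: dequeue_rt_stack() records the bottom-up chain through ->back and tears it down top-down, then enqueue_rt_entity() rebuilds every level while dequeue_rt_entity() rebuilds only groups that still have runnable members. Below is a standalone userspace toy model of that flow; every name and type is a simplified stand-in, not kernel code:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdbool.h>

	struct rt_rq { int rt_nr_running; bool throttled; };

	struct rt_se {
		struct rt_se *parent;	/* NULL at the top level */
		struct rt_se *back;	/* scratch link, as in dequeue_rt_stack() */
		struct rt_rq *my_q;	/* group rq owned by this entity, NULL for a task */
		struct rt_rq *on_q;	/* rq this entity is queued on when running */
		bool queued;
	};

	#define for_each_rt_se(se) for (; (se); (se) = (se)->parent)

	static void __enqueue(struct rt_se *se)
	{
		/* mirrors __enqueue_rt_entity(): skip throttled or empty groups */
		if (se->my_q && (se->my_q->throttled || !se->my_q->rt_nr_running))
			return;
		se->queued = true;
		se->on_q->rt_nr_running++;
	}

	static void __dequeue(struct rt_se *se)
	{
		se->queued = false;
		se->on_q->rt_nr_running--;
	}

	/* mirrors dequeue_rt_stack(): record bottom-up, remove top-down */
	static void dequeue_stack(struct rt_se *se)
	{
		struct rt_se *back = NULL;

		for_each_rt_se(se) {
			se->back = back;
			back = se;
		}
		for (se = back; se; se = se->back)
			if (se->queued)
				__dequeue(se);
	}

	static void enqueue_entity(struct rt_se *se)
	{
		dequeue_stack(se);
		for_each_rt_se(se)		/* rebuild every level, bottom-up */
			__enqueue(se);
	}

	static void dequeue_entity(struct rt_se *se)
	{
		dequeue_stack(se);
		for_each_rt_se(se)		/* re-queue only still-populated groups */
			if (se->my_q && se->my_q->rt_nr_running)
				__enqueue(se);
	}

	int main(void)
	{
		struct rt_rq root = { 0, false }, group_q = { 0, false };
		struct rt_se group = { NULL, NULL, &group_q, &root, false };
		struct rt_se task = { &group, NULL, NULL, &group_q, false };

		enqueue_entity(&task);	/* task first, then its group entity */
		printf("root=%d group=%d\n", root.rt_nr_running, group_q.rt_nr_running);

		dequeue_entity(&task);	/* group drains, so it stays off the root */
		printf("root=%d group=%d\n", root.rt_nr_running, group_q.rt_nr_running);
		return 0;
	}

It prints root=1 group=1, then root=0 group=0: once the last task leaves, the empty group entity is not re-queued on the root, which is exactly the case the !group_rq->rt_nr_running guard in __enqueue_rt_entity() handles.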
@@ -506,36 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
-
-	inc_cpu_load(rq, p->se.load.weight);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
-
-	dec_cpu_load(rq, p->se.load.weight);
+	dequeue_rt_entity(rt_se);
 }
 
 /*
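
Assembled from this hunk's post-image, the two class hooks become thin wrappers around the helpers introduced above (the rt_se declaration in enqueue_task_rt() sits just above the hunk and is assumed unchanged):

	static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
	{
		struct sched_rt_entity *rt_se = &p->rt;	/* assumed, from above the hunk */

		if (wakeup)
			rt_se->timeout = 0;

		enqueue_rt_entity(rt_se);
	}

	static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
	{
		struct sched_rt_entity *rt_se = &p->rt;

		update_curr_rt(rq);
		dequeue_rt_entity(rt_se);
	}

Note the hunk also drops the inc_cpu_load()/dec_cpu_load() calls from these paths.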
@@ -546,8 +550,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se))
+		list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
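
The on_rt_rq() guard matters because, after this rework, an entity can legitimately be off its queue (its group throttled or empty) when a requeue is attempted. A sketch of the hazard, assuming on_rt_rq() is the usual !list_empty(&rt_se->run_list) test and that dequeueing uses list_del_init():

	if (!on_rt_rq(rt_se))
		/*
		 * run_list was list_del_init()'ed when the entity or its
		 * group was dequeued (e.g. throttled).  A bare
		 * list_move_tail() would splice it back onto the prio
		 * array without __enqueue_rt_entity()'s accounting.
		 */
		return;
	list_move_tail(&rt_se->run_list, queue);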
