Diffstat (limited to 'kernel/sched_rt.c')

 kernel/sched_rt.c | 66 ++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
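
The hunk above closes an idle-detection hole: when a runqueue consumed no rt_time in the period, the old code never checked whether tasks were still queued, so the bandwidth timer could go idle with work pending. A minimal user-space sketch of that decision, using invented stand-in names (toy_rt_rq, period_timer_idle) rather than the kernel's structures:

/*
 * Toy model of the idle decision above, not kernel code; toy_rt_rq and
 * period_timer_idle are invented stand-ins. The fix keeps the period
 * timer alive whenever any rt_rq still has runnable tasks, even if it
 * consumed no rt_time this period.
 */
#include <stdio.h>

struct toy_rt_rq {
	unsigned long rt_time;		/* runtime consumed this period */
	unsigned long rt_nr_running;	/* tasks still queued */
};

static int period_timer_idle(const struct toy_rt_rq *rqs, int n)
{
	int idle = 1;

	for (int i = 0; i < n; i++) {
		if (rqs[i].rt_time)
			idle = 0;	/* old and new code agree here */
		else if (rqs[i].rt_nr_running)
			idle = 0;	/* the case this hunk adds */
	}
	return idle;
}

int main(void)
{
	struct toy_rt_rq rqs[] = { { .rt_time = 0, .rt_nr_running = 2 } };

	/* prints 0: tasks are queued, so the timer must not go idle */
	printf("%d\n", period_timer_idle(rqs, 1));
	return 0;
}
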
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if it's throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * gets throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
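
The new guard refuses to enqueue a group entity that is either throttled or has nothing runnable, as the comment above explains. A small sketch of just that predicate, with invented stand-in types (toy_group, should_enqueue):

/*
 * Sketch of the guard added above, with invented stand-in types: a
 * group entity is skipped when throttled or when it has nothing
 * runnable, since enqueueing an empty group would advertise work
 * that is not there.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_group {
	bool throttled;
	unsigned int nr_running;
};

/* NULL means a plain task entity, which is always enqueued. */
static bool should_enqueue(const struct toy_group *grp)
{
	if (grp && (grp->throttled || !grp->nr_running))
		return false;
	return true;
}

int main(void)
{
	struct toy_group empty = { .throttled = false, .nr_running = 0 };
	struct toy_group busy  = { .throttled = false, .nr_running = 3 };

	printf("%d %d %d\n",
	       should_enqueue(NULL),	/* 1: task entity */
	       should_enqueue(&empty),	/* 0: empty group skipped */
	       should_enqueue(&busy));	/* 1: runnable group enqueued */
	return 0;
}
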
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top-down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
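
This hunk is the heart of the refactor: dequeue_rt_stack() still unwinds the hierarchy top-down via the ->back links, while enqueue_rt_entity() and dequeue_rt_entity() become wrappers that first pop the whole ancestor chain and then rebuild it bottom-up, re-adding group entities only while they remain non-empty. A toy user-space model of that walk, with an explicit ->parent pointer standing in for for_each_sched_rt_entity() (all toy_* names are invented):

/*
 * Toy user-space model of the new enqueue/dequeue shape; an explicit
 * ->parent pointer stands in for for_each_sched_rt_entity(), and all
 * toy_* names are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_entity {
	const char *name;
	struct toy_entity *parent;	/* NULL at the top of the hierarchy */
	struct toy_entity *back;	/* scratch link, like rt_se->back */
	bool queued;
};

/* Link the chain bottom-up, then dequeue it top-down. */
static void toy_dequeue_stack(struct toy_entity *se)
{
	struct toy_entity *back = NULL;

	for (; se; se = se->parent) {
		se->back = back;
		back = se;
	}
	for (se = back; se; se = se->back) {
		if (se->queued) {
			se->queued = false;
			printf("dequeue %s\n", se->name);
		}
	}
}

/* Like the new enqueue_rt_entity(): flush the stack, rebuild bottom-up. */
static void toy_enqueue_entity(struct toy_entity *se)
{
	toy_dequeue_stack(se);
	for (; se; se = se->parent) {
		se->queued = true;
		printf("enqueue %s\n", se->name);
	}
}

int main(void)
{
	struct toy_entity root  = { .name = "root" };
	struct toy_entity group = { .name = "group", .parent = &root,
				    .queued = true };
	struct toy_entity task  = { .name = "task",  .parent = &group,
				    .queued = true };

	/* dequeues group then task (top-down), then re-enqueues task,
	 * group, root (bottom-up) */
	toy_enqueue_entity(&task);
	return 0;
}
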
@@ -506,32 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +550,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se))
+		list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
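
Finally, requeue_rt_entity() now checks on_rt_rq() before list_move_tail(), so requeueing an entity that was never enqueued, such as an empty group skipped by __enqueue_rt_entity(), becomes a no-op instead of corrupting the priority list. A trivial sketch of the guard, again with invented stand-in types:

/*
 * Trivial sketch of the new requeue guard, with invented stand-ins:
 * moving an entity to the tail of its priority queue is only legal
 * when it is actually queued.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_se {
	bool on_rq;	/* mirrors on_rt_rq(rt_se) */
	int pos;	/* stand-in for the list position */
};

static void toy_requeue(struct toy_se *se, int tail)
{
	if (se->on_rq)	/* the check this hunk adds */
		se->pos = tail;
}

int main(void)
{
	struct toy_se queued   = { .on_rq = true,  .pos = 0 };
	struct toy_se dequeued = { .on_rq = false, .pos = 0 };

	toy_requeue(&queued, 9);	/* moved to the tail */
	toy_requeue(&dequeued, 9);	/* no-op: not on the run queue */

	printf("%d %d\n", queued.pos, dequeued.pos);	/* prints: 9 0 */
	return 0;
}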