author		Ingo Molnar <mingo@elte.hu>	2008-06-19 03:09:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-19 03:09:15 -0400
commit		1cdad71537b42d0f0bf247772942ce678e4e8898 (patch)
tree		58caef683e4739907e0ae147bead71a5b19e2207 /kernel/sched_rt.c
parent		20b6331bfed1f07ba1e5006889a5d64adc53615e (diff)
parent		15a8641eadb492ef7c5489faa25256967bdfd303 (diff)
Merge branch 'sched' into sched-devel
Conflicts:
kernel/sched_rt.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')

-rw-r--r--	kernel/sched_rt.c	67
1 file changed, 39 insertions(+), 28 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f721b52acd8d..fee5fa7c72db 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -571,14 +571,20 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if it's throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * gets throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	if (rt_se->nr_cpus_allowed == 1)
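
The guard above keeps a group entity off the run list when its child runqueue is throttled or has nothing runnable, which in turn keeps empty groups from inflating the parent's view of runnable work. A minimal standalone sketch of the same predicate, where toy_rq and may_enqueue are illustrative stand-ins rather than kernel types, might look like this:

#include <stdbool.h>

/* Hypothetical stand-in for struct rt_rq, reduced to the two fields
 * the guard consults. */
struct toy_rq {
	bool throttled;			/* runtime budget exhausted */
	unsigned int nr_running;	/* runnable entities in the group */
};

/* Mirrors the new check: a group entity is only eligible when its
 * child runqueue is absent (a plain task), or is neither throttled
 * nor empty. */
bool may_enqueue(const struct toy_rq *group_rq)
{
	if (group_rq && (group_rq->throttled || !group_rq->nr_running))
		return false;
	return true;
}
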
@@ -591,7 +597,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -607,11 +613,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 	 * Because the prio of an upper entry depends on the lower
 	 * entries, we must remove entries top - down.
 	 */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -619,7 +624,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
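
The new __enqueue/__dequeue helpers let both top-level operations share dequeue_rt_stack(): a first pass walks the hierarchy bottom-up, threading each entity onto a ->back chain, and a second pass replays that chain top-down, since a parent entry's queue position depends on its children. A compilable toy model of the two-pass walk, with a hypothetical parent-linked toy_se in place of sched_rt_entity and for_each_sched_rt_entity, could look like this:

#include <stdio.h>
#include <stddef.h>

/* Simplified entity: a parent link plus the back pointer used for the
 * second, top-down pass. Stands in for struct sched_rt_entity. */
struct toy_se {
	const char *name;
	struct toy_se *parent;
	struct toy_se *back;
};

static void dequeue_stack(struct toy_se *se)
{
	struct toy_se *back = NULL;

	/* Pass 1: bottom-up, thread each entity onto the ->back chain. */
	for (; se; se = se->parent) {
		se->back = back;
		back = se;
	}
	/* Pass 2: top-down replay; parents come off the chain first. */
	for (se = back; se; se = se->back)
		printf("dequeue %s\n", se->name);
}

int main(void)
{
	struct toy_se root  = { "root",  NULL,   NULL };
	struct toy_se group = { "group", &root,  NULL };
	struct toy_se task  = { "task",  &group, NULL };

	dequeue_stack(&task);	/* prints: root, group, task (top-down) */
	return 0;
}
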
@@ -633,32 +657,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -669,9 +676,13 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_del_init(&rt_se->run_list);
-	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se)) {
+		list_del_init(&rt_se->run_list);
+		list_add_tail(&rt_se->run_list,
+			      array->queue + rt_se_prio(rt_se));
+	}
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
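
The requeue change above adds an on_rt_rq() guard, so moving an entity to the tail of its priority queue becomes a no-op when the entity is not queued at all; on_rt_rq() itself is just a !list_empty() test on the entity's run_list node. A self-contained sketch of why the guard matters, with a hand-rolled circular list standing in for the kernel's list_head helpers and requeue as an illustrative name, might read:

#include <assert.h>

/* Minimal circular doubly-linked list, standing in for the kernel's
 * struct list_head and its helpers. */
struct node {
	struct node *prev, *next;
};

void list_init(struct node *n)
{
	n->prev = n->next = n;
}

int list_empty(const struct node *n)
{
	return n->next == n;
}

void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* The fixed requeue: an entity whose node points at itself is not on
 * any queue (this is what on_rt_rq(), a !list_empty() test, detects),
 * and splicing it onto a queue it was never accounted on would be a
 * bug, so the guard turns that case into a no-op. */
void requeue(struct node *entity, struct node *queue)
{
	if (!list_empty(entity)) {	/* i.e. on_rt_rq(rt_se) */
		list_del_init(entity);
		list_add_tail(entity, queue);
	}
}

int main(void)
{
	struct node queue, a;

	list_init(&queue);
	list_init(&a);		/* 'a' starts off-queue */
	requeue(&a, &queue);	/* guard makes this a no-op */
	assert(list_empty(&queue));
	return 0;
}
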