author     Gregory Haskins <ghaskins@novell.com>    2009-01-14 09:10:04 -0500
committer  Gregory Haskins <ghaskins@novell.com>    2009-01-14 09:10:04 -0500
commit     398a153b16b09a68739928d4502455db9725ac86 (patch)
tree       3a584f77d92c49ac8e5a047fd120607c3b5f9727 /kernel/sched_rt.c
parent     b07430ac37103218b5c1e542490a1b98e6deb3d6 (diff)
sched: fix build error in kernel/sched_rt.c when RT_GROUP_SCHED && !SMP
Ingo found a build error in the scheduler when RT_GROUP_SCHED was
enabled but SMP was not. This patch rearranges the code so that it is
a little more streamlined and compiles under all permutations of SMP,
UP and RT_GROUP_SCHED. It was boot-tested on my 4-way x86_64 and
still passes preempt-test.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
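
The hunks below lean on two idioms. The first is to define the rq_of_rt_rq()/rt_rq_of_se() accessors once, near the top of the file, with one variant per CONFIG_RT_GROUP_SCHED setting, so that code such as update_rt_migration() can be handed a bare struct rt_rq in every configuration. The standalone sketch that follows shows only the shape of that split; the struct layouts and the container_of() macro here are simplified stand-ins, not the kernel definitions:

/*
 * Simplified, userspace-compilable sketch of the accessor split.
 * Build with or without -DCONFIG_RT_GROUP_SCHED; both variants resolve
 * an rt_rq back to its owning rq.
 */
#include <stddef.h>
#include <stdio.h>

/* toy stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rq;

struct rt_rq {
	int rt_nr_running;
#ifdef CONFIG_RT_GROUP_SCHED
	struct rq *rq;			/* a group rt_rq carries a back-pointer */
#endif
};

struct rq {
	struct rt_rq rt;		/* the root rt_rq is embedded in the rq */
};

#ifdef CONFIG_RT_GROUP_SCHED
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;		/* follow the stored back-pointer */
}
#else
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	/* only one rt_rq exists and it lives inside struct rq */
	return container_of(rt_rq, struct rq, rt);
}
#endif

int main(void)
{
	struct rq rq = { .rt = { .rt_nr_running = 0 } };

#ifdef CONFIG_RT_GROUP_SCHED
	rq.rt.rq = &rq;			/* what the group-scheduling code would set up */
#endif
	printf("rq            = %p\n", (void *)&rq);
	printf("rq_of_rt_rq() = %p\n", (void *)rq_of_rt_rq(&rq.rt));
	return 0;
}

With the accessors visible unconditionally, update_rt_migration() in the patch can take a struct rt_rq * and recover the rq via rq_of_rt_rq(), so the same body serves the root rt_rq and, when group scheduling is enabled, per-group rt_rqs.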
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  264
1 file changed, 170 insertions, 94 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 4eda5f795f04..4230b15fe90e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
  * policies)
  */
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 
 static inline int rt_overloaded(struct rq *rq)
@@ -37,19 +71,35 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		if (!rq->rt.overloaded) {
-			rt_set_overload(rq);
-			rq->rt.overloaded = 1;
+	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+		if (!rt_rq->overloaded) {
+			rt_set_overload(rq_of_rt_rq(rt_rq));
+			rt_rq->overloaded = 1;
 		}
-	} else if (rq->rt.overloaded) {
-		rt_clear_overload(rq);
-		rq->rt.overloaded = 0;
+	} else if (rt_rq->overloaded) {
+		rt_clear_overload(rq_of_rt_rq(rt_rq));
+		rt_rq->overloaded = 0;
 	}
 }
 
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory++;
+
+	update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory--;
+
+	update_rt_migration(rt_rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -68,14 +118,13 @@ static inline
 void enqueue_pushable_task(struct rq *rq, struct task_struct *p) {}
 static inline
 void dequeue_pushable_task(struct rq *rq, struct task_struct *p) {}
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_SMP */
 
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
-{
-	return container_of(rt_se, struct task_struct, rt);
-}
-
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -99,16 +148,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
 
@@ -196,19 +235,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)
 
@@ -567,7 +593,7 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 
 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
 
@@ -580,33 +606,24 @@ static inline int next_prio(struct rq *rq)
 	else
 		return MAX_RT_PRIO;
 }
-#endif
 
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
-	int prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
 	struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
 
-	WARN_ON(!rt_prio(prio));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (prio < rt_rq->highest_prio.curr) {
+	if (prio < prev_prio) {
 
 		/*
 		 * If the new task is higher in priority than anything on the
-		 * run-queue, we have a new high that must be published to
-		 * the world. We also know that the previous high becomes
-		 * our next-highest.
+		 * run-queue, we know that the previous high becomes our
+		 * next-highest.
 		 */
-		rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
-		rt_rq->highest_prio.curr = prio;
-#ifdef CONFIG_SMP
+		rt_rq->highest_prio.next = prev_prio;
+
 		if (rq->online)
 			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-#endif
+
 	} else if (prio == rt_rq->highest_prio.curr)
 		/*
 		 * If the next task is equal in priority to the highest on
@@ -619,72 +636,131 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 		 * Otherwise, we need to recompute next-highest
 		 */
 		rt_rq->highest_prio.next = next_prio(rq);
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1)
-		rq->rt.rt_nr_migratory++;
+}
 
-	update_rt_migration(rq);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	if (rt_se_boosted(rt_se))
-		rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-	start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+		rt_rq->highest_prio.next = next_prio(rq);
+
+	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
+#else /* CONFIG_SMP */
+
 static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
-	struct rq *rq = rq_of_rt_rq(rt_rq);
-	int highest_prio = rt_rq->highest_prio.curr;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
 
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	WARN_ON(!rt_rq->rt_nr_running);
-	rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (prio < prev_prio)
+		rt_rq->highest_prio.curr = prio;
+
+	inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
 	if (rt_rq->rt_nr_running) {
-		int prio = rt_se_prio(rt_se);
 
-		WARN_ON(prio < rt_rq->highest_prio.curr);
+		WARN_ON(prio < prev_prio);
 
 		/*
-		 * This may have been our highest or next-highest priority
-		 * task and therefore we may have some recomputation to do
+		 * This may have been our highest task, and therefore
+		 * we may have some recomputation to do
 		 */
-		if (prio == rt_rq->highest_prio.curr) {
+		if (prio == prev_prio) {
 			struct rt_prio_array *array = &rt_rq->active;
 
 			rt_rq->highest_prio.curr =
 				sched_find_first_bit(array->bitmap);
 		}
 
-		if (prio <= rt_rq->highest_prio.next)
-			rt_rq->highest_prio.next = next_prio(rq);
 	} else
 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1)
-		rq->rt.rt_nr_migratory--;
 
-	if (rq->online && rt_rq->highest_prio.curr != highest_prio)
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+	dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
 
-	update_rt_migration(rq);
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+
+	if (rt_rq->tg)
+		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted--;
 
 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	int prio = rt_se_prio(rt_se);
+
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+
+	inc_rt_prio(rt_rq, prio);
+	inc_rt_migration(rt_se, rt_rq);
+	inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+	WARN_ON(!rt_rq->rt_nr_running);
+	rt_rq->rt_nr_running--;
+
+	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+	dec_rt_migration(rt_se, rt_rq);
+	dec_rt_group(rt_se, rt_rq);
 }
 
 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -1453,7 +1529,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 			rq->rt.rt_nr_migratory--;
 		}
 
-		update_rt_migration(rq);
+		update_rt_migration(&rq->rt);
 	}
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
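
The second idiom, visible in the later hunks, is to break inc_rt_tasks()/dec_rt_tasks() into small helpers (inc/dec_rt_prio(), inc/dec_rt_migration(), inc/dec_rt_group()) and to give each helper an empty static-inline stub in the configurations that do not need it, so the common paths contain no #ifdef blocks at all. A heavily simplified, compilable sketch of that stub pattern follows; the types and signatures are illustrative only, not the kernel's:

/*
 * Toy sketch of the "empty stub" pattern.  Build with or without
 * -DCONFIG_SMP: the call site in inc_rt_tasks() never changes, only the
 * definition of inc_rt_migration() does.
 */
#include <stdio.h>

struct rt_rq {
	int rt_nr_running;
	int rt_nr_migratory;
};

#ifdef CONFIG_SMP
static void inc_rt_migration(struct rt_rq *rt_rq)
{
	rt_rq->rt_nr_migratory++;	/* the real bookkeeping exists only on SMP */
}
#else
static inline void inc_rt_migration(struct rt_rq *rt_rq) {}	/* compiles away */
#endif

static void inc_rt_tasks(struct rt_rq *rt_rq)
{
	rt_rq->rt_nr_running++;
	inc_rt_migration(rt_rq);	/* unconditional call, no #ifdef at the call site */
}

int main(void)
{
	struct rt_rq rt_rq = { 0, 0 };

	inc_rt_tasks(&rt_rq);
	printf("running=%d migratory=%d\n",
	       rt_rq.rt_nr_running, rt_rq.rt_nr_migratory);
	return 0;
}

This is the property that restores the RT_GROUP_SCHED && !SMP build: nothing SMP-specific is referenced outside the CONFIG_SMP region any more, yet the shared code still reads as a straight sequence of helper calls.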