Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	569
1 file changed, 394 insertions, 175 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index fbec5a58ff10..f2c66f8f9712 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
  * policies)
  */
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 
 static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		if (!rq->rt.overloaded) {
-			rt_set_overload(rq);
-			rq->rt.overloaded = 1;
+	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+		if (!rt_rq->overloaded) {
+			rt_set_overload(rq_of_rt_rq(rt_rq));
+			rt_rq->overloaded = 1;
 		}
-	} else if (rq->rt.overloaded) {
-		rt_clear_overload(rq);
-		rq->rt.overloaded = 0;
+	} else if (rt_rq->overloaded) {
+		rt_clear_overload(rq_of_rt_rq(rt_rq));
+		rt_rq->overloaded = 0;
 	}
 }
-#endif /* CONFIG_SMP */
 
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory++;
+
+	update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory--;
+
+	update_rt_migration(rt_rq);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	plist_node_init(&p->pushable_tasks, p->prio);
+	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+#else
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
-	return container_of(rt_se, struct task_struct, rt);
 }
 
+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
 
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se);
-		if (rt_rq->highest_prio < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
 }
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)
 
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 	if (rt_rq)
-		return rt_rq->highest_prio;
+		return rt_rq->highest_prio.curr;
 #endif
 
 	return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+#if defined CONFIG_SMP
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
 {
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
-#ifdef CONFIG_SMP
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
+	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+	if (next && rt_prio(next->prio))
+		return next->prio;
+	else
+		return MAX_RT_PRIO;
+}
+
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (prio < prev_prio) {
+
+		/*
+		 * If the new task is higher in priority than anything on the
+		 * run-queue, we know that the previous high becomes our
+		 * next-highest.
+		 */
+		rt_rq->highest_prio.next = prev_prio;
 
-		rt_rq->highest_prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
 		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_se_prio(rt_se));
-#endif
-	}
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 
-		rq->rt.rt_nr_migratory++;
-	}
+	} else if (prio == rt_rq->highest_prio.curr)
+		/*
+		 * If the next task is equal in priority to the highest on
+		 * the run-queue, then we implicitly know that the next highest
+		 * task cannot be any lower than current
+		 */
+		rt_rq->highest_prio.next = prio;
+	else if (prio < rt_rq->highest_prio.next)
+		/*
+		 * Otherwise, we need to recompute next-highest
+		 */
+		rt_rq->highest_prio.next = next_prio(rq);
+}
 
-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	if (rt_se_boosted(rt_se))
-		rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-	start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+		rt_rq->highest_prio.next = next_prio(rq);
+
+	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
+#else /* CONFIG_SMP */
+
 static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
-	int highest_prio = rt_rq->highest_prio;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
 
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	WARN_ON(!rt_rq->rt_nr_running);
-	rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (prio < prev_prio)
+		rt_rq->highest_prio.curr = prio;
+
+	inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
 	if (rt_rq->rt_nr_running) {
-		struct rt_prio_array *array;
 
-		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-			/* recalculate */
-			array = &rt_rq->active;
-			rt_rq->highest_prio =
+		WARN_ON(prio < prev_prio);
+
+		/*
+		 * This may have been our highest task, and therefore
+		 * we may have some recomputation to do
+		 */
+		if (prio == prev_prio) {
+			struct rt_prio_array *array = &rt_rq->active;
+
+			rt_rq->highest_prio.curr =
 				sched_find_first_bit(array->bitmap);
-		} /* otherwise leave rq->highest prio alone */
+		}
+
 	} else
-		rt_rq->highest_prio = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-		rq->rt.rt_nr_migratory--;
-	}
+		rt_rq->highest_prio.curr = MAX_RT_PRIO;
 
-	if (rt_rq->highest_prio != highest_prio) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+	dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
 
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_rq->highest_prio);
-	}
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
 
-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+
+	if (rt_rq->tg)
+		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted--;
 
 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	int prio = rt_se_prio(rt_se);
+
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+
+	inc_rt_prio(rt_rq, prio);
+	inc_rt_migration(rt_se, rt_rq);
+	inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+	WARN_ON(!rt_rq->rt_nr_running);
+	rt_rq->rt_nr_running--;
+
+	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+	dec_rt_migration(rt_se, rt_rq);
+	dec_rt_group(rt_se, rt_rq);
 }
 
 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
 	enqueue_rt_entity(rt_se);
 
+	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
+
 	inc_cpu_load(rq, p->se.load.weight);
 }
 
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);
 
+	dequeue_pushable_task(rq, p);
+
 	dec_cpu_load(rq, p->se.load.weight);
 }
 
@@ -871,7 +1014,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	return next;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
@@ -893,6 +1036,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 
 	p = rt_task_of(rt_se);
 	p->se.exec_start = rq->clock;
+
+	return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+	struct task_struct *p = _pick_next_task_rt(rq);
+
+	/* The running task is never eligible for pushing */
+	if (p)
+		dequeue_pushable_task(rq, p);
+
 	return p;
 }
 
@@ -900,6 +1055,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 	p->se.exec_start = 0;
+
+	/*
+	 * The previous task needs to be made eligible for pushing
+	 * if it is still active
+	 */
+	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
 }
 
 #ifdef CONFIG_SMP
@@ -953,12 +1115,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
 	first = cpumask_first(mask);
@@ -974,6 +1137,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1006,19 +1170,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
+
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1065,7 +1235,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		}
 
 		/* If this rq is still suitable use it. */
-		if (lowest_rq->rt.highest_prio > task->prio)
+		if (lowest_rq->rt.highest_prio.curr > task->prio)
 			break;
 
 		/* try again */
@@ -1076,6 +1246,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+	struct task_struct *p;
+
+	if (!has_pushable_tasks(rq))
+		return NULL;
+
+	p = plist_first_entry(&rq->rt.pushable_tasks,
+			      struct task_struct, pushable_tasks);
+
+	BUG_ON(rq->cpu != task_cpu(p));
+	BUG_ON(task_current(rq, p));
+	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+	BUG_ON(!p->se.on_rq);
+	BUG_ON(!rt_task(p));
+
+	return p;
+}
+
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
@@ -1085,13 +1280,11 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
-	int ret = 0;
-	int paranoid = RT_MAX_TRIES;
 
 	if (!rq->rt.overloaded)
 		return 0;
 
-	next_task = pick_next_highest_task_rt(rq, -1);
+	next_task = pick_next_pushable_task(rq);
 	if (!next_task)
 		return 0;
 
@@ -1120,16 +1313,34 @@ static int push_rt_task(struct rq *rq)
 		struct task_struct *task;
 		/*
 		 * find lock_lowest_rq releases rq->lock
-		 * so it is possible that next_task has changed.
-		 * If it has, then try again.
+		 * so it is possible that next_task has migrated.
+		 *
+		 * We need to make sure that the task is still on the same
+		 * run-queue and is also still the next task eligible for
+		 * pushing.
 		 */
-		task = pick_next_highest_task_rt(rq, -1);
-		if (unlikely(task != next_task) && task && paranoid--) {
-			put_task_struct(next_task);
-			next_task = task;
-			goto retry;
+		task = pick_next_pushable_task(rq);
+		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+			/*
+			 * If we get here, the task hasnt moved at all, but
+			 * it has failed to push.  We will not try again,
+			 * since the other cpus will pull from us when they
+			 * are ready.
+			 */
+			dequeue_pushable_task(rq, next_task);
+			goto out;
 		}
-		goto out;
+
+		if (!task)
+			/* No more tasks, just exit */
+			goto out;
+
+		/*
+		 * Something has shifted, try again.
+		 */
+		put_task_struct(next_task);
+		next_task = task;
+		goto retry;
 	}
 
 	deactivate_task(rq, next_task, 0);
@@ -1140,23 +1351,12 @@ static int push_rt_task(struct rq *rq)
 
 	double_unlock_balance(rq, lowest_rq);
 
-	ret = 1;
 out:
 	put_task_struct(next_task);
 
-	return ret;
+	return 1;
 }
 
-/*
- * TODO: Currently we just use the second highest prio task on
- * the queue, and stop when it can't migrate (or there's
- * no more RT tasks).  There may be a case where a lower
- * priority RT task has a different affinity than the
- * higher RT task. In this case the lower RT task could
- * possibly be able to migrate where as the higher priority
- * RT task could not.  We currently ignore this issue.
- * Enhancements are welcome!
- */
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
@@ -1167,33 +1367,35 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if its going logically lower, we do not care
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);
 
 		/*
 		 * Are there still pullable RT tasks?
@@ -1207,7 +1409,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);
 
@@ -1217,12 +1419,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is wakeing up and hasn't
 			 * had a chance to schedule. We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
 			ret = 1;
@@ -1235,13 +1434,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-			next = p;
-
 		}
  skip:
 		double_unlock_balance(this_rq, src_rq);
@@ -1253,24 +1446,27 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
 
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return has_pushable_tasks(rq);
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1281,7 +1477,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    rq->rt.overloaded)
+	    has_pushable_tasks(rq) &&
+	    p->rt.nr_cpus_allowed > 1)
 		push_rt_tasks(rq);
 }
 
@@ -1317,6 +1514,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
+		if (!task_current(rq, p)) {
+			/*
+			 * Make sure we dequeue this task from the pushable list
+			 * before going further.  It will either remain off of
+			 * the list because we are no longer pushable, or it
+			 * will be requeued.
+			 */
+			if (p->rt.nr_cpus_allowed > 1)
+				dequeue_pushable_task(rq, p);
+
+			/*
+			 * Requeue if our weight is changing and still > 1
+			 */
+			if (weight > 1)
+				enqueue_pushable_task(rq, p);
+
+		}
+
 		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
 			rq->rt.rt_nr_migratory++;
 		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1324,7 +1539,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 			rq->rt.rt_nr_migratory--;
 		}
 
-		update_rt_migration(rq);
+		update_rt_migration(&rq->rt);
 	}
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1339,7 +1554,7 @@ static void rq_online_rt(struct rq *rq)
 
 	__enable_runtime(rq);
 
-	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1431,7 +1646,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 		 * can release the rq lock and p could migrate.
 		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
@@ -1502,6 +1717,9 @@ static void set_curr_task_rt(struct rq *rq)
 	struct task_struct *p = rq->curr;
 
 	p->se.exec_start = rq->clock;
+
+	/* The running task is never eligible for pushing */
+	dequeue_pushable_task(rq, p);
 }
 
 static const struct sched_class rt_sched_class = {
@@ -1524,6 +1742,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
+	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,