path: root/kernel/sched/rt.c
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 134
1 file changed, 106 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d8cdf1618551..b3512f1afce9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif
+	/* We start in dequeued state, because no RT tasks are queued */
+	rt_rq->rt_queued = 0;
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
@@ -112,6 +114,13 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 	return rt_se->rt_rq;
 }
 
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = rt_se->rt_rq;
+
+	return rt_rq->rq;
+}
+
 void free_rt_sched_group(struct task_group *tg)
 {
 	int i;
@@ -211,10 +220,16 @@ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 	return container_of(rt_rq, struct rq, rt);
 }
 
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 {
 	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
+
+	return task_rq(p);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct rq *rq = rq_of_rt_se(rt_se);
 
 	return &rq->rt;
 }
@@ -391,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -452,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
-		if (rt_se && !on_rt_rq(rt_se))
+		if (!rt_se)
+			enqueue_top_rt_rq(rt_rq);
+		else if (!on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
+
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -466,10 +487,17 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (rt_se && on_rt_rq(rt_se))
+	if (!rt_se)
+		dequeue_top_rt_rq(rt_rq);
+	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -532,12 +560,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (!rt_rq->rt_nr_running)
+		return;
+
+	enqueue_top_rt_rq(rt_rq);
+	resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+	dequeue_top_rt_rq(rt_rq);
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -851,14 +890,8 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		 * but accrue some time due to boosting.
 		 */
 		if (likely(rt_b->rt_runtime)) {
-			static bool once = false;
-
 			rt_rq->rt_throttled = 1;
-
-			if (!once) {
-				once = true;
-				printk_sched("sched: RT throttling activated\n");
-			}
+			printk_deferred_once("sched: RT throttling activated\n");
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
@@ -922,6 +955,38 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (!rt_rq->rt_queued)
+		return;
+
+	BUG_ON(!rq->nr_running);
+
+	sub_nr_running(rq, rt_rq->rt_nr_running);
+	rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (rt_rq->rt_queued)
+		return;
+	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+		return;
+
+	add_nr_running(rq, rt_rq->rt_nr_running);
+	rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
@@ -1045,12 +1110,23 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static inline
+unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *group_rq = group_rt_rq(rt_se);
+
+	if (group_rq)
+		return group_rq->rt_nr_running;
+	else
+		return 1;
+}
+
+static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	int prio = rt_se_prio(rt_se);
 
 	WARN_ON(!rt_prio(prio));
-	rt_rq->rt_nr_running++;
+	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
 
 	inc_rt_prio(rt_rq, prio);
 	inc_rt_migration(rt_se, rt_rq);
@@ -1062,7 +1138,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
-	rt_rq->rt_nr_running--;
+	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
 
 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
 	dec_rt_migration(rt_se, rt_rq);
@@ -1119,6 +1195,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 		back = rt_se;
 	}
 
+	dequeue_top_rt_rq(rt_rq_of_se(back));
+
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se);
@@ -1127,13 +1205,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
 		__enqueue_rt_entity(rt_se, head);
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 
 	for_each_sched_rt_entity(rt_se) {
@@ -1142,6 +1225,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		if (rt_rq && rt_rq->rt_nr_running)
 			__enqueue_rt_entity(rt_se, false);
 	}
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1159,8 +1243,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1171,8 +1253,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_nr_running(rq);
 }
 
 /*
@@ -1362,10 +1442,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl task can slip in, in which case we need to
-		 * re-start task selection.
+		 * means a dl or stop task can slip in, in which case we need
+		 * to re-start task selection.
 		 */
-		if (unlikely(rq->dl.dl_nr_running))
+		if (unlikely((rq->stop && rq->stop->on_rq) ||
+			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
 
@@ -1376,10 +1457,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (prev->sched_class == &rt_sched_class)
 		update_curr_rt(rq);
 
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
+	if (!rt_rq->rt_queued)
 		return NULL;
 
 	put_prev_task(rq, prev);
@@ -1891,9 +1969,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (rq->rt.overloaded && push_rt_task(rq) &&
+		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
 		    /* Don't resched if we changed runqueues */
-		    rq != task_rq(p))
+		    push_rt_task(rq) && rq != task_rq(p))
 			check_resched = 0;
 #endif /* CONFIG_SMP */
 		if (check_resched && p->prio < rq->curr->prio)