author		Kirill Tkhai <tkhai@yandex.ru>	2014-03-14 18:15:00 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-04-18 06:07:28 -0400
commit		f4ebcbc0d7e009783256c9daf76bc4b90e645c14 (patch)
tree		66475782c202ce7e5de63a447ea520d2a8f34300 /kernel
parent		653d07a6989a9a4166dcd1025aa252b3605737fd (diff)
sched/rt: Subtract number of tasks of throttled queues from rq->nr_running

rq->rt can now be in a dequeued or enqueued state. We add a new member,
rt_rq->rt_queued, to indicate this. The member is used only for the top
queue rq->rt_rq. The goal is to fit the generic scheme used in the
deadline and fair classes, i.e. a throttled rt_rq's rt_nr_running is
subtracted from rq->nr_running.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835300.18748.33.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
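[Editor's illustration, not part of the commit: a minimal, self-contained userspace sketch of the accounting scheme described above. The struct rq and struct rt_rq below are simplified stand-ins for the kernel's structures, keeping only the fields this patch touches. The rt_queued guard flag ensures the top-level queue's rt_nr_running is added to rq->nr_running at most once, and subtracted exactly once when the queue is throttled or emptied.]

	#include <assert.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel's rt_rq and rq. */
	struct rt_rq {
		unsigned int rt_nr_running;	/* RT tasks on this queue */
		int rt_throttled;		/* queue out of runtime? */
		int rt_queued;			/* counted in rq->nr_running? */
	};

	struct rq {
		unsigned int nr_running;	/* all runnable tasks */
		struct rt_rq rt;		/* top-level RT queue */
	};

	static void enqueue_top_rt_rq(struct rq *rq)
	{
		struct rt_rq *rt_rq = &rq->rt;

		if (rt_rq->rt_queued)
			return;		/* already accounted */
		if (rt_rq->rt_throttled || !rt_rq->rt_nr_running)
			return;		/* nothing to account */

		rq->nr_running += rt_rq->rt_nr_running;
		rt_rq->rt_queued = 1;
	}

	static void dequeue_top_rt_rq(struct rq *rq)
	{
		struct rt_rq *rt_rq = &rq->rt;

		if (!rt_rq->rt_queued)
			return;		/* already subtracted */

		assert(rq->nr_running >= rt_rq->rt_nr_running);
		rq->nr_running -= rt_rq->rt_nr_running;
		rt_rq->rt_queued = 0;
	}

	int main(void)
	{
		struct rq rq = { .rt = { .rt_nr_running = 3 } };

		enqueue_top_rt_rq(&rq);	/* 3 RT tasks become visible */
		printf("running: %u\n", rq.nr_running);	/* 3 */

		rq.rt.rt_throttled = 1;	/* queue exhausts its bandwidth */
		dequeue_top_rt_rq(&rq);
		printf("running: %u\n", rq.nr_running);	/* 0 */

		rq.rt.rt_throttled = 0;	/* runtime replenished */
		enqueue_top_rt_rq(&rq);
		printf("running: %u\n", rq.nr_running);	/* 3 again */
		return 0;
	}

This also shows why pick_next_task_rt() below can test rt_rq->rt_queued alone: the flag is only set while the queue is unthrottled and non-empty.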
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/rt.c	73
-rw-r--r--	kernel/sched/sched.h	2

2 files changed, 63 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f6aa3cdbee84..2add019ddbd0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif
+	/* We start in dequeued state, because no RT tasks are queued */
+	rt_rq->rt_queued = 0;
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
@@ -404,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -465,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
-		if (rt_se && !on_rt_rq(rt_se))
+		if (!rt_se)
+			enqueue_top_rt_rq(rt_rq);
+		else if (!on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
+
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -479,7 +487,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (rt_se && on_rt_rq(rt_se))
+	if (!rt_se)
+		dequeue_top_rt_rq(rt_rq);
+	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
 }
 
@@ -545,12 +555,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (!rt_rq->rt_nr_running)
+		return;
+
+	enqueue_top_rt_rq(rt_rq);
+	resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+	dequeue_top_rt_rq(rt_rq);
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -935,6 +951,38 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (!rt_rq->rt_queued)
+		return;
+
+	BUG_ON(!rq->nr_running);
+
+	rq->nr_running -= rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (rt_rq->rt_queued)
+		return;
+	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+		return;
+
+	rq->nr_running += rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
@@ -1143,6 +1191,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 		back = rt_se;
 	}
 
+	dequeue_top_rt_rq(rt_rq_of_se(back));
+
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se);
@@ -1151,13 +1201,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
 		__enqueue_rt_entity(rt_se, head);
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 
 	for_each_sched_rt_entity(rt_se) {
@@ -1166,6 +1221,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		if (rt_rq && rt_rq->rt_nr_running)
 			__enqueue_rt_entity(rt_se, false);
 	}
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1183,8 +1239,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1195,8 +1249,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_nr_running(rq);
 }
 
 /*
@@ -1401,10 +1453,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (prev->sched_class == &rt_sched_class)
 		update_curr_rt(rq);
 
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
+	if (!rt_rq->rt_queued)
 		return NULL;
 
 	put_prev_task(rq, prev);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 456e492a3dca..c8d9ee418ca7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -409,6 +409,8 @@ struct rt_rq {
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
+	int rt_queued;
+
 	int rt_throttled;
 	u64 rt_time;
 	u64 rt_runtime;