Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 110
1 file changed, 73 insertions(+), 37 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..d8cdf1618551 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -229,6 +229,14 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
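Note the comparison direction in need_pull_rt_task(): in the kernel, a numerically smaller prio value means higher priority, so rq->rt.highest_prio.curr > prev->prio holds exactly when the best queued RT task is lower priority than the task leaving the CPU, i.e. this rq's priority is dropping and pulling may help. A tiny standalone illustration of that convention (the names and prio values below are arbitrary examples, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* kernel convention: smaller number == higher priority */
struct rt_prio { int curr; };

static bool need_pull(struct rt_prio highest, int prev_prio)
{
	/* pull only if the best queued task is worse than prev */
	return highest.curr > prev_prio;
}

int main(void)
{
	/* prev had RT prio 10; best queued task has prio 40 (worse) */
	assert(need_pull((struct rt_prio){ .curr = 40 }, 10));

	/* best queued task has prio 5 (better): no pull needed */
	assert(!need_pull((struct rt_prio){ .curr = 5 }, 10));
	return 0;
}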
@@ -315,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
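The new set_post_schedule() helper centralizes what the open-coded #ifdef CONFIG_SMP block in pick_next_task_rt() used to do (see the hunk at old line 1332 below): while rq->lock is still held, it caches whether any pushable tasks exist, so the post-schedule path can skip re-taking the lock when there is nothing to push. A minimal userspace sketch of that "snapshot under the lock" pattern; the runqueue type and field names are simplified stand-ins, not the kernel's:

#include <pthread.h>
#include <stdbool.h>

struct runqueue {
	pthread_mutex_t lock;
	int nr_pushable;	/* protected by lock */
	bool post_schedule;	/* snapshot taken while lock was held */
};

/* called with rq->lock held, at the end of task selection */
static void set_post_schedule(struct runqueue *rq)
{
	rq->post_schedule = rq->nr_pushable > 0;
}

/* called after the context switch, lock not held */
static void post_schedule(struct runqueue *rq)
{
	if (!rq->post_schedule)
		return;			/* common case: no lock round-trip */

	pthread_mutex_lock(&rq->lock);
	rq->nr_pushable = 0;		/* stand-in for push_rt_tasks(rq) */
	rq->post_schedule = false;
	pthread_mutex_unlock(&rq->lock);
}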
@@ -359,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -440,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -515,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
@@ -538,6 +558,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+	return (hrtimer_active(&rt_b->rt_period_timer) ||
+		rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
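sched_rt_bandwidth_account() gives callers a way to ask whether this rt_rq's runtime should still be accounted: yes while the replenishment period timer is active, or while the rq has not yet consumed its full runtime budget. A toy model of gating accounting on such a predicate; the structures and the charge_rt_time() caller are hypothetical stand-ins for illustration only:

#include <stdbool.h>

struct rt_bandwidth {
	bool period_timer_active;	/* replenishment timer running? */
	long long rt_runtime;		/* budget per period, in ns */
};

struct rt_rq {
	long long rt_time;		/* runtime consumed this period */
	struct rt_bandwidth *rt_b;
};

static bool rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = rt_rq->rt_b;

	/* account while the timer is live or the budget isn't exhausted */
	return rt_b->period_timer_active ||
	       rt_rq->rt_time < rt_b->rt_runtime;
}

/* hypothetical caller: only charge delta while accounting applies */
static void charge_rt_time(struct rt_rq *rt_rq, long long delta_ns)
{
	if (rt_bandwidth_account(rt_rq))
		rt_rq->rt_time += delta_ns;
}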
@@ -1310,15 +1338,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
-	struct rt_rq *rt_rq;
-
-	rt_rq = &rq->rt;
-
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
-		return NULL;
+	struct rt_rq *rt_rq = &rq->rt;
 
 	do {
 		rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1332,21 +1352,45 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 {
-	struct task_struct *p = _pick_next_task_rt(rq);
+	struct task_struct *p;
+	struct rt_rq *rt_rq = &rq->rt;
+
+	if (need_pull_rt_task(rq, prev)) {
+		pull_rt_task(rq);
+		/*
+		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
+		 * means a dl task can slip in, in which case we need to
+		 * re-start task selection.
+		 */
+		if (unlikely(rq->dl.dl_nr_running))
+			return RETRY_TASK;
+	}
+
+	/*
+	 * We may dequeue prev's rt_rq in put_prev_task().
+	 * So, we update time before rt_nr_running check.
+	 */
+	if (prev->sched_class == &rt_sched_class)
+		update_curr_rt(rq);
+
+	if (!rt_rq->rt_nr_running)
+		return NULL;
+
+	if (rt_rq_throttled(rt_rq))
+		return NULL;
+
+	put_prev_task(rq, prev);
+
+	p = _pick_next_task_rt(rq);
 
 	/* The running task is never eligible for pushing */
 	if (p)
 		dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
-#endif
+	set_post_schedule(rq);
 
 	return p;
 }
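The RETRY_TASK return above is the heart of this hunk: pull_rt_task() can drop rq->lock, so a deadline task may arrive after the dl class has already declined to pick, and the core pick_next_task() loop must then restart from the highest class. A compact userspace model of that restart protocol; the rq/task types and the two toy pick functions are illustrative stand-ins, not the kernel's actual code:

#include <stddef.h>
#include <stdio.h>

struct task { const char *name; };
struct rq { int need_pull; int dl_queued; int rt_queued; };

#define RETRY_TASK ((struct task *)-1UL)

static struct task dl_task = { "dl" };
static struct task rt_task = { "rt" };

static struct task *pick_dl(struct rq *rq)
{
	return rq->dl_queued ? &dl_task : NULL;
}

static struct task *pick_rt(struct rq *rq)
{
	if (rq->need_pull) {
		/* "pulling" drops the lock; model a dl task slipping in */
		rq->need_pull = 0;
		rq->dl_queued = 1;
		return RETRY_TASK;	/* class ordering changed under us */
	}
	return rq->rt_queued ? &rt_task : NULL;
}

/* highest-priority class first, as in the kernel's class list */
static struct task *(*const classes[])(struct rq *) = { pick_dl, pick_rt };

static struct task *pick_next(struct rq *rq)
{
again:
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct task *p = classes[i](rq);

		if (p == RETRY_TASK)
			goto again;	/* restart from the top class */
		if (p)
			return p;
	}
	return NULL;
}

int main(void)
{
	struct rq rq = { .need_pull = 1, .rt_queued = 1 };

	/* the rt pick triggers a retry, and the dl task wins */
	printf("picked: %s\n", pick_next(&rq)->name);	/* "dl" */
	return 0;
}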
@@ -1716,13 +1760,6 @@ skip:
 	return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
-		pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
 	push_rt_tasks(rq);
@@ -1825,7 +1862,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
-void init_sched_rt_class(void)
+void __init init_sched_rt_class(void)
 {
 	unsigned int i;
 
@@ -1999,7 +2036,6 @@ const struct sched_class rt_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
-	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,