Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--   kernel/sched/rt.c   102
1 file changed, 65 insertions(+), 37 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1999021042c7..d8cdf1618551 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -229,6 +229,14 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+        /* Try to pull RT tasks here if we lower this rq's prio */
+        return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
         return atomic_read(&rq->rd->rto_count);
@@ -315,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
         return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+        /*
+         * We detect this state here so that we can avoid taking the RQ
+         * lock again later if there is no need to push
+         */
+        rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+        return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+        return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -440,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
         dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -515,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-        return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
         return cpu_online_mask;
@@ -1318,15 +1338,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
         struct sched_rt_entity *rt_se;
         struct task_struct *p;
-        struct rt_rq *rt_rq;
-
-        rt_rq = &rq->rt;
-
-        if (!rt_rq->rt_nr_running)
-                return NULL;
-
-        if (rt_rq_throttled(rt_rq))
-                return NULL;
+        struct rt_rq *rt_rq = &rq->rt;
 
         do {
                 rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1340,21 +1352,45 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
         return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 {
-        struct task_struct *p = _pick_next_task_rt(rq);
+        struct task_struct *p;
+        struct rt_rq *rt_rq = &rq->rt;
+
+        if (need_pull_rt_task(rq, prev)) {
+                pull_rt_task(rq);
+                /*
+                 * pull_rt_task() can drop (and re-acquire) rq->lock; this
+                 * means a dl task can slip in, in which case we need to
+                 * re-start task selection.
+                 */
+                if (unlikely(rq->dl.dl_nr_running))
+                        return RETRY_TASK;
+        }
+
+        /*
+         * We may dequeue prev's rt_rq in put_prev_task().
+         * So, we update time before rt_nr_running check.
+         */
+        if (prev->sched_class == &rt_sched_class)
+                update_curr_rt(rq);
+
+        if (!rt_rq->rt_nr_running)
+                return NULL;
+
+        if (rt_rq_throttled(rt_rq))
+                return NULL;
+
+        put_prev_task(rq, prev);
+
+        p = _pick_next_task_rt(rq);
 
         /* The running task is never eligible for pushing */
         if (p)
                 dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-        /*
-         * We detect this state here so that we can avoid taking the RQ
-         * lock again later if there is no need to push
-         */
-        rq->post_schedule = has_pushable_tasks(rq);
-#endif
+        set_post_schedule(rq);
 
         return p;
 }
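For context, and not part of the patch itself: the RETRY_TASK return value introduced above only makes sense given how the core scheduler's class loop reacts to it. The sketch below is a simplified, from-memory approximation of the pick_next_task() walk in kernel/sched/core.c of this era (the fair-class fast path is omitted); treat the exact names and structure as assumptions rather than a quote from the tree.

/*
 * Simplified sketch (assumption, not part of this patch): when a class's
 * ->pick_next_task() returns RETRY_TASK, the walk over scheduling classes
 * is restarted from the top, because pull_rt_task() may have dropped
 * rq->lock and allowed a higher-priority (deadline) task to appear.
 */
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
        const struct sched_class *class;
        struct task_struct *p;

again:
        for_each_class(class) {
                p = class->pick_next_task(rq, prev);
                if (p) {
                        if (unlikely(p == RETRY_TASK))
                                goto again;     /* rq->lock was dropped; re-pick */
                        return p;
                }
        }

        BUG();  /* the idle class should always have a runnable task */
}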
@@ -1724,13 +1760,6 @@ skip:
         return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-        /* Try to pull RT tasks here if we lower this rq's prio */
-        if (rq->rt.highest_prio.curr > prev->prio)
-                pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
         push_rt_tasks(rq);
@@ -1833,7 +1862,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
                 resched_task(rq->curr);
 }
 
-void init_sched_rt_class(void)
+void __init init_sched_rt_class(void)
 {
         unsigned int i;
 
@@ -2007,7 +2036,6 @@ const struct sched_class rt_sched_class = {
         .set_cpus_allowed = set_cpus_allowed_rt,
         .rq_online = rq_online_rt,
         .rq_offline = rq_offline_rt,
-        .pre_schedule = pre_schedule_rt,
         .post_schedule = post_schedule_rt,
         .task_woken = task_woken_rt,
         .switched_from = switched_from_rt,
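One more piece of surrounding context, again an assumption rather than part of this diff: with pre_schedule_rt() folded into pick_next_task_rt() via need_pull_rt_task(), the pull side no longer needs a class hook, while the push side still runs after the context switch through rq->post_schedule (set by set_post_schedule() above). Roughly, the core scheduler of this era consumes that flag along the following lines; the exact helper name and placement are from memory and may differ in the tree.

/*
 * Rough sketch (assumption): after a context switch, the core code
 * retakes rq->lock only if pick_next_task_rt() left pushable tasks
 * behind, and then calls the class's ->post_schedule() hook, i.e.
 * post_schedule_rt() -> push_rt_tasks().
 */
static inline void post_schedule(struct rq *rq)
{
        if (rq->post_schedule) {
                unsigned long flags;

                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
                raw_spin_unlock_irqrestore(&rq->lock, flags);

                rq->post_schedule = 0;
        }
}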