path: root/kernel/sched_rt.c
author    Gregory Haskins <ghaskins@novell.com>  2008-12-29 09:39:49 -0500
committer Gregory Haskins <ghaskins@novell.com>  2008-12-29 09:39:49 -0500
commit    e864c499d9e57805ae1f9e7ea404dd223759cd53
tree      26dcc691f414c2d0eee026ea0f4d9e0d9e0475a2 /kernel/sched_rt.c
parent    4d9842776a23e52ec4c60e0a79f5e1bbe91e463e
sched: track the next-highest priority on each runqueue
We will use this later in the series to reduce the amount of rq-lock contention during a pull operation.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  81
1 file changed, 61 insertions(+), 20 deletions(-)
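Before the hunks themselves, the following standalone program (not kernel code) models the highest_prio.{curr,next} bookkeeping this patch adds to inc_rt_tasks()/dec_rt_tasks(). Only the names MAX_RT_PRIO, curr and next are taken from the patch; toy_rq, recompute_next() and the other helpers are hypothetical stand-ins for rt_rq, next_prio() and sched_find_first_bit(), written solely to illustrate the update rules.

/*
 * Standalone sketch (not kernel code) of the highest_prio.{curr,next}
 * bookkeeping this patch adds.  Lower number = higher priority and
 * MAX_RT_PRIO means "nothing queued", as in the RT scheduler; toy_rq
 * and the helpers below are hypothetical, written only to show the rules.
 */
#include <stdio.h>

#define MAX_RT_PRIO 100

struct toy_rq {
        unsigned int nr_queued[MAX_RT_PRIO];    /* queued tasks per priority */
        unsigned int nr_running;
        struct {
                int curr;       /* highest queued priority */
                int next;       /* second-highest queued priority */
        } highest_prio;
};

/* Scan for the second-highest priority; stands in for the patch's
 * next_prio()/pick_next_highest_task_rt(). */
static int recompute_next(const struct toy_rq *rq)
{
        int prio, skipped_top = 0;

        for (prio = 0; prio < MAX_RT_PRIO; prio++) {
                unsigned int n = rq->nr_queued[prio];

                if (!n)
                        continue;
                if (!skipped_top) {
                        skipped_top = 1;
                        /* A second task at the top priority counts as "next". */
                        if (n > 1)
                                return prio;
                        continue;
                }
                return prio;
        }
        return MAX_RT_PRIO;
}

/* Mirrors the three branches the patch adds to inc_rt_tasks(). */
static void toy_enqueue(struct toy_rq *rq, int prio)
{
        rq->nr_queued[prio]++;
        rq->nr_running++;

        if (prio < rq->highest_prio.curr) {
                /* New highest: the old highest becomes next-highest. */
                rq->highest_prio.next = rq->highest_prio.curr;
                rq->highest_prio.curr = prio;
        } else if (prio == rq->highest_prio.curr) {
                /* Tie with the highest: next can be no lower than this. */
                rq->highest_prio.next = prio;
        } else if (prio < rq->highest_prio.next) {
                rq->highest_prio.next = recompute_next(rq);
        }
}

/* Mirrors the recomputation the patch does in dec_rt_tasks(). */
static void toy_dequeue(struct toy_rq *rq, int prio)
{
        rq->nr_queued[prio]--;
        rq->nr_running--;

        if (rq->nr_running) {
                if (prio == rq->highest_prio.curr) {
                        int p = 0;

                        /* Analogue of sched_find_first_bit(array->bitmap). */
                        while (p < MAX_RT_PRIO && !rq->nr_queued[p])
                                p++;
                        rq->highest_prio.curr = p;
                }
                if (prio <= rq->highest_prio.next)
                        rq->highest_prio.next = recompute_next(rq);
        } else {
                /* The patch resets only .curr here; the toy clears both. */
                rq->highest_prio.curr = MAX_RT_PRIO;
                rq->highest_prio.next = MAX_RT_PRIO;
        }
}

int main(void)
{
        struct toy_rq rq = { .highest_prio = { MAX_RT_PRIO, MAX_RT_PRIO } };

        toy_enqueue(&rq, 50);   /* curr=50, next=100 */
        toy_enqueue(&rq, 10);   /* curr=10, next=50  */
        toy_enqueue(&rq, 30);   /* curr=10, next=30  */
        toy_dequeue(&rq, 10);   /* curr=30, next=50  */
        printf("curr=%d next=%d\n", rq.highest_prio.curr, rq.highest_prio.next);
        return 0;
}

For the sequence in main() the toy prints curr=30 next=50, matching what the patch's rules would leave on a runqueue holding tasks at priorities 30 and 50.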
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0a5277233452..ad36d7232236 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -108,7 +108,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
         if (rt_rq->rt_nr_running) {
                 if (rt_se && !on_rt_rq(rt_se))
                         enqueue_rt_entity(rt_se);
-                if (rt_rq->highest_prio < curr->prio)
+                if (rt_rq->highest_prio.curr < curr->prio)
                         resched_task(curr);
         }
 }
@@ -473,7 +473,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
         struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
         if (rt_rq)
-                return rt_rq->highest_prio;
+                return rt_rq->highest_prio.curr;
 #endif
 
         return rt_task_of(rt_se)->prio;
@@ -547,6 +547,21 @@ static void update_curr_rt(struct rq *rq)
         }
 }
 
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
+{
+        struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+        if (next && rt_prio(next->prio))
+                return next->prio;
+        else
+                return MAX_RT_PRIO;
+}
+#endif
+
 static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
@@ -558,14 +573,32 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
         WARN_ON(!rt_prio(prio));
         rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-        if (prio < rt_rq->highest_prio) {
+        if (prio < rt_rq->highest_prio.curr) {
 
-                rt_rq->highest_prio = prio;
+                /*
+                 * If the new task is higher in priority than anything on the
+                 * run-queue, we have a new high that must be published to
+                 * the world.  We also know that the previous high becomes
+                 * our next-highest.
+                 */
+                rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
+                rt_rq->highest_prio.curr = prio;
 #ifdef CONFIG_SMP
                 if (rq->online)
                         cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 #endif
-        }
+        } else if (prio == rt_rq->highest_prio.curr)
+                /*
+                 * If the next task is equal in priority to the highest on
+                 * the run-queue, then we implicitly know that the next highest
+                 * task cannot be any lower than current
+                 */
+                rt_rq->highest_prio.next = prio;
+        else if (prio < rt_rq->highest_prio.next)
+                /*
+                 * Otherwise, we need to recompute next-highest
+                 */
+                rt_rq->highest_prio.next = next_prio(rq);
 #endif
 #ifdef CONFIG_SMP
         if (rt_se->nr_cpus_allowed > 1)
@@ -589,7 +622,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 #ifdef CONFIG_SMP
         struct rq *rq = rq_of_rt_rq(rt_rq);
-        int highest_prio = rt_rq->highest_prio;
+        int highest_prio = rt_rq->highest_prio.curr;
 #endif
 
         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
@@ -597,24 +630,32 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
         rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
         if (rt_rq->rt_nr_running) {
-                struct rt_prio_array *array;
+                int prio = rt_se_prio(rt_se);
+
+                WARN_ON(prio < rt_rq->highest_prio.curr);
 
-                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-                        /* recalculate */
-                        array = &rt_rq->active;
-                        rt_rq->highest_prio =
+                /*
+                 * This may have been our highest or next-highest priority
+                 * task and therefore we may have some recomputation to do
+                 */
+                if (prio == rt_rq->highest_prio.curr) {
+                        struct rt_prio_array *array = &rt_rq->active;
+
+                        rt_rq->highest_prio.curr =
                                 sched_find_first_bit(array->bitmap);
-                } /* otherwise leave rq->highest prio alone */
+                }
+
+                if (prio <= rt_rq->highest_prio.next)
+                        rt_rq->highest_prio.next = next_prio(rq);
         } else
-                rt_rq->highest_prio = MAX_RT_PRIO;
+                rt_rq->highest_prio.curr = MAX_RT_PRIO;
 #endif
 #ifdef CONFIG_SMP
         if (rt_se->nr_cpus_allowed > 1)
                 rq->rt.rt_nr_migratory--;
 
-        if (rq->online && rt_rq->highest_prio != highest_prio)
-                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+        if (rq->online && rt_rq->highest_prio.curr != highest_prio)
+                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 
         update_rt_migration(rq);
 #endif /* CONFIG_SMP */
@@ -1064,7 +1105,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                 }
 
                 /* If this rq is still suitable use it. */
-                if (lowest_rq->rt.highest_prio > task->prio)
+                if (lowest_rq->rt.highest_prio.curr > task->prio)
                         break;
 
                 /* try again */
@@ -1252,7 +1293,7 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
         /* Try to pull RT tasks here if we lower this rq's prio */
-        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+        if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
                 pull_rt_task(rq);
 }
 
@@ -1338,7 +1379,7 @@ static void rq_online_rt(struct rq *rq)
 
         __enable_runtime(rq);
 
-        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1429,7 +1470,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                  * can release the rq lock and p could migrate.
                  * Only reschedule if p is still on the same runqueue.
                  */
-                if (p->prio > rq->rt.highest_prio && rq->curr == p)
+                if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
                         resched_task(p);
 #else
                 /* For UP simply resched on drop of prio */
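The new .next field is what the commit message points at: a later patch in this series can use it during pull_rt_task() to decide, without taking the remote runqueue's lock, whether a pull could possibly yield a higher-priority task. That follow-up change is not part of this diff, so the fragment below is only a hedged sketch of the idea, written against the src_rq/this_rq variables that already exist in pull_rt_task():

        /*
         * Sketch only (assumed shape of a later patch, not this commit):
         * if the best the source queue can offer besides its running task
         * is no better than what we already have, skip it without touching
         * src_rq->lock.
         */
        if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr)
                continue;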